Dataset schema (one row per function):

| column | type |
|---|---|
| repo | string (7-54 chars) |
| path | string (4-192 chars) |
| url | string (87-284 chars) |
| code | string (78-104k chars) |
| code_tokens | sequence |
| docstring | string (1-46.9k chars) |
| docstring_tokens | sequence |
| language | string, 1 class (python) |
| partition | string, 3 classes (train / valid / test) |

chatfirst/chatfirst | chatfirst/models.py | https://github.com/chatfirst/chatfirst/blob/11e023fc372e034dfd3417b61b67759ef8c37ad6/chatfirst/models.py#L209-L221 | python | train

```python
def to_json(self):
    """
    Serialize object to json dict

    :return: dict
    """
    data = dict()
    data['InterlocutorId'] = self.id_
    data['Text'] = self.text
    data['Username'] = self.username
    data['FirstName'] = self.first_name
    data['LastName'] = self.last_name
    return data
```

softlayer/softlayer-python | SoftLayer/managers/vs_capacity.py | https://github.com/softlayer/softlayer-python/blob/9f181be08cc3668353b05a6de0cb324f52cff6fa/SoftLayer/managers/vs_capacity.py#L100-L123 | python | train

```python
def create(self, name, backend_router_id, flavor, instances, test=False):
    """Orders a Virtual_ReservedCapacityGroup

    :param string name: Name for the new reserved capacity
    :param int backend_router_id: This selects the pod. See create_options for a list
    :param string flavor: Capacity KeyName, see create_options for a list
    :param int instances: Number of guests this capacity can support
    :param bool test: If True, don't actually order, just test.
    """
    # Since the ordering manager needs a DC id, just send in 0; the API will ignore it
    args = (self.capacity_package, 0, [flavor])
    extras = {"backendRouterId": backend_router_id, "name": name}
    kwargs = {
        'extras': extras,
        'quantity': instances,
        'complex_type': 'SoftLayer_Container_Product_Order_Virtual_ReservedCapacity',
        'hourly': True
    }
    if test:
        receipt = self.ordering_manager.verify_order(*args, **kwargs)
    else:
        receipt = self.ordering_manager.place_order(*args, **kwargs)
    return receipt
```

openfisca/openfisca-core | openfisca_core/parameters.py | https://github.com/openfisca/openfisca-core/blob/92ce9396e29ae5d9bac5ea604cfce88517c6b35c/openfisca_core/parameters.py#L423-L430 | python | train

```python
def merge(self, other):
    """
    Merges another ParameterNode into the current node.

    In case of a child name conflict, the other node's child replaces the current node's child.
    """
    for child_name, child in other.children.items():
        self.add_child(child_name, child)
```

mpg-age-bioinformatics/AGEpy | AGEpy/gtf.py | https://github.com/mpg-age-bioinformatics/AGEpy/blob/887808a7a2c1504f39ce8d8cb36c15c1721cd29f/AGEpy/gtf.py#L111-L130 | python | train

```python
def GTFtoBED(inGTF, name):
    """
    Transform a GTF dataframe into a bed dataframe

    :param inGTF: GTF dataframe for transformation
    :param name: field of the GTF dataframe to be used for the bed 'name' position

    :returns: a bed dataframe with the corresponding bed fields: 'chrom', 'chromStart', 'chromEnd', 'name', 'score', 'strand'
    """
    bed = inGTF.copy()
    bed.reset_index(inplace=True, drop=True)
    if name not in bed.columns.tolist():
        field = retrieve_GTF_field(name, bed)
        bed = pd.concat([bed, field], axis=1)
    bed = bed[['seqname', 'start', 'end', name, 'score', 'strand']]
    bed.columns = ['chrom', 'chromStart', 'chromEnd', 'name', 'score', 'strand']
    bed.drop_duplicates(inplace=True)
    bed.reset_index(inplace=True, drop=True)
    return bed
```

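A minimal usage sketch for `GTFtoBED` (invented for illustration, not part of the dataset row): the toy dataframe below stands in for a parsed GTF, and because `gene_name` is already a column, the `retrieve_GTF_field` branch is never taken.

```python
import pandas as pd

# Toy GTF-like dataframe; all values are invented for illustration.
gtf = pd.DataFrame({
    'seqname': ['chr1', 'chr1'],
    'start': [100, 500],
    'end': [200, 900],
    'gene_name': ['geneA', 'geneB'],  # already a column, so no attribute parsing
    'score': ['.', '.'],
    'strand': ['+', '-'],
})

bed = GTFtoBED(gtf, 'gene_name')
print(bed.columns.tolist())
# ['chrom', 'chromStart', 'chromEnd', 'name', 'score', 'strand']
```
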
Mindwerks/worldengine | worldengine/draw.py | https://github.com/Mindwerks/worldengine/blob/64dff8eb7824ce46b5b6cb8006bcef21822ef144/worldengine/draw.py#L147-L194 | python | test

```python
def _elevation_color(elevation, sea_level=1.0):
    """
    Calculate color based on elevation.

    :param elevation: elevation value
    :param sea_level: sea level elevation (may be ``None``)
    :return: an (r, g, b) tuple
    """
    color_step = 1.5
    if sea_level is None:
        sea_level = -1
    if elevation < sea_level / 2:
        elevation /= sea_level
        return 0.0, 0.0, 0.75 + 0.5 * elevation
    elif elevation < sea_level:
        elevation /= sea_level
        return 0.0, 2 * (elevation - 0.5), 1.0
    else:
        elevation -= sea_level
        if elevation < 1.0 * color_step:
            return (0.0, 0.5 + 0.5 * elevation / color_step, 0.0)
        elif elevation < 1.5 * color_step:
            return 2 * (elevation - 1.0 * color_step) / color_step, 1.0, 0.0
        elif elevation < 2.0 * color_step:
            return 1.0, 1.0 - (elevation - 1.5 * color_step) / color_step, 0
        elif elevation < 3.0 * color_step:
            return (1.0 - 0.5 * (elevation - 2.0 * color_step) / color_step,
                    0.5 - 0.25 * (elevation - 2.0 * color_step) / color_step, 0)
        elif elevation < 5.0 * color_step:
            return (0.5 - 0.125 * (elevation - 3.0 * color_step) / (2 * color_step),
                    0.25 + 0.125 * (elevation - 3.0 * color_step) / (2 * color_step),
                    0.375 * (elevation - 3.0 * color_step) / (2 * color_step))
        elif elevation < 8.0 * color_step:
            return (0.375 + 0.625 * (elevation - 5.0 * color_step) / (3 * color_step),
                    0.375 + 0.625 * (elevation - 5.0 * color_step) / (3 * color_step),
                    0.375 + 0.625 * (elevation - 5.0 * color_step) / (3 * color_step))
        else:
            elevation -= 8.0 * color_step
            while elevation > 2.0 * color_step:
                elevation -= 2.0 * color_step
            return 1, 1 - elevation / 4.0, 1
```

wummel/linkchecker | linkcheck/parser/__init__.py | https://github.com/wummel/linkchecker/blob/c2ce810c3fb00b895a841a7be6b2e78c64e7b042/linkcheck/parser/__init__.py#L53-L57 | python | train

```python
def parse_opera(url_data):
    """Parse an opera bookmark file."""
    from ..bookmarks.opera import parse_bookmark_data
    for url, name, lineno in parse_bookmark_data(url_data.get_content()):
        url_data.add_url(url, line=lineno, name=name)
```

brocade/pynos | pynos/versions/ver_7/ver_7_1_0/yang/brocade_vswitch.py | https://github.com/brocade/pynos/blob/bd8a34e98f322de3fc06750827d8bbc3a0c00380/pynos/versions/ver_7/ver_7_1_0/yang/brocade_vswitch.py#L1104-L1113 | python | train

```python
def vcenter_id(self, **kwargs):
    """Auto Generated Code
    """
    config = ET.Element("config")
    vcenter = ET.SubElement(config, "vcenter", xmlns="urn:brocade.com:mgmt:brocade-vswitch")
    id = ET.SubElement(vcenter, "id")
    id.text = kwargs.pop('id')
    callback = kwargs.pop('callback', self._callback)
    return callback(config)
```

BernardFW/bernard | src/bernard/platforms/facebook/platform.py | https://github.com/BernardFW/bernard/blob/9c55703e5ffe5717c9fa39793df59dbfa5b4c5ab/src/bernard/platforms/facebook/platform.py#L973-L982 | python | train

```python
async def ensure_usable_media(self, media: BaseMedia) -> UrlMedia:
    """
    So far, let's just accept URL media. We'll see in the future how it
    goes.
    """
    if not isinstance(media, UrlMedia):
        raise ValueError('Facebook platform only accepts URL media')
    return media
```

pyopenapi/pyswagger | pyswagger/utils.py | https://github.com/pyopenapi/pyswagger/blob/333c4ca08e758cd2194943d9904a3eda3fe43977/pyswagger/utils.py#L439-L480 | python | train

```python
def walk(start, ofn, cyc=None):
    """Non-recursive DFS to detect cycles

    :param start: start vertex in graph
    :param ofn: function to get the list of outgoing edges of a vertex
    :param cyc: list of existing cycles; cycles are represented as a list starting with the minimum vertex.
    :return: cycles
    :rtype: list of lists
    """
    ctx, stk = {}, [start]
    cyc = [] if cyc is None else cyc

    while len(stk):
        top = stk[-1]

        if top not in ctx:
            ctx.update({top: list(ofn(top))})

        if len(ctx[top]):
            n = ctx[top][0]
            if n in stk:
                # cycle found;
                # normalize the representation of cycles,
                # starting from the smallest vertex, e.g.
                # 4 -> 5 -> 2 -> 7 -> 9 would produce
                # (2, 7, 9, 4, 5)
                nc = stk[stk.index(n):]
                ni = nc.index(min(nc))
                nc = nc[ni:] + nc[:ni] + [min(nc)]
                if nc not in cyc:
                    cyc.append(nc)

                ctx[top].pop(0)
            else:
                stk.append(n)
        else:
            ctx.pop(top)
            stk.pop()
            if len(stk):
                ctx[stk[-1]].remove(top)

    return cyc
```

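A small self-contained example of the cycle detector above (the graph is invented): outgoing edges come from a plain dict, and each reported cycle is normalized to start and end at its smallest vertex.

```python
graph = {1: [2], 2: [3], 3: [1, 4], 4: []}

cycles = walk(1, lambda v: graph.get(v, []))
print(cycles)  # [[1, 2, 3, 1]]
```
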
mixmastamyk/console | console/windows.py | https://github.com/mixmastamyk/console/blob/afe6c95d5a7b83d85376f450454e3769e4a5c3d0/console/windows.py#L199-L203 | python | train

```python
def set_position(x, y, stream=STD_OUTPUT_HANDLE):
    ''' Sets current position of the cursor. '''
    stream = kernel32.GetStdHandle(stream)
    value = x + (y << 16)
    kernel32.SetConsoleCursorPosition(stream, c_long(value))
```

wuher/devil | devil/datamapper.py | https://github.com/wuher/devil/blob/a8834d4f88d915a21754c6b96f99d0ad9123ad4d/devil/datamapper.py#L272-L286 | python | train

```python
def _get_name_from_url(self, request):
    """ Determine short name for the mapper based on the URL.

    Short name can be either in query string (e.g. ?format=json)
    or as an extension to the URL (e.g. myresource.json).

    :returns: short name of the mapper or ``None`` if not found.
    """
    format = request.GET.get('format', None)
    if not format:
        match = self._format_query_pattern.match(request.path)
        if match and match.group('format'):
            format = match.group('format')
    return format
```

Xion/taipan | taipan/functional/combinators.py | https://github.com/Xion/taipan/blob/f333f0287c8bd0915182c7d5308e5f05ef0cca78/taipan/functional/combinators.py#L76-L145 | python | valid

```python
def merge(arg, *rest, **kwargs):
    """Merge a collection, with functions as items, into a single function
    that takes a collection and maps its items through corresponding functions.

    :param arg: A collection of functions, such as list, tuple, or dictionary
    :param default: Optional default function to use for items
                    within merged function's arguments that do not have
                    corresponding functions in ``arg``

    Example with two-element tuple::

        >> dict_ = {'Alice': -5, 'Bob': 4}
        >> func = merge((str.upper, abs))
        >> dict(map(func, dict_.items()))
        {'ALICE': 5, 'BOB': 4}

    Example with a dictionary::

        >> func = merge({'id': int, 'name': str.split})
        >> data = [
            {'id': '1', 'name': "John Doe"},
            {'id': '2', 'name': "Anne Arbor"},
        ]
        >> list(map(func, data))
        [{'id': 1, 'name': ['John', 'Doe']},
         {'id': 2, 'name': ['Anne', 'Arbor']}]

    :return: Merged function

    .. versionadded:: 0.0.2
    """
    ensure_keyword_args(kwargs, optional=('default',))

    has_default = 'default' in kwargs
    if has_default:
        default = ensure_callable(kwargs['default'])

    # if more than one argument was given, they must all be functions;
    # result will be a function that takes multiple arguments (rather than
    # a single collection) and returns a tuple
    unary_result = True
    if rest:
        fs = (ensure_callable(arg),) + tuple(imap(ensure_callable, rest))
        unary_result = False
    else:
        fs = arg

    if is_mapping(fs):
        if has_default:
            return lambda arg_: fs.__class__((k, fs.get(k, default)(arg_[k]))
                                             for k in arg_)
        else:
            return lambda arg_: fs.__class__((k, fs[k](arg_[k]))
                                             for k in arg_)
    else:
        ensure_sequence(fs)
        if has_default:
            # we cannot use ``izip_longest(fs, arg_, fillvalue=default)``,
            # because we want to terminate the generator
            # only when ``arg_`` is exhausted (not when just ``fs`` is)
            func = lambda arg_: fs.__class__(
                (fs[i] if i < len(fs) else default)(x)
                for i, x in enumerate(arg_))
        else:
            # we cannot use ``izip(fs, arg_)`` because it would short-circuit
            # if ``arg_`` is longer than ``fs``, rather than raising
            # the required ``IndexError``
            func = lambda arg_: fs.__class__(fs[i](x)
                                             for i, x in enumerate(arg_))
        return func if unary_result else lambda *args: func(args)
```

asphalt-framework/asphalt | asphalt/core/context.py | https://github.com/asphalt-framework/asphalt/blob/4114b3ac9743cbd9facb374a3f53e19d3afef22d/asphalt/core/context.py#L410-L442 | python | train

```python
def get_resources(self, type: Type[T_Resource]) -> Set[T_Resource]:
    """
    Retrieve all the resources of the given type in this context and its parents.

    Any matching resource factories are also triggered if necessary.

    :param type: type of the resources to get
    :return: a set of all found resources of the given type
    """
    assert check_argument_types()

    # Collect all the matching resources from this context
    resources = {container.name: container.value_or_factory
                 for container in self._resources.values()
                 if not container.is_factory and type in container.types
                 }  # type: Dict[str, T_Resource]

    # Next, find all matching resource factories in the context chain and generate resources
    resources.update({container.name: container.generate_value(self)
                      for ctx in self.context_chain
                      for container in ctx._resources.values()
                      if container.is_factory and type in container.types
                      and container.name not in resources})

    # Finally, add the resource values from the parent contexts
    resources.update({container.name: container.value_or_factory
                      for ctx in self.context_chain[1:]
                      for container in ctx._resources.values()
                      if not container.is_factory and type in container.types
                      and container.name not in resources})

    return set(resources.values())
```

tehmaze/natural | natural/number.py | https://github.com/tehmaze/natural/blob/d7a1fc9de712f9bcf68884a80826a7977df356fb/natural/number.py#L117-L134 | python | train

```python
def percentage(value, digits=2):
    '''
    Converts a fraction to a formatted percentage.

    :param value: number
    :param digits: default ``2``

    >>> print(percentage(1))
    100.00 %
    >>> print(percentage(0.23, digits=0))
    23 %
    >>> print(percentage(23.421))
    2,342.10 %
    '''
    value = float(value) * 100.0
    return u'' + '%s %%' % (_format(value, digits),)
```

dls-controls/pymalcolm | malcolm/modules/pandablocks/pandablocksclient.py | https://github.com/dls-controls/pymalcolm/blob/80ea667e4da26365a6cebc0249f52fdc744bd983/malcolm/modules/pandablocks/pandablocksclient.py#L145-L171 | python | train

```python
def _recv_loop(self):
    """Service socket recv, returning responses to the correct queue"""
    self._completed_response_lines = []
    self._is_multiline = None

    lines_iterator = self._get_lines()
    while True:
        try:
            line = next(lines_iterator)
            if self._is_multiline is None:
                self._is_multiline = line.startswith("!") or line == "."
            if line.startswith("ERR"):
                self._respond(ValueError(line))
            elif self._is_multiline:
                if line == ".":
                    self._respond(self._completed_response_lines)
                else:
                    assert line[0] == "!", \
                        "Multiline response {} doesn't start with !" \
                        .format(repr(line))
                    self._completed_response_lines.append(line[1:])
            else:
                self._respond(line)
        except StopIteration:
            return
        except Exception:
            log.exception("Exception receiving message")
            raise
```

inasafe/inasafe | safe/gui/widgets/message_viewer.py | https://github.com/inasafe/inasafe/blob/831d60abba919f6d481dc94a8d988cc205130724/safe/gui/widgets/message_viewer.py#L305-L312 | python | train

```python
def to_message(self):
    """Collate all message elements to a single message."""
    my_message = m.Message()
    if self.static_message is not None:
        my_message.add(self.static_message)
    for myDynamic in self.dynamic_messages:
        my_message.add(myDynamic)
    return my_message
```

google/google-visualization-python | gviz_api.py | https://github.com/google/google-visualization-python/blob/cbfb4d69ad2f4ca30dc55791629280aa3214c8e3/gviz_api.py#L683-L768 | python | train

```python
def ToJSCode(self, name, columns_order=None, order_by=()):
  """Writes the data table as a JS code string.

  This method writes a string of JS code that can be run to
  generate a DataTable with the specified data. Typically used for debugging
  only.

  Args:
    name: The name of the table. The name would be used as the DataTable's
          variable name in the created JS code.
    columns_order: Optional. Specifies the order of columns in the
                   output table. Specify a list of all column IDs in the order
                   in which you want the table created.
                   Note that you must list all column IDs in this parameter,
                   if you use it.
    order_by: Optional. Specifies the name of the column(s) to sort by.
              Passed as is to _PreparedData.

  Returns:
    A string of JS code that, when run, generates a DataTable with the given
    name and the data stored in the DataTable object.

    Example result:
      "var tab1 = new google.visualization.DataTable();
       tab1.addColumn("string", "a", "a");
       tab1.addColumn("number", "b", "b");
       tab1.addColumn("boolean", "c", "c");
       tab1.addRows(10);
       tab1.setCell(0, 0, "a");
       tab1.setCell(0, 1, 1, null, {"foo": "bar"});
       tab1.setCell(0, 2, true);
       ...
       tab1.setCell(9, 0, "c");
       tab1.setCell(9, 1, 3, "3$");
       tab1.setCell(9, 2, false);"

  Raises:
    DataTableException: The data does not match the type.
  """
  encoder = DataTableJSONEncoder()

  if columns_order is None:
    columns_order = [col["id"] for col in self.__columns]
  col_dict = dict([(col["id"], col) for col in self.__columns])

  # We first create the table with the given name
  jscode = "var %s = new google.visualization.DataTable();\n" % name
  if self.custom_properties:
    jscode += "%s.setTableProperties(%s);\n" % (
        name, encoder.encode(self.custom_properties))

  # We add the columns to the table
  for i, col in enumerate(columns_order):
    jscode += "%s.addColumn(%s, %s, %s);\n" % (
        name,
        encoder.encode(col_dict[col]["type"]),
        encoder.encode(col_dict[col]["label"]),
        encoder.encode(col_dict[col]["id"]))
    if col_dict[col]["custom_properties"]:
      jscode += "%s.setColumnProperties(%d, %s);\n" % (
          name, i, encoder.encode(col_dict[col]["custom_properties"]))
  jscode += "%s.addRows(%d);\n" % (name, len(self.__data))

  # We now go over the data and add each row
  for (i, (row, cp)) in enumerate(self._PreparedData(order_by)):
    # We add all the elements of this row by their order
    for (j, col) in enumerate(columns_order):
      if col not in row or row[col] is None:
        continue
      value = self.CoerceValue(row[col], col_dict[col]["type"])
      if isinstance(value, tuple):
        cell_cp = ""
        if len(value) == 3:
          cell_cp = ", %s" % encoder.encode(row[col][2])
        # We have a formatted value or custom property as well
        jscode += ("%s.setCell(%d, %d, %s, %s%s);\n" %
                   (name, i, j,
                    self.EscapeForJSCode(encoder, value[0]),
                    self.EscapeForJSCode(encoder, value[1]), cell_cp))
      else:
        jscode += "%s.setCell(%d, %d, %s);\n" % (
            name, i, j, self.EscapeForJSCode(encoder, value))
    if cp:
      jscode += "%s.setRowProperties(%d, %s);\n" % (
          name, i, encoder.encode(cp))
  return jscode
```

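A usage sketch for `ToJSCode`, modeled on the gviz_api examples (the table description and rows are invented; treat the exact API calls as assumptions): build a `DataTable`, load rows, and emit the JS code.

```python
import gviz_api

description = {"name": ("string", "Name"),
               "salary": ("number", "Salary")}
data = [{"name": "Mike", "salary": (10000, "$10,000")},
        {"name": "Jim", "salary": (800, "$800")}]

table = gviz_api.DataTable(description)
table.LoadData(data)

# Prints JS that recreates the table in a variable named "tab1".
print(table.ToJSCode("tab1", columns_order=("name", "salary")))
```
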
agabrown/PyGaia | pygaia/plot/sky.py | https://github.com/agabrown/PyGaia/blob/ae972b0622a15f713ffae471f925eac25ccdae47/pygaia/plot/sky.py#L10-L31 | python | test

```python
def _orderGridlinePoints(x, y):
    """
    This code takes care of ordering the points (x, y), calculated for a sky map parallel or
    meridian, such that the drawing code can start at one end of the curve and end at the other
    (so no artifacts due to connecting the disjoint ends occur).

    Parameters
    ----------
    x - Set of x coordinates
    y - Set of y coordinates

    Returns
    -------
    x, y: Ordered set of (x, y) points
    """
    xroll = roll(x, 1)
    yroll = roll(y, 1)
    distance = (xroll - x)**2 + (yroll - y)**2
    indexmax = argmax(distance)
    return roll(x, -indexmax), roll(y, -indexmax)
```

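A quick check of `_orderGridlinePoints` with invented coordinates: the largest point-to-point jump marks the seam of the wrapped curve, and the arrays are rolled so drawing starts just after it.

```python
import numpy as np

# Points whose natural order wraps around (the jump from 5.0 back to 0.0).
x = np.array([3.0, 4.0, 5.0, 0.0, 1.0, 2.0])
y = np.zeros_like(x)

x2, y2 = _orderGridlinePoints(x, y)
print(x2)  # [0. 1. 2. 3. 4. 5.]
```
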
martinpitt/python-dbusmock | dbusmock/mockobject.py | https://github.com/martinpitt/python-dbusmock/blob/26f65f78bc0ed347233f699a8d6ee0e6880e7eb0/dbusmock/mockobject.py#L149-L161 | python | train

```python
def GetAll(self, interface_name, *args, **kwargs):
    '''Standard D-Bus API for getting all property values'''
    self.log('GetAll ' + interface_name)
    if not interface_name:
        interface_name = self.interface
    try:
        return self.props[interface_name]
    except KeyError:
        raise dbus.exceptions.DBusException(
            'no such interface ' + interface_name,
            name=self.interface + '.UnknownInterface')
```

RudolfCardinal/pythonlib | cardinal_pythonlib/sphinxtools.py | https://github.com/RudolfCardinal/pythonlib/blob/0b84cb35f38bd7d8723958dae51b480a829b7227/cardinal_pythonlib/sphinxtools.py#L414-L440 | python | train

```python
def write_rst(self,
              prefix: str = "",
              suffix: str = "",
              heading_underline_char: str = "=",
              method: AutodocMethod = None,
              overwrite: bool = False,
              mock: bool = False) -> None:
    """
    Writes the RST file to our destination RST filename, making any
    necessary directories.

    Args:
        prefix: as for :func:`rst_content`
        suffix: as for :func:`rst_content`
        heading_underline_char: as for :func:`rst_content`
        method: as for :func:`rst_content`
        overwrite: overwrite the file if it exists already?
        mock: pretend to write, but don't
    """
    content = self.rst_content(
        prefix=prefix,
        suffix=suffix,
        heading_underline_char=heading_underline_char,
        method=method
    )
    write_if_allowed(self.target_rst_filename, content,
                     overwrite=overwrite, mock=mock)
```

iotaledger/iota.lib.py | iota/bin/__init__.py | https://github.com/iotaledger/iota.lib.py/blob/97cdd1e241498446b46157b79b2a1ea2ec6d387a/iota/bin/__init__.py#L41-L55 | python | test

```python
def execute(self, api, **arguments):
    # type: (Iota, **Any) -> Optional[int]
    """
    Executes the command and (optionally) returns an exit code (used by
    the shell to determine if the application exited cleanly).

    :param api:
        The API object used to communicate with the node.

    :param arguments:
        Command-line arguments parsed by the argument parser.
    """
    raise NotImplementedError(
        'Not implemented in {cls}.'.format(cls=type(self).__name__),
    )
```

hubo1016/vlcp | vlcp/event/pqueue.py | https://github.com/hubo1016/vlcp/blob/239055229ec93a99cc7e15208075724ccf543bd1/vlcp/event/pqueue.py#L855-L862 | python | train

```python
def unblock(self, event):
    '''
    Remove a block
    '''
    if event not in self.blockEvents:
        return
    self.blockEvents[event].unblock(event)
    del self.blockEvents[event]
```

openstack/quark | quark/drivers/ironic_driver.py | https://github.com/openstack/quark/blob/1112e6a66917d3e98e44cb7b33b107fd5a74bb2e/quark/drivers/ironic_driver.py#L369-L393 | python | valid

```python
def update_port(self, context, port_id, **kwargs):
    """Update a port.

    :param context: neutron api request context.
    :param port_id: neutron port id.
    :param kwargs: optional kwargs.
    :raises IronicException: If the client is unable to update the
        downstream port for any reason, the exception will be logged
        and IronicException raised.

    TODO(morgabra) It does not really make sense in the context of Ironic
    to allow updating ports. fixed_ips and mac_address are burned in the
    configdrive on the host, and we otherwise cannot migrate a port between
    instances. Eventually we will need to support security groups, but for
    now it's a no-op on port data changes, and we need to rely on the
    API/Nova to not allow updating data on active ports.
    """
    LOG.info("update_port %s %s" % (context.tenant_id, port_id))

    # TODO(morgabra): Change this when we enable security groups.
    if kwargs.get("security_groups"):
        msg = 'ironic driver does not support security group operations.'
        raise IronicException(msg=msg)

    return {"uuid": port_id}
```

wakatime/wakatime | wakatime/packages/simplejson/__init__.py | https://github.com/wakatime/wakatime/blob/74519ace04e8472f3a3993269963732b9946a01d/wakatime/packages/simplejson/__init__.py#L462-L533 | python | train

```python
def loads(s, encoding=None, cls=None, object_hook=None, parse_float=None,
          parse_int=None, parse_constant=None, object_pairs_hook=None,
          use_decimal=False, **kw):
    """Deserialize ``s`` (a ``str`` or ``unicode`` instance containing a JSON
    document) to a Python object.

    *encoding* determines the encoding used to interpret any
    :class:`str` objects decoded by this instance (``'utf-8'`` by
    default). It has no effect when decoding :class:`unicode` objects.
    Note that currently only encodings that are a superset of ASCII work;
    strings of other encodings should be passed in as :class:`unicode`.

    *object_hook*, if specified, will be called with the result of every
    JSON object decoded and its return value will be used in place of the
    given :class:`dict`. This can be used to provide custom
    deserializations (e.g. to support JSON-RPC class hinting).

    *object_pairs_hook* is an optional function that will be called with
    the result of any object literal decode with an ordered list of pairs.
    The return value of *object_pairs_hook* will be used instead of the
    :class:`dict`. This feature can be used to implement custom decoders
    that rely on the order that the key and value pairs are decoded (for
    example, :func:`collections.OrderedDict` will remember the order of
    insertion). If *object_hook* is also defined, the *object_pairs_hook*
    takes priority.

    *parse_float*, if specified, will be called with the string of every
    JSON float to be decoded. By default, this is equivalent to
    ``float(num_str)``. This can be used to use another datatype or parser
    for JSON floats (e.g. :class:`decimal.Decimal`).

    *parse_int*, if specified, will be called with the string of every
    JSON int to be decoded. By default, this is equivalent to
    ``int(num_str)``. This can be used to use another datatype or parser
    for JSON integers (e.g. :class:`float`).

    *parse_constant*, if specified, will be called with one of the
    following strings: ``'-Infinity'``, ``'Infinity'``, ``'NaN'``. This
    can be used to raise an exception if invalid JSON numbers are
    encountered.

    If *use_decimal* is true (default: ``False``) then it implies
    parse_float=decimal.Decimal for parity with ``dump``.

    To use a custom ``JSONDecoder`` subclass, specify it with the ``cls``
    kwarg. NOTE: You should use *object_hook* or *object_pairs_hook* instead
    of subclassing whenever possible.
    """
    if (cls is None and encoding is None and object_hook is None and
            parse_int is None and parse_float is None and
            parse_constant is None and object_pairs_hook is None
            and not use_decimal and not kw):
        return _default_decoder.decode(s)
    if cls is None:
        cls = JSONDecoder
    if object_hook is not None:
        kw['object_hook'] = object_hook
    if object_pairs_hook is not None:
        kw['object_pairs_hook'] = object_pairs_hook
    if parse_float is not None:
        kw['parse_float'] = parse_float
    if parse_int is not None:
        kw['parse_int'] = parse_int
    if parse_constant is not None:
        kw['parse_constant'] = parse_constant
    if use_decimal:
        if parse_float is not None:
            raise TypeError("use_decimal=True implies parse_float=Decimal")
        kw['parse_float'] = Decimal
    return cls(encoding=encoding, **kw).decode(s)
```

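Two quick examples of the `loads` entry point above (inputs are invented): the default fast path, and `use_decimal=True`, which parses JSON floats as `decimal.Decimal`.

```python
import simplejson

print(simplejson.loads('{"pi": 3.14159}'))
# {'pi': 3.14159}

print(simplejson.loads('{"pi": 3.14159}', use_decimal=True))
# {'pi': Decimal('3.14159')}
```
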
radujica/baloo | baloo/weld/weld_ops.py | https://github.com/radujica/baloo/blob/f6e05e35b73a75e8a300754c6bdc575e5f2d53b9/baloo/weld/weld_ops.py#L97-L133 | python | train

```python
def weld_filter(array, weld_type, bool_array):
    """Returns a new array only with the elements with a corresponding True in bool_array.

    Parameters
    ----------
    array : numpy.ndarray or WeldObject
        Input data.
    weld_type : WeldType
        Type of the elements in the input array.
    bool_array : numpy.ndarray or WeldObject
        Array of bool with True for elements in array desired in the result array.

    Returns
    -------
    WeldObject
        Representation of this computation.
    """
    obj_id, weld_obj = create_weld_object(array)
    bool_obj_id = get_weld_obj_id(weld_obj, bool_array)

    weld_template = """result(
    for(
        zip({array}, {bool_array}),
        appender[{type}],
        |b: appender[{type}], i: i64, e: {{{type}, bool}}|
            if (e.$1,
                merge(b, e.$0),
                b)
    )
)"""

    weld_obj.weld_code = weld_template.format(array=obj_id,
                                              bool_array=bool_obj_id,
                                              type=weld_type)

    return weld_obj
```

return weld_obj | [
"def",
"weld_filter",
"(",
"array",
",",
"weld_type",
",",
"bool_array",
")",
":",
"obj_id",
",",
"weld_obj",
"=",
"create_weld_object",
"(",
"array",
")",
"bool_obj_id",
"=",
"get_weld_obj_id",
"(",
"weld_obj",
",",
"bool_array",
")",
"weld_template",
"=",
"\"\"\"result(\n for(\n zip({array}, {bool_array}),\n appender[{type}],\n |b: appender[{type}], i: i64, e: {{{type}, bool}}| \n if (e.$1, \n merge(b, e.$0), \n b)\n )\n)\"\"\"",
"weld_obj",
".",
"weld_code",
"=",
"weld_template",
".",
"format",
"(",
"array",
"=",
"obj_id",
",",
"bool_array",
"=",
"bool_obj_id",
",",
"type",
"=",
"weld_type",
")",
"return",
"weld_obj"
] | Returns a new array only with the elements with a corresponding True in bool_array.
Parameters
----------
array : numpy.ndarray or WeldObject
Input data.
weld_type : WeldType
Type of the elements in the input array.
bool_array : numpy.ndarray or WeldObject
Array of bool with True for elements in array desired in the result array.
Returns
-------
WeldObject
Representation of this computation. | [
"Returns",
"a",
"new",
"array",
"only",
"with",
"the",
"elements",
"with",
"a",
"corresponding",
"True",
"in",
"bool_array",
"."
] | python | train |
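For intuition, here is the eager NumPy equivalent of the filter semantics the Weld template above encodes lazily (plain boolean masking; no Weld runtime needed):

```python
import numpy as np

array = np.array([10, 20, 30, 40])
keep = np.array([True, False, True, False])

# weld_filter builds a lazy Weld program with the same meaning as:
print(array[keep])  # [10 30]
```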
danishabdullah/algen | algen/compilers.py | https://github.com/danishabdullah/algen/blob/642ba26d8721c588fce505ac67528070c1edc264/algen/compilers.py#L171-L206 | def compiled_foreign_keys(self):
"""Returns compiled foreign key definitions"""
def get_column_args(column):
tmp = []
for arg_name, arg_val in column.items():
if arg_name not in ('name', 'type', 'reference'):
if arg_name in ('server_default', 'server_onupdate'):
arg_val = '"{}"'.format(arg_val)
tmp.append(ALCHEMY_TEMPLATES.column_arg.safe_substitute(arg_name=arg_name,
arg_val=arg_val))
return ", ".join(tmp)
def get_fkey_args(column):
table = column['reference']['table']
column = column['reference']['column']
return ALCHEMY_TEMPLATES.foreign_key_arg.safe_substitute(reference_table=table, reference_column=column)
res = []
for column in self.foreign_key_definitions:
column_args = get_column_args(column)
column_type, type_params = ModelCompiler.get_col_type_info(column.get('type'))
column_name = column.get('name')
reference = get_fkey_args(column)
if column_type in MUTABLE_DICT_TYPES:
column_type = ALCHEMY_TEMPLATES.mutable_dict_type.safe_substitute(type=column_type,
type_params=type_params)
type_params = ''
res.append(
ALCHEMY_TEMPLATES.foreign_key.safe_substitute(column_name=column_name,
column_type=column_type,
column_args=column_args,
foreign_key_args=reference,
type_params=type_params))
join_string = "\n" + self.tab
return join_string.join(res) | [
"def",
"compiled_foreign_keys",
"(",
"self",
")",
":",
"def",
"get_column_args",
"(",
"column",
")",
":",
"tmp",
"=",
"[",
"]",
"for",
"arg_name",
",",
"arg_val",
"in",
"column",
".",
"items",
"(",
")",
":",
"if",
"arg_name",
"not",
"in",
"(",
"'name'",
",",
"'type'",
",",
"'reference'",
")",
":",
"if",
"arg_name",
"in",
"(",
"'server_default'",
",",
"'server_onupdate'",
")",
":",
"arg_val",
"=",
"'\"{}\"'",
".",
"format",
"(",
"arg_val",
")",
"tmp",
".",
"append",
"(",
"ALCHEMY_TEMPLATES",
".",
"column_arg",
".",
"safe_substitute",
"(",
"arg_name",
"=",
"arg_name",
",",
"arg_val",
"=",
"arg_val",
")",
")",
"return",
"\", \"",
".",
"join",
"(",
"tmp",
")",
"def",
"get_fkey_args",
"(",
"column",
")",
":",
"table",
"=",
"column",
"[",
"'reference'",
"]",
"[",
"'table'",
"]",
"column",
"=",
"column",
"[",
"'reference'",
"]",
"[",
"'column'",
"]",
"return",
"ALCHEMY_TEMPLATES",
".",
"foreign_key_arg",
".",
"safe_substitute",
"(",
"reference_table",
"=",
"table",
",",
"reference_column",
"=",
"column",
")",
"res",
"=",
"[",
"]",
"for",
"column",
"in",
"self",
".",
"foreign_key_definitions",
":",
"column_args",
"=",
"get_column_args",
"(",
"column",
")",
"column_type",
",",
"type_params",
"=",
"ModelCompiler",
".",
"get_col_type_info",
"(",
"column",
".",
"get",
"(",
"'type'",
")",
")",
"column_name",
"=",
"column",
".",
"get",
"(",
"'name'",
")",
"reference",
"=",
"get_fkey_args",
"(",
"column",
")",
"if",
"column_type",
"in",
"MUTABLE_DICT_TYPES",
":",
"column_type",
"=",
"ALCHEMY_TEMPLATES",
".",
"mutable_dict_type",
".",
"safe_substitute",
"(",
"type",
"=",
"column_type",
",",
"type_params",
"=",
"type_params",
")",
"type_params",
"=",
"''",
"res",
".",
"append",
"(",
"ALCHEMY_TEMPLATES",
".",
"foreign_key",
".",
"safe_substitute",
"(",
"column_name",
"=",
"column_name",
",",
"column_type",
"=",
"column_type",
",",
"column_args",
"=",
"column_args",
",",
"foreign_key_args",
"=",
"reference",
",",
"type_params",
"=",
"type_params",
")",
")",
"join_string",
"=",
"\"\\n\"",
"+",
"self",
".",
"tab",
"return",
"join_string",
".",
"join",
"(",
"res",
")"
] | Returns compiled foreign key definitions | [
"Returns",
"compiled",
"foreign",
"key",
"definitions"
] | python | train |
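A hypothetical shape for one entry of `foreign_key_definitions`, inferred from how the property reads it (the key names here come from the code above, not from documented algen input):

```python
# name/type/reference are consumed explicitly; any other key
# ('nullable' here) is rendered as an extra column argument.
fk_definition = {
    "name": "owner_id",
    "type": "Integer",
    "reference": {"table": "users", "column": "id"},
    "nullable": False,
}
```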
eerimoq/bincopy | bincopy.py | https://github.com/eerimoq/bincopy/blob/5e02cd001c3e9b54729425db6bffad5f03e1beac/bincopy.py#L146-L156 | def pack_ihex(type_, address, size, data):
"""Create a Intel HEX record of given data.
"""
line = '{:02X}{:04X}{:02X}'.format(size, address, type_)
if data:
line += binascii.hexlify(data).decode('ascii').upper()
return ':{}{:02X}'.format(line, crc_ihex(line)) | [
"def",
"pack_ihex",
"(",
"type_",
",",
"address",
",",
"size",
",",
"data",
")",
":",
"line",
"=",
"'{:02X}{:04X}{:02X}'",
".",
"format",
"(",
"size",
",",
"address",
",",
"type_",
")",
"if",
"data",
":",
"line",
"+=",
"binascii",
".",
"hexlify",
"(",
"data",
")",
".",
"decode",
"(",
"'ascii'",
")",
".",
"upper",
"(",
")",
"return",
"':{}{:02X}'",
".",
"format",
"(",
"line",
",",
"crc_ihex",
"(",
"line",
")",
")"
] | Create an Intel HEX record of given data. | [
"Create",
"a",
"Intel",
"HEX",
"record",
"of",
"given",
"data",
"."
] | python | train |
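A self-contained sketch of the record layout, with a checksum helper standing in for `crc_ihex` (assumed to be the usual Intel HEX two's-complement of the byte sum):

```python
import binascii

def crc_sketch(hexstr):
    # Intel HEX checksum: two's complement of the summed record bytes.
    return (-sum(bytearray(binascii.unhexlify(hexstr)))) & 0xFF

data = b'\x01\x02'
line = '{:02X}{:04X}{:02X}'.format(len(data), 0x0100, 0)
line += binascii.hexlify(data).decode('ascii').upper()
print(':{}{:02X}'.format(line, crc_sketch(line)))  # :020100000102FA
```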
manns/pyspread | pyspread/src/lib/vlc.py | https://github.com/manns/pyspread/blob/0e2fd44c2e0f06605efc3058c20a43a8c1f9e7e0/pyspread/src/lib/vlc.py#L5785-L5794 | def libvlc_video_get_chapter_description(p_mi, i_title):
'''Get the description of available chapters for specific title.
@param p_mi: the media player.
@param i_title: selected title.
@return: list containing description of available chapter for title i_title.
'''
f = _Cfunctions.get('libvlc_video_get_chapter_description', None) or \
_Cfunction('libvlc_video_get_chapter_description', ((1,), (1,),), None,
ctypes.POINTER(TrackDescription), MediaPlayer, ctypes.c_int)
return f(p_mi, i_title) | [
"def",
"libvlc_video_get_chapter_description",
"(",
"p_mi",
",",
"i_title",
")",
":",
"f",
"=",
"_Cfunctions",
".",
"get",
"(",
"'libvlc_video_get_chapter_description'",
",",
"None",
")",
"or",
"_Cfunction",
"(",
"'libvlc_video_get_chapter_description'",
",",
"(",
"(",
"1",
",",
")",
",",
"(",
"1",
",",
")",
",",
")",
",",
"None",
",",
"ctypes",
".",
"POINTER",
"(",
"TrackDescription",
")",
",",
"MediaPlayer",
",",
"ctypes",
".",
"c_int",
")",
"return",
"f",
"(",
"p_mi",
",",
"i_title",
")"
] | Get the description of available chapters for specific title.
@param p_mi: the media player.
@param i_title: selected title.
@return: list containing description of available chapter for title i_title. | [
"Get",
"the",
"description",
"of",
"available",
"chapters",
"for",
"specific",
"title",
"."
] | python | train |
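A hedged call sketch (needs a system libvlc plus these ctypes bindings; the media path is a placeholder, and the returned `TrackDescription` pointer has to be walked and released per libvlc's conventions):

```python
import vlc

player = vlc.MediaPlayer('file:///tmp/movie.mkv')  # hypothetical media
# Pointer to a linked list of TrackDescription records for title 0.
chapters = vlc.libvlc_video_get_chapter_description(player, 0)
```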
scheibler/khard | khard/khard.py | https://github.com/scheibler/khard/blob/0f69430c2680f1ff5f073a977a3c5b753b96cc17/khard/khard.py#L1046-L1115 | def email_subcommand(search_terms, vcard_list, parsable, remove_first_line):
"""Print a mail client friendly contacts table that is compatible with the
default format used by mutt.
Output format:
single line of text
email_address\tname\ttype
email_address\tname\ttype
[...]
:param search_terms: used as search term to filter the contacts before
printing
:type search_terms: str
:param vcard_list: the vcards to search for matching entries which should
be printed
:type vcard_list: list of carddav_object.CarddavObject
:param parsable: machine readable output: columns divided by tabulator (\t)
:type parsable: bool
:param remove_first_line: remove first line (searching for '' ...)
:type remove_first_line: bool
:returns: None
:rtype: None
"""
matching_email_address_list = []
all_email_address_list = []
for vcard in vcard_list:
for type, email_list in sorted(vcard.get_email_addresses().items(),
key=lambda k: k[0].lower()):
for email in sorted(email_list):
if config.display_by_name() == "first_name":
name = vcard.get_first_name_last_name()
else:
name = vcard.get_last_name_first_name()
# create output lines
line_formatted = "\t".join([name, type, email])
line_parsable = "\t".join([email, name, type])
if parsable:
# parsable option: start with email address
email_address_line = line_parsable
else:
# else: start with name
email_address_line = line_formatted
if re.search(search_terms,
"%s\n%s" % (line_formatted, line_parsable),
re.IGNORECASE | re.DOTALL):
matching_email_address_list.append(email_address_line)
# collect all email addresses in a different list as fallback
all_email_address_list.append(email_address_line)
if matching_email_address_list:
if parsable:
if not remove_first_line:
# at least mutt requires that line
print("searching for '%s' ..." % search_terms)
print('\n'.join(matching_email_address_list))
else:
list_email_addresses(matching_email_address_list)
elif all_email_address_list:
if parsable:
if not remove_first_line:
# at least mutt requires that line
print("searching for '%s' ..." % search_terms)
print('\n'.join(all_email_address_list))
else:
list_email_addresses(all_email_address_list)
else:
if not parsable:
print("Found no email addresses")
elif not remove_first_line:
print("searching for '%s' ..." % search_terms)
sys.exit(1) | [
"def",
"email_subcommand",
"(",
"search_terms",
",",
"vcard_list",
",",
"parsable",
",",
"remove_first_line",
")",
":",
"matching_email_address_list",
"=",
"[",
"]",
"all_email_address_list",
"=",
"[",
"]",
"for",
"vcard",
"in",
"vcard_list",
":",
"for",
"type",
",",
"email_list",
"in",
"sorted",
"(",
"vcard",
".",
"get_email_addresses",
"(",
")",
".",
"items",
"(",
")",
",",
"key",
"=",
"lambda",
"k",
":",
"k",
"[",
"0",
"]",
".",
"lower",
"(",
")",
")",
":",
"for",
"email",
"in",
"sorted",
"(",
"email_list",
")",
":",
"if",
"config",
".",
"display_by_name",
"(",
")",
"==",
"\"first_name\"",
":",
"name",
"=",
"vcard",
".",
"get_first_name_last_name",
"(",
")",
"else",
":",
"name",
"=",
"vcard",
".",
"get_last_name_first_name",
"(",
")",
"# create output lines",
"line_formatted",
"=",
"\"\\t\"",
".",
"join",
"(",
"[",
"name",
",",
"type",
",",
"email",
"]",
")",
"line_parsable",
"=",
"\"\\t\"",
".",
"join",
"(",
"[",
"email",
",",
"name",
",",
"type",
"]",
")",
"if",
"parsable",
":",
"# parsable option: start with email address",
"email_address_line",
"=",
"line_parsable",
"else",
":",
"# else: start with name",
"email_address_line",
"=",
"line_formatted",
"if",
"re",
".",
"search",
"(",
"search_terms",
",",
"\"%s\\n%s\"",
"%",
"(",
"line_formatted",
",",
"line_parsable",
")",
",",
"re",
".",
"IGNORECASE",
"|",
"re",
".",
"DOTALL",
")",
":",
"matching_email_address_list",
".",
"append",
"(",
"email_address_line",
")",
"# collect all email addresses in a different list as fallback",
"all_email_address_list",
".",
"append",
"(",
"email_address_line",
")",
"if",
"matching_email_address_list",
":",
"if",
"parsable",
":",
"if",
"not",
"remove_first_line",
":",
"# at least mutt requires that line",
"print",
"(",
"\"searching for '%s' ...\"",
"%",
"search_terms",
")",
"print",
"(",
"'\\n'",
".",
"join",
"(",
"matching_email_address_list",
")",
")",
"else",
":",
"list_email_addresses",
"(",
"matching_email_address_list",
")",
"elif",
"all_email_address_list",
":",
"if",
"parsable",
":",
"if",
"not",
"remove_first_line",
":",
"# at least mutt requires that line",
"print",
"(",
"\"searching for '%s' ...\"",
"%",
"search_terms",
")",
"print",
"(",
"'\\n'",
".",
"join",
"(",
"all_email_address_list",
")",
")",
"else",
":",
"list_email_addresses",
"(",
"all_email_address_list",
")",
"else",
":",
"if",
"not",
"parsable",
":",
"print",
"(",
"\"Found no email addresses\"",
")",
"elif",
"not",
"remove_first_line",
":",
"print",
"(",
"\"searching for '%s' ...\"",
"%",
"search_terms",
")",
"sys",
".",
"exit",
"(",
"1",
")"
] | Print a mail client friendly contacts table that is compatible with the
default format used by mutt.
Output format:
single line of text
email_address\tname\ttype
email_address\tname\ttype
[...]
:param search_terms: used as search term to filter the contacts before
printing
:type search_terms: str
:param vcard_list: the vcards to search for matching entries which should
be printed
:type vcard_list: list of carddav_object.CarddavObject
:param parsable: machine readable output: columns divided by tabulator (\t)
:type parsable: bool
:param remove_first_line: remove first line (searching for '' ...)
:type remove_first_line: bool
:returns: None
:rtype: None | [
"Print",
"a",
"mail",
"client",
"friendly",
"contacts",
"table",
"that",
"is",
"compatible",
"with",
"the",
"default",
"format",
"used",
"by",
"mutt",
".",
"Output",
"format",
":",
"single",
"line",
"of",
"text",
"email_address",
"\\",
"tname",
"\\",
"ttype",
"email_address",
"\\",
"tname",
"\\",
"ttype",
"[",
"...",
"]"
] | python | test |
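A minimal sketch of the output contract (no khard needed): parsable mode emits `email<TAB>name<TAB>type` rows behind an optional header line, which is what mutt's `query_command` expects to read back:

```python
# Formatted mode leads with the name; parsable mode leads with the address.
name, addr, kind = "Jane Doe", "jane@example.org", "work"
print("searching for 'jane' ...")     # header line (mutt requires it)
print("\t".join([addr, name, kind]))  # one parsable row
```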
jedie/DragonPy | dragonpy/utils/srecord_utils.py | https://github.com/jedie/DragonPy/blob/6659e5b5133aab26979a498ee7453495773a4f6c/dragonpy/utils/srecord_utils.py#L95-L114 | def get_readable_string(integer):
r"""
Convert an integer to a readable 2-character representation. This is useful for reversing
examples: 41 == ".A", 13 == "\n", 20 (space) == "__"
Returns a readable 2-char representation of an int.
"""
if integer == 9: #\t
readable_string = "\\t"
elif integer == 10: #\r
readable_string = "\\r"
elif integer == 13: #\n
readable_string = "\\n"
elif integer == 32: # space
readable_string = '__'
elif integer >= 33 and integer <= 126: # Readable ascii
readable_string = ''.join([chr(integer), '.'])
else: # rest
readable_string = int_to_padded_hex_byte(integer)
return readable_string | [
"def",
"get_readable_string",
"(",
"integer",
")",
":",
"if",
"integer",
"==",
"9",
":",
"#\\t",
"readable_string",
"=",
"\"\\\\t\"",
"elif",
"integer",
"==",
"10",
":",
"#\\r",
"readable_string",
"=",
"\"\\\\r\"",
"elif",
"integer",
"==",
"13",
":",
"#\\n",
"readable_string",
"=",
"\"\\\\n\"",
"elif",
"integer",
"==",
"32",
":",
"# space",
"readable_string",
"=",
"'__'",
"elif",
"integer",
">=",
"33",
"and",
"integer",
"<=",
"126",
":",
"# Readable ascii",
"readable_string",
"=",
"''",
".",
"join",
"(",
"[",
"chr",
"(",
"integer",
")",
",",
"'.'",
"]",
")",
"else",
":",
"# rest",
"readable_string",
"=",
"int_to_padded_hex_byte",
"(",
"integer",
")",
"return",
"readable_string"
] | r"""
Convert an integer to a readable 2-character representation. This is useful for reversing
examples: 41 == ".A", 13 == "\n", 20 (space) == "__"
Returns a readable 2-char representation of an int. | [
"r",
"Convert",
"an",
"integer",
"to",
"a",
"readable",
"2",
"-",
"character",
"representation",
".",
"This",
"is",
"useful",
"for",
"reversing",
"examples",
":",
"41",
"==",
".",
"A",
"13",
"==",
"\\",
"n",
"20",
"(",
"space",
")",
"==",
"__",
"Returns",
"a",
"readable",
"2",
"-",
"char",
"representation",
"of",
"an",
"int",
"."
] | python | train |
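Expected mappings per the rules above (the import path mirrors the file location shown; adjust it to your checkout, and note that the `C8` result assumes the hex helper zero-pads uppercase):

```python
from dragonpy.utils.srecord_utils import get_readable_string

for value in (9, 13, 32, 65, 200):
    print(value, get_readable_string(value))  # \t, \n, __, A., C8
```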
yandex/yandex-tank | yandextank/core/tankcore.py | https://github.com/yandex/yandex-tank/blob/d71d63b6ab5de8b8a5ea2b728b6ab9ac0b1ba71b/yandextank/core/tankcore.py#L417-L437 | def __collect_file(self, filename, keep_original=False):
"""
Move or copy single file to artifacts dir
"""
dest = self.artifacts_dir + '/' + os.path.basename(filename)
logger.debug("Collecting file: %s to %s", filename, dest)
if not filename or not os.path.exists(filename):
logger.warning("File not found to collect: %s", filename)
return
if os.path.exists(dest):
# FIXME: 3 find a way to store artifacts anyway
logger.warning("File already exists: %s", dest)
return
if keep_original:
shutil.copy(filename, self.artifacts_dir)
else:
shutil.move(filename, self.artifacts_dir)
os.chmod(dest, 0o644) | [
"def",
"__collect_file",
"(",
"self",
",",
"filename",
",",
"keep_original",
"=",
"False",
")",
":",
"dest",
"=",
"self",
".",
"artifacts_dir",
"+",
"'/'",
"+",
"os",
".",
"path",
".",
"basename",
"(",
"filename",
")",
"logger",
".",
"debug",
"(",
"\"Collecting file: %s to %s\"",
",",
"filename",
",",
"dest",
")",
"if",
"not",
"filename",
"or",
"not",
"os",
".",
"path",
".",
"exists",
"(",
"filename",
")",
":",
"logger",
".",
"warning",
"(",
"\"File not found to collect: %s\"",
",",
"filename",
")",
"return",
"if",
"os",
".",
"path",
".",
"exists",
"(",
"dest",
")",
":",
"# FIXME: 3 find a way to store artifacts anyway",
"logger",
".",
"warning",
"(",
"\"File already exists: %s\"",
",",
"dest",
")",
"return",
"if",
"keep_original",
":",
"shutil",
".",
"copy",
"(",
"filename",
",",
"self",
".",
"artifacts_dir",
")",
"else",
":",
"shutil",
".",
"move",
"(",
"filename",
",",
"self",
".",
"artifacts_dir",
")",
"os",
".",
"chmod",
"(",
"dest",
",",
"0o644",
")"
] | Move or copy single file to artifacts dir | [
"Move",
"or",
"copy",
"single",
"file",
"to",
"artifacts",
"dir"
] | python | test |
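A stand-alone sketch of the move-or-copy pattern this private method wraps (same guards, without the tank logging):

```python
import os
import shutil

def collect_file(filename, artifacts_dir, keep_original=False):
    dest = os.path.join(artifacts_dir, os.path.basename(filename))
    if not filename or not os.path.exists(filename) or os.path.exists(dest):
        return  # missing source or an existing artifact: do nothing
    (shutil.copy if keep_original else shutil.move)(filename, artifacts_dir)
    os.chmod(dest, 0o644)
```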
cloudera/impyla | impala/hiveserver2.py | https://github.com/cloudera/impyla/blob/547fa2ba3b6151e2a98b3544301471a643212dc3/impala/hiveserver2.py#L80-L140 | def cursor(self, user=None, configuration=None, convert_types=True,
dictify=False, fetch_error=True):
"""Get a cursor from the HiveServer2 (HS2) connection.
Parameters
----------
user : str, optional
configuration : dict of str keys and values, optional
Configuration overlay for the HS2 session.
convert_types : bool, optional
When `False`, timestamps and decimal values will not be converted
to Python `datetime` and `Decimal` values. (These conversions are
expensive.) Only applies when using HS2 protocol versions > 6.
dictify : bool, optional
When `True` cursor will return key value pairs instead of rows.
batch_cursor : bool, optional
When `True` cursor will return CBatches directly rather than rows.
fetch_error : bool, optional
In versions of impala prior to 2.7.0, when an operation fails and
the impalad returns an error state, the error message is not always
returned. In these cases the error message can be retrieved by a
subsequent fetch rpc call but this has a side effect of invalidating
the query handle and causing any further operations against it to
fail. e.g. calling log() or profile().
When set to `True` impyla will attempt to fetch the error message.
When set to `False`, this flag will cause impyla not to attempt to
fetch the message with a fetch call. In this case the query
handle remains valid and impyla will raise an exception with a
message of "Operation is in ERROR_STATE".
The Default option is `True`.
Returns
-------
HiveServer2Cursor
A `Cursor` object (DB API 2.0-compliant).
"""
# PEP 249
log.debug('Getting a cursor (Impala session)')
if user is None:
user = getpass.getuser()
log.debug('.cursor(): getting new session_handle')
session = self.service.open_session(user, configuration)
log.debug('HiveServer2Cursor(service=%s, session_handle=%s, '
'default_config=%s, hs2_protocol_version=%s)',
self.service, session.handle,
session.config, session.hs2_protocol_version)
cursor_class = HiveServer2DictCursor if dictify else HiveServer2Cursor
cursor = cursor_class(session, convert_types=convert_types,
fetch_error=fetch_error)
if self.default_db is not None:
log.info('Using database %s as default', self.default_db)
cursor.execute('USE %s' % self.default_db)
return cursor | [
"def",
"cursor",
"(",
"self",
",",
"user",
"=",
"None",
",",
"configuration",
"=",
"None",
",",
"convert_types",
"=",
"True",
",",
"dictify",
"=",
"False",
",",
"fetch_error",
"=",
"True",
")",
":",
"# PEP 249",
"log",
".",
"debug",
"(",
"'Getting a cursor (Impala session)'",
")",
"if",
"user",
"is",
"None",
":",
"user",
"=",
"getpass",
".",
"getuser",
"(",
")",
"log",
".",
"debug",
"(",
"'.cursor(): getting new session_handle'",
")",
"session",
"=",
"self",
".",
"service",
".",
"open_session",
"(",
"user",
",",
"configuration",
")",
"log",
".",
"debug",
"(",
"'HiveServer2Cursor(service=%s, session_handle=%s, '",
"'default_config=%s, hs2_protocol_version=%s)'",
",",
"self",
".",
"service",
",",
"session",
".",
"handle",
",",
"session",
".",
"config",
",",
"session",
".",
"hs2_protocol_version",
")",
"cursor_class",
"=",
"HiveServer2DictCursor",
"if",
"dictify",
"else",
"HiveServer2Cursor",
"cursor",
"=",
"cursor_class",
"(",
"session",
",",
"convert_types",
"=",
"convert_types",
",",
"fetch_error",
"=",
"fetch_error",
")",
"if",
"self",
".",
"default_db",
"is",
"not",
"None",
":",
"log",
".",
"info",
"(",
"'Using database %s as default'",
",",
"self",
".",
"default_db",
")",
"cursor",
".",
"execute",
"(",
"'USE %s'",
"%",
"self",
".",
"default_db",
")",
"return",
"cursor"
] | Get a cursor from the HiveServer2 (HS2) connection.
Parameters
----------
user : str, optional
configuration : dict of str keys and values, optional
Configuration overlay for the HS2 session.
convert_types : bool, optional
When `False`, timestamps and decimal values will not be converted
to Python `datetime` and `Decimal` values. (These conversions are
expensive.) Only applies when using HS2 protocol versions > 6.
dictify : bool, optional
When `True` cursor will return key value pairs instead of rows.
batch_cursor : bool, optional
When `True` cursor will return CBatches directly rather than rows.
fetch_error : bool, optional
In versions of impala prior to 2.7.0, when an operation fails and
the impalad returns an error state, the error message is not always
returned. In these cases the error message can be retrieved by a
subsequent fetch rpc call but this has a side effect of invalidating
the query handle and causing any further operations against it to
fail. e.g. calling log() or profile().
When set to `True` impyla will attempt to fetch the error message.
When set to `False`, this flag will cause impyla not to attempt to
fetch the message with a fetch call. In this case the query
handle remains valid and impyla will raise an exception with a
message of "Operation is in ERROR_STATE".
The Default option is `True`.
Returns
-------
HiveServer2Cursor
A `Cursor` object (DB API 2.0-compliant). | [
"Get",
"a",
"cursor",
"from",
"the",
"HiveServer2",
"(",
"HS2",
")",
"connection",
"."
] | python | train |
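Typical DB-API usage of this cursor (needs a reachable Impala/HiveServer2 endpoint; host and port below are placeholders):

```python
from impala.dbapi import connect

conn = connect(host='impala-host', port=21050)
cur = conn.cursor(dictify=True)  # rows come back as dicts
cur.execute('SELECT 1 AS one')
print(cur.fetchall())            # expected: [{'one': 1}]
```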
gem/oq-engine | openquake/hazardlib/gsim/campbell_bozorgnia_2014.py | https://github.com/gem/oq-engine/blob/8294553a0b8aba33fd96437a35065d03547d0040/openquake/hazardlib/gsim/campbell_bozorgnia_2014.py#L269-L286 | def _get_hypocentral_depth_term(self, C, rup):
"""
Returns the hypocentral depth scaling term defined in equations 21 - 23
"""
if rup.hypo_depth <= 7.0:
fhyp_h = 0.0
elif rup.hypo_depth > 20.0:
fhyp_h = 13.0
else:
fhyp_h = rup.hypo_depth - 7.0
if rup.mag <= 5.5:
fhyp_m = C["c17"]
elif rup.mag > 6.5:
fhyp_m = C["c18"]
else:
fhyp_m = C["c17"] + ((C["c18"] - C["c17"]) * (rup.mag - 5.5))
return fhyp_h * fhyp_m | [
"def",
"_get_hypocentral_depth_term",
"(",
"self",
",",
"C",
",",
"rup",
")",
":",
"if",
"rup",
".",
"hypo_depth",
"<=",
"7.0",
":",
"fhyp_h",
"=",
"0.0",
"elif",
"rup",
".",
"hypo_depth",
">",
"20.0",
":",
"fhyp_h",
"=",
"13.0",
"else",
":",
"fhyp_h",
"=",
"rup",
".",
"hypo_depth",
"-",
"7.0",
"if",
"rup",
".",
"mag",
"<=",
"5.5",
":",
"fhyp_m",
"=",
"C",
"[",
"\"c17\"",
"]",
"elif",
"rup",
".",
"mag",
">",
"6.5",
":",
"fhyp_m",
"=",
"C",
"[",
"\"c18\"",
"]",
"else",
":",
"fhyp_m",
"=",
"C",
"[",
"\"c17\"",
"]",
"+",
"(",
"(",
"C",
"[",
"\"c18\"",
"]",
"-",
"C",
"[",
"\"c17\"",
"]",
")",
"*",
"(",
"rup",
".",
"mag",
"-",
"5.5",
")",
")",
"return",
"fhyp_h",
"*",
"fhyp_m"
] | Returns the hypocentral depth scaling term defined in equations 21 - 23 | [
"Returns",
"the",
"hypocentral",
"depth",
"scaling",
"term",
"defined",
"in",
"equations",
"21",
"-",
"23"
] | python | train |
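The piecewise logic restated as a stand-alone function (equations 21-23: a hypocentral-depth ramp clamped to [0, 13] km times a magnitude taper between coefficients c17 and c18; the coefficient values below are made up for the demo):

```python
def hypo_depth_term(c17, c18, hypo_depth, mag):
    # Depth ramp: 0 below 7 km, saturates at 13 above 20 km.
    f_h = min(max(hypo_depth - 7.0, 0.0), 13.0)
    if mag <= 5.5:
        f_m = c17
    elif mag > 6.5:
        f_m = c18
    else:
        f_m = c17 + (c18 - c17) * (mag - 5.5)
    return f_h * f_m

print(hypo_depth_term(0.04, 0.01, hypo_depth=12.0, mag=6.0))  # 5.0 * 0.025
```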
ambitioninc/django-query-builder | querybuilder/tables.py | https://github.com/ambitioninc/django-query-builder/blob/113a7d845d3ddc6a45621b9880308e756f87c5bf/querybuilder/tables.py#L136-L150 | def get_alias(self):
"""
Gets the alias for the table or the auto_alias if one is set.
If there isn't any kind of alias, None is returned.
:returns: The table alias, auto_alias, or None
:rtype: str or None
"""
alias = None
if self.alias:
alias = self.alias
elif self.auto_alias:
alias = self.auto_alias
return alias | [
"def",
"get_alias",
"(",
"self",
")",
":",
"alias",
"=",
"None",
"if",
"self",
".",
"alias",
":",
"alias",
"=",
"self",
".",
"alias",
"elif",
"self",
".",
"auto_alias",
":",
"alias",
"=",
"self",
".",
"auto_alias",
"return",
"alias"
] | Gets the alias for the table or the auto_alias if one is set.
If there isn't any kind of alias, None is returned.
:returns: The table alias, auto_alias, or None
:rtype: str or None | [
"Gets",
"the",
"alias",
"for",
"the",
"table",
"or",
"the",
"auto_alias",
"if",
"one",
"is",
"set",
".",
"If",
"there",
"isn",
"t",
"any",
"kind",
"of",
"alias",
"None",
"is",
"returned",
"."
] | python | train |
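The precedence in isolation (an explicit alias beats the auto-generated one; `None` when neither is set), shown with a minimal stand-in class:

```python
class TableStub(object):
    def __init__(self, alias=None, auto_alias=None):
        self.alias = alias
        self.auto_alias = auto_alias

    def get_alias(self):
        return self.alias or self.auto_alias  # same precedence, condensed

print(TableStub(auto_alias='T0').get_alias())  # T0
print(TableStub('books', 'T0').get_alias())    # books
print(TableStub().get_alias())                 # None
```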
openeemeter/eeweather | eeweather/visualization.py | https://github.com/openeemeter/eeweather/blob/d32b7369b26edfa3ee431c60457afeb0593123a7/eeweather/visualization.py#L29-L132 | def plot_station_mapping(
target_latitude,
target_longitude,
isd_station,
distance_meters,
target_label="target",
): # pragma: no cover
""" Plots this mapping on a map."""
try:
import matplotlib.pyplot as plt
except ImportError:
raise ImportError("Plotting requires matplotlib.")
try:
import cartopy.crs as ccrs
import cartopy.feature as cfeature
import cartopy.io.img_tiles as cimgt
except ImportError:
raise ImportError("Plotting requires cartopy.")
lat, lng = isd_station.coords
t_lat, t_lng = float(target_latitude), float(target_longitude)
# fiture
fig = plt.figure(figsize=(16, 8))
# axes
tiles = cimgt.StamenTerrain()
ax = plt.subplot(1, 1, 1, projection=tiles.crs)
# offsets for labels
x_max = max([lng, t_lng])
x_min = min([lng, t_lng])
x_diff = x_max - x_min
y_max = max([lat, t_lat])
y_min = min([lat, t_lat])
y_diff = y_max - y_min
xoffset = x_diff * 0.05
yoffset = y_diff * 0.05
# minimum
left = x_min - x_diff * 0.5
right = x_max + x_diff * 0.5
bottom = y_min - y_diff * 0.3
top = y_max + y_diff * 0.3
width_ratio = 2.
height_ratio = 1.
if (right - left) / (top - bottom) > width_ratio / height_ratio:
# too short
goal = (right - left) * height_ratio / width_ratio
diff = goal - (top - bottom)
bottom = bottom - diff / 2.
top = top + diff / 2.
else:
# too skinny
goal = (top - bottom) * width_ratio / height_ratio
diff = goal - (right - left)
left = left - diff / 2.
right = right + diff / 2.
ax.set_extent([left, right, bottom, top])
# determine zoom level
# tile size at level 1 = 64 km
# level 2 = 32 km, level 3 = 16 km, etc, i.e. 128/(2^n) km
N_TILES = 600 # (how many tiles approximately fit in distance)
km = distance_meters / 1000.0
zoom_level = int(np.log2(128 * N_TILES / km))
ax.add_image(tiles, zoom_level)
# line between
plt.plot(
[lng, t_lng],
[lat, t_lat],
linestyle="-",
dashes=[2, 2],
transform=ccrs.Geodetic(),
)
# station
ax.plot(lng, lat, "ko", markersize=7, transform=ccrs.Geodetic())
# target
ax.plot(t_lng, t_lat, "ro", markersize=7, transform=ccrs.Geodetic())
# station label
station_label = "{} ({})".format(isd_station.usaf_id, isd_station.name)
ax.text(lng + xoffset, lat + yoffset, station_label, transform=ccrs.Geodetic())
# target label
ax.text(t_lng + xoffset, t_lat + yoffset, target_label, transform=ccrs.Geodetic())
# distance labels
mid_lng = (lng + t_lng) / 2
mid_lat = (lat + t_lat) / 2
dist_text = "{:.01f} km".format(km)
ax.text(mid_lng + xoffset, mid_lat + yoffset, dist_text, transform=ccrs.Geodetic())
plt.show() | [
"def",
"plot_station_mapping",
"(",
"target_latitude",
",",
"target_longitude",
",",
"isd_station",
",",
"distance_meters",
",",
"target_label",
"=",
"\"target\"",
",",
")",
":",
"# pragma: no cover",
"try",
":",
"import",
"matplotlib",
".",
"pyplot",
"as",
"plt",
"except",
"ImportError",
":",
"raise",
"ImportError",
"(",
"\"Plotting requires matplotlib.\"",
")",
"try",
":",
"import",
"cartopy",
".",
"crs",
"as",
"ccrs",
"import",
"cartopy",
".",
"feature",
"as",
"cfeature",
"import",
"cartopy",
".",
"io",
".",
"img_tiles",
"as",
"cimgt",
"except",
"ImportError",
":",
"raise",
"ImportError",
"(",
"\"Plotting requires cartopy.\"",
")",
"lat",
",",
"lng",
"=",
"isd_station",
".",
"coords",
"t_lat",
",",
"t_lng",
"=",
"float",
"(",
"target_latitude",
")",
",",
"float",
"(",
"target_longitude",
")",
"# fiture",
"fig",
"=",
"plt",
".",
"figure",
"(",
"figsize",
"=",
"(",
"16",
",",
"8",
")",
")",
"# axes",
"tiles",
"=",
"cimgt",
".",
"StamenTerrain",
"(",
")",
"ax",
"=",
"plt",
".",
"subplot",
"(",
"1",
",",
"1",
",",
"1",
",",
"projection",
"=",
"tiles",
".",
"crs",
")",
"# offsets for labels",
"x_max",
"=",
"max",
"(",
"[",
"lng",
",",
"t_lng",
"]",
")",
"x_min",
"=",
"min",
"(",
"[",
"lng",
",",
"t_lng",
"]",
")",
"x_diff",
"=",
"x_max",
"-",
"x_min",
"y_max",
"=",
"max",
"(",
"[",
"lat",
",",
"t_lat",
"]",
")",
"y_min",
"=",
"min",
"(",
"[",
"lat",
",",
"t_lat",
"]",
")",
"y_diff",
"=",
"y_max",
"-",
"y_min",
"xoffset",
"=",
"x_diff",
"*",
"0.05",
"yoffset",
"=",
"y_diff",
"*",
"0.05",
"# minimum",
"left",
"=",
"x_min",
"-",
"x_diff",
"*",
"0.5",
"right",
"=",
"x_max",
"+",
"x_diff",
"*",
"0.5",
"bottom",
"=",
"y_min",
"-",
"y_diff",
"*",
"0.3",
"top",
"=",
"y_max",
"+",
"y_diff",
"*",
"0.3",
"width_ratio",
"=",
"2.",
"height_ratio",
"=",
"1.",
"if",
"(",
"right",
"-",
"left",
")",
"/",
"(",
"top",
"-",
"bottom",
")",
">",
"width_ratio",
"/",
"height_ratio",
":",
"# too short",
"goal",
"=",
"(",
"right",
"-",
"left",
")",
"*",
"height_ratio",
"/",
"width_ratio",
"diff",
"=",
"goal",
"-",
"(",
"top",
"-",
"bottom",
")",
"bottom",
"=",
"bottom",
"-",
"diff",
"/",
"2.",
"top",
"=",
"top",
"+",
"diff",
"/",
"2.",
"else",
":",
"# too skinny",
"goal",
"=",
"(",
"top",
"-",
"bottom",
")",
"*",
"width_ratio",
"/",
"height_ratio",
"diff",
"=",
"goal",
"-",
"(",
"right",
"-",
"left",
")",
"left",
"=",
"left",
"-",
"diff",
"/",
"2.",
"right",
"=",
"right",
"+",
"diff",
"/",
"2.",
"ax",
".",
"set_extent",
"(",
"[",
"left",
",",
"right",
",",
"bottom",
",",
"top",
"]",
")",
"# determine zoom level",
"# tile size at level 1 = 64 km",
"# level 2 = 32 km, level 3 = 16 km, etc, i.e. 128/(2^n) km",
"N_TILES",
"=",
"600",
"# (how many tiles approximately fit in distance)",
"km",
"=",
"distance_meters",
"/",
"1000.0",
"zoom_level",
"=",
"int",
"(",
"np",
".",
"log2",
"(",
"128",
"*",
"N_TILES",
"/",
"km",
")",
")",
"ax",
".",
"add_image",
"(",
"tiles",
",",
"zoom_level",
")",
"# line between",
"plt",
".",
"plot",
"(",
"[",
"lng",
",",
"t_lng",
"]",
",",
"[",
"lat",
",",
"t_lat",
"]",
",",
"linestyle",
"=",
"\"-\"",
",",
"dashes",
"=",
"[",
"2",
",",
"2",
"]",
",",
"transform",
"=",
"ccrs",
".",
"Geodetic",
"(",
")",
",",
")",
"# station",
"ax",
".",
"plot",
"(",
"lng",
",",
"lat",
",",
"\"ko\"",
",",
"markersize",
"=",
"7",
",",
"transform",
"=",
"ccrs",
".",
"Geodetic",
"(",
")",
")",
"# target",
"ax",
".",
"plot",
"(",
"t_lng",
",",
"t_lat",
",",
"\"ro\"",
",",
"markersize",
"=",
"7",
",",
"transform",
"=",
"ccrs",
".",
"Geodetic",
"(",
")",
")",
"# station label",
"station_label",
"=",
"\"{} ({})\"",
".",
"format",
"(",
"isd_station",
".",
"usaf_id",
",",
"isd_station",
".",
"name",
")",
"ax",
".",
"text",
"(",
"lng",
"+",
"xoffset",
",",
"lat",
"+",
"yoffset",
",",
"station_label",
",",
"transform",
"=",
"ccrs",
".",
"Geodetic",
"(",
")",
")",
"# target label",
"ax",
".",
"text",
"(",
"t_lng",
"+",
"xoffset",
",",
"t_lat",
"+",
"yoffset",
",",
"target_label",
",",
"transform",
"=",
"ccrs",
".",
"Geodetic",
"(",
")",
")",
"# distance labels",
"mid_lng",
"=",
"(",
"lng",
"+",
"t_lng",
")",
"/",
"2",
"mid_lat",
"=",
"(",
"lat",
"+",
"t_lat",
")",
"/",
"2",
"dist_text",
"=",
"\"{:.01f} km\"",
".",
"format",
"(",
"km",
")",
"ax",
".",
"text",
"(",
"mid_lng",
"+",
"xoffset",
",",
"mid_lat",
"+",
"yoffset",
",",
"dist_text",
",",
"transform",
"=",
"ccrs",
".",
"Geodetic",
"(",
")",
")",
"plt",
".",
"show",
"(",
")"
] | Plots this mapping on a map. | [
"Plots",
"this",
"mapping",
"on",
"a",
"map",
"."
] | python | train |
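A hedged call sketch (matplotlib and cartopy must be installed; the `ISDStation` lookup is an assumption about eeweather's public API, so substitute however you obtain a station object):

```python
from eeweather import ISDStation
from eeweather.visualization import plot_station_mapping

station = ISDStation('722880')  # example USAF id (Burbank, CA)
plot_station_mapping(34.1, -118.3, station,
                     distance_meters=5000.0, target_label='site')
```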
scanny/python-pptx | pptx/opc/pkgwriter.py | https://github.com/scanny/python-pptx/blob/d6ab8234f8b03953d2f831ff9394b1852db34130/pptx/opc/pkgwriter.py#L81-L92 | def xml_for(cls, parts):
"""
Return content types XML mapping each part in *parts* to the
appropriate content type and suitable for storage as
``[Content_Types].xml`` in an OPC package.
"""
cti = cls()
cti._defaults['rels'] = CT.OPC_RELATIONSHIPS
cti._defaults['xml'] = CT.XML
for part in parts:
cti._add_content_type(part.partname, part.content_type)
return cti._xml() | [
"def",
"xml_for",
"(",
"cls",
",",
"parts",
")",
":",
"cti",
"=",
"cls",
"(",
")",
"cti",
".",
"_defaults",
"[",
"'rels'",
"]",
"=",
"CT",
".",
"OPC_RELATIONSHIPS",
"cti",
".",
"_defaults",
"[",
"'xml'",
"]",
"=",
"CT",
".",
"XML",
"for",
"part",
"in",
"parts",
":",
"cti",
".",
"_add_content_type",
"(",
"part",
".",
"partname",
",",
"part",
".",
"content_type",
")",
"return",
"cti",
".",
"_xml",
"(",
")"
] | Return content types XML mapping each part in *parts* to the
appropriate content type and suitable for storage as
``[Content_Types].xml`` in an OPC package. | [
"Return",
"content",
"types",
"XML",
"mapping",
"each",
"part",
"in",
"*",
"parts",
"*",
"to",
"the",
"appropriate",
"content",
"type",
"and",
"suitable",
"for",
"storage",
"as",
"[",
"Content_Types",
"]",
".",
"xml",
"in",
"an",
"OPC",
"package",
"."
] | python | train |
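A duck-typed sketch: any objects exposing `.partname` and `.content_type` satisfy the method's contract (the owning class is internal to python-pptx, so the call is left symbolic and its name is assumed):

```python
from collections import namedtuple

Part = namedtuple('Part', 'partname content_type')
parts = [Part('/ppt/slides/slide1.xml',
              'application/vnd.openxmlformats-officedocument'
              '.presentationml.slide+xml')]
# xml_bytes = _ContentTypesItem.xml_for(parts)  # name assumed; internal API
```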
3DLIRIOUS/MeshLabXML | meshlabxml/files.py | https://github.com/3DLIRIOUS/MeshLabXML/blob/177cce21e92baca500f56a932d66bd9a33257af8/meshlabxml/files.py#L282-L322 | def measure_dimension(fbasename=None, log=None, axis1=None, offset1=0.0,
axis2=None, offset2=0.0, ml_version=ml_version):
"""Measure a dimension of a mesh"""
axis1 = axis1.lower()
axis2 = axis2.lower()
ml_script1_file = 'TEMP3D_measure_dimension.mlx'
file_out = 'TEMP3D_measure_dimension.xyz'
ml_script1 = mlx.FilterScript(file_in=fbasename, file_out=file_out, ml_version=ml_version)
compute.section(ml_script1, axis1, offset1, surface=True)
compute.section(ml_script1, axis2, offset2, surface=False)
layers.delete_lower(ml_script1)
ml_script1.save_to_file(ml_script1_file)
ml_script1.run_script(log=log, script_file=ml_script1_file)
for val in ('x', 'y', 'z'):
if val not in (axis1, axis2):
axis = val
# ord: Get number that represents letter in ASCII
# Here we find the offset from 'x' to determine the list reference
# i.e. 0 for x, 1 for y, 2 for z
axis_num = ord(axis) - ord('x')
aabb = measure_aabb(file_out, log)
dimension = {'min': aabb['min'][axis_num], 'max': aabb['max'][axis_num],
'length': aabb['size'][axis_num], 'axis': axis}
if log is None:
print('\nFor file "%s"' % fbasename)
print('Dimension parallel to %s with %s=%s & %s=%s:' % (axis, axis1, offset1,
axis2, offset2))
print(' Min = %s, Max = %s, Total length = %s' % (dimension['min'],
dimension['max'], dimension['length']))
else:
log_file = open(log, 'a')
log_file.write('\nFor file "%s"\n' % fbasename)
log_file.write('Dimension parallel to %s with %s=%s & %s=%s:\n' % (axis, axis1, offset1,
axis2, offset2))
log_file.write('min = %s\n' % dimension['min'])
log_file.write('max = %s\n' % dimension['max'])
log_file.write('Total length = %s\n' % dimension['length'])
log_file.close()
return dimension | [
"def",
"measure_dimension",
"(",
"fbasename",
"=",
"None",
",",
"log",
"=",
"None",
",",
"axis1",
"=",
"None",
",",
"offset1",
"=",
"0.0",
",",
"axis2",
"=",
"None",
",",
"offset2",
"=",
"0.0",
",",
"ml_version",
"=",
"ml_version",
")",
":",
"axis1",
"=",
"axis1",
".",
"lower",
"(",
")",
"axis2",
"=",
"axis2",
".",
"lower",
"(",
")",
"ml_script1_file",
"=",
"'TEMP3D_measure_dimension.mlx'",
"file_out",
"=",
"'TEMP3D_measure_dimension.xyz'",
"ml_script1",
"=",
"mlx",
".",
"FilterScript",
"(",
"file_in",
"=",
"fbasename",
",",
"file_out",
"=",
"file_out",
",",
"ml_version",
"=",
"ml_version",
")",
"compute",
".",
"section",
"(",
"ml_script1",
",",
"axis1",
",",
"offset1",
",",
"surface",
"=",
"True",
")",
"compute",
".",
"section",
"(",
"ml_script1",
",",
"axis2",
",",
"offset2",
",",
"surface",
"=",
"False",
")",
"layers",
".",
"delete_lower",
"(",
"ml_script1",
")",
"ml_script1",
".",
"save_to_file",
"(",
"ml_script1_file",
")",
"ml_script1",
".",
"run_script",
"(",
"log",
"=",
"log",
",",
"script_file",
"=",
"ml_script1_file",
")",
"for",
"val",
"in",
"(",
"'x'",
",",
"'y'",
",",
"'z'",
")",
":",
"if",
"val",
"not",
"in",
"(",
"axis1",
",",
"axis2",
")",
":",
"axis",
"=",
"val",
"# ord: Get number that represents letter in ASCII",
"# Here we find the offset from 'x' to determine the list reference",
"# i.e. 0 for x, 1 for y, 2 for z",
"axis_num",
"=",
"ord",
"(",
"axis",
")",
"-",
"ord",
"(",
"'x'",
")",
"aabb",
"=",
"measure_aabb",
"(",
"file_out",
",",
"log",
")",
"dimension",
"=",
"{",
"'min'",
":",
"aabb",
"[",
"'min'",
"]",
"[",
"axis_num",
"]",
",",
"'max'",
":",
"aabb",
"[",
"'max'",
"]",
"[",
"axis_num",
"]",
",",
"'length'",
":",
"aabb",
"[",
"'size'",
"]",
"[",
"axis_num",
"]",
",",
"'axis'",
":",
"axis",
"}",
"if",
"log",
"is",
"None",
":",
"print",
"(",
"'\\nFor file \"%s\"'",
"%",
"fbasename",
")",
"print",
"(",
"'Dimension parallel to %s with %s=%s & %s=%s:'",
"%",
"(",
"axis",
",",
"axis1",
",",
"offset1",
",",
"axis2",
",",
"offset2",
")",
")",
"print",
"(",
"' Min = %s, Max = %s, Total length = %s'",
"%",
"(",
"dimension",
"[",
"'min'",
"]",
",",
"dimension",
"[",
"'max'",
"]",
",",
"dimension",
"[",
"'length'",
"]",
")",
")",
"else",
":",
"log_file",
"=",
"open",
"(",
"log",
",",
"'a'",
")",
"log_file",
".",
"write",
"(",
"'\\nFor file \"%s\"\\n'",
"%",
"fbasename",
")",
"log_file",
".",
"write",
"(",
"'Dimension parallel to %s with %s=%s & %s=%s:\\n'",
"%",
"(",
"axis",
",",
"axis1",
",",
"offset1",
",",
"axis2",
",",
"offset2",
")",
")",
"log_file",
".",
"write",
"(",
"'min = %s\\n'",
"%",
"dimension",
"[",
"'min'",
"]",
")",
"log_file",
".",
"write",
"(",
"'max = %s\\n'",
"%",
"dimension",
"[",
"'max'",
"]",
")",
"log_file",
".",
"write",
"(",
"'Total length = %s\\n'",
"%",
"dimension",
"[",
"'length'",
"]",
")",
"log_file",
".",
"close",
"(",
")",
"return",
"dimension"
] | Measure a dimension of a mesh | [
"Measure",
"a",
"dimension",
"of",
"a",
"mesh"
] | python | test |
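A hedged usage sketch (requires MeshLab on the PATH plus the meshlabxml package; the `mlx.files` module path is inferred from the file layout shown above):

```python
import meshlabxml as mlx

dim = mlx.files.measure_dimension('part.stl', axis1='x', offset1=0.0,
                                  axis2='y', offset2=0.0)
print(dim['length'], 'along axis', dim['axis'])
```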
jaraco/svg.charts | svg/charts/graph.py | https://github.com/jaraco/svg.charts/blob/23053497b3f1af4e760f355050107ae3bc05909d/svg/charts/graph.py#L324-L347 | def make_datapoint_text(self, x, y, value, style=None):
"""
Add text for a datapoint
"""
if not self.show_data_values:
# do nothing
return
# first lay down the text in a wide white stroke to
# differentiate it from the background
e = etree.SubElement(self.foreground, 'text', {
'x': str(x),
'y': str(y),
'class': 'dataPointLabel',
'style': '%(style)s stroke: #fff; stroke-width: 2;' % vars(),
})
e.text = str(value)
# then lay down the text in the specified style
e = etree.SubElement(self.foreground, 'text', {
'x': str(x),
'y': str(y),
'class': 'dataPointLabel'})
e.text = str(value)
if style:
e.set('style', style) | [
"def",
"make_datapoint_text",
"(",
"self",
",",
"x",
",",
"y",
",",
"value",
",",
"style",
"=",
"None",
")",
":",
"if",
"not",
"self",
".",
"show_data_values",
":",
"# do nothing",
"return",
"# first lay down the text in a wide white stroke to",
"# differentiate it from the background",
"e",
"=",
"etree",
".",
"SubElement",
"(",
"self",
".",
"foreground",
",",
"'text'",
",",
"{",
"'x'",
":",
"str",
"(",
"x",
")",
",",
"'y'",
":",
"str",
"(",
"y",
")",
",",
"'class'",
":",
"'dataPointLabel'",
",",
"'style'",
":",
"'%(style)s stroke: #fff; stroke-width: 2;'",
"%",
"vars",
"(",
")",
",",
"}",
")",
"e",
".",
"text",
"=",
"str",
"(",
"value",
")",
"# then lay down the text in the specified style",
"e",
"=",
"etree",
".",
"SubElement",
"(",
"self",
".",
"foreground",
",",
"'text'",
",",
"{",
"'x'",
":",
"str",
"(",
"x",
")",
",",
"'y'",
":",
"str",
"(",
"y",
")",
",",
"'class'",
":",
"'dataPointLabel'",
"}",
")",
"e",
".",
"text",
"=",
"str",
"(",
"value",
")",
"if",
"style",
":",
"e",
".",
"set",
"(",
"'style'",
",",
"style",
")"
] | Add text for a datapoint | [
"Add",
"text",
"for",
"a",
"datapoint"
] | python | test |
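The halo trick in isolation: render the label twice, first with a wide white stroke and then normally, so it stays legible over any background:

```python
from lxml import etree

svg = etree.Element('svg')
for style in ('stroke: #fff; stroke-width: 2;', None):
    text = etree.SubElement(svg, 'text', {'x': '10', 'y': '20'})
    text.text = '42'
    if style:  # first pass gets the white halo style
        text.set('style', style)
print(etree.tostring(svg, pretty_print=True).decode())
```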
browniebroke/deezer-python | deezer/client.py | https://github.com/browniebroke/deezer-python/blob/fb869c3617045b22e7124e4b783ec1a68d283ac3/deezer/client.py#L246-L264 | def advanced_search(self, terms, relation=None, index=0, limit=25, **kwargs):
"""
Advanced search of track, album or artist.
See `Search section of Deezer API
<https://developers.deezer.com/api/search>`_ for search terms.
:returns: a list of :class:`~deezer.resources.Resource` objects.
>>> client.advanced_search({"artist": "Daft Punk", "album": "Homework"})
>>> client.advanced_search({"artist": "Daft Punk", "album": "Homework"},
... relation="track")
"""
assert isinstance(terms, dict), "terms must be a dict"
# terms are sorted (for consistent tests between Python < 3.7 and >= 3.7)
query = " ".join(sorted(['{}:"{}"'.format(k, v) for (k, v) in terms.items()]))
return self.get_object(
"search", relation=relation, q=query, index=index, limit=limit, **kwargs
) | [
"def",
"advanced_search",
"(",
"self",
",",
"terms",
",",
"relation",
"=",
"None",
",",
"index",
"=",
"0",
",",
"limit",
"=",
"25",
",",
"*",
"*",
"kwargs",
")",
":",
"assert",
"isinstance",
"(",
"terms",
",",
"dict",
")",
",",
"\"terms must be a dict\"",
"# terms are sorted (for consistent tests between Python < 3.7 and >= 3.7)",
"query",
"=",
"\" \"",
".",
"join",
"(",
"sorted",
"(",
"[",
"'{}:\"{}\"'",
".",
"format",
"(",
"k",
",",
"v",
")",
"for",
"(",
"k",
",",
"v",
")",
"in",
"terms",
".",
"items",
"(",
")",
"]",
")",
")",
"return",
"self",
".",
"get_object",
"(",
"\"search\"",
",",
"relation",
"=",
"relation",
",",
"q",
"=",
"query",
",",
"index",
"=",
"index",
",",
"limit",
"=",
"limit",
",",
"*",
"*",
"kwargs",
")"
] | Advanced search of track, album or artist.
See `Search section of Deezer API
<https://developers.deezer.com/api/search>`_ for search terms.
:returns: a list of :class:`~deezer.resources.Resource` objects.
>>> client.advanced_search({"artist": "Daft Punk", "album": "Homework"})
>>> client.advanced_search({"artist": "Daft Punk", "album": "Homework"},
... relation="track") | [
"Advanced",
"search",
"of",
"track",
"album",
"or",
"artist",
"."
] | python | train |
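Usage mirroring the doctests above (live network access to the Deezer API is required):

```python
import deezer

client = deezer.Client()
tracks = client.advanced_search({"artist": "Daft Punk", "album": "Homework"},
                                relation="track", limit=5)
for track in tracks:
    print(track.title)
```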
galaxy-genome-annotation/python-apollo | apollo/status/__init__.py | https://github.com/galaxy-genome-annotation/python-apollo/blob/2bc9991302abe4402ec2885dcaac35915475b387/apollo/status/__init__.py#L35-L50 | def show_status(self, status):
"""
Get a specific status
:type status: str
:param status: Status to show
:rtype: dict
:return: A dictionary containing the status description
"""
statuses = self.get_statuses()
statuses = [x for x in statuses if x['value'] == status]
if len(statuses) == 0:
raise Exception("Unknown status value")
else:
return statuses[0] | [
"def",
"show_status",
"(",
"self",
",",
"status",
")",
":",
"statuses",
"=",
"self",
".",
"get_statuses",
"(",
")",
"statuses",
"=",
"[",
"x",
"for",
"x",
"in",
"statuses",
"if",
"x",
"[",
"'value'",
"]",
"==",
"status",
"]",
"if",
"len",
"(",
"statuses",
")",
"==",
"0",
":",
"raise",
"Exception",
"(",
"\"Unknown status value\"",
")",
"else",
":",
"return",
"statuses",
"[",
"0",
"]"
] | Get a specific status
:type status: str
:param status: Status to show
:rtype: dict
:return: A dictionnary containing status description | [
"Get",
"a",
"specific",
"status"
] | python | train |
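A hedged sketch (`ApolloInstance` is python-apollo's documented entry point; the URL and credentials are placeholders, and the `wa.status` attribute name is assumed from the module layout):

```python
from apollo import ApolloInstance

wa = ApolloInstance('https://apollo.example.org',
                    'admin@example.org', 'password')
print(wa.status.show_status('In progress'))  # raises on unknown values
```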
seperman/deepdiff | deepdiff/diff.py | https://github.com/seperman/deepdiff/blob/a66879190fadc671632f154c1fcb82f5c3cef800/deepdiff/diff.py#L455-L489 | def __create_hashtable(self, t, level):
"""Create hashtable of {item_hash: (indexes, item)}"""
hashes = {}
for (i, item) in enumerate(t):
try:
hashes_all = DeepHash(item,
hashes=self.hashes,
exclude_types=self.exclude_types,
exclude_paths=self.exclude_paths,
exclude_regex_paths=self.exclude_regex_paths,
hasher=self.hasher,
ignore_repetition=not self.report_repetition,
significant_digits=self.significant_digits,
number_format_notation=self.number_format_notation,
ignore_string_type_changes=self.ignore_string_type_changes,
ignore_numeric_type_changes=self.ignore_numeric_type_changes,
ignore_type_in_groups=self.ignore_type_in_groups,
ignore_type_subclasses=self.ignore_type_subclasses,
ignore_string_case=self.ignore_string_case,
number_to_string_func=self.number_to_string,
)
item_hash = hashes_all[item]
except Exception as e: # pragma: no cover
logger.warning("Can not produce a hash for %s."
"Not counting this object.\n %s" %
(level.path(), e))
else:
if item_hash is unprocessed: # pragma: no cover
logger.warning("Item %s was not processed while hashing "
"thus not counting this object." %
level.path())
else:
self._add_hash(hashes=hashes, item_hash=item_hash, item=item, i=i)
return hashes | [
"def",
"__create_hashtable",
"(",
"self",
",",
"t",
",",
"level",
")",
":",
"hashes",
"=",
"{",
"}",
"for",
"(",
"i",
",",
"item",
")",
"in",
"enumerate",
"(",
"t",
")",
":",
"try",
":",
"hashes_all",
"=",
"DeepHash",
"(",
"item",
",",
"hashes",
"=",
"self",
".",
"hashes",
",",
"exclude_types",
"=",
"self",
".",
"exclude_types",
",",
"exclude_paths",
"=",
"self",
".",
"exclude_paths",
",",
"exclude_regex_paths",
"=",
"self",
".",
"exclude_regex_paths",
",",
"hasher",
"=",
"self",
".",
"hasher",
",",
"ignore_repetition",
"=",
"not",
"self",
".",
"report_repetition",
",",
"significant_digits",
"=",
"self",
".",
"significant_digits",
",",
"number_format_notation",
"=",
"self",
".",
"number_format_notation",
",",
"ignore_string_type_changes",
"=",
"self",
".",
"ignore_string_type_changes",
",",
"ignore_numeric_type_changes",
"=",
"self",
".",
"ignore_numeric_type_changes",
",",
"ignore_type_in_groups",
"=",
"self",
".",
"ignore_type_in_groups",
",",
"ignore_type_subclasses",
"=",
"self",
".",
"ignore_type_subclasses",
",",
"ignore_string_case",
"=",
"self",
".",
"ignore_string_case",
",",
"number_to_string_func",
"=",
"self",
".",
"number_to_string",
",",
")",
"item_hash",
"=",
"hashes_all",
"[",
"item",
"]",
"except",
"Exception",
"as",
"e",
":",
"# pragma: no cover",
"logger",
".",
"warning",
"(",
"\"Can not produce a hash for %s.\"",
"\"Not counting this object.\\n %s\"",
"%",
"(",
"level",
".",
"path",
"(",
")",
",",
"e",
")",
")",
"else",
":",
"if",
"item_hash",
"is",
"unprocessed",
":",
"# pragma: no cover",
"logger",
".",
"warning",
"(",
"\"Item %s was not processed while hashing \"",
"\"thus not counting this object.\"",
"%",
"level",
".",
"path",
"(",
")",
")",
"else",
":",
"self",
".",
"_add_hash",
"(",
"hashes",
"=",
"hashes",
",",
"item_hash",
"=",
"item_hash",
",",
"item",
"=",
"item",
",",
"i",
"=",
"i",
")",
"return",
"hashes"
] | Create hashtable of {item_hash: (indexes, item)} | [
"Create",
"hashtable",
"of",
"{",
"item_hash",
":",
"(",
"indexes",
"item",
")",
"}"
] | python | train |
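The building block the method leans on is public: `DeepHash` maps an object to a content hash, so equal items land in the same bucket of the table:

```python
from deepdiff import DeepHash

item = {'a': [1, 2]}
print(DeepHash(item)[item])  # hex digest string for the whole object
```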
andrenarchy/krypy | krypy/deflation.py | https://github.com/andrenarchy/krypy/blob/4883ec9a61d64ea56489e15c35cc40f0633ab2f1/krypy/deflation.py#L160-L180 | def B_(self):
r''':math:`\underline{B}=\langle V_{n+1},M_lAM_rU\rangle`.
This property is obtained from :math:`C` if the operator is
self-adjoint. Otherwise, the inner products have to be formed
explicitly.'''
(n_, n) = self.H.shape
ls = self.linear_system
if self._B_ is None or self._B_.shape[1] < n_:
# compute B_
if ls.self_adjoint:
self._B_ = self.C.T.conj()
if n_ > n:
self._B_ = numpy.r_[self._B_,
utils.inner(self.V[:, [-1]],
self.projection.AU,
ip_B=ls.ip_B)]
else:
self._B_ = utils.inner(self.V, self.projection.AU,
ip_B=ls.ip_B)
return self._B_ | [
"def",
"B_",
"(",
"self",
")",
":",
"(",
"n_",
",",
"n",
")",
"=",
"self",
".",
"H",
".",
"shape",
"ls",
"=",
"self",
".",
"linear_system",
"if",
"self",
".",
"_B_",
"is",
"None",
"or",
"self",
".",
"_B_",
".",
"shape",
"[",
"1",
"]",
"<",
"n_",
":",
"# compute B_",
"if",
"ls",
".",
"self_adjoint",
":",
"self",
".",
"_B_",
"=",
"self",
".",
"C",
".",
"T",
".",
"conj",
"(",
")",
"if",
"n_",
">",
"n",
":",
"self",
".",
"_B_",
"=",
"numpy",
".",
"r_",
"[",
"self",
".",
"_B_",
",",
"utils",
".",
"inner",
"(",
"self",
".",
"V",
"[",
":",
",",
"[",
"-",
"1",
"]",
"]",
",",
"self",
".",
"projection",
".",
"AU",
",",
"ip_B",
"=",
"ls",
".",
"ip_B",
")",
"]",
"else",
":",
"self",
".",
"_B_",
"=",
"utils",
".",
"inner",
"(",
"self",
".",
"V",
",",
"self",
".",
"projection",
".",
"AU",
",",
"ip_B",
"=",
"ls",
".",
"ip_B",
")",
"return",
"self",
".",
"_B_"
] | r''':math:`\underline{B}=\langle V_{n+1},M_lAM_rU\rangle`.
This property is obtained from :math:`C` if the operator is
self-adjoint. Otherwise, the inner products have to be formed
explicitly. | [
"r",
":",
"math",
":",
"\\",
"underline",
"{",
"B",
"}",
"=",
"\\",
"langle",
"V_",
"{",
"n",
"+",
"1",
"}",
"M_lAM_rU",
"\\",
"rangle",
"."
] | python | train |
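A dense-NumPy picture of the generic branch: when the operator is not self-adjoint, the block is formed explicitly as the inner products of V with AU (the Euclidean inner product is assumed here; krypy supports a general ip_B):

```python
import numpy as np

V = np.linalg.qr(np.random.rand(8, 4))[0]  # orthonormal basis columns
AU = np.random.rand(8, 2)                  # M_l A M_r U, precomputed
B_ = V.conj().T @ AU                       # <V, AU>, column by column
print(B_.shape)                            # (4, 2)
```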
lrq3000/pyFileFixity | pyFileFixity/lib/profilers/visual/pympler/classtracker_stats.py | https://github.com/lrq3000/pyFileFixity/blob/fd5ef23bb13835faf1e3baa773619b86a1cc9bdf/pyFileFixity/lib/profilers/visual/pympler/classtracker_stats.py#L280-L297 | def _print_refs(self, refs, total, prefix=' ',
level=1, minsize=0, minpct=0.1):
"""
Print individual referents recursively.
"""
lrefs = list(refs)
lrefs.sort(key=lambda x: x.size)
lrefs.reverse()
for ref in lrefs:
if ref.size > minsize and (ref.size*100.0/total) > minpct:
self.stream.write('%-50s %-14s %3d%% [%d]\n' % (
trunc(prefix+str(ref.name), 50),
pp(ref.size),
int(ref.size*100.0/total),
level
))
self._print_refs(ref.refs, total, prefix=prefix+' ',
level=level+1) | [
"def",
"_print_refs",
"(",
"self",
",",
"refs",
",",
"total",
",",
"prefix",
"=",
"' '",
",",
"level",
"=",
"1",
",",
"minsize",
"=",
"0",
",",
"minpct",
"=",
"0.1",
")",
":",
"lrefs",
"=",
"list",
"(",
"refs",
")",
"lrefs",
".",
"sort",
"(",
"key",
"=",
"lambda",
"x",
":",
"x",
".",
"size",
")",
"lrefs",
".",
"reverse",
"(",
")",
"for",
"ref",
"in",
"lrefs",
":",
"if",
"ref",
".",
"size",
">",
"minsize",
"and",
"(",
"ref",
".",
"size",
"*",
"100.0",
"/",
"total",
")",
">",
"minpct",
":",
"self",
".",
"stream",
".",
"write",
"(",
"'%-50s %-14s %3d%% [%d]\\n'",
"%",
"(",
"trunc",
"(",
"prefix",
"+",
"str",
"(",
"ref",
".",
"name",
")",
",",
"50",
")",
",",
"pp",
"(",
"ref",
".",
"size",
")",
",",
"int",
"(",
"ref",
".",
"size",
"*",
"100.0",
"/",
"total",
")",
",",
"level",
")",
")",
"self",
".",
"_print_refs",
"(",
"ref",
".",
"refs",
",",
"total",
",",
"prefix",
"=",
"prefix",
"+",
"' '",
",",
"level",
"=",
"level",
"+",
"1",
")"
] | Print individual referents recursively. | [
"Print",
"individual",
"referents",
"recursively",
"."
] | python | train |
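The traversal pattern in isolation: sort children by size descending, print the ones above the size/percentage thresholds, recurse one level deeper:

```python
def walk(refs, total, level=1, minpct=0.1):
    for ref in sorted(refs, key=lambda r: r['size'], reverse=True):
        if ref['size'] * 100.0 / total > minpct:
            print('  ' * level + '%-30s %d%%' %
                  (ref['name'], ref['size'] * 100 // total))
            walk(ref.get('refs', []), total, level + 1, minpct)

walk([{'name': 'dict of Foo', 'size': 640,
       'refs': [{'name': 'list', 'size': 320}]}], total=1024)
```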
MycroftAI/mycroft-precise | precise/scripts/convert.py | https://github.com/MycroftAI/mycroft-precise/blob/e17cebdd171906dbd8a16e282d8a7966fba2eeba/precise/scripts/convert.py#L32-L76 | def convert(model_path: str, out_file: str):
"""
Converts an HDF5 file from Keras to a .pb for use with TensorFlow
Args:
model_path: location of Keras model
out_file: location to write protobuf
"""
print('Converting', model_path, 'to', out_file, '...')
import tensorflow as tf
from precise.model import load_precise_model
from keras import backend as K
out_dir, filename = split(out_file)
out_dir = out_dir or '.'
os.makedirs(out_dir, exist_ok=True)
K.set_learning_phase(0)
model = load_precise_model(model_path)
out_name = 'net_output'
tf.identity(model.output, name=out_name)
print('Output node name:', out_name)
print('Output folder:', out_dir)
sess = K.get_session()
# Write the graph in human-readable form
tf.train.write_graph(sess.graph.as_graph_def(), out_dir, filename + 'txt', as_text=True)
print('Saved readable graph to:', filename + 'txt')
# Write the graph in binary .pb file
from tensorflow.python.framework import graph_util
from tensorflow.python.framework import graph_io
cgraph = graph_util.convert_variables_to_constants(sess, sess.graph.as_graph_def(), [out_name])
graph_io.write_graph(cgraph, out_dir, filename, as_text=False)
if isfile(model_path + '.params'):
copyfile(model_path + '.params', out_file + '.params')
print('Saved graph to:', filename)
del sess | [
"def",
"convert",
"(",
"model_path",
":",
"str",
",",
"out_file",
":",
"str",
")",
":",
"print",
"(",
"'Converting'",
",",
"model_path",
",",
"'to'",
",",
"out_file",
",",
"'...'",
")",
"import",
"tensorflow",
"as",
"tf",
"from",
"precise",
".",
"model",
"import",
"load_precise_model",
"from",
"keras",
"import",
"backend",
"as",
"K",
"out_dir",
",",
"filename",
"=",
"split",
"(",
"out_file",
")",
"out_dir",
"=",
"out_dir",
"or",
"'.'",
"os",
".",
"makedirs",
"(",
"out_dir",
",",
"exist_ok",
"=",
"True",
")",
"K",
".",
"set_learning_phase",
"(",
"0",
")",
"model",
"=",
"load_precise_model",
"(",
"model_path",
")",
"out_name",
"=",
"'net_output'",
"tf",
".",
"identity",
"(",
"model",
".",
"output",
",",
"name",
"=",
"out_name",
")",
"print",
"(",
"'Output node name:'",
",",
"out_name",
")",
"print",
"(",
"'Output folder:'",
",",
"out_dir",
")",
"sess",
"=",
"K",
".",
"get_session",
"(",
")",
"# Write the graph in human readable",
"tf",
".",
"train",
".",
"write_graph",
"(",
"sess",
".",
"graph",
".",
"as_graph_def",
"(",
")",
",",
"out_dir",
",",
"filename",
"+",
"'txt'",
",",
"as_text",
"=",
"True",
")",
"print",
"(",
"'Saved readable graph to:'",
",",
"filename",
"+",
"'txt'",
")",
"# Write the graph in binary .pb file",
"from",
"tensorflow",
".",
"python",
".",
"framework",
"import",
"graph_util",
"from",
"tensorflow",
".",
"python",
".",
"framework",
"import",
"graph_io",
"cgraph",
"=",
"graph_util",
".",
"convert_variables_to_constants",
"(",
"sess",
",",
"sess",
".",
"graph",
".",
"as_graph_def",
"(",
")",
",",
"[",
"out_name",
"]",
")",
"graph_io",
".",
"write_graph",
"(",
"cgraph",
",",
"out_dir",
",",
"filename",
",",
"as_text",
"=",
"False",
")",
"if",
"isfile",
"(",
"model_path",
"+",
"'.params'",
")",
":",
"copyfile",
"(",
"model_path",
"+",
"'.params'",
",",
"out_file",
"+",
"'.params'",
")",
"print",
"(",
"'Saved graph to:'",
",",
"filename",
")",
"del",
"sess"
] | Converts an HDF5 file from Keras to a .pb for use with TensorFlow
Args:
model_path: location of Keras model
out_file: location to write protobuf | [
"Converts",
"an",
"HD5F",
"file",
"from",
"Keras",
"to",
"a",
".",
"pb",
"for",
"use",
"with",
"TensorFlow"
] | python | train |
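A hedged usage sketch (assumes a trained Precise model on disk and the TensorFlow 1.x graph-freezing stack the function body relies on):

```python
from precise.scripts.convert import convert

# Writes hey-computer.pb, a readable .pbtxt, and copies the .params file.
convert('hey-computer.net', 'hey-computer.pb')
```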
slinderman/pypolyagamma | pypolyagamma/binary_trees.py | https://github.com/slinderman/pypolyagamma/blob/abdc0c53e5114092998f51bf66f1900bc567f0bd/pypolyagamma/binary_trees.py#L60-L74 | def random_tree(n_leaves):
"""
Randomly partition the nodes
"""
def _random_subtree(leaves):
if len(leaves) == 1:
return leaves[0]
elif len(leaves) == 2:
return (leaves[0], leaves[1])
else:
split = npr.randint(1, len(leaves)-1)
return (_random_subtree(leaves[:split]),
_random_subtree(leaves[split:]))
return _random_subtree(np.arange(n_leaves)) | [
"def",
"random_tree",
"(",
"n_leaves",
")",
":",
"def",
"_random_subtree",
"(",
"leaves",
")",
":",
"if",
"len",
"(",
"leaves",
")",
"==",
"1",
":",
"return",
"leaves",
"[",
"0",
"]",
"elif",
"len",
"(",
"leaves",
")",
"==",
"2",
":",
"return",
"(",
"leaves",
"[",
"0",
"]",
",",
"leaves",
"[",
"1",
"]",
")",
"else",
":",
"split",
"=",
"npr",
".",
"randint",
"(",
"1",
",",
"len",
"(",
"leaves",
")",
"-",
"1",
")",
"return",
"(",
"_random_subtree",
"(",
"leaves",
"[",
":",
"split",
"]",
")",
",",
"_random_subtree",
"(",
"leaves",
"[",
"split",
":",
"]",
")",
")",
"return",
"_random_subtree",
"(",
"np",
".",
"arange",
"(",
"n_leaves",
")",
")"
] | Randomly partition the nodes | [
"Randomly",
"partition",
"the",
"nodes"
] | python | train |
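Quick demo (assuming pypolyagamma is importable): trees come back as nested 2-tuples over the leaf indices 0..n-1:

```python
import numpy.random as npr
from pypolyagamma.binary_trees import random_tree

npr.seed(0)
print(random_tree(5))  # e.g. ((0, 1), ((2, 3), 4)) -- shape varies by seed
```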
saltstack/salt | salt/modules/boto_iam.py | https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/boto_iam.py#L1748-L1776 | def create_policy(policy_name, policy_document, path=None, description=None,
region=None, key=None, keyid=None, profile=None):
'''
Create a policy.
CLI Example:
.. code-block:: bash
salt myminion boto_iam.create_policy mypolicy '{"Version": "2012-10-17", "Statement": [{ "Effect": "Allow", "Action": ["s3:Get*", "s3:List*"], "Resource": ["arn:aws:s3:::my-bucket/shared/*"]}]}'
'''
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
if not isinstance(policy_document, six.string_types):
policy_document = salt.utils.json.dumps(policy_document)
params = {}
for arg in 'path', 'description':
if locals()[arg] is not None:
params[arg] = locals()[arg]
if policy_exists(policy_name, region, key, keyid, profile):
return True
try:
conn.create_policy(policy_name, policy_document, **params)
log.info('Created IAM policy %s.', policy_name)
except boto.exception.BotoServerError as e:
log.debug(e)
log.error('Failed to create IAM policy %s.', policy_name)
return False
return True | [
"def",
"create_policy",
"(",
"policy_name",
",",
"policy_document",
",",
"path",
"=",
"None",
",",
"description",
"=",
"None",
",",
"region",
"=",
"None",
",",
"key",
"=",
"None",
",",
"keyid",
"=",
"None",
",",
"profile",
"=",
"None",
")",
":",
"conn",
"=",
"_get_conn",
"(",
"region",
"=",
"region",
",",
"key",
"=",
"key",
",",
"keyid",
"=",
"keyid",
",",
"profile",
"=",
"profile",
")",
"if",
"not",
"isinstance",
"(",
"policy_document",
",",
"six",
".",
"string_types",
")",
":",
"policy_document",
"=",
"salt",
".",
"utils",
".",
"json",
".",
"dumps",
"(",
"policy_document",
")",
"params",
"=",
"{",
"}",
"for",
"arg",
"in",
"'path'",
",",
"'description'",
":",
"if",
"locals",
"(",
")",
"[",
"arg",
"]",
"is",
"not",
"None",
":",
"params",
"[",
"arg",
"]",
"=",
"locals",
"(",
")",
"[",
"arg",
"]",
"if",
"policy_exists",
"(",
"policy_name",
",",
"region",
",",
"key",
",",
"keyid",
",",
"profile",
")",
":",
"return",
"True",
"try",
":",
"conn",
".",
"create_policy",
"(",
"policy_name",
",",
"policy_document",
",",
"*",
"*",
"params",
")",
"log",
".",
"info",
"(",
"'Created IAM policy %s.'",
",",
"policy_name",
")",
"except",
"boto",
".",
"exception",
".",
"BotoServerError",
"as",
"e",
":",
"log",
".",
"debug",
"(",
"e",
")",
"log",
".",
"error",
"(",
"'Failed to create IAM policy %s.'",
",",
"policy_name",
")",
"return",
"False",
"return",
"True"
] | Create a policy.
CLI Example:
.. code-block:: bash
salt myminion boto_iam.create_policy mypolicy '{"Version": "2012-10-17", "Statement": [{ "Effect": "Allow", "Action": ["s3:Get*", "s3:List*"], "Resource": ["arn:aws:s3:::my-bucket/shared/*"]}]}' | [
"Create",
"a",
"policy",
"."
] | python | train |
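Calling the same function from Python instead of the CLI might look like this (region and credential values are placeholders):
policy_doc = {
    "Version": "2012-10-17",
    "Statement": [{"Effect": "Allow",
                   "Action": ["s3:Get*", "s3:List*"],
                   "Resource": ["arn:aws:s3:::my-bucket/shared/*"]}],
}
ok = create_policy('mypolicy', policy_doc,
                   description='read-only shared prefix',   # optional, forwarded via params
                   region='us-east-1', keyid='AKIA...', key='...')
print(ok)   # True on success (or if the policy already exists), False on error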
saltstack/salt | salt/pillar/__init__.py | https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/pillar/__init__.py#L948-L1026 | def ext_pillar(self, pillar, errors=None):
'''
Render the external pillar data
'''
if errors is None:
errors = []
try:
# Make sure that on-demand git_pillar is fetched before we try to
# compile the pillar data. git_pillar will fetch a remote when
# the git ext_pillar() func is run, but only for masterless.
if self.ext and 'git' in self.ext \
and self.opts.get('__role') != 'minion':
# Avoid circular import
import salt.utils.gitfs
import salt.pillar.git_pillar
git_pillar = salt.utils.gitfs.GitPillar(
self.opts,
self.ext['git'],
per_remote_overrides=salt.pillar.git_pillar.PER_REMOTE_OVERRIDES,
per_remote_only=salt.pillar.git_pillar.PER_REMOTE_ONLY,
global_only=salt.pillar.git_pillar.GLOBAL_ONLY)
git_pillar.fetch_remotes()
except TypeError:
# Handle malformed ext_pillar
pass
if 'ext_pillar' not in self.opts:
return pillar, errors
if not isinstance(self.opts['ext_pillar'], list):
errors.append('The "ext_pillar" option is malformed')
log.critical(errors[-1])
return pillar, errors
ext = None
# Bring in CLI pillar data
if self.pillar_override:
pillar = merge(
pillar,
self.pillar_override,
self.merge_strategy,
self.opts.get('renderer', 'yaml'),
self.opts.get('pillar_merge_lists', False))
for run in self.opts['ext_pillar']:
if not isinstance(run, dict):
errors.append('The "ext_pillar" option is malformed')
log.critical(errors[-1])
return {}, errors
if next(six.iterkeys(run)) in self.opts.get('exclude_ext_pillar', []):
continue
for key, val in six.iteritems(run):
if key not in self.ext_pillars:
log.critical(
'Specified ext_pillar interface %s is unavailable',
key
)
continue
try:
ext = self._external_pillar_data(pillar,
val,
key)
except Exception as exc:
errors.append(
'Failed to load ext_pillar {0}: {1}'.format(
key,
exc.__str__(),
)
)
log.error(
'Exception caught loading ext_pillar \'%s\':\n%s',
key, ''.join(traceback.format_tb(sys.exc_info()[2]))
)
if ext:
pillar = merge(
pillar,
ext,
self.merge_strategy,
self.opts.get('renderer', 'yaml'),
self.opts.get('pillar_merge_lists', False))
ext = None
return pillar, errors | [
"def",
"ext_pillar",
"(",
"self",
",",
"pillar",
",",
"errors",
"=",
"None",
")",
":",
"if",
"errors",
"is",
"None",
":",
"errors",
"=",
"[",
"]",
"try",
":",
"# Make sure that on-demand git_pillar is fetched before we try to",
"# compile the pillar data. git_pillar will fetch a remote when",
"# the git ext_pillar() func is run, but only for masterless.",
"if",
"self",
".",
"ext",
"and",
"'git'",
"in",
"self",
".",
"ext",
"and",
"self",
".",
"opts",
".",
"get",
"(",
"'__role'",
")",
"!=",
"'minion'",
":",
"# Avoid circular import",
"import",
"salt",
".",
"utils",
".",
"gitfs",
"import",
"salt",
".",
"pillar",
".",
"git_pillar",
"git_pillar",
"=",
"salt",
".",
"utils",
".",
"gitfs",
".",
"GitPillar",
"(",
"self",
".",
"opts",
",",
"self",
".",
"ext",
"[",
"'git'",
"]",
",",
"per_remote_overrides",
"=",
"salt",
".",
"pillar",
".",
"git_pillar",
".",
"PER_REMOTE_OVERRIDES",
",",
"per_remote_only",
"=",
"salt",
".",
"pillar",
".",
"git_pillar",
".",
"PER_REMOTE_ONLY",
",",
"global_only",
"=",
"salt",
".",
"pillar",
".",
"git_pillar",
".",
"GLOBAL_ONLY",
")",
"git_pillar",
".",
"fetch_remotes",
"(",
")",
"except",
"TypeError",
":",
"# Handle malformed ext_pillar",
"pass",
"if",
"'ext_pillar'",
"not",
"in",
"self",
".",
"opts",
":",
"return",
"pillar",
",",
"errors",
"if",
"not",
"isinstance",
"(",
"self",
".",
"opts",
"[",
"'ext_pillar'",
"]",
",",
"list",
")",
":",
"errors",
".",
"append",
"(",
"'The \"ext_pillar\" option is malformed'",
")",
"log",
".",
"critical",
"(",
"errors",
"[",
"-",
"1",
"]",
")",
"return",
"pillar",
",",
"errors",
"ext",
"=",
"None",
"# Bring in CLI pillar data",
"if",
"self",
".",
"pillar_override",
":",
"pillar",
"=",
"merge",
"(",
"pillar",
",",
"self",
".",
"pillar_override",
",",
"self",
".",
"merge_strategy",
",",
"self",
".",
"opts",
".",
"get",
"(",
"'renderer'",
",",
"'yaml'",
")",
",",
"self",
".",
"opts",
".",
"get",
"(",
"'pillar_merge_lists'",
",",
"False",
")",
")",
"for",
"run",
"in",
"self",
".",
"opts",
"[",
"'ext_pillar'",
"]",
":",
"if",
"not",
"isinstance",
"(",
"run",
",",
"dict",
")",
":",
"errors",
".",
"append",
"(",
"'The \"ext_pillar\" option is malformed'",
")",
"log",
".",
"critical",
"(",
"errors",
"[",
"-",
"1",
"]",
")",
"return",
"{",
"}",
",",
"errors",
"if",
"next",
"(",
"six",
".",
"iterkeys",
"(",
"run",
")",
")",
"in",
"self",
".",
"opts",
".",
"get",
"(",
"'exclude_ext_pillar'",
",",
"[",
"]",
")",
":",
"continue",
"for",
"key",
",",
"val",
"in",
"six",
".",
"iteritems",
"(",
"run",
")",
":",
"if",
"key",
"not",
"in",
"self",
".",
"ext_pillars",
":",
"log",
".",
"critical",
"(",
"'Specified ext_pillar interface %s is unavailable'",
",",
"key",
")",
"continue",
"try",
":",
"ext",
"=",
"self",
".",
"_external_pillar_data",
"(",
"pillar",
",",
"val",
",",
"key",
")",
"except",
"Exception",
"as",
"exc",
":",
"errors",
".",
"append",
"(",
"'Failed to load ext_pillar {0}: {1}'",
".",
"format",
"(",
"key",
",",
"exc",
".",
"__str__",
"(",
")",
",",
")",
")",
"log",
".",
"error",
"(",
"'Exception caught loading ext_pillar \\'%s\\':\\n%s'",
",",
"key",
",",
"''",
".",
"join",
"(",
"traceback",
".",
"format_tb",
"(",
"sys",
".",
"exc_info",
"(",
")",
"[",
"2",
"]",
")",
")",
")",
"if",
"ext",
":",
"pillar",
"=",
"merge",
"(",
"pillar",
",",
"ext",
",",
"self",
".",
"merge_strategy",
",",
"self",
".",
"opts",
".",
"get",
"(",
"'renderer'",
",",
"'yaml'",
")",
",",
"self",
".",
"opts",
".",
"get",
"(",
"'pillar_merge_lists'",
",",
"False",
")",
")",
"ext",
"=",
"None",
"return",
"pillar",
",",
"errors"
] | Render the external pillar data | [
"Render",
"the",
"external",
"pillar",
"data"
] | python | train |
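For orientation, the ext_pillar interfaces this loop dispatches to are plain functions; a minimal custom module sketch (the module name and returned keys are hypothetical):
# A custom ext_pillar module, e.g. placed in extension_modules/pillar/mypillar.py.
def ext_pillar(minion_id, pillar, *args, **kwargs):
    # Whatever dict is returned here is merged into the compiled pillar
    # according to merge_strategy, exactly as `ext` is merged above.
    return {'from_my_ext_pillar': {'minion': minion_id}}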
metagriffin/fso | fso/filesystemoverlay.py | https://github.com/metagriffin/fso/blob/c37701fbfdfde359a2044eb9420abe569a7b35e4/fso/filesystemoverlay.py#L486-L491 | def fso_symlink(self, source, link_name):
'overlays os.symlink()'
path = self.deref(link_name, to_parent=True)
if self._exists(path):
raise OSError(17, 'File exists')
self._addentry(OverlayEntry(self, path, stat.S_IFLNK, source)) | [
"def",
"fso_symlink",
"(",
"self",
",",
"source",
",",
"link_name",
")",
":",
"path",
"=",
"self",
".",
"deref",
"(",
"link_name",
",",
"to_parent",
"=",
"True",
")",
"if",
"self",
".",
"_exists",
"(",
"path",
")",
":",
"raise",
"OSError",
"(",
"17",
",",
"'File exists'",
")",
"self",
".",
"_addentry",
"(",
"OverlayEntry",
"(",
"self",
",",
"path",
",",
"stat",
".",
"S_IFLNK",
",",
"source",
")",
")"
] | overlays os.symlink() | [
"overlays",
"os",
".",
"symlink",
"()"
] | python | valid |
SatelliteQE/nailgun | nailgun/entity_mixins.py | https://github.com/SatelliteQE/nailgun/blob/c36d8c20862e87bf6975bd48ac1ca40a9e634eaa/nailgun/entity_mixins.py#L437-L490 | def path(self, which=None):
"""Return the path to the current entity.
Return the path to base entities of this entity's type if:
* ``which`` is ``'base'``, or
* ``which`` is ``None`` and instance attribute ``id`` is unset.
Return the path to this exact entity if instance attribute ``id`` is
set and:
* ``which`` is ``'self'``, or
* ``which`` is ``None``.
Raise :class:`NoSuchPathError` otherwise.
Child classes may choose to extend this method, especially if a child
entity offers more than the two URLs supported by default. If extended,
then the extending class should check for custom parameters before
calling ``super``::
def path(self, which):
if which == 'custom':
return urljoin(…)
super(ChildEntity, self).__init__(which)
This will allow the extending method to accept a custom parameter
without accidentally raising a :class:`NoSuchPathError`.
:param which: A string. Optional. Valid arguments are 'self' and
'base'.
:return: A string. A fully qualified URL.
:raises nailgun.entity_mixins.NoSuchPathError: If no path can be built.
"""
# It is OK that member ``self._meta`` is not found. Subclasses are
# required to set that attribute if they wish to use this method.
#
# Beware of leading and trailing slashes:
#
# urljoin('example.com', 'foo') => 'foo'
# urljoin('example.com/', 'foo') => 'example.com/foo'
# urljoin('example.com', '/foo') => '/foo'
# urljoin('example.com/', '/foo') => '/foo'
#
base = urljoin(
self._server_config.url + '/',
self._meta['api_path'] # pylint:disable=no-member
)
if which == 'base' or (which is None and not hasattr(self, 'id')):
return base
elif (which == 'self' or which is None) and hasattr(self, 'id'):
return urljoin(base + '/', str(self.id)) # pylint:disable=E1101
raise NoSuchPathError | [
"def",
"path",
"(",
"self",
",",
"which",
"=",
"None",
")",
":",
"# It is OK that member ``self._meta`` is not found. Subclasses are",
"# required to set that attribute if they wish to use this method.",
"#",
"# Beware of leading and trailing slashes:",
"#",
"# urljoin('example.com', 'foo') => 'foo'",
"# urljoin('example.com/', 'foo') => 'example.com/foo'",
"# urljoin('example.com', '/foo') => '/foo'",
"# urljoin('example.com/', '/foo') => '/foo'",
"#",
"base",
"=",
"urljoin",
"(",
"self",
".",
"_server_config",
".",
"url",
"+",
"'/'",
",",
"self",
".",
"_meta",
"[",
"'api_path'",
"]",
"# pylint:disable=no-member",
")",
"if",
"which",
"==",
"'base'",
"or",
"(",
"which",
"is",
"None",
"and",
"not",
"hasattr",
"(",
"self",
",",
"'id'",
")",
")",
":",
"return",
"base",
"elif",
"(",
"which",
"==",
"'self'",
"or",
"which",
"is",
"None",
")",
"and",
"hasattr",
"(",
"self",
",",
"'id'",
")",
":",
"return",
"urljoin",
"(",
"base",
"+",
"'/'",
",",
"str",
"(",
"self",
".",
"id",
")",
")",
"# pylint:disable=E1101",
"raise",
"NoSuchPathError"
] | Return the path to the current entity.
Return the path to base entities of this entity's type if:
* ``which`` is ``'base'``, or
* ``which`` is ``None`` and instance attribute ``id`` is unset.
Return the path to this exact entity if instance attribute ``id`` is
set and:
* ``which`` is ``'self'``, or
* ``which`` is ``None``.
Raise :class:`NoSuchPathError` otherwise.
Child classes may choose to extend this method, especially if a child
entity offers more than the two URLs supported by default. If extended,
then the extending class should check for custom parameters before
calling ``super``::
def path(self, which):
if which == 'custom':
return urljoin(…)
super(ChildEntity, self).__init__(which)
This will allow the extending method to accept a custom parameter
without accidentally raising a :class:`NoSuchPathError`.
:param which: A string. Optional. Valid arguments are 'self' and
'base'.
:return: A string. A fully qualified URL.
:raises nailgun.entity_mixins.NoSuchPathError: If no path can be built. | [
"Return",
"the",
"path",
"to",
"the",
"current",
"entity",
"."
] | python | train |
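A hedged illustration of the two URL shapes (the entity class and server URL are invented for the example):
entity = SomeEntity(server_config)      # hypothetical subclass with _meta['api_path'] set
entity.path('base')                     # -> '<server_url>/<api_path>'
entity.id = 5
entity.path()                           # -> '<server_url>/<api_path>/5' (which defaults to 'self')
entity.path('bogus')                    # -> raises NoSuchPathError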
petebachant/PXL | pxl/timeseries.py | https://github.com/petebachant/PXL/blob/d7d06cb74422e1ac0154741351fbecea080cfcc0/pxl/timeseries.py#L99-L119 | def build_plane_arrays(x, y, qlist):
"""Build a 2-D array out of data taken in the same plane, for contour
plotting.
"""
if type(qlist) is not list:
return_list = False
qlist = [qlist]
else:
return_list = True
xv = x[np.where(y==y[0])[0]]
yv = y[np.where(x==x[0])[0]]
qlistp = []
for n in range(len(qlist)):
qlistp.append(np.zeros((len(yv), len(xv))))
for j in range(len(qlist)):
for n in range(len(yv)):
i = np.where(y==yv[n])[0]
qlistp[j][n,:] = qlist[j][i]
if not return_list:
qlistp = qlistp[0]
return xv, yv, qlistp | [
"def",
"build_plane_arrays",
"(",
"x",
",",
"y",
",",
"qlist",
")",
":",
"if",
"type",
"(",
"qlist",
")",
"is",
"not",
"list",
":",
"return_list",
"=",
"False",
"qlist",
"=",
"[",
"qlist",
"]",
"else",
":",
"return_list",
"=",
"True",
"xv",
"=",
"x",
"[",
"np",
".",
"where",
"(",
"y",
"==",
"y",
"[",
"0",
"]",
")",
"[",
"0",
"]",
"]",
"yv",
"=",
"y",
"[",
"np",
".",
"where",
"(",
"x",
"==",
"x",
"[",
"0",
"]",
")",
"[",
"0",
"]",
"]",
"qlistp",
"=",
"[",
"]",
"for",
"n",
"in",
"range",
"(",
"len",
"(",
"qlist",
")",
")",
":",
"qlistp",
".",
"append",
"(",
"np",
".",
"zeros",
"(",
"(",
"len",
"(",
"yv",
")",
",",
"len",
"(",
"xv",
")",
")",
")",
")",
"for",
"j",
"in",
"range",
"(",
"len",
"(",
"qlist",
")",
")",
":",
"for",
"n",
"in",
"range",
"(",
"len",
"(",
"yv",
")",
")",
":",
"i",
"=",
"np",
".",
"where",
"(",
"y",
"==",
"yv",
"[",
"n",
"]",
")",
"[",
"0",
"]",
"qlistp",
"[",
"j",
"]",
"[",
"n",
",",
":",
"]",
"=",
"qlist",
"[",
"j",
"]",
"[",
"i",
"]",
"if",
"not",
"return_list",
":",
"qlistp",
"=",
"qlistp",
"[",
"0",
"]",
"return",
"xv",
",",
"yv",
",",
"qlistp"
] | Build a 2-D array out of data taken in the same plane, for contour
plotting. | [
"Build",
"a",
"2",
"-",
"D",
"array",
"out",
"of",
"data",
"taken",
"in",
"the",
"same",
"plane",
"for",
"contour",
"plotting",
"."
] | python | train |
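A small worked example for the record above (values are illustrative; x must vary fastest in the flattened arrays):
import numpy as np
x = np.array([0.0, 1.0, 2.0, 0.0, 1.0, 2.0])   # 3 x-stations repeated per y-row
y = np.array([0.0, 0.0, 0.0, 1.0, 1.0, 1.0])   # 2 y-rows
q = np.arange(6.0)                              # one scalar per sample point
xv, yv, qp = build_plane_arrays(x, y, q)
# xv -> [0. 1. 2.], yv -> [0. 1.], qp -> [[0. 1. 2.], [3. 4. 5.]]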
scottjbarr/bitfinex | bitfinex/client.py | https://github.com/scottjbarr/bitfinex/blob/03f7c71615fe38c2e28be0ebb761d3106ef0a51a/bitfinex/client.py#L383-L398 | def ticker(self, symbol):
"""
GET /ticker/:symbol
curl https://api.bitfinex.com/v1/ticker/btcusd
{
'ask': '562.9999',
'timestamp': '1395552290.70933607',
'bid': '562.25',
'last_price': u'562.25',
'mid': u'562.62495'}
"""
data = self._get(self.url_for(PATH_TICKER, (symbol)))
# convert all values to floats
return self._convert_to_floats(data) | [
"def",
"ticker",
"(",
"self",
",",
"symbol",
")",
":",
"data",
"=",
"self",
".",
"_get",
"(",
"self",
".",
"url_for",
"(",
"PATH_TICKER",
",",
"(",
"symbol",
")",
")",
")",
"# convert all values to floats",
"return",
"self",
".",
"_convert_to_floats",
"(",
"data",
")"
] | GET /ticker/:symbol
curl https://api.bitfinex.com/v1/ticker/btcusd
{
'ask': '562.9999',
'timestamp': '1395552290.70933607',
'bid': '562.25',
'last_price': u'562.25',
'mid': u'562.62495'} | [
"GET",
"/",
"ticker",
"/",
":",
"symbol"
] | python | train |
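Hedged usage sketch (assumes this module's client class; no credentials are needed for the public ticker):
client = Client()                        # assumed public-API client from this module
tick = client.ticker('btcusd')
spread = tick['ask'] - tick['bid']       # safe arithmetic: values were converted to floats
print(tick['mid'], spread)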
GoogleCloudPlatform/appengine-mapreduce | python/src/mapreduce/model.py | https://github.com/GoogleCloudPlatform/appengine-mapreduce/blob/2045eb3605b6ecb40c83d11dd5442a89fe5c5dd6/python/src/mapreduce/model.py#L803-L817 | def to_dict(self):
"""Convert state to dictionary to save in task payload."""
result = {"mapreduce_spec": self.mapreduce_spec.to_json_str(),
"shard_id": self.shard_id,
"slice_id": str(self.slice_id),
"input_reader_state": self.input_reader.to_json_str(),
"initial_input_reader_state":
self.initial_input_reader.to_json_str(),
"retries": str(self.retries)}
if self.output_writer:
result["output_writer_state"] = self.output_writer.to_json_str()
serialized_handler = util.try_serialize_handler(self.handler)
if serialized_handler:
result["serialized_handler"] = serialized_handler
return result | [
"def",
"to_dict",
"(",
"self",
")",
":",
"result",
"=",
"{",
"\"mapreduce_spec\"",
":",
"self",
".",
"mapreduce_spec",
".",
"to_json_str",
"(",
")",
",",
"\"shard_id\"",
":",
"self",
".",
"shard_id",
",",
"\"slice_id\"",
":",
"str",
"(",
"self",
".",
"slice_id",
")",
",",
"\"input_reader_state\"",
":",
"self",
".",
"input_reader",
".",
"to_json_str",
"(",
")",
",",
"\"initial_input_reader_state\"",
":",
"self",
".",
"initial_input_reader",
".",
"to_json_str",
"(",
")",
",",
"\"retries\"",
":",
"str",
"(",
"self",
".",
"retries",
")",
"}",
"if",
"self",
".",
"output_writer",
":",
"result",
"[",
"\"output_writer_state\"",
"]",
"=",
"self",
".",
"output_writer",
".",
"to_json_str",
"(",
")",
"serialized_handler",
"=",
"util",
".",
"try_serialize_handler",
"(",
"self",
".",
"handler",
")",
"if",
"serialized_handler",
":",
"result",
"[",
"\"serialized_handler\"",
"]",
"=",
"serialized_handler",
"return",
"result"
] | Convert state to dictionary to save in task payload. | [
"Convert",
"state",
"to",
"dictionary",
"to",
"save",
"in",
"task",
"payload",
"."
] | python | train |
federico123579/Trading212-API | tradingAPI/low_level.py | https://github.com/federico123579/Trading212-API/blob/0fab20b71a2348e72bbe76071b81f3692128851f/tradingAPI/low_level.py#L526-L531 | def new_pos(self, html_div):
"""factory method pattern"""
pos = self.Position(self, html_div)
pos.bind_mov()
self.positions.append(pos)
return pos | [
"def",
"new_pos",
"(",
"self",
",",
"html_div",
")",
":",
"pos",
"=",
"self",
".",
"Position",
"(",
"self",
",",
"html_div",
")",
"pos",
".",
"bind_mov",
"(",
")",
"self",
".",
"positions",
".",
"append",
"(",
"pos",
")",
"return",
"pos"
] | factory method pattern | [
"factory",
"method",
"pattern"
] | python | train |
konstantint/PassportEye | passporteye/mrz/text.py | https://github.com/konstantint/PassportEye/blob/b32afba0f5dc4eb600c4edc4f49e5d49959c5415/passporteye/mrz/text.py#L183-L227 | def to_dict(self):
"""Converts this object to an (ordered) dictionary of field-value pairs.
>>> m = MRZ(['IDAUT10000999<6<<<<<<<<<<<<<<<', '7109094F1112315AUT<<<<<<<<<<<6', 'MUSTERFRAU<<ISOLDE<<<<<<<<<<<<']).to_dict()
>>> assert m['type'] == 'ID' and m['country'] == 'AUT' and m['number'] == '10000999<'
>>> assert m['valid_number'] and m['valid_date_of_birth'] and m['valid_expiration_date'] and not m['valid_composite']
"""
result = OrderedDict()
result['mrz_type'] = self.mrz_type
result['valid_score'] = self.valid_score
if self.mrz_type is not None:
result['type'] = self.type
result['country'] = self.country
result['number'] = self.number
result['date_of_birth'] = self.date_of_birth
result['expiration_date'] = self.expiration_date
result['nationality'] = self.nationality
result['sex'] = self.sex
result['names'] = self.names
result['surname'] = self.surname
if self.mrz_type == 'TD1':
result['optional1'] = self.optional1
result['optional2'] = self.optional2
elif self.mrz_type in ['TD2', 'MRVA', 'MRVB']:
result['optional1'] = self.optional1
else:
result['personal_number'] = self.personal_number
result['check_number'] = self.check_number
result['check_date_of_birth'] = self.check_date_of_birth
result['check_expiration_date'] = self.check_expiration_date
if self.mrz_type not in ['MRVA', 'MRVB']:
result['check_composite'] = self.check_composite
if self.mrz_type == 'TD3':
result['check_personal_number'] = self.check_personal_number
result['valid_number'] = self.valid_check_digits[0]
result['valid_date_of_birth'] = self.valid_check_digits[1]
result['valid_expiration_date'] = self.valid_check_digits[2]
if self.mrz_type not in ['MRVA', 'MRVB']:
result['valid_composite'] = self.valid_check_digits[3]
if self.mrz_type == 'TD3':
result['valid_personal_number'] = self.valid_check_digits[4]
if 'method' in self.aux:
result['method'] = self.aux['method']
return result | [
"def",
"to_dict",
"(",
"self",
")",
":",
"result",
"=",
"OrderedDict",
"(",
")",
"result",
"[",
"'mrz_type'",
"]",
"=",
"self",
".",
"mrz_type",
"result",
"[",
"'valid_score'",
"]",
"=",
"self",
".",
"valid_score",
"if",
"self",
".",
"mrz_type",
"is",
"not",
"None",
":",
"result",
"[",
"'type'",
"]",
"=",
"self",
".",
"type",
"result",
"[",
"'country'",
"]",
"=",
"self",
".",
"country",
"result",
"[",
"'number'",
"]",
"=",
"self",
".",
"number",
"result",
"[",
"'date_of_birth'",
"]",
"=",
"self",
".",
"date_of_birth",
"result",
"[",
"'expiration_date'",
"]",
"=",
"self",
".",
"expiration_date",
"result",
"[",
"'nationality'",
"]",
"=",
"self",
".",
"nationality",
"result",
"[",
"'sex'",
"]",
"=",
"self",
".",
"sex",
"result",
"[",
"'names'",
"]",
"=",
"self",
".",
"names",
"result",
"[",
"'surname'",
"]",
"=",
"self",
".",
"surname",
"if",
"self",
".",
"mrz_type",
"==",
"'TD1'",
":",
"result",
"[",
"'optional1'",
"]",
"=",
"self",
".",
"optional1",
"result",
"[",
"'optional2'",
"]",
"=",
"self",
".",
"optional2",
"elif",
"self",
".",
"mrz_type",
"in",
"[",
"'TD2'",
",",
"'MRVA'",
",",
"'MRVB'",
"]",
":",
"result",
"[",
"'optional1'",
"]",
"=",
"self",
".",
"optional1",
"else",
":",
"result",
"[",
"'personal_number'",
"]",
"=",
"self",
".",
"personal_number",
"result",
"[",
"'check_number'",
"]",
"=",
"self",
".",
"check_number",
"result",
"[",
"'check_date_of_birth'",
"]",
"=",
"self",
".",
"check_date_of_birth",
"result",
"[",
"'check_expiration_date'",
"]",
"=",
"self",
".",
"check_expiration_date",
"if",
"self",
".",
"mrz_type",
"not",
"in",
"[",
"'MRVA'",
",",
"'MRVB'",
"]",
":",
"result",
"[",
"'check_composite'",
"]",
"=",
"self",
".",
"check_composite",
"if",
"self",
".",
"mrz_type",
"==",
"'TD3'",
":",
"result",
"[",
"'check_personal_number'",
"]",
"=",
"self",
".",
"check_personal_number",
"result",
"[",
"'valid_number'",
"]",
"=",
"self",
".",
"valid_check_digits",
"[",
"0",
"]",
"result",
"[",
"'valid_date_of_birth'",
"]",
"=",
"self",
".",
"valid_check_digits",
"[",
"1",
"]",
"result",
"[",
"'valid_expiration_date'",
"]",
"=",
"self",
".",
"valid_check_digits",
"[",
"2",
"]",
"if",
"self",
".",
"mrz_type",
"not",
"in",
"[",
"'MRVA'",
",",
"'MRVB'",
"]",
":",
"result",
"[",
"'valid_composite'",
"]",
"=",
"self",
".",
"valid_check_digits",
"[",
"3",
"]",
"if",
"self",
".",
"mrz_type",
"==",
"'TD3'",
":",
"result",
"[",
"'valid_personal_number'",
"]",
"=",
"self",
".",
"valid_check_digits",
"[",
"4",
"]",
"if",
"'method'",
"in",
"self",
".",
"aux",
":",
"result",
"[",
"'method'",
"]",
"=",
"self",
".",
"aux",
"[",
"'method'",
"]",
"return",
"result"
] | Converts this object to an (ordered) dictionary of field-value pairs.
>>> m = MRZ(['IDAUT10000999<6<<<<<<<<<<<<<<<', '7109094F1112315AUT<<<<<<<<<<<6', 'MUSTERFRAU<<ISOLDE<<<<<<<<<<<<']).to_dict()
>>> assert m['type'] == 'ID' and m['country'] == 'AUT' and m['number'] == '10000999<'
>>> assert m['valid_number'] and m['valid_date_of_birth'] and m['valid_expiration_date'] and not m['valid_composite'] | [
"Converts",
"this",
"object",
"to",
"an",
"(",
"ordered",
")",
"dictionary",
"of",
"field",
"-",
"value",
"pairs",
"."
] | python | train |
wasp/waspy | waspy/transports/rabbit_patches.py | https://github.com/wasp/waspy/blob/31cc352f300a089f9607d7f13d93591d4c69d5ec/waspy/transports/rabbit_patches.py#L99-L122 | async def _write_frame_awaiting_response(self, waiter_id, frame, request,
no_wait, check_open=True, drain=True):
'''Write a frame and set a waiter for
the response (unless no_wait is set)'''
if no_wait:
await self._write_frame(frame, request, check_open=check_open,
drain=drain)
return None
f = self._set_waiter(waiter_id)
try:
await self._write_frame(frame, request, check_open=check_open,
drain=drain)
except Exception:
self._get_waiter(waiter_id)
f.cancel()
raise
result = await f
try:
self._get_waiter(waiter_id)
except aioamqp.SynchronizationError:
# no waiter to get
pass
return result | [
"async",
"def",
"_write_frame_awaiting_response",
"(",
"self",
",",
"waiter_id",
",",
"frame",
",",
"request",
",",
"no_wait",
",",
"check_open",
"=",
"True",
",",
"drain",
"=",
"True",
")",
":",
"if",
"no_wait",
":",
"await",
"self",
".",
"_write_frame",
"(",
"frame",
",",
"request",
",",
"check_open",
"=",
"check_open",
",",
"drain",
"=",
"drain",
")",
"return",
"None",
"f",
"=",
"self",
".",
"_set_waiter",
"(",
"waiter_id",
")",
"try",
":",
"await",
"self",
".",
"_write_frame",
"(",
"frame",
",",
"request",
",",
"check_open",
"=",
"check_open",
",",
"drain",
"=",
"drain",
")",
"except",
"Exception",
":",
"self",
".",
"_get_waiter",
"(",
"waiter_id",
")",
"f",
".",
"cancel",
"(",
")",
"raise",
"result",
"=",
"await",
"f",
"try",
":",
"self",
".",
"_get_waiter",
"(",
"waiter_id",
")",
"except",
"aioamqp",
".",
"SynchronizationError",
":",
"# no waiter to get",
"pass",
"return",
"result"
] | Write a frame and set a waiter for
the response (unless no_wait is set) | [
"Write",
"a",
"frame",
"and",
"set",
"a",
"waiter",
"for",
"the",
"response",
"(",
"unless",
"no_wait",
"is",
"set",
")"
] | python | train |
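The waiter pattern above, reduced to its asyncio essentials (a generic sketch, not aioamqp's actual internals):
import asyncio
class WaiterDemo:
    # Generic write-then-wait: register a future under an id, send the frame,
    # and let the connection's reader task resolve the future on response.
    def __init__(self):
        self._waiters = {}
    async def request(self, waiter_id, send_coro):
        fut = asyncio.get_event_loop().create_future()
        self._waiters[waiter_id] = fut
        try:
            await send_coro
        except Exception:
            self._waiters.pop(waiter_id, None)
            fut.cancel()
            raise
        result = await fut                  # reader task calls fut.set_result(frame)
        self._waiters.pop(waiter_id, None)  # idempotent cleanup, like _get_waiter
        return result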
frostming/marko | marko/parser.py | https://github.com/frostming/marko/blob/1cd030b665fa37bad1f8b3a25a89ce1a7c491dde/marko/parser.py#L65-L90 | def parse(self, source_or_text):
"""Do the actual parsing and returns an AST or parsed element.
:param source_or_text: the text or source object.
Based on the type, it will do following:
- text: returns the parsed Document element.
- source: parse the source and returns the parsed children as a list.
"""
if isinstance(source_or_text, string_types):
block.parser = self
inline.parser = self
return self.block_elements['Document'](source_or_text)
element_list = self._build_block_element_list()
ast = []
while not source_or_text.exhausted:
for ele_type in element_list:
if ele_type.match(source_or_text):
result = ele_type.parse(source_or_text)
if not hasattr(result, 'priority'):
result = ele_type(result)
ast.append(result)
break
else:
# Quit the current parsing and go back to the last level.
break
return ast | [
"def",
"parse",
"(",
"self",
",",
"source_or_text",
")",
":",
"if",
"isinstance",
"(",
"source_or_text",
",",
"string_types",
")",
":",
"block",
".",
"parser",
"=",
"self",
"inline",
".",
"parser",
"=",
"self",
"return",
"self",
".",
"block_elements",
"[",
"'Document'",
"]",
"(",
"source_or_text",
")",
"element_list",
"=",
"self",
".",
"_build_block_element_list",
"(",
")",
"ast",
"=",
"[",
"]",
"while",
"not",
"source_or_text",
".",
"exhausted",
":",
"for",
"ele_type",
"in",
"element_list",
":",
"if",
"ele_type",
".",
"match",
"(",
"source_or_text",
")",
":",
"result",
"=",
"ele_type",
".",
"parse",
"(",
"source_or_text",
")",
"if",
"not",
"hasattr",
"(",
"result",
",",
"'priority'",
")",
":",
"result",
"=",
"ele_type",
"(",
"result",
")",
"ast",
".",
"append",
"(",
"result",
")",
"break",
"else",
":",
"# Quit the current parsing and go back to the last level.",
"break",
"return",
"ast"
] | Do the actual parsing and returns an AST or parsed element.
:param source_or_text: the text or source object.
Based on the type, it will do following:
- text: returns the parsed Document element.
- source: parse the source and returns the parsed children as a list. | [
"Do",
"the",
"actual",
"parsing",
"and",
"returns",
"an",
"AST",
"or",
"parsed",
"element",
"."
] | python | train |
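Usage of the text-input mode (a hedged sketch; the Document class is looked up from block_elements as in the body above, and a default-constructed Parser is assumed to have the standard elements registered):
parser = Parser()
doc = parser.parse('# Title\n\nSome *text*.\n')
print(type(doc).__name__)                  # -> 'Document'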
AdvancedClimateSystems/uModbus | umodbus/server/serial/rtu.py | https://github.com/AdvancedClimateSystems/uModbus/blob/0560a42308003f4072d988f28042b8d55b694ad4/umodbus/server/serial/rtu.py#L69-L77 | def create_response_adu(self, meta_data, response_pdu):
""" Build response ADU from meta data and response PDU and return it.
:param meta_data: A dict with meta data.
:param response_pdu: A bytearray containing response PDU.
:return: A bytearray containing response ADU.
"""
first_part_adu = struct.pack('>B', meta_data['unit_id']) + response_pdu
return first_part_adu + get_crc(first_part_adu) | [
"def",
"create_response_adu",
"(",
"self",
",",
"meta_data",
",",
"response_pdu",
")",
":",
"first_part_adu",
"=",
"struct",
".",
"pack",
"(",
"'>B'",
",",
"meta_data",
"[",
"'unit_id'",
"]",
")",
"+",
"response_pdu",
"return",
"first_part_adu",
"+",
"get_crc",
"(",
"first_part_adu",
")"
] | Build response ADU from meta data and response PDU and return it.
:param meta_data: A dict with meta data.
:param response_pdu: A bytearray containing response PDU.
:return: A bytearray containing response ADU. | [
"Build",
"response",
"ADU",
"from",
"meta",
"data",
"and",
"response",
"PDU",
"and",
"return",
"it",
"."
] | python | train |
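For reference, the checksum appended by get_crc is the standard Modbus RTU CRC-16; a self-contained sketch (the unit id and PDU bytes are illustrative):
import struct
def crc16(data):
    crc = 0xFFFF                                  # Modbus CRC-16: init 0xFFFF, poly 0xA001 (reflected)
    for byte in bytearray(data):
        crc ^= byte
        for _ in range(8):
            crc = (crc >> 1) ^ 0xA001 if crc & 1 else crc >> 1
    return struct.pack('<H', crc)                 # transmitted low byte first
adu = struct.pack('>B', 1) + b'\x03\x02\x00\x2a'  # unit 1 + illustrative response PDU
adu += crc16(adu)                                 # full RTU response ADU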
johnnoone/aioconsul | aioconsul/client/acl_endpoint.py | https://github.com/johnnoone/aioconsul/blob/02f7a529d7dc2e49bed942111067aa5faf320e90/aioconsul/client/acl_endpoint.py#L15-L59 | async def create(self, token):
"""Creates a new token with a given policy
Parameters:
token (Object): Token specification
Returns:
Object: token ID
The create endpoint is used to make a new token.
A token has a name, a type, and a set of ACL rules.
The request body may take the form::
{
"Name": "my-app-token",
"Type": "client",
"Rules": ""
}
None of the fields are mandatory. The **Name** and **Rules** fields
default to being blank, and the **Type** defaults to "client".
**Name** is opaque to Consul. To aid human operators, it should
be a meaningful indicator of the ACL's purpose.
**Type** is either **client** or **management**. A management token
is comparable to a root user and has the ability to perform any action
including creating, modifying and deleting ACLs.
**ID** field may be provided, and if omitted a random UUID will be
generated.
The format of **Rules** is
`documented here <https://www.consul.io/docs/internals/acl.html>`_.
A successful response body will return the **ID** of the newly
created ACL, like so::
{
"ID": "adf4238a-882b-9ddc-4a9d-5b6758e4159e"
}
"""
token = encode_token(token)
response = await self._api.put("/v1/acl/create", data=token)
return response.body | [
"async",
"def",
"create",
"(",
"self",
",",
"token",
")",
":",
"token",
"=",
"encode_token",
"(",
"token",
")",
"response",
"=",
"await",
"self",
".",
"_api",
".",
"put",
"(",
"\"/v1/acl/create\"",
",",
"data",
"=",
"token",
")",
"return",
"response",
".",
"body"
] | Creates a new token with a given policy
Parameters:
token (Object): Token specification
Returns:
Object: token ID
The create endpoint is used to make a new token.
A token has a name, a type, and a set of ACL rules.
The request body may take the form::
{
"Name": "my-app-token",
"Type": "client",
"Rules": ""
}
None of the fields are mandatory. The **Name** and **Rules** fields
default to being blank, and the **Type** defaults to "client".
**Name** is opaque to Consul. To aid human operators, it should
be a meaningful indicator of the ACL's purpose.
**Type** is either **client** or **management**. A management token
is comparable to a root user and has the ability to perform any action
including creating, modifying and deleting ACLs.
**ID** field may be provided, and if omitted a random UUID will be
generated.
The format of **Rules** is
`documented here <https://www.consul.io/docs/internals/acl.html>`_.
A successful response body will return the **ID** of the newly
created ACL, like so::
{
"ID": "adf4238a-882b-9ddc-4a9d-5b6758e4159e"
} | [
"Creates",
"a",
"new",
"token",
"with",
"a",
"given",
"policy"
] | python | train |
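An asyncio usage sketch for the endpoint (client construction and the acl attribute path are left illustrative):
import asyncio
async def make_token(client):
    token = await client.acl.create({'Name': 'my-app-token',
                                     'Type': 'client',
                                     'Rules': ''})
    return token['ID']                    # UUID of the new ACL
# asyncio.get_event_loop().run_until_complete(make_token(client))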
ekmmetering/ekmmeters | ekmmeters.py | https://github.com/ekmmetering/ekmmeters/blob/b3748bdf30263bfa46ea40157bdf8df2522e1904/ekmmeters.py#L1875-L1912 | def splitEkmDate(dateint):
"""Break out a date from Omnimeter read.
Note a corrupt date will raise an exception when you
convert it to int to hand to this method.
Args:
dateint (int): Omnimeter datetime as int.
Returns:
tuple: Named tuple which breaks out as follows:
========== =====================
yy Last 2 digits of year
mm Month 1-12
dd Day 1-31
weekday Zero based weekday
hh Hour 0-23
minutes Minutes 0-59
ss Seconds 0-59
========== =====================
"""
date_str = str(dateint)
dt = namedtuple('EkmDate', ['yy', 'mm', 'dd', 'weekday', 'hh', 'minutes', 'ss'])
if len(date_str) != 14:
dt.yy = dt.mm = dt.dd = dt.weekday = dt.hh = dt.minutes = dt.ss = 0
return dt
dt.yy = int(date_str[0:2])
dt.mm = int(date_str[2:4])
dt.dd = int(date_str[4:6])
dt.weekday = int(date_str[6:8])
dt.hh = int(date_str[8:10])
dt.minutes = int(date_str[10:12])
dt.ss = int(date_str[12:14])
return dt | [
"def",
"splitEkmDate",
"(",
"dateint",
")",
":",
"date_str",
"=",
"str",
"(",
"dateint",
")",
"dt",
"=",
"namedtuple",
"(",
"'EkmDate'",
",",
"[",
"'yy'",
",",
"'mm'",
",",
"'dd'",
",",
"'weekday'",
",",
"'hh'",
",",
"'minutes'",
",",
"'ss'",
"]",
")",
"if",
"len",
"(",
"date_str",
")",
"!=",
"14",
":",
"dt",
".",
"yy",
"=",
"dt",
".",
"mm",
"=",
"dt",
".",
"dd",
"=",
"dt",
".",
"weekday",
"=",
"dt",
".",
"hh",
"=",
"dt",
".",
"minutes",
"=",
"dt",
".",
"ss",
"=",
"0",
"return",
"dt",
"dt",
".",
"yy",
"=",
"int",
"(",
"date_str",
"[",
"0",
":",
"2",
"]",
")",
"dt",
".",
"mm",
"=",
"int",
"(",
"date_str",
"[",
"2",
":",
"4",
"]",
")",
"dt",
".",
"dd",
"=",
"int",
"(",
"date_str",
"[",
"4",
":",
"6",
"]",
")",
"dt",
".",
"weekday",
"=",
"int",
"(",
"date_str",
"[",
"6",
":",
"8",
"]",
")",
"dt",
".",
"hh",
"=",
"int",
"(",
"date_str",
"[",
"8",
":",
"10",
"]",
")",
"dt",
".",
"minutes",
"=",
"int",
"(",
"date_str",
"[",
"10",
":",
"12",
"]",
")",
"dt",
".",
"ss",
"=",
"int",
"(",
"date_str",
"[",
"12",
":",
"14",
"]",
")",
"return",
"dt"
] | Break out a date from Omnimeter read.
Note a corrupt date will raise an exception when you
convert it to int to hand to this method.
Args:
dateint (int): Omnimeter datetime as int.
Returns:
tuple: Named tuple which breaks out as follows:
========== =====================
yy Last 2 digits of year
mm Month 1-12
dd Day 1-31
weekday Zero based weekday
hh Hour 0-23
minutes Minutes 0-59
ss Seconds 0-59
========== ===================== | [
"Break",
"out",
"a",
"date",
"from",
"Omnimeter",
"read",
"."
] | python | test |
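A worked example of the 14-digit layout above (two digits per field, yy mm dd weekday hh mm ss; the date itself is illustrative):
dt = splitEkmDate(16031504102533)       # 2016-03-15, weekday 4, 10:25:33
assert (dt.yy, dt.mm, dt.dd) == (16, 3, 15)
assert (dt.weekday, dt.hh, dt.minutes, dt.ss) == (4, 10, 25, 33)
bad = splitEkmDate(123)                 # wrong length: every field is zeroed
assert bad.yy == 0 and bad.ss == 0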
alan-turing-institute/topic-modelling-tools | topicmodels/LDA/gibbs.py | https://github.com/alan-turing-institute/topic-modelling-tools/blob/f0cf90cdd06f1072e824b446f201c7469b9de5df/topicmodels/LDA/gibbs.py#L153-L167 | def tt_comp(self, sampled_topics):
"""
Compute term-topic matrix from sampled_topics.
"""
samples = sampled_topics.shape[0]
tt = np.zeros((self.V, self.K, samples))
for s in range(samples):
tt[:, :, s] = \
samplers_lda.tt_comp(self.tokens, sampled_topics[s, :],
self.N, self.V, self.K, self.beta)
return tt | [
"def",
"tt_comp",
"(",
"self",
",",
"sampled_topics",
")",
":",
"samples",
"=",
"sampled_topics",
".",
"shape",
"[",
"0",
"]",
"tt",
"=",
"np",
".",
"zeros",
"(",
"(",
"self",
".",
"V",
",",
"self",
".",
"K",
",",
"samples",
")",
")",
"for",
"s",
"in",
"range",
"(",
"samples",
")",
":",
"tt",
"[",
":",
",",
":",
",",
"s",
"]",
"=",
"samplers_lda",
".",
"tt_comp",
"(",
"self",
".",
"tokens",
",",
"sampled_topics",
"[",
"s",
",",
":",
"]",
",",
"self",
".",
"N",
",",
"self",
".",
"V",
",",
"self",
".",
"K",
",",
"self",
".",
"beta",
")",
"return",
"tt"
] | Compute term-topic matrix from sampled_topics. | [
"Compute",
"term",
"-",
"topic",
"matrix",
"from",
"sampled_topics",
"."
] | python | train |
titusjan/argos | argos/inspector/pgplugins/imageplot2d.py | https://github.com/titusjan/argos/blob/20d0a3cae26c36ea789a5d219c02ca7df21279dd/argos/inspector/pgplugins/imageplot2d.py#L244-L249 | def setVerCrossPlotAutoRangeOn(self, axisNumber):
""" Sets the vertical cross-hair plot's auto-range on for the axis with number axisNumber.
:param axisNumber: 0 (X-axis), 1 (Y-axis), 2 (Both X and Y axes).
"""
setXYAxesAutoRangeOn(self, self.verCrossPlotRangeCti, self.yAxisRangeCti, axisNumber) | [
"def",
"setVerCrossPlotAutoRangeOn",
"(",
"self",
",",
"axisNumber",
")",
":",
"setXYAxesAutoRangeOn",
"(",
"self",
",",
"self",
".",
"verCrossPlotRangeCti",
",",
"self",
".",
"yAxisRangeCti",
",",
"axisNumber",
")"
] | Sets the vertical cross-hair plot's auto-range on for the axis with number axisNumber.
:param axisNumber: 0 (X-axis), 1 (Y-axis), 2 (Both X and Y axes). | [
"Sets",
"the",
"vertical",
"cross",
"-",
"hair",
"plot",
"s",
"auto",
"-",
"range",
"on",
"for",
"the",
"axis",
"with",
"number",
"axisNumber",
"."
] | python | train |
Erotemic/utool | utool/util_dict.py | https://github.com/Erotemic/utool/blob/3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a/utool/util_dict.py#L571-L614 | def all_dict_combinations(varied_dict):
"""
all_dict_combinations
Args:
varied_dict (dict): a dict with lists of possible parameter settings
Returns:
list: dict_list a list of dicts corresponding to all combinations of params settings
CommandLine:
python -m utool.util_dict --test-all_dict_combinations
Example:
>>> # ENABLE_DOCTEST
>>> from utool.util_dict import * # NOQA
>>> import utool as ut
>>> varied_dict = {'logdist_weight': [0.0, 1.0], 'pipeline_root': ['vsmany'], 'sv_on': [True, False, None]}
>>> dict_list = all_dict_combinations(varied_dict)
>>> result = str(ut.repr4(dict_list))
>>> print(result)
[
{'logdist_weight': 0.0, 'pipeline_root': 'vsmany', 'sv_on': True},
{'logdist_weight': 0.0, 'pipeline_root': 'vsmany', 'sv_on': False},
{'logdist_weight': 0.0, 'pipeline_root': 'vsmany', 'sv_on': None},
{'logdist_weight': 1.0, 'pipeline_root': 'vsmany', 'sv_on': True},
{'logdist_weight': 1.0, 'pipeline_root': 'vsmany', 'sv_on': False},
{'logdist_weight': 1.0, 'pipeline_root': 'vsmany', 'sv_on': None},
]
"""
#tups_list = [[(key, val) for val in val_list]
# if isinstance(val_list, (list, tuple))
# else [(key, val_list)]
# for (key, val_list) in six.iteritems(varied_dict)]
tups_list = [[(key, val) for val in val_list]
if isinstance(val_list, (list))
#if isinstance(val_list, (list, tuple))
else [(key, val_list)]
for (key, val_list) in iteritems_sorted(varied_dict)]
dict_list = [dict(tups) for tups in it.product(*tups_list)]
#dict_list = [{key: val for (key, val) in tups} for tups in it.product(*tups_list)]
#from collections import OrderedDict
#dict_list = [OrderedDict([(key, val) for (key, val) in tups]) for tups in it.product(*tups_list)]
return dict_list | [
"def",
"all_dict_combinations",
"(",
"varied_dict",
")",
":",
"#tups_list = [[(key, val) for val in val_list]",
"# if isinstance(val_list, (list, tuple))",
"# else [(key, val_list)]",
"# for (key, val_list) in six.iteritems(varied_dict)]",
"tups_list",
"=",
"[",
"[",
"(",
"key",
",",
"val",
")",
"for",
"val",
"in",
"val_list",
"]",
"if",
"isinstance",
"(",
"val_list",
",",
"(",
"list",
")",
")",
"#if isinstance(val_list, (list, tuple))",
"else",
"[",
"(",
"key",
",",
"val_list",
")",
"]",
"for",
"(",
"key",
",",
"val_list",
")",
"in",
"iteritems_sorted",
"(",
"varied_dict",
")",
"]",
"dict_list",
"=",
"[",
"dict",
"(",
"tups",
")",
"for",
"tups",
"in",
"it",
".",
"product",
"(",
"*",
"tups_list",
")",
"]",
"#dict_list = [{key: val for (key, val) in tups} for tups in it.product(*tups_list)]",
"#from collections import OrderedDict",
"#dict_list = [OrderedDict([(key, val) for (key, val) in tups]) for tups in it.product(*tups_list)]",
"return",
"dict_list"
] | all_dict_combinations
Args:
varied_dict (dict): a dict with lists of possible parameter settings
Returns:
list: dict_list a list of dicts corresponding to all combinations of params settings
CommandLine:
python -m utool.util_dict --test-all_dict_combinations
Example:
>>> # ENABLE_DOCTEST
>>> from utool.util_dict import * # NOQA
>>> import utool as ut
>>> varied_dict = {'logdist_weight': [0.0, 1.0], 'pipeline_root': ['vsmany'], 'sv_on': [True, False, None]}
>>> dict_list = all_dict_combinations(varied_dict)
>>> result = str(ut.repr4(dict_list))
>>> print(result)
[
{'logdist_weight': 0.0, 'pipeline_root': 'vsmany', 'sv_on': True},
{'logdist_weight': 0.0, 'pipeline_root': 'vsmany', 'sv_on': False},
{'logdist_weight': 0.0, 'pipeline_root': 'vsmany', 'sv_on': None},
{'logdist_weight': 1.0, 'pipeline_root': 'vsmany', 'sv_on': True},
{'logdist_weight': 1.0, 'pipeline_root': 'vsmany', 'sv_on': False},
{'logdist_weight': 1.0, 'pipeline_root': 'vsmany', 'sv_on': None},
] | [
"all_dict_combinations"
] | python | train |
fudge-py/fudge | fudge/__init__.py | https://github.com/fudge-py/fudge/blob/b283fbc1a41900f3f5845b10b8c2ef9136a67ebc/fudge/__init__.py#L777-L805 | def expects_call(self):
"""The fake must be called.
.. doctest::
:hide:
>>> import fudge
>>> fudge.clear_expectations()
>>> fudge.clear_calls()
This is useful for when you stub out a function
as opposed to a class. For example::
>>> import fudge
>>> remove = fudge.Fake('os.remove').expects_call()
>>> fudge.verify()
Traceback (most recent call last):
...
AssertionError: fake:os.remove() was not called
.. doctest::
:hide:
>>> fudge.clear_expectations()
"""
self._callable = ExpectedCall(self, call_name=self._name,
callable=True)
return self | [
"def",
"expects_call",
"(",
"self",
")",
":",
"self",
".",
"_callable",
"=",
"ExpectedCall",
"(",
"self",
",",
"call_name",
"=",
"self",
".",
"_name",
",",
"callable",
"=",
"True",
")",
"return",
"self"
] | The fake must be called.
.. doctest::
:hide:
>>> import fudge
>>> fudge.clear_expectations()
>>> fudge.clear_calls()
This is useful for when you stub out a function
as opposed to a class. For example::
>>> import fudge
>>> remove = fudge.Fake('os.remove').expects_call()
>>> fudge.verify()
Traceback (most recent call last):
...
AssertionError: fake:os.remove() was not called
.. doctest::
:hide:
>>> fudge.clear_expectations() | [
"The",
"fake",
"must",
"be",
"called",
"."
] | python | train |
bocong/urbandictionary-py | urbandictionary.py | https://github.com/bocong/urbandictionary-py/blob/aa919cd2c28563ca7f09ff80a3125de2dc7576ba/urbandictionary.py#L63-L70 | def defineID(defid):
"""Search for UD's definition ID and return list of UrbanDefinition objects.
Keyword arguments:
defid -- definition ID to search for (int or str)
"""
json = _get_urban_json(UD_DEFID_URL + urlquote(str(defid)))
return _parse_urban_json(json) | [
"def",
"defineID",
"(",
"defid",
")",
":",
"json",
"=",
"_get_urban_json",
"(",
"UD_DEFID_URL",
"+",
"urlquote",
"(",
"str",
"(",
"defid",
")",
")",
")",
"return",
"_parse_urban_json",
"(",
"json",
")"
] | Search for UD's definition ID and return list of UrbanDefinition objects.
Keyword arguments:
defid -- definition ID to search for (int or str) | [
"Search",
"for",
"UD",
"s",
"definition",
"ID",
"and",
"return",
"list",
"of",
"UrbanDefinition",
"objects",
"."
] | python | train |
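Hedged usage sketch (the definition ID and UrbanDefinition attribute names are assumptions):
defs = defineID(217456)                     # illustrative definition ID
for d in defs:
    print(d.word, d.definition[:60])        # attribute names assumed from this module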
jlesquembre/termite | termite/utils.py | https://github.com/jlesquembre/termite/blob/fb77dcaa31872dc14dd3eeac694cd4c44aeee27b/termite/utils.py#L31-L58 | def copy_rec(source, dest):
"""Copy files between diferent directories.
Copy one or more files to an existing directory. This function is
recursive, if the source is a directory, all its subdirectories are created
in the destination. Existing files in destination are overwrited without
any warning.
Args:
source (str): File or directory name.
dest (str): Directory name.
Raises:
FileNotFoundError: Destination directory doesn't exist.
"""
if os.path.isdir(source):
for child in os.listdir(source):
new_dest = os.path.join(dest, child)
os.makedirs(new_dest, exist_ok=True)
copy_rec(os.path.join(source, child), new_dest)
elif os.path.isfile(source):
logging.info(' Copy "{}" to "{}"'.format(source, dest))
shutil.copy(source, dest)
else:
logging.info(' Ignoring "{}"'.format(source)) | [
"def",
"copy_rec",
"(",
"source",
",",
"dest",
")",
":",
"if",
"os",
".",
"path",
".",
"isdir",
"(",
"source",
")",
":",
"for",
"child",
"in",
"os",
".",
"listdir",
"(",
"source",
")",
":",
"new_dest",
"=",
"os",
".",
"path",
".",
"join",
"(",
"dest",
",",
"child",
")",
"os",
".",
"makedirs",
"(",
"new_dest",
",",
"exist_ok",
"=",
"True",
")",
"copy_rec",
"(",
"os",
".",
"path",
".",
"join",
"(",
"source",
",",
"child",
")",
",",
"new_dest",
")",
"elif",
"os",
".",
"path",
".",
"isfile",
"(",
"source",
")",
":",
"logging",
".",
"info",
"(",
"' Copy \"{}\" to \"{}\"'",
".",
"format",
"(",
"source",
",",
"dest",
")",
")",
"shutil",
".",
"copy",
"(",
"source",
",",
"dest",
")",
"else",
":",
"logging",
".",
"info",
"(",
"' Ignoring \"{}\"'",
".",
"format",
"(",
"source",
")",
")"
] | Copy files between different directories.
Copy one or more files to an existing directory. This function is
recursive, if the source is a directory, all its subdirectories are created
in the destination. Existing files in destination are overwritten without
any warning.
Args:
source (str): File or directory name.
dest (str): Directory name.
Raises:
FileNotFoundError: Destination directory doesn't exist. | [
"Copy",
"files",
"between",
"diferent",
"directories",
"."
] | python | train |
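A quick usage sketch (paths are illustrative; the destination must already exist, per the docstring):
import os, logging
logging.basicConfig(level=logging.INFO)     # surfaces the Copy/Ignoring log lines
os.makedirs('/tmp/dest', exist_ok=True)
copy_rec('/tmp/src', '/tmp/dest')           # mirrors /tmp/src into /tmp/dest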
inspirehep/inspire-dojson | inspire_dojson/hep/rules/bd9xx.py | https://github.com/inspirehep/inspire-dojson/blob/17f3789cd3d5ae58efa1190dc0eea9efb9c8ca59/inspire_dojson/hep/rules/bd9xx.py#L311-L366 | def references2marc(self, key, value):
"""Populate the ``999C5`` MARC field."""
reference = value.get('reference', {})
pids = force_list(reference.get('persistent_identifiers'))
a_values = ['doi:' + el for el in force_list(reference.get('dois'))]
a_values.extend(['hdl:' + el['value'] for el in pids if el.get('schema') == 'HDL'])
a_values.extend(['urn:' + el['value'] for el in pids if el.get('schema') == 'URN'])
external_ids = force_list(reference.get('external_system_identifiers'))
u_values = force_list(get_value(reference, 'urls.value'))
u_values.extend(CDS_RECORD_FORMAT.format(el['value']) for el in external_ids if el.get('schema') == 'CDS')
u_values.extend(ADS_RECORD_FORMAT.format(el['value']) for el in external_ids if el.get('schema') == 'ADS')
authors = force_list(reference.get('authors'))
e_values = [el['full_name'] for el in authors if el.get('inspire_role') == 'editor']
h_values = [el['full_name'] for el in authors if el.get('inspire_role') != 'editor']
r_values = force_list(reference.get('report_numbers'))
if reference.get('arxiv_eprint'):
arxiv_eprint = reference['arxiv_eprint']
r_values.append('arXiv:' + arxiv_eprint if is_arxiv_post_2007(arxiv_eprint) else arxiv_eprint)
if reference.get('publication_info'):
reference['publication_info'] = convert_new_publication_info_to_old([reference['publication_info']])[0]
journal_title = get_value(reference, 'publication_info.journal_title')
journal_volume = get_value(reference, 'publication_info.journal_volume')
page_start = get_value(reference, 'publication_info.page_start')
page_end = get_value(reference, 'publication_info.page_end')
artid = get_value(reference, 'publication_info.artid')
s_value = build_pubnote(journal_title, journal_volume, page_start, page_end, artid)
m_value = ' / '.join(force_list(reference.get('misc')))
return {
'0': get_recid_from_ref(value.get('record')),
'9': 'CURATOR' if value.get('legacy_curated') else None,
'a': a_values,
'b': get_value(reference, 'publication_info.cnum'),
'c': reference.get('collaborations'),
'e': e_values,
'h': h_values,
'i': reference.get('isbn'),
'k': reference.get('texkey'),
'm': m_value,
'o': reference.get('label'),
'p': get_value(reference, 'imprint.publisher'),
'q': get_value(reference, 'publication_info.parent_title'),
'r': r_values,
's': s_value,
't': get_value(reference, 'title.title'),
'u': u_values,
'x': get_value(value, 'raw_refs.value'),
'y': get_value(reference, 'publication_info.year'),
'z': 1 if value.get('curated_relation') else 0,
} | [
"def",
"references2marc",
"(",
"self",
",",
"key",
",",
"value",
")",
":",
"reference",
"=",
"value",
".",
"get",
"(",
"'reference'",
",",
"{",
"}",
")",
"pids",
"=",
"force_list",
"(",
"reference",
".",
"get",
"(",
"'persistent_identifiers'",
")",
")",
"a_values",
"=",
"[",
"'doi:'",
"+",
"el",
"for",
"el",
"in",
"force_list",
"(",
"reference",
".",
"get",
"(",
"'dois'",
")",
")",
"]",
"a_values",
".",
"extend",
"(",
"[",
"'hdl:'",
"+",
"el",
"[",
"'value'",
"]",
"for",
"el",
"in",
"pids",
"if",
"el",
".",
"get",
"(",
"'schema'",
")",
"==",
"'HDL'",
"]",
")",
"a_values",
".",
"extend",
"(",
"[",
"'urn:'",
"+",
"el",
"[",
"'value'",
"]",
"for",
"el",
"in",
"pids",
"if",
"el",
".",
"get",
"(",
"'schema'",
")",
"==",
"'URN'",
"]",
")",
"external_ids",
"=",
"force_list",
"(",
"reference",
".",
"get",
"(",
"'external_system_identifiers'",
")",
")",
"u_values",
"=",
"force_list",
"(",
"get_value",
"(",
"reference",
",",
"'urls.value'",
")",
")",
"u_values",
".",
"extend",
"(",
"CDS_RECORD_FORMAT",
".",
"format",
"(",
"el",
"[",
"'value'",
"]",
")",
"for",
"el",
"in",
"external_ids",
"if",
"el",
".",
"get",
"(",
"'schema'",
")",
"==",
"'CDS'",
")",
"u_values",
".",
"extend",
"(",
"ADS_RECORD_FORMAT",
".",
"format",
"(",
"el",
"[",
"'value'",
"]",
")",
"for",
"el",
"in",
"external_ids",
"if",
"el",
".",
"get",
"(",
"'schema'",
")",
"==",
"'ADS'",
")",
"authors",
"=",
"force_list",
"(",
"reference",
".",
"get",
"(",
"'authors'",
")",
")",
"e_values",
"=",
"[",
"el",
"[",
"'full_name'",
"]",
"for",
"el",
"in",
"authors",
"if",
"el",
".",
"get",
"(",
"'inspire_role'",
")",
"==",
"'editor'",
"]",
"h_values",
"=",
"[",
"el",
"[",
"'full_name'",
"]",
"for",
"el",
"in",
"authors",
"if",
"el",
".",
"get",
"(",
"'inspire_role'",
")",
"!=",
"'editor'",
"]",
"r_values",
"=",
"force_list",
"(",
"reference",
".",
"get",
"(",
"'report_numbers'",
")",
")",
"if",
"reference",
".",
"get",
"(",
"'arxiv_eprint'",
")",
":",
"arxiv_eprint",
"=",
"reference",
"[",
"'arxiv_eprint'",
"]",
"r_values",
".",
"append",
"(",
"'arXiv:'",
"+",
"arxiv_eprint",
"if",
"is_arxiv_post_2007",
"(",
"arxiv_eprint",
")",
"else",
"arxiv_eprint",
")",
"if",
"reference",
".",
"get",
"(",
"'publication_info'",
")",
":",
"reference",
"[",
"'publication_info'",
"]",
"=",
"convert_new_publication_info_to_old",
"(",
"[",
"reference",
"[",
"'publication_info'",
"]",
"]",
")",
"[",
"0",
"]",
"journal_title",
"=",
"get_value",
"(",
"reference",
",",
"'publication_info.journal_title'",
")",
"journal_volume",
"=",
"get_value",
"(",
"reference",
",",
"'publication_info.journal_volume'",
")",
"page_start",
"=",
"get_value",
"(",
"reference",
",",
"'publication_info.page_start'",
")",
"page_end",
"=",
"get_value",
"(",
"reference",
",",
"'publication_info.page_end'",
")",
"artid",
"=",
"get_value",
"(",
"reference",
",",
"'publication_info.artid'",
")",
"s_value",
"=",
"build_pubnote",
"(",
"journal_title",
",",
"journal_volume",
",",
"page_start",
",",
"page_end",
",",
"artid",
")",
"m_value",
"=",
"' / '",
".",
"join",
"(",
"force_list",
"(",
"reference",
".",
"get",
"(",
"'misc'",
")",
")",
")",
"return",
"{",
"'0'",
":",
"get_recid_from_ref",
"(",
"value",
".",
"get",
"(",
"'record'",
")",
")",
",",
"'9'",
":",
"'CURATOR'",
"if",
"value",
".",
"get",
"(",
"'legacy_curated'",
")",
"else",
"None",
",",
"'a'",
":",
"a_values",
",",
"'b'",
":",
"get_value",
"(",
"reference",
",",
"'publication_info.cnum'",
")",
",",
"'c'",
":",
"reference",
".",
"get",
"(",
"'collaborations'",
")",
",",
"'e'",
":",
"e_values",
",",
"'h'",
":",
"h_values",
",",
"'i'",
":",
"reference",
".",
"get",
"(",
"'isbn'",
")",
",",
"'k'",
":",
"reference",
".",
"get",
"(",
"'texkey'",
")",
",",
"'m'",
":",
"m_value",
",",
"'o'",
":",
"reference",
".",
"get",
"(",
"'label'",
")",
",",
"'p'",
":",
"get_value",
"(",
"reference",
",",
"'imprint.publisher'",
")",
",",
"'q'",
":",
"get_value",
"(",
"reference",
",",
"'publication_info.parent_title'",
")",
",",
"'r'",
":",
"r_values",
",",
"'s'",
":",
"s_value",
",",
"'t'",
":",
"get_value",
"(",
"reference",
",",
"'title.title'",
")",
",",
"'u'",
":",
"u_values",
",",
"'x'",
":",
"get_value",
"(",
"value",
",",
"'raw_refs.value'",
")",
",",
"'y'",
":",
"get_value",
"(",
"reference",
",",
"'publication_info.year'",
")",
",",
"'z'",
":",
"1",
"if",
"value",
".",
"get",
"(",
"'curated_relation'",
")",
"else",
"0",
",",
"}"
] | Populate the ``999C5`` MARC field. | [
"Populate",
"the",
"999C5",
"MARC",
"field",
"."
] | python | train |
juju/python-libjuju | juju/model.py | https://github.com/juju/python-libjuju/blob/58f0011f4c57cd68830258952fa952eaadca6b38/juju/model.py#L2281-L2312 | def make_archive(self, path):
"""Create archive of directory and write to ``path``.
:param path: Path to archive
Ignored::
* build/* - This is used for packing the charm itself and any
similar tasks.
* */.* - Hidden files are all ignored for now. This will most
likely be changed into a specific ignore list
(.bzr, etc)
"""
zf = zipfile.ZipFile(path, 'w', zipfile.ZIP_DEFLATED)
for dirpath, dirnames, filenames in os.walk(self.path):
relative_path = dirpath[len(self.path) + 1:]
if relative_path and not self._ignore(relative_path):
zf.write(dirpath, relative_path)
for name in filenames:
archive_name = os.path.join(relative_path, name)
if not self._ignore(archive_name):
real_path = os.path.join(dirpath, name)
self._check_type(real_path)
if os.path.islink(real_path):
self._check_link(real_path)
self._write_symlink(
zf, os.readlink(real_path), archive_name)
else:
zf.write(real_path, archive_name)
zf.close()
return path | [
"def",
"make_archive",
"(",
"self",
",",
"path",
")",
":",
"zf",
"=",
"zipfile",
".",
"ZipFile",
"(",
"path",
",",
"'w'",
",",
"zipfile",
".",
"ZIP_DEFLATED",
")",
"for",
"dirpath",
",",
"dirnames",
",",
"filenames",
"in",
"os",
".",
"walk",
"(",
"self",
".",
"path",
")",
":",
"relative_path",
"=",
"dirpath",
"[",
"len",
"(",
"self",
".",
"path",
")",
"+",
"1",
":",
"]",
"if",
"relative_path",
"and",
"not",
"self",
".",
"_ignore",
"(",
"relative_path",
")",
":",
"zf",
".",
"write",
"(",
"dirpath",
",",
"relative_path",
")",
"for",
"name",
"in",
"filenames",
":",
"archive_name",
"=",
"os",
".",
"path",
".",
"join",
"(",
"relative_path",
",",
"name",
")",
"if",
"not",
"self",
".",
"_ignore",
"(",
"archive_name",
")",
":",
"real_path",
"=",
"os",
".",
"path",
".",
"join",
"(",
"dirpath",
",",
"name",
")",
"self",
".",
"_check_type",
"(",
"real_path",
")",
"if",
"os",
".",
"path",
".",
"islink",
"(",
"real_path",
")",
":",
"self",
".",
"_check_link",
"(",
"real_path",
")",
"self",
".",
"_write_symlink",
"(",
"zf",
",",
"os",
".",
"readlink",
"(",
"real_path",
")",
",",
"archive_name",
")",
"else",
":",
"zf",
".",
"write",
"(",
"real_path",
",",
"archive_name",
")",
"zf",
".",
"close",
"(",
")",
"return",
"path"
] | Create archive of directory and write to ``path``.
:param path: Path to archive
Ignored::
* build/* - This is used for packing the charm itself and any
similar tasks.
* */.* - Hidden files are all ignored for now. This will most
likely be changed into a specific ignore list
(.bzr, etc) | [
"Create",
"archive",
"of",
"directory",
"and",
"write",
"to",
"path",
"."
] | python | train |
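The symlink-preserving walk above in miniature (a generic sketch that drops the charm-specific ignore rules and type checks):
import os, zipfile
def zip_dir(root, out_path):
    with zipfile.ZipFile(out_path, 'w', zipfile.ZIP_DEFLATED) as zf:
        for dirpath, _, filenames in os.walk(root):
            for name in filenames:
                real = os.path.join(dirpath, name)
                arcname = os.path.relpath(real, root)
                if os.path.islink(real):
                    info = zipfile.ZipInfo(arcname)
                    info.external_attr = 0o120777 << 16    # store entry as a symlink
                    zf.writestr(info, os.readlink(real))
                else:
                    zf.write(real, arcname)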
maljovec/topopy | topopy/MergeTree.py | https://github.com/maljovec/topopy/blob/4be598d51c4e4043b73d4ad44beed6d289e2f088/topopy/MergeTree.py#L135-L160 | def build_for_contour_tree(self, contour_tree, negate=False):
""" A helper function that will reduce duplication of data by
reusing the parent contour tree's parameters and data
"""
if self.debug:
tree_type = "Join"
if negate:
tree_type = "Split"
sys.stdout.write("{} Tree Computation: ".format(tree_type))
start = time.clock()
Y = contour_tree.Y
if negate:
Y = -Y
self.__tree = MergeTreeFloat(
vectorFloat(contour_tree.Xnorm.flatten()),
vectorFloat(Y),
str(contour_tree.gradient),
contour_tree.graph_rep.full_graph(),
self.debug,
)
self._internal_build()
if self.debug:
end = time.clock()
sys.stdout.write("%f s\n" % (end - start)) | [
"def",
"build_for_contour_tree",
"(",
"self",
",",
"contour_tree",
",",
"negate",
"=",
"False",
")",
":",
"if",
"self",
".",
"debug",
":",
"tree_type",
"=",
"\"Join\"",
"if",
"negate",
":",
"tree_type",
"=",
"\"Split\"",
"sys",
".",
"stdout",
".",
"write",
"(",
"\"{} Tree Computation: \"",
".",
"format",
"(",
"tree_type",
")",
")",
"start",
"=",
"time",
".",
"clock",
"(",
")",
"Y",
"=",
"contour_tree",
".",
"Y",
"if",
"negate",
":",
"Y",
"=",
"-",
"Y",
"self",
".",
"__tree",
"=",
"MergeTreeFloat",
"(",
"vectorFloat",
"(",
"contour_tree",
".",
"Xnorm",
".",
"flatten",
"(",
")",
")",
",",
"vectorFloat",
"(",
"Y",
")",
",",
"str",
"(",
"contour_tree",
".",
"gradient",
")",
",",
"contour_tree",
".",
"graph_rep",
".",
"full_graph",
"(",
")",
",",
"self",
".",
"debug",
",",
")",
"self",
".",
"_internal_build",
"(",
")",
"if",
"self",
".",
"debug",
":",
"end",
"=",
"time",
".",
"clock",
"(",
")",
"sys",
".",
"stdout",
".",
"write",
"(",
"\"%f s\\n\"",
"%",
"(",
"end",
"-",
"start",
")",
")"
] | A helper function that will reduce duplication of data by
reusing the parent contour tree's parameters and data | [
"A",
"helper",
"function",
"that",
"will",
"reduce",
"duplication",
"of",
"data",
"by",
"reusing",
"the",
"parent",
"contour",
"tree",
"s",
"parameters",
"and",
"data"
] | python | train |
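The negate flag above exploits a standard duality: the split tree of a scalar field f is the join tree of -f, so a single merge-tree routine covers both cases. A minimal illustration of the sign flip, with made-up values:

import numpy as np

f = np.array([0.1, 0.9, 0.4, 0.7, 0.2])
join_input = f    # negate=False: build the join tree of f
split_input = -f  # negate=True: the join tree of -f is the split tree of f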
fermiPy/fermipy | fermipy/plotting.py | https://github.com/fermiPy/fermipy/blob/9df5e7e3728307fd58c5bba36fd86783c39fbad4/fermipy/plotting.py#L1159-L1251 | def make_roi_plots(self, gta, mcube_tot, **kwargs):
"""Make various diagnostic plots for the 1D and 2D
counts/model distributions.
Parameters
----------
prefix : str
Prefix that will be appended to all filenames.
"""
fmt = kwargs.get('format', self.config['format'])
figsize = kwargs.get('figsize', self.config['figsize'])
prefix = kwargs.get('prefix', '')
loge_bounds = kwargs.get('loge_bounds', None)
weighted = kwargs.get('weighted', False)
roi_kwargs = {}
roi_kwargs.setdefault('loge_bounds', loge_bounds)
roi_kwargs.setdefault(
'graticule_radii', self.config['graticule_radii'])
roi_kwargs.setdefault('label_ts_threshold',
self.config['label_ts_threshold'])
roi_kwargs.setdefault('cmap', self.config['cmap'])
roi_kwargs.setdefault('catalogs', self._catalogs)
if loge_bounds is None:
loge_bounds = (gta.log_energies[0], gta.log_energies[-1])
esuffix = '_%.3f_%.3f' % (loge_bounds[0], loge_bounds[1])
mcube_diffuse = gta.model_counts_map('diffuse')
counts_map = gta.counts_map()
if weighted:
wmap = gta.weight_map()
counts_map = copy.deepcopy(counts_map)
mcube_tot = copy.deepcopy(mcube_tot)
counts_map.data *= wmap.data
mcube_tot.data *= wmap.data
mcube_diffuse.data *= wmap.data
# colors = ['k', 'b', 'g', 'r']
data_style = {'marker': 's', 'linestyle': 'None'}
fig = plt.figure(figsize=figsize)
if gta.projtype == "WCS":
xmin = -1
xmax = 1
elif gta.projtype == "HPX":
hpx2wcs = counts_map.make_wcs_mapping(proj='CAR', oversample=2)
counts_map = counts_map.to_wcs(hpx2wcs=hpx2wcs)
mcube_tot = mcube_tot.to_wcs(hpx2wcs=hpx2wcs)
mcube_diffuse = mcube_diffuse.to_wcs(hpx2wcs=hpx2wcs)
xmin = None
xmax = None
fig = plt.figure(figsize=figsize)
rp = ROIPlotter(mcube_tot, roi=gta.roi, **roi_kwargs)
rp.plot(cb_label='Counts', zscale='pow', gamma=1. / 3.)
plt.savefig(os.path.join(gta.config['fileio']['workdir'],
'%s_model_map%s.%s' % (
prefix, esuffix, fmt)))
plt.close(fig)
rp = ROIPlotter(counts_map, roi=gta.roi, **roi_kwargs)
rp.plot(cb_label='Counts', zscale='sqrt')
plt.savefig(os.path.join(gta.config['fileio']['workdir'],
'%s_counts_map%s.%s' % (
prefix, esuffix, fmt)))
plt.close(fig)
for iaxis, xlabel, psuffix in zip([0, 1],
['LON Offset [deg]', 'LAT Offset [deg]'],
['xproj', 'yproj']):
fig = plt.figure(figsize=figsize)
rp.plot_projection(iaxis, label='Data', color='k',
xmin=xmin, xmax=xmax, **data_style)
rp.plot_projection(iaxis, data=mcube_tot, label='Model', xmin=xmin, xmax=xmax,
noerror=True)
rp.plot_projection(iaxis, data=mcube_diffuse, label='Diffuse', xmin=xmin, xmax=xmax,
noerror=True)
plt.gca().set_ylabel('Counts')
plt.gca().set_xlabel(xlabel)
plt.gca().legend(frameon=False)
annotate(loge_bounds=loge_bounds)
plt.savefig(os.path.join(gta.config['fileio']['workdir'],
'%s_counts_map_%s%s.%s' % (prefix, psuffix,
esuffix, fmt)))
plt.close(fig) | [
"def",
"make_roi_plots",
"(",
"self",
",",
"gta",
",",
"mcube_tot",
",",
"*",
"*",
"kwargs",
")",
":",
"fmt",
"=",
"kwargs",
".",
"get",
"(",
"'format'",
",",
"self",
".",
"config",
"[",
"'format'",
"]",
")",
"figsize",
"=",
"kwargs",
".",
"get",
"(",
"'figsize'",
",",
"self",
".",
"config",
"[",
"'figsize'",
"]",
")",
"prefix",
"=",
"kwargs",
".",
"get",
"(",
"'prefix'",
",",
"''",
")",
"loge_bounds",
"=",
"kwargs",
".",
"get",
"(",
"'loge_bounds'",
",",
"None",
")",
"weighted",
"=",
"kwargs",
".",
"get",
"(",
"'weighted'",
",",
"False",
")",
"roi_kwargs",
"=",
"{",
"}",
"roi_kwargs",
".",
"setdefault",
"(",
"'loge_bounds'",
",",
"loge_bounds",
")",
"roi_kwargs",
".",
"setdefault",
"(",
"'graticule_radii'",
",",
"self",
".",
"config",
"[",
"'graticule_radii'",
"]",
")",
"roi_kwargs",
".",
"setdefault",
"(",
"'label_ts_threshold'",
",",
"self",
".",
"config",
"[",
"'label_ts_threshold'",
"]",
")",
"roi_kwargs",
".",
"setdefault",
"(",
"'cmap'",
",",
"self",
".",
"config",
"[",
"'cmap'",
"]",
")",
"roi_kwargs",
".",
"setdefault",
"(",
"'catalogs'",
",",
"self",
".",
"_catalogs",
")",
"if",
"loge_bounds",
"is",
"None",
":",
"loge_bounds",
"=",
"(",
"gta",
".",
"log_energies",
"[",
"0",
"]",
",",
"gta",
".",
"log_energies",
"[",
"-",
"1",
"]",
")",
"esuffix",
"=",
"'_%.3f_%.3f'",
"%",
"(",
"loge_bounds",
"[",
"0",
"]",
",",
"loge_bounds",
"[",
"1",
"]",
")",
"mcube_diffuse",
"=",
"gta",
".",
"model_counts_map",
"(",
"'diffuse'",
")",
"counts_map",
"=",
"gta",
".",
"counts_map",
"(",
")",
"if",
"weighted",
":",
"wmap",
"=",
"gta",
".",
"weight_map",
"(",
")",
"counts_map",
"=",
"copy",
".",
"deepcopy",
"(",
"counts_map",
")",
"mcube_tot",
"=",
"copy",
".",
"deepcopy",
"(",
"mcube_tot",
")",
"counts_map",
".",
"data",
"*=",
"wmap",
".",
"data",
"mcube_tot",
".",
"data",
"*=",
"wmap",
".",
"data",
"mcube_diffuse",
".",
"data",
"*=",
"wmap",
".",
"data",
"# colors = ['k', 'b', 'g', 'r']",
"data_style",
"=",
"{",
"'marker'",
":",
"'s'",
",",
"'linestyle'",
":",
"'None'",
"}",
"fig",
"=",
"plt",
".",
"figure",
"(",
"figsize",
"=",
"figsize",
")",
"if",
"gta",
".",
"projtype",
"==",
"\"WCS\"",
":",
"xmin",
"=",
"-",
"1",
"xmax",
"=",
"1",
"elif",
"gta",
".",
"projtype",
"==",
"\"HPX\"",
":",
"hpx2wcs",
"=",
"counts_map",
".",
"make_wcs_mapping",
"(",
"proj",
"=",
"'CAR'",
",",
"oversample",
"=",
"2",
")",
"counts_map",
"=",
"counts_map",
".",
"to_wcs",
"(",
"hpx2wcs",
"=",
"hpx2wcs",
")",
"mcube_tot",
"=",
"mcube_tot",
".",
"to_wcs",
"(",
"hpx2wcs",
"=",
"hpx2wcs",
")",
"mcube_diffuse",
"=",
"mcube_diffuse",
".",
"to_wcs",
"(",
"hpx2wcs",
"=",
"hpx2wcs",
")",
"xmin",
"=",
"None",
"xmax",
"=",
"None",
"fig",
"=",
"plt",
".",
"figure",
"(",
"figsize",
"=",
"figsize",
")",
"rp",
"=",
"ROIPlotter",
"(",
"mcube_tot",
",",
"roi",
"=",
"gta",
".",
"roi",
",",
"*",
"*",
"roi_kwargs",
")",
"rp",
".",
"plot",
"(",
"cb_label",
"=",
"'Counts'",
",",
"zscale",
"=",
"'pow'",
",",
"gamma",
"=",
"1.",
"/",
"3.",
")",
"plt",
".",
"savefig",
"(",
"os",
".",
"path",
".",
"join",
"(",
"gta",
".",
"config",
"[",
"'fileio'",
"]",
"[",
"'workdir'",
"]",
",",
"'%s_model_map%s.%s'",
"%",
"(",
"prefix",
",",
"esuffix",
",",
"fmt",
")",
")",
")",
"plt",
".",
"close",
"(",
"fig",
")",
"rp",
"=",
"ROIPlotter",
"(",
"counts_map",
",",
"roi",
"=",
"gta",
".",
"roi",
",",
"*",
"*",
"roi_kwargs",
")",
"rp",
".",
"plot",
"(",
"cb_label",
"=",
"'Counts'",
",",
"zscale",
"=",
"'sqrt'",
")",
"plt",
".",
"savefig",
"(",
"os",
".",
"path",
".",
"join",
"(",
"gta",
".",
"config",
"[",
"'fileio'",
"]",
"[",
"'workdir'",
"]",
",",
"'%s_counts_map%s.%s'",
"%",
"(",
"prefix",
",",
"esuffix",
",",
"fmt",
")",
")",
")",
"plt",
".",
"close",
"(",
"fig",
")",
"for",
"iaxis",
",",
"xlabel",
",",
"psuffix",
"in",
"zip",
"(",
"[",
"0",
",",
"1",
"]",
",",
"[",
"'LON Offset [deg]'",
",",
"'LAT Offset [deg]'",
"]",
",",
"[",
"'xproj'",
",",
"'yproj'",
"]",
")",
":",
"fig",
"=",
"plt",
".",
"figure",
"(",
"figsize",
"=",
"figsize",
")",
"rp",
".",
"plot_projection",
"(",
"iaxis",
",",
"label",
"=",
"'Data'",
",",
"color",
"=",
"'k'",
",",
"xmin",
"=",
"xmin",
",",
"xmax",
"=",
"xmax",
",",
"*",
"*",
"data_style",
")",
"rp",
".",
"plot_projection",
"(",
"iaxis",
",",
"data",
"=",
"mcube_tot",
",",
"label",
"=",
"'Model'",
",",
"xmin",
"=",
"xmin",
",",
"xmax",
"=",
"xmax",
",",
"noerror",
"=",
"True",
")",
"rp",
".",
"plot_projection",
"(",
"iaxis",
",",
"data",
"=",
"mcube_diffuse",
",",
"label",
"=",
"'Diffuse'",
",",
"xmin",
"=",
"xmin",
",",
"xmax",
"=",
"xmax",
",",
"noerror",
"=",
"True",
")",
"plt",
".",
"gca",
"(",
")",
".",
"set_ylabel",
"(",
"'Counts'",
")",
"plt",
".",
"gca",
"(",
")",
".",
"set_xlabel",
"(",
"xlabel",
")",
"plt",
".",
"gca",
"(",
")",
".",
"legend",
"(",
"frameon",
"=",
"False",
")",
"annotate",
"(",
"loge_bounds",
"=",
"loge_bounds",
")",
"plt",
".",
"savefig",
"(",
"os",
".",
"path",
".",
"join",
"(",
"gta",
".",
"config",
"[",
"'fileio'",
"]",
"[",
"'workdir'",
"]",
",",
"'%s_counts_map_%s%s.%s'",
"%",
"(",
"prefix",
",",
"psuffix",
",",
"esuffix",
",",
"fmt",
")",
")",
")",
"plt",
".",
"close",
"(",
"fig",
")"
] | Make various diagnostic plots for the 1D and 2D
counts/model distributions.
Parameters
----------
prefix : str
Prefix that will be appended to all filenames. | [
"Make",
"various",
"diagnostic",
"plots",
"for",
"the",
"1D",
"and",
"2D",
"counts",
"/",
"model",
"distributions",
"."
] | python | train |
jason-weirather/py-seq-tools | seqtools/format/sam/bam/bamindex.py | https://github.com/jason-weirather/py-seq-tools/blob/f642c2c73ffef2acc83656a78059a476fc734ca1/seqtools/format/sam/bam/bamindex.py#L58-L79 | def check_ordered(self):
""" True if each chromosome is listed together as a chunk and if the range starts go from smallest to largest otherwise false
:return: is it ordered?
:rtype: bool
"""
sys.stderr.write("error unimplemented check_ordered\n")
sys.exit()
seen_chrs = set()
curr_chr = None
prevstart = 0
for l in self._lines:
if not l['rng']: continue
if l['rng'].chr != curr_chr:
prevstart = 0
if l['rng'].chr in seen_chrs:
return False
curr_chr = l['rng'].chr
seen_chrs.add(curr_chr)
if l['rng'].start < prevstart: return False
prevstart = l['rng'].start
return True | [
"def",
"check_ordered",
"(",
"self",
")",
":",
"sys",
".",
"stderr",
".",
"write",
"(",
"\"error unimplemented check_ordered\\n\"",
")",
"sys",
".",
"exit",
"(",
")",
"seen_chrs",
"=",
"set",
"(",
")",
"curr_chr",
"=",
"None",
"prevstart",
"=",
"0",
"for",
"l",
"in",
"self",
".",
"_lines",
":",
"if",
"not",
"l",
"[",
"'rng'",
"]",
":",
"continue",
"if",
"l",
"[",
"'rng'",
"]",
".",
"chr",
"!=",
"curr_chr",
":",
"prevstart",
"=",
"0",
"if",
"l",
"[",
"'rng'",
"]",
".",
"chr",
"in",
"seen_chrs",
":",
"return",
"False",
"curr_chr",
"=",
"l",
"[",
"'rng'",
"]",
".",
"chr",
"seen_chrs",
".",
"add",
"(",
"curr_chr",
")",
"if",
"l",
"[",
"'rng'",
"]",
".",
"start",
"<",
"prevstart",
":",
"return",
"False",
"prevstart",
"=",
"l",
"[",
"'rng'",
"]",
".",
"start",
"return",
"True"
] | True if each chromosome is listed together as a chunk and if the range starts go from smallest to largest; otherwise false
:return: is it ordered?
:rtype: bool | [
"True",
"if",
"each",
"chromosome",
"is",
"listed",
"together",
"as",
"a",
"chunk",
"and",
"if",
"the",
"range",
"starts",
"go",
"from",
"smallest",
"to",
"largest",
"otherwise",
"false"
] | python | train |
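A self-contained sketch of the same ordering check on plain (chrom, start) tuples — an illustration of the logic above, not part of the package's API:

def is_coordinate_sorted(ranges):
    seen = set()
    curr = None
    prev_start = 0
    for chrom, start in ranges:
        if chrom != curr:
            if chrom in seen:
                return False  # chromosome appears in two separate chunks
            seen.add(chrom)
            curr = chrom
            prev_start = 0
        if start < prev_start:
            return False      # starts go backwards within a chromosome
        prev_start = start
    return True

assert is_coordinate_sorted([("chr1", 10), ("chr1", 50), ("chr2", 5)])
assert not is_coordinate_sorted([("chr1", 10), ("chr2", 5), ("chr1", 50)])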
ifduyue/urlfetch | urlfetch.py | https://github.com/ifduyue/urlfetch/blob/e0ea4673367c157eb832ba4ba2635306c81a61be/urlfetch.py#L314-L322 | def json(self):
"""Load response body as json.
:raises: :class:`ContentDecodingError`
"""
try:
return json.loads(self.text)
except Exception as e:
raise ContentDecodingError(e) | [
"def",
"json",
"(",
"self",
")",
":",
"try",
":",
"return",
"json",
".",
"loads",
"(",
"self",
".",
"text",
")",
"except",
"Exception",
"as",
"e",
":",
"raise",
"ContentDecodingError",
"(",
"e",
")"
] | Load response body as json.
:raises: :class:`ContentDecodingError` | [
"Load",
"response",
"body",
"as",
"json",
"."
] | python | train |
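A hypothetical caller, assuming get and ContentDecodingError are importable from the urlfetch module shown above; the URL is only an example:

import urlfetch

response = urlfetch.get("https://httpbin.org/get")
try:
    data = response.json()
except urlfetch.ContentDecodingError:
    data = None  # the body was not valid JSON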
scanny/python-pptx | pptx/chart/data.py | https://github.com/scanny/python-pptx/blob/d6ab8234f8b03953d2f831ff9394b1852db34130/pptx/chart/data.py#L769-L777 | def add_data_point(self, x, y, size, number_format=None):
"""
Append a new BubbleDataPoint object having the values *x*, *y*, and
*size*. The optional *number_format* is used to format the Y value.
If not provided, the number format is inherited from the series data.
"""
data_point = BubbleDataPoint(self, x, y, size, number_format)
self.append(data_point)
return data_point | [
"def",
"add_data_point",
"(",
"self",
",",
"x",
",",
"y",
",",
"size",
",",
"number_format",
"=",
"None",
")",
":",
"data_point",
"=",
"BubbleDataPoint",
"(",
"self",
",",
"x",
",",
"y",
",",
"size",
",",
"number_format",
")",
"self",
".",
"append",
"(",
"data_point",
")",
"return",
"data_point"
] | Append a new BubbleDataPoint object having the values *x*, *y*, and
*size*. The optional *number_format* is used to format the Y value.
If not provided, the number format is inherited from the series data. | [
"Append",
"a",
"new",
"BubbleDataPoint",
"object",
"having",
"the",
"values",
"*",
"x",
"*",
"*",
"y",
"*",
"and",
"*",
"size",
"*",
".",
"The",
"optional",
"*",
"number_format",
"*",
"is",
"used",
"to",
"format",
"the",
"Y",
"value",
".",
"If",
"not",
"provided",
"the",
"number",
"format",
"is",
"inherited",
"from",
"the",
"series",
"data",
"."
] | python | train |
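A usage sketch with the bubble chart data classes from this module; the series-level call follows the add_data_point signature above:

from pptx.chart.data import BubbleChartData

chart_data = BubbleChartData()
series = chart_data.add_series("Series 1")
series.add_data_point(1.0, 2.5, 10)                       # x, y, size
series.add_data_point(2.0, 5.0, 20, number_format="0.0")  # custom Y format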
raiden-network/raiden | raiden/messages.py | https://github.com/raiden-network/raiden/blob/407ba15c72074e9de88771d6b9661ff4dc36bef5/raiden/messages.py#L1597-L1608 | def to_dict(self) -> Dict[str, Any]:
"""Message format according to monitoring service spec"""
return {
'type': self.__class__.__name__,
'channel_identifier': self.channel_identifier,
'token_network_address': to_normalized_address(self.token_network_address),
'balance_hash': encode_hex(self.balance_hash),
'nonce': self.nonce,
'additional_hash': encode_hex(self.additional_hash),
'signature': encode_hex(self.signature),
'chain_id': self.chain_id,
} | [
"def",
"to_dict",
"(",
"self",
")",
"->",
"Dict",
"[",
"str",
",",
"Any",
"]",
":",
"return",
"{",
"'type'",
":",
"self",
".",
"__class__",
".",
"__name__",
",",
"'channel_identifier'",
":",
"self",
".",
"channel_identifier",
",",
"'token_network_address'",
":",
"to_normalized_address",
"(",
"self",
".",
"token_network_address",
")",
",",
"'balance_hash'",
":",
"encode_hex",
"(",
"self",
".",
"balance_hash",
")",
",",
"'nonce'",
":",
"self",
".",
"nonce",
",",
"'additional_hash'",
":",
"encode_hex",
"(",
"self",
".",
"additional_hash",
")",
",",
"'signature'",
":",
"encode_hex",
"(",
"self",
".",
"signature",
")",
",",
"'chain_id'",
":",
"self",
".",
"chain_id",
",",
"}"
] | Message format according to monitoring service spec | [
"Message",
"format",
"according",
"to",
"monitoring",
"service",
"spec"
] | python | train |
ionelmc/python-cogen | examples/cogen-irc/CogenIrcApp/cogenircapp/websetup.py | https://github.com/ionelmc/python-cogen/blob/83b0edb88425eba6e5bfda9f1dcd34642517e2a8/examples/cogen-irc/CogenIrcApp/cogenircapp/websetup.py#L11-L14 | def setup_config(command, filename, section, vars):
"""Place any commands to setup cogenircapp here"""
conf = appconfig('config:' + filename)
load_environment(conf.global_conf, conf.local_conf) | [
"def",
"setup_config",
"(",
"command",
",",
"filename",
",",
"section",
",",
"vars",
")",
":",
"conf",
"=",
"appconfig",
"(",
"'config:'",
"+",
"filename",
")",
"load_environment",
"(",
"conf",
".",
"global_conf",
",",
"conf",
".",
"local_conf",
")"
] | Place any commands to setup cogenircapp here | [
"Place",
"any",
"commands",
"to",
"setup",
"cogenircapp",
"here"
] | python | train |
rhgrant10/Groupy | groupy/api/memberships.py | https://github.com/rhgrant10/Groupy/blob/ffd8cac57586fa1c218e3b4bfaa531142c3be766/groupy/api/memberships.py#L248-L259 | def get_failed_requests(self, results):
"""Return the requests that failed.
:param results: the results of a membership request check
:type results: :class:`list`
:return: the failed requests
:rtype: generator
"""
data = {member['guid']: member for member in results}
for request in self.requests:
if request['guid'] not in data:
yield request | [
"def",
"get_failed_requests",
"(",
"self",
",",
"results",
")",
":",
"data",
"=",
"{",
"member",
"[",
"'guid'",
"]",
":",
"member",
"for",
"member",
"in",
"results",
"}",
"for",
"request",
"in",
"self",
".",
"requests",
":",
"if",
"request",
"[",
"'guid'",
"]",
"not",
"in",
"data",
":",
"yield",
"request"
] | Return the requests that failed.
:param results: the results of a membership request check
:type results: :class:`list`
:return: the failed requests
:rtype: generator | [
"Return",
"the",
"requests",
"that",
"failed",
"."
] | python | train |
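A standalone sketch of the guid-difference logic: any submitted request whose guid is absent from the results did not go through.

requests = [{"guid": "a", "nickname": "Ann"},
            {"guid": "b", "nickname": "Bob"}]
results = [{"guid": "a", "status": "joined"}]

returned = {member["guid"] for member in results}
failed = [r for r in requests if r["guid"] not in returned]
assert failed == [{"guid": "b", "nickname": "Bob"}]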
saltstack/salt | salt/states/azurearm_network.py | https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/states/azurearm_network.py#L1544-L1727 | def public_ip_address_present(name, resource_group, tags=None, sku=None, public_ip_allocation_method=None,
public_ip_address_version=None, dns_settings=None, idle_timeout_in_minutes=None,
connection_auth=None, **kwargs):
'''
.. versionadded:: 2019.2.0
Ensure a public IP address exists.
:param name:
Name of the public IP address.
:param resource_group:
The resource group assigned to the public IP address.
:param dns_settings:
An optional dictionary representing a valid PublicIPAddressDnsSettings object. Parameters include
'domain_name_label' and 'reverse_fqdn', which accept strings. The 'domain_name_label' parameter is concatenated
with the regionalized DNS zone to make up the fully qualified domain name associated with the public IP address.
If a domain name label is specified, an A DNS record is created for the public IP in the Microsoft Azure DNS
system. The 'reverse_fqdn' parameter is a user-visible, fully qualified domain name that resolves to this public
IP address. If the reverse FQDN is specified, then a PTR DNS record is created pointing from the IP address in
the in-addr.arpa domain to the reverse FQDN.
:param sku:
The public IP address SKU, which can be 'Basic' or 'Standard'.
:param public_ip_allocation_method:
The public IP allocation method. Possible values are: 'Static' and 'Dynamic'.
:param public_ip_address_version:
The public IP address version. Possible values are: 'IPv4' and 'IPv6'.
:param idle_timeout_in_minutes:
An integer representing the idle timeout of the public IP address.
:param tags:
A dictionary of strings can be passed as tag metadata to the public IP address object.
:param connection_auth:
A dict with subscription and authentication parameters to be used in connecting to the
Azure Resource Manager API.
Example usage:
.. code-block:: yaml
Ensure public IP exists:
azurearm_network.public_ip_address_present:
- name: pub_ip1
- resource_group: group1
- dns_settings:
domain_name_label: decisionlab-ext-test-label
- sku: basic
- public_ip_allocation_method: static
- public_ip_address_version: ipv4
- idle_timeout_in_minutes: 4
- tags:
contact_name: Elmer Fudd Gantry
- connection_auth: {{ profile }}
- require:
- azurearm_resource: Ensure resource group exists
'''
ret = {
'name': name,
'result': False,
'comment': '',
'changes': {}
}
if not isinstance(connection_auth, dict):
ret['comment'] = 'Connection information must be specified via connection_auth dictionary!'
return ret
if sku:
sku = {'name': sku.capitalize()}
pub_ip = __salt__['azurearm_network.public_ip_address_get'](
name,
resource_group,
azurearm_log_level='info',
**connection_auth
)
if 'error' not in pub_ip:
# tag changes
tag_changes = __utils__['dictdiffer.deep_diff'](pub_ip.get('tags', {}), tags or {})
if tag_changes:
ret['changes']['tags'] = tag_changes
# dns_settings changes
if dns_settings:
if not isinstance(dns_settings, dict):
ret['comment'] = 'DNS settings must be provided as a dictionary!'
return ret
for key in dns_settings:
if dns_settings[key] != pub_ip.get('dns_settings', {}).get(key):
ret['changes']['dns_settings'] = {
'old': pub_ip.get('dns_settings'),
'new': dns_settings
}
break
# sku changes
if sku:
sku_changes = __utils__['dictdiffer.deep_diff'](pub_ip.get('sku', {}), sku)
if sku_changes:
ret['changes']['sku'] = sku_changes
# public_ip_allocation_method changes
if public_ip_allocation_method:
if public_ip_allocation_method.capitalize() != pub_ip.get('public_ip_allocation_method'):
ret['changes']['public_ip_allocation_method'] = {
'old': pub_ip.get('public_ip_allocation_method'),
'new': public_ip_allocation_method
}
# public_ip_address_version changes
if public_ip_address_version:
if public_ip_address_version.lower() != pub_ip.get('public_ip_address_version', '').lower():
ret['changes']['public_ip_address_version'] = {
'old': pub_ip.get('public_ip_address_version'),
'new': public_ip_address_version
}
# idle_timeout_in_minutes changes
if idle_timeout_in_minutes and (int(idle_timeout_in_minutes) != pub_ip.get('idle_timeout_in_minutes')):
ret['changes']['idle_timeout_in_minutes'] = {
'old': pub_ip.get('idle_timeout_in_minutes'),
'new': idle_timeout_in_minutes
}
if not ret['changes']:
ret['result'] = True
ret['comment'] = 'Public IP address {0} is already present.'.format(name)
return ret
if __opts__['test']:
ret['result'] = None
ret['comment'] = 'Public IP address {0} would be updated.'.format(name)
return ret
else:
ret['changes'] = {
'old': {},
'new': {
'name': name,
'tags': tags,
'dns_settings': dns_settings,
'sku': sku,
'public_ip_allocation_method': public_ip_allocation_method,
'public_ip_address_version': public_ip_address_version,
'idle_timeout_in_minutes': idle_timeout_in_minutes,
}
}
if __opts__['test']:
ret['comment'] = 'Public IP address {0} would be created.'.format(name)
ret['result'] = None
return ret
pub_ip_kwargs = kwargs.copy()
pub_ip_kwargs.update(connection_auth)
pub_ip = __salt__['azurearm_network.public_ip_address_create_or_update'](
name=name,
resource_group=resource_group,
sku=sku,
tags=tags,
dns_settings=dns_settings,
public_ip_allocation_method=public_ip_allocation_method,
public_ip_address_version=public_ip_address_version,
idle_timeout_in_minutes=idle_timeout_in_minutes,
**pub_ip_kwargs
)
if 'error' not in pub_ip:
ret['result'] = True
ret['comment'] = 'Public IP address {0} has been created.'.format(name)
return ret
ret['comment'] = 'Failed to create public IP address {0}! ({1})'.format(name, pub_ip.get('error'))
return ret | [
"def",
"public_ip_address_present",
"(",
"name",
",",
"resource_group",
",",
"tags",
"=",
"None",
",",
"sku",
"=",
"None",
",",
"public_ip_allocation_method",
"=",
"None",
",",
"public_ip_address_version",
"=",
"None",
",",
"dns_settings",
"=",
"None",
",",
"idle_timeout_in_minutes",
"=",
"None",
",",
"connection_auth",
"=",
"None",
",",
"*",
"*",
"kwargs",
")",
":",
"ret",
"=",
"{",
"'name'",
":",
"name",
",",
"'result'",
":",
"False",
",",
"'comment'",
":",
"''",
",",
"'changes'",
":",
"{",
"}",
"}",
"if",
"not",
"isinstance",
"(",
"connection_auth",
",",
"dict",
")",
":",
"ret",
"[",
"'comment'",
"]",
"=",
"'Connection information must be specified via connection_auth dictionary!'",
"return",
"ret",
"if",
"sku",
":",
"sku",
"=",
"{",
"'name'",
":",
"sku",
".",
"capitalize",
"(",
")",
"}",
"pub_ip",
"=",
"__salt__",
"[",
"'azurearm_network.public_ip_address_get'",
"]",
"(",
"name",
",",
"resource_group",
",",
"azurearm_log_level",
"=",
"'info'",
",",
"*",
"*",
"connection_auth",
")",
"if",
"'error'",
"not",
"in",
"pub_ip",
":",
"# tag changes",
"tag_changes",
"=",
"__utils__",
"[",
"'dictdiffer.deep_diff'",
"]",
"(",
"pub_ip",
".",
"get",
"(",
"'tags'",
",",
"{",
"}",
")",
",",
"tags",
"or",
"{",
"}",
")",
"if",
"tag_changes",
":",
"ret",
"[",
"'changes'",
"]",
"[",
"'tags'",
"]",
"=",
"tag_changes",
"# dns_settings changes",
"if",
"dns_settings",
":",
"if",
"not",
"isinstance",
"(",
"dns_settings",
",",
"dict",
")",
":",
"ret",
"[",
"'comment'",
"]",
"=",
"'DNS settings must be provided as a dictionary!'",
"return",
"ret",
"for",
"key",
"in",
"dns_settings",
":",
"if",
"dns_settings",
"[",
"key",
"]",
"!=",
"pub_ip",
".",
"get",
"(",
"'dns_settings'",
",",
"{",
"}",
")",
".",
"get",
"(",
"key",
")",
":",
"ret",
"[",
"'changes'",
"]",
"[",
"'dns_settings'",
"]",
"=",
"{",
"'old'",
":",
"pub_ip",
".",
"get",
"(",
"'dns_settings'",
")",
",",
"'new'",
":",
"dns_settings",
"}",
"break",
"# sku changes",
"if",
"sku",
":",
"sku_changes",
"=",
"__utils__",
"[",
"'dictdiffer.deep_diff'",
"]",
"(",
"pub_ip",
".",
"get",
"(",
"'sku'",
",",
"{",
"}",
")",
",",
"sku",
")",
"if",
"sku_changes",
":",
"ret",
"[",
"'changes'",
"]",
"[",
"'sku'",
"]",
"=",
"sku_changes",
"# public_ip_allocation_method changes",
"if",
"public_ip_allocation_method",
":",
"if",
"public_ip_allocation_method",
".",
"capitalize",
"(",
")",
"!=",
"pub_ip",
".",
"get",
"(",
"'public_ip_allocation_method'",
")",
":",
"ret",
"[",
"'changes'",
"]",
"[",
"'public_ip_allocation_method'",
"]",
"=",
"{",
"'old'",
":",
"pub_ip",
".",
"get",
"(",
"'public_ip_allocation_method'",
")",
",",
"'new'",
":",
"public_ip_allocation_method",
"}",
"# public_ip_address_version changes",
"if",
"public_ip_address_version",
":",
"if",
"public_ip_address_version",
".",
"lower",
"(",
")",
"!=",
"pub_ip",
".",
"get",
"(",
"'public_ip_address_version'",
",",
"''",
")",
".",
"lower",
"(",
")",
":",
"ret",
"[",
"'changes'",
"]",
"[",
"'public_ip_address_version'",
"]",
"=",
"{",
"'old'",
":",
"pub_ip",
".",
"get",
"(",
"'public_ip_address_version'",
")",
",",
"'new'",
":",
"public_ip_address_version",
"}",
"# idle_timeout_in_minutes changes",
"if",
"idle_timeout_in_minutes",
"and",
"(",
"int",
"(",
"idle_timeout_in_minutes",
")",
"!=",
"pub_ip",
".",
"get",
"(",
"'idle_timeout_in_minutes'",
")",
")",
":",
"ret",
"[",
"'changes'",
"]",
"[",
"'idle_timeout_in_minutes'",
"]",
"=",
"{",
"'old'",
":",
"pub_ip",
".",
"get",
"(",
"'idle_timeout_in_minutes'",
")",
",",
"'new'",
":",
"idle_timeout_in_minutes",
"}",
"if",
"not",
"ret",
"[",
"'changes'",
"]",
":",
"ret",
"[",
"'result'",
"]",
"=",
"True",
"ret",
"[",
"'comment'",
"]",
"=",
"'Public IP address {0} is already present.'",
".",
"format",
"(",
"name",
")",
"return",
"ret",
"if",
"__opts__",
"[",
"'test'",
"]",
":",
"ret",
"[",
"'result'",
"]",
"=",
"None",
"ret",
"[",
"'comment'",
"]",
"=",
"'Public IP address {0} would be updated.'",
".",
"format",
"(",
"name",
")",
"return",
"ret",
"else",
":",
"ret",
"[",
"'changes'",
"]",
"=",
"{",
"'old'",
":",
"{",
"}",
",",
"'new'",
":",
"{",
"'name'",
":",
"name",
",",
"'tags'",
":",
"tags",
",",
"'dns_settings'",
":",
"dns_settings",
",",
"'sku'",
":",
"sku",
",",
"'public_ip_allocation_method'",
":",
"public_ip_allocation_method",
",",
"'public_ip_address_version'",
":",
"public_ip_address_version",
",",
"'idle_timeout_in_minutes'",
":",
"idle_timeout_in_minutes",
",",
"}",
"}",
"if",
"__opts__",
"[",
"'test'",
"]",
":",
"ret",
"[",
"'comment'",
"]",
"=",
"'Public IP address {0} would be created.'",
".",
"format",
"(",
"name",
")",
"ret",
"[",
"'result'",
"]",
"=",
"None",
"return",
"ret",
"pub_ip_kwargs",
"=",
"kwargs",
".",
"copy",
"(",
")",
"pub_ip_kwargs",
".",
"update",
"(",
"connection_auth",
")",
"pub_ip",
"=",
"__salt__",
"[",
"'azurearm_network.public_ip_address_create_or_update'",
"]",
"(",
"name",
"=",
"name",
",",
"resource_group",
"=",
"resource_group",
",",
"sku",
"=",
"sku",
",",
"tags",
"=",
"tags",
",",
"dns_settings",
"=",
"dns_settings",
",",
"public_ip_allocation_method",
"=",
"public_ip_allocation_method",
",",
"public_ip_address_version",
"=",
"public_ip_address_version",
",",
"idle_timeout_in_minutes",
"=",
"idle_timeout_in_minutes",
",",
"*",
"*",
"pub_ip_kwargs",
")",
"if",
"'error'",
"not",
"in",
"pub_ip",
":",
"ret",
"[",
"'result'",
"]",
"=",
"True",
"ret",
"[",
"'comment'",
"]",
"=",
"'Public IP address {0} has been created.'",
".",
"format",
"(",
"name",
")",
"return",
"ret",
"ret",
"[",
"'comment'",
"]",
"=",
"'Failed to create public IP address {0}! ({1})'",
".",
"format",
"(",
"name",
",",
"pub_ip",
".",
"get",
"(",
"'error'",
")",
")",
"return",
"ret"
] | .. versionadded:: 2019.2.0
Ensure a public IP address exists.
:param name:
Name of the public IP address.
:param resource_group:
The resource group assigned to the public IP address.
:param dns_settings:
An optional dictionary representing a valid PublicIPAddressDnsSettings object. Parameters include
'domain_name_label' and 'reverse_fqdn', which accept strings. The 'domain_name_label' parameter is concatenated
with the regionalized DNS zone to make up the fully qualified domain name associated with the public IP address.
If a domain name label is specified, an A DNS record is created for the public IP in the Microsoft Azure DNS
system. The 'reverse_fqdn' parameter is a user-visible, fully qualified domain name that resolves to this public
IP address. If the reverse FQDN is specified, then a PTR DNS record is created pointing from the IP address in
the in-addr.arpa domain to the reverse FQDN.
:param sku:
The public IP address SKU, which can be 'Basic' or 'Standard'.
:param public_ip_allocation_method:
The public IP allocation method. Possible values are: 'Static' and 'Dynamic'.
:param public_ip_address_version:
The public IP address version. Possible values are: 'IPv4' and 'IPv6'.
:param idle_timeout_in_minutes:
An integer representing the idle timeout of the public IP address.
:param tags:
A dictionary of strings can be passed as tag metadata to the public IP address object.
:param connection_auth:
A dict with subscription and authentication parameters to be used in connecting to the
Azure Resource Manager API.
Example usage:
.. code-block:: yaml
Ensure public IP exists:
azurearm_network.public_ip_address_present:
- name: pub_ip1
- resource_group: group1
- dns_settings:
domain_name_label: decisionlab-ext-test-label
- sku: basic
- public_ip_allocation_method: static
- public_ip_address_version: ipv4
- idle_timeout_in_minutes: 4
- tags:
contact_name: Elmer Fudd Gantry
- connection_auth: {{ profile }}
- require:
- azurearm_resource: Ensure resource group exists | [
"..",
"versionadded",
"::",
"2019",
".",
"2",
".",
"0"
] | python | train |
rochacbruno/dynaconf | dynaconf/base.py | https://github.com/rochacbruno/dynaconf/blob/5a7cc8f8252251cbdf4f4112965801f9dfe2831d/dynaconf/base.py#L729-L732 | def reload(self, env=None, silent=None): # pragma: no cover
"""Clean end Execute all loaders"""
self.clean()
self.execute_loaders(env, silent) | [
"def",
"reload",
"(",
"self",
",",
"env",
"=",
"None",
",",
"silent",
"=",
"None",
")",
":",
"# pragma: no cover",
"self",
".",
"clean",
"(",
")",
"self",
".",
"execute_loaders",
"(",
"env",
",",
"silent",
")"
] | Clean and Execute all loaders | [
"Clean",
"end",
"Execute",
"all",
"loaders"
] | python | train |
skggm/skggm | inverse_covariance/profiling/lattice_graph.py | https://github.com/skggm/skggm/blob/a0ed406586c4364ea3297a658f415e13b5cbdaf8/inverse_covariance/profiling/lattice_graph.py#L40-L73 | def prototype_adjacency(self, n_block_features, alpha):
"""Build a new graph.
Doc for ".create(n_features, alpha)"
Parameters
-----------
n_features : int
alpha : float (0,1)
The complexity / sparsity factor.
Each graph will have a minimum of
n_blocks * ceil(alpha * n_block_features)
where
n_block_features = floor(n_features / self.n_blocks)
edges and exactly this amount if chain_blocks=False.
Returns
-----------
(n_features, n_features) matrices: covariance, precision, adjacency
"""
return lattice(
self.prng,
n_block_features,
alpha,
random_sign=self.random_sign,
low=self.low,
high=self.high,
) | [
"def",
"prototype_adjacency",
"(",
"self",
",",
"n_block_features",
",",
"alpha",
")",
":",
"return",
"lattice",
"(",
"self",
".",
"prng",
",",
"n_block_features",
",",
"alpha",
",",
"random_sign",
"=",
"self",
".",
"random_sign",
",",
"low",
"=",
"self",
".",
"low",
",",
"high",
"=",
"self",
".",
"high",
",",
")"
] | Build a new graph.
Doc for ".create(n_features, alpha)"
Parameters
-----------
n_features : int
alpha : float (0,1)
The complexity / sparsity factor.
Each graph will have a minimum of
n_blocks * ceil(alpha * n_block_features)
where
n_block_features = floor(n_features / self.n_blocks)
edges and exactly this amount if chain_blocks=False.
Returns
-----------
(n_features, n_features) matrices: covariance, precision, adjacency | [
"Build",
"a",
"new",
"graph",
"."
] | python | train |
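A quick check of the edge-count formula quoted in the docstring (exact when chain_blocks=False):

import math

def min_edge_count(n_features, n_blocks, alpha):
    n_block_features = n_features // n_blocks          # floor
    return n_blocks * math.ceil(alpha * n_block_features)

assert min_edge_count(n_features=20, n_blocks=4, alpha=0.5) == 12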
bcbnz/python-rofi | rofi.py | https://github.com/bcbnz/python-rofi/blob/d20b3a2ba4ba1b294b002f25a8fb526c5115d0d4/rofi.py#L465-L533 | def generic_entry(self, prompt, validator=None, message=None, rofi_args=None, **kwargs):
"""A generic entry box.
Parameters
----------
prompt: string
Text prompt for the entry.
validator: function, optional
A function to validate and convert the value entered by the user.
It should take one parameter, the string that the user entered, and
return a tuple (value, error). The value should be the users entry
converted to the appropriate Python type, or None if the entry was
invalid. The error message should be a string telling the user what
was wrong, or None if the entry was valid. The prompt will be
re-displayed to the user (along with the error message) until they
enter a valid value. If no validator is given, the text that the
user entered is returned as-is.
message: string
Optional message to display under the entry.
Returns
-------
The value returned by the validator, or None if the dialog was
cancelled.
Examples
--------
Enforce a minimum entry length:
>>> r = Rofi()
>>> validator = lambda s: (s, None) if len(s) > 6 else (None, "Too short")
>>> r.generic_entry('Enter a 7-character or longer string: ', validator)
"""
error = ""
rofi_args = rofi_args or []
# Keep going until we get something valid.
while True:
args = ['rofi', '-dmenu', '-p', prompt, '-format', 's']
# Add any error to the given message.
msg = message or ""
if error:
msg = '<span color="#FF0000" font_weight="bold">{0:s}</span>\n{1:s}'.format(error, msg)
msg = msg.rstrip('\n')
# If there is actually a message to show.
if msg:
args.extend(['-mesg', msg])
# Add in common arguments.
args.extend(self._common_args(**kwargs))
args.extend(rofi_args)
# Run it.
returncode, stdout = self._run_blocking(args, input="")
# Was the dialog cancelled?
if returncode == 1:
return None
# Get rid of the trailing newline and check its validity.
text = stdout.rstrip('\n')
if validator:
value, error = validator(text)
if not error:
return value
else:
return text | [
"def",
"generic_entry",
"(",
"self",
",",
"prompt",
",",
"validator",
"=",
"None",
",",
"message",
"=",
"None",
",",
"rofi_args",
"=",
"None",
",",
"*",
"*",
"kwargs",
")",
":",
"error",
"=",
"\"\"",
"rofi_args",
"=",
"rofi_args",
"or",
"[",
"]",
"# Keep going until we get something valid.",
"while",
"True",
":",
"args",
"=",
"[",
"'rofi'",
",",
"'-dmenu'",
",",
"'-p'",
",",
"prompt",
",",
"'-format'",
",",
"'s'",
"]",
"# Add any error to the given message.",
"msg",
"=",
"message",
"or",
"\"\"",
"if",
"error",
":",
"msg",
"=",
"'<span color=\"#FF0000\" font_weight=\"bold\">{0:s}</span>\\n{1:s}'",
".",
"format",
"(",
"error",
",",
"msg",
")",
"msg",
"=",
"msg",
".",
"rstrip",
"(",
"'\\n'",
")",
"# If there is actually a message to show.",
"if",
"msg",
":",
"args",
".",
"extend",
"(",
"[",
"'-mesg'",
",",
"msg",
"]",
")",
"# Add in common arguments.",
"args",
".",
"extend",
"(",
"self",
".",
"_common_args",
"(",
"*",
"*",
"kwargs",
")",
")",
"args",
".",
"extend",
"(",
"rofi_args",
")",
"# Run it.",
"returncode",
",",
"stdout",
"=",
"self",
".",
"_run_blocking",
"(",
"args",
",",
"input",
"=",
"\"\"",
")",
"# Was the dialog cancelled?",
"if",
"returncode",
"==",
"1",
":",
"return",
"None",
"# Get rid of the trailing newline and check its validity.",
"text",
"=",
"stdout",
".",
"rstrip",
"(",
"'\\n'",
")",
"if",
"validator",
":",
"value",
",",
"error",
"=",
"validator",
"(",
"text",
")",
"if",
"not",
"error",
":",
"return",
"value",
"else",
":",
"return",
"text"
] | A generic entry box.
Parameters
----------
prompt: string
Text prompt for the entry.
validator: function, optional
A function to validate and convert the value entered by the user.
It should take one parameter, the string that the user entered, and
return a tuple (value, error). The value should be the user's entry
converted to the appropriate Python type, or None if the entry was
invalid. The error message should be a string telling the user what
was wrong, or None if the entry was valid. The prompt will be
re-displayed to the user (along with the error message) until they
enter a valid value. If no validator is given, the text that the
user entered is returned as-is.
message: string
Optional message to display under the entry.
Returns
-------
The value returned by the validator, or None if the dialog was
cancelled.
Examples
--------
Enforce a minimum entry length:
>>> r = Rofi()
>>> validator = lambda s: (s, None) if len(s) > 6 else (None, "Too short")
>>> r.generic_entry('Enter a 7-character or longer string: ', validator) | [
"A",
"generic",
"entry",
"box",
"."
] | python | train |
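Another validator following the (value, error) contract described above, converting the entry to an int; the call itself is commented out because it needs a running rofi:

def int_validator(text):
    try:
        return int(text), None
    except ValueError:
        return None, "Please enter a whole number"

# r = Rofi()
# count = r.generic_entry('How many? ', int_validator)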
pycontribs/pyrax | pyrax/cloudblockstorage.py | https://github.com/pycontribs/pyrax/blob/9ddfd5064b3a292d7337906f3b2d5dce95b50b99/pyrax/cloudblockstorage.py#L98-L105 | def update(self, display_name=None, display_description=None):
"""
Update the specified values on this snapshot. You may specify one or
more values to update. If no values are specified as non-None, the call
is a no-op; no exception will be raised.
"""
return self.manager.update(self, display_name=display_name,
display_description=display_description) | [
"def",
"update",
"(",
"self",
",",
"display_name",
"=",
"None",
",",
"display_description",
"=",
"None",
")",
":",
"return",
"self",
".",
"manager",
".",
"update",
"(",
"self",
",",
"display_name",
"=",
"display_name",
",",
"display_description",
"=",
"display_description",
")"
] | Update the specified values on this snapshot. You may specify one or
more values to update. If no values are specified as non-None, the call
is a no-op; no exception will be raised. | [
"Update",
"the",
"specified",
"values",
"on",
"this",
"snapshot",
".",
"You",
"may",
"specify",
"one",
"or",
"more",
"values",
"to",
"update",
".",
"If",
"no",
"values",
"are",
"specified",
"as",
"non",
"-",
"None",
"the",
"call",
"is",
"a",
"no",
"-",
"op",
";",
"no",
"exception",
"will",
"be",
"raised",
"."
] | python | train |
delph-in/pydelphin | delphin/tokens.py | https://github.com/delph-in/pydelphin/blob/7bd2cd63ab7cf74803e1d6547b9ebc014b382abd/delphin/tokens.py#L85-L107 | def to_dict(self):
"""
Encode the token as a dictionary suitable for JSON serialization.
"""
d = {
'id': self.id,
'start': self.start,
'end': self.end,
'form': self.form
}
if self.lnk is not None:
cfrom, cto = self.lnk.data
d['from'] = cfrom
d['to'] = cto
# d['paths'] = self.paths
if self.surface is not None:
d['surface'] = self.surface
# d['ipos'] = self.ipos
# d['lrules'] = self.lrules
if self.pos:
d['tags'] = [ps[0] for ps in self.pos]
d['probabilities'] = [ps[1] for ps in self.pos]
return d | [
"def",
"to_dict",
"(",
"self",
")",
":",
"d",
"=",
"{",
"'id'",
":",
"self",
".",
"id",
",",
"'start'",
":",
"self",
".",
"start",
",",
"'end'",
":",
"self",
".",
"end",
",",
"'form'",
":",
"self",
".",
"form",
"}",
"if",
"self",
".",
"lnk",
"is",
"not",
"None",
":",
"cfrom",
",",
"cto",
"=",
"self",
".",
"lnk",
".",
"data",
"d",
"[",
"'from'",
"]",
"=",
"cfrom",
"d",
"[",
"'to'",
"]",
"=",
"cto",
"# d['paths'] = self.paths",
"if",
"self",
".",
"surface",
"is",
"not",
"None",
":",
"d",
"[",
"'surface'",
"]",
"=",
"self",
".",
"surface",
"# d['ipos'] = self.ipos",
"# d['lrules'] = self.lrules",
"if",
"self",
".",
"pos",
":",
"d",
"[",
"'tags'",
"]",
"=",
"[",
"ps",
"[",
"0",
"]",
"for",
"ps",
"in",
"self",
".",
"pos",
"]",
"d",
"[",
"'probabilities'",
"]",
"=",
"[",
"ps",
"[",
"1",
"]",
"for",
"ps",
"in",
"self",
".",
"pos",
"]",
"return",
"d"
] | Encode the token as a dictionary suitable for JSON serialization. | [
"Encode",
"the",
"token",
"as",
"a",
"dictionary",
"suitable",
"for",
"JSON",
"serialization",
"."
] | python | train |
hasgeek/coaster | coaster/views/misc.py | https://github.com/hasgeek/coaster/blob/07f7eb5d5f516e22fa14fdf4dc70e0ae13ee398d/coaster/views/misc.py#L115-L172 | def endpoint_for(url, method=None, return_rule=False, follow_redirects=True):
"""
Given an absolute URL, retrieve the matching endpoint name (or rule) and
view arguments. Requires a current request context to determine runtime
environment.
:param str method: HTTP method to use (defaults to GET)
:param bool return_rule: Return the URL rule instead of the endpoint name
:param bool follow_redirects: Follow redirects to final endpoint
:return: Tuple of endpoint name or URL rule or `None`, view arguments
"""
parsed_url = urlsplit(url)
if not parsed_url.netloc:
# We require an absolute URL
return None, {}
# Take the current runtime environment...
environ = dict(request.environ)
# ...but replace the HTTP host with the URL's host...
environ['HTTP_HOST'] = parsed_url.netloc
# ...and the path with the URL's path (after discounting the app path, if not hosted at root).
environ['PATH_INFO'] = parsed_url.path[len(environ.get('SCRIPT_NAME', '')):]
# Create a new request with this environment...
url_request = current_app.request_class(environ)
# ...and a URL adapter with the new request.
url_adapter = current_app.create_url_adapter(url_request)
# Run three hostname tests, one of which must pass:
# 1. Does the URL map have host matching enabled? If so, the URL adapter will validate the hostname.
if current_app.url_map.host_matching:
pass
# 2. If not, does the domain match? url_adapter.server_name will prefer app.config['SERVER_NAME'],
# but if that is not specified, it will take it from the environment.
elif parsed_url.netloc == url_adapter.server_name:
pass
# 3. If subdomain matching is enabled, does the subdomain match?
elif current_app.subdomain_matching and parsed_url.netloc.endswith('.' + url_adapter.server_name):
pass
# If no test passed, we don't have a matching endpoint.
else:
return None, {}
# Now retrieve the endpoint or rule, watching for redirects or resolution failures
try:
return url_adapter.match(parsed_url.path, method, return_rule=return_rule)
except RequestRedirect as r:
# A redirect typically implies `/folder` -> `/folder/`
# This will not be a redirect response from a view, since the view isn't being called
if follow_redirects:
return endpoint_for(r.new_url, method=method, return_rule=return_rule, follow_redirects=follow_redirects)
except (NotFound, MethodNotAllowed):
pass
# If we got here, no endpoint was found.
return None, {} | [
"def",
"endpoint_for",
"(",
"url",
",",
"method",
"=",
"None",
",",
"return_rule",
"=",
"False",
",",
"follow_redirects",
"=",
"True",
")",
":",
"parsed_url",
"=",
"urlsplit",
"(",
"url",
")",
"if",
"not",
"parsed_url",
".",
"netloc",
":",
"# We require an absolute URL",
"return",
"None",
",",
"{",
"}",
"# Take the current runtime environment...",
"environ",
"=",
"dict",
"(",
"request",
".",
"environ",
")",
"# ...but replace the HTTP host with the URL's host...",
"environ",
"[",
"'HTTP_HOST'",
"]",
"=",
"parsed_url",
".",
"netloc",
"# ...and the path with the URL's path (after discounting the app path, if not hosted at root).",
"environ",
"[",
"'PATH_INFO'",
"]",
"=",
"parsed_url",
".",
"path",
"[",
"len",
"(",
"environ",
".",
"get",
"(",
"'SCRIPT_NAME'",
",",
"''",
")",
")",
":",
"]",
"# Create a new request with this environment...",
"url_request",
"=",
"current_app",
".",
"request_class",
"(",
"environ",
")",
"# ...and a URL adapter with the new request.",
"url_adapter",
"=",
"current_app",
".",
"create_url_adapter",
"(",
"url_request",
")",
"# Run three hostname tests, one of which must pass:",
"# 1. Does the URL map have host matching enabled? If so, the URL adapter will validate the hostname.",
"if",
"current_app",
".",
"url_map",
".",
"host_matching",
":",
"pass",
"# 2. If not, does the domain match? url_adapter.server_name will prefer app.config['SERVER_NAME'],",
"# but if that is not specified, it will take it from the environment.",
"elif",
"parsed_url",
".",
"netloc",
"==",
"url_adapter",
".",
"server_name",
":",
"pass",
"# 3. If subdomain matching is enabled, does the subdomain match?",
"elif",
"current_app",
".",
"subdomain_matching",
"and",
"parsed_url",
".",
"netloc",
".",
"endswith",
"(",
"'.'",
"+",
"url_adapter",
".",
"server_name",
")",
":",
"pass",
"# If no test passed, we don't have a matching endpoint.",
"else",
":",
"return",
"None",
",",
"{",
"}",
"# Now retrieve the endpoint or rule, watching for redirects or resolution failures",
"try",
":",
"return",
"url_adapter",
".",
"match",
"(",
"parsed_url",
".",
"path",
",",
"method",
",",
"return_rule",
"=",
"return_rule",
")",
"except",
"RequestRedirect",
"as",
"r",
":",
"# A redirect typically implies `/folder` -> `/folder/`",
"# This will not be a redirect response from a view, since the view isn't being called",
"if",
"follow_redirects",
":",
"return",
"endpoint_for",
"(",
"r",
".",
"new_url",
",",
"method",
"=",
"method",
",",
"return_rule",
"=",
"return_rule",
",",
"follow_redirects",
"=",
"follow_redirects",
")",
"except",
"(",
"NotFound",
",",
"MethodNotAllowed",
")",
":",
"pass",
"# If we got here, no endpoint was found.",
"return",
"None",
",",
"{",
"}"
] | Given an absolute URL, retrieve the matching endpoint name (or rule) and
view arguments. Requires a current request context to determine runtime
environment.
:param str method: HTTP method to use (defaults to GET)
:param bool return_rule: Return the URL rule instead of the endpoint name
:param bool follow_redirects: Follow redirects to final endpoint
:return: Tuple of endpoint name or URL rule or `None`, view arguments | [
"Given",
"an",
"absolute",
"URL",
"retrieve",
"the",
"matching",
"endpoint",
"name",
"(",
"or",
"rule",
")",
"and",
"view",
"arguments",
".",
"Requires",
"a",
"current",
"request",
"context",
"to",
"determine",
"runtime",
"environment",
"."
] | python | train |
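A sketch of calling it inside a request context; the import path and the SERVER_NAME-based hostname match are assumptions:

from flask import Flask
from coaster.views import endpoint_for  # assumed re-export of this module

app = Flask(__name__)
app.config['SERVER_NAME'] = 'example.com'

@app.route('/post/<int:post_id>')
def show_post(post_id):
    return str(post_id)

with app.test_request_context():
    endpoint, view_args = endpoint_for('http://example.com/post/42')
    assert endpoint == 'show_post'
    assert view_args == {'post_id': 42}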
zhmcclient/python-zhmcclient | zhmcclient/_cpc.py | https://github.com/zhmcclient/python-zhmcclient/blob/9657563e5d9184c51d3c903442a58b9725fdf335/zhmcclient/_cpc.py#L1204-L1266 | def list_associated_storage_groups(
self, full_properties=False, filter_args=None):
"""
Return the :term:`storage groups <storage group>` that are associated
to this CPC.
If the CPC does not support the "dpm-storage-management" feature, or
does not have it enabled, an empty list is returned.
Storage groups for which the authenticated user does not have
object-access permission are not included.
Authorization requirements:
* Object-access permission to any storage groups to be included in the
result.
Parameters:
full_properties (bool):
Controls whether the full set of resource properties for each returned
storage group is retrieved, vs. only the following short set:
"object-uri", "cpc-uri", "name", "fulfillment-state", and
"type".
filter_args (dict):
Filter arguments that narrow the list of returned resources to
those that match the specified filter arguments. For details, see
:ref:`Filtering`.
`None` causes no filtering to happen.
The 'cpc-uri' property is automatically added to the filter
arguments and must not be specified in this parameter.
Returns:
: A list of :class:`~zhmcclient.StorageGroup` objects.
Raises:
ValueError: The filter_args parameter specifies the 'cpc-uri'
property
:exc:`~zhmcclient.HTTPError`
:exc:`~zhmcclient.ParseError`
:exc:`~zhmcclient.AuthError`
:exc:`~zhmcclient.ConnectionError`
"""
if filter_args is None:
filter_args = {}
else:
filter_args = filter_args.copy()
if 'cpc-uri' in filter_args:
raise ValueError(
"The filter_args parameter specifies the 'cpc-uri' property "
"with value: %s" % filter_args['cpc-uri'])
filter_args['cpc-uri'] = self.uri
sg_list = self.manager.console.storage_groups.list(
full_properties, filter_args)
return sg_list | [
"def",
"list_associated_storage_groups",
"(",
"self",
",",
"full_properties",
"=",
"False",
",",
"filter_args",
"=",
"None",
")",
":",
"if",
"filter_args",
"is",
"None",
":",
"filter_args",
"=",
"{",
"}",
"else",
":",
"filter_args",
"=",
"filter_args",
".",
"copy",
"(",
")",
"if",
"'cpc-uri'",
"in",
"filter_args",
":",
"raise",
"ValueError",
"(",
"\"The filter_args parameter specifies the 'cpc-uri' property \"",
"\"with value: %s\"",
"%",
"filter_args",
"[",
"'cpc-uri'",
"]",
")",
"filter_args",
"[",
"'cpc-uri'",
"]",
"=",
"self",
".",
"uri",
"sg_list",
"=",
"self",
".",
"manager",
".",
"console",
".",
"storage_groups",
".",
"list",
"(",
"full_properties",
",",
"filter_args",
")",
"return",
"sg_list"
] | Return the :term:`storage groups <storage group>` that are associated
to this CPC.
If the CPC does not support the "dpm-storage-management" feature, or
does not have it enabled, an empty list is returned.
Storage groups for which the authenticated user does not have
object-access permission are not included.
Authorization requirements:
* Object-access permission to any storage groups to be included in the
result.
Parameters:
full_properties (bool):
Controls that the full set of resource properties for each returned
storage group is being retrieved, vs. only the following short set:
"object-uri", "cpc-uri", "name", "fulfillment-state", and
"type".
filter_args (dict):
Filter arguments that narrow the list of returned resources to
those that match the specified filter arguments. For details, see
:ref:`Filtering`.
`None` causes no filtering to happen.
The 'cpc-uri' property is automatically added to the filter
arguments and must not be specified in this parameter.
Returns:
: A list of :class:`~zhmcclient.StorageGroup` objects.
Raises:
ValueError: The filter_args parameter specifies the 'cpc-uri'
property
:exc:`~zhmcclient.HTTPError`
:exc:`~zhmcclient.ParseError`
:exc:`~zhmcclient.AuthError`
:exc:`~zhmcclient.ConnectionError` | [
"Return",
"the",
":",
"term",
":",
"storage",
"groups",
"<storage",
"group",
">",
"that",
"are",
"associated",
"to",
"this",
"CPC",
"."
] | python | train |
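A hypothetical call narrowing the result with filter_args; cpc is assumed to be a zhmcclient.Cpc obtained from a logged-on client elsewhere:

complete_sgs = cpc.list_associated_storage_groups(
    filter_args={'fulfillment-state': 'complete'})
for sg in complete_sgs:
    print(sg.get_property('name'), sg.get_property('fulfillment-state'))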
attilaolah/diffbot.py | diffbot.py | https://github.com/attilaolah/diffbot.py/blob/b66d68a36a22c944297c0575413db23687029af4/diffbot.py#L33-L46 | def _get(url, params=None):
"""HTTP GET request."""
try:
response = requests.get(url, params=params)
response.raise_for_status()
# If JSON fails, return raw data
# (e.g. when downloading CSV job logs).
try:
return response.json()
except ValueError:
return response.text
except NameError:
url = '{0}?{1}'.format(url, urllib.urlencode(params))
return json.loads(urllib2.urlopen(url).read().decode(ENCODING)) | [
"def",
"_get",
"(",
"url",
",",
"params",
"=",
"None",
")",
":",
"try",
":",
"response",
"=",
"requests",
".",
"get",
"(",
"url",
",",
"params",
"=",
"params",
")",
"response",
".",
"raise_for_status",
"(",
")",
"# If JSON fails, return raw data",
"# (e.g. when downloading CSV job logs).",
"try",
":",
"return",
"response",
".",
"json",
"(",
")",
"except",
"ValueError",
":",
"return",
"response",
".",
"text",
"except",
"NameError",
":",
"url",
"=",
"'{0}?{1}'",
".",
"format",
"(",
"url",
",",
"urllib",
".",
"urlencode",
"(",
"params",
")",
")",
"return",
"json",
".",
"loads",
"(",
"urllib2",
".",
"urlopen",
"(",
"url",
")",
".",
"read",
"(",
")",
".",
"decode",
"(",
"ENCODING",
")",
")"
] | HTTP GET request. | [
"HTTP",
"GET",
"request",
"."
] | python | train |
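The except NameError branch is the fallback for environments where requests was never imported: referencing the missing name raises NameError, and the urllib path runs instead. A minimal Python 3 equivalent of that GET-and-decode path:

import json
from urllib.parse import urlencode
from urllib.request import urlopen

def http_get_json(url, params=None):
    if params:
        url = '{0}?{1}'.format(url, urlencode(params))
    with urlopen(url) as resp:
        return json.loads(resp.read().decode('utf-8'))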
agile-geoscience/welly | welly/utils.py | https://github.com/agile-geoscience/welly/blob/ed4c991011d6290938fef365553041026ba29f42/welly/utils.py#L326-L370 | def moving_average(a, length, mode='valid'):
"""
From ``bruges``
Computes the mean in a moving window. Naive implementation.
Example:
>>> test = np.array([1,9,9,9,9,9,9,2,3,9,2,2,3,1,1,1,1,3,4,9,9,9,8,3])
>>> moving_average(test, 7, mode='same')
[ 4.42857143, 5.57142857, 6.71428571, 7.85714286, 8. ,
7.14285714, 7.14285714, 6.14285714, 5.14285714, 4.28571429,
3.14285714, 3. , 2.71428571, 1.57142857, 1.71428571,
2. , 2.85714286, 4. , 5.14285714, 6.14285714,
6.42857143, 6.42857143, 6.28571429, 5.42857143]
TODO:
Other types of average.
"""
pad = np.floor(length/2)
if mode == 'full':
pad *= 2
pad = int(pad)
# Make a padded version, paddding with first and last values
r = np.zeros(a.shape[0] + 2*pad)
r[:pad] = a[0]
r[pad:-pad] = a
r[-pad:] = a[-1]
# Cumsum with shifting trick
s = np.cumsum(r, dtype=float)
s[length:] = s[length:] - s[:-length]
out = s[length-1:]/length
# Decide what to return
if mode == 'same':
if out.shape[0] != a.shape[0]:
# If size doesn't match, then interpolate.
out = (out[:-1, ...] + out[1:, ...]) / 2
return out
elif mode == 'valid':
return out[pad:-pad]
else: # mode=='full' and we used a double pad
return out | [
"def",
"moving_average",
"(",
"a",
",",
"length",
",",
"mode",
"=",
"'valid'",
")",
":",
"pad",
"=",
"np",
".",
"floor",
"(",
"length",
"/",
"2",
")",
"if",
"mode",
"==",
"'full'",
":",
"pad",
"*=",
"2",
"pad",
"=",
"int",
"(",
"pad",
")",
"# Make a padded version, paddding with first and last values",
"r",
"=",
"np",
".",
"zeros",
"(",
"a",
".",
"shape",
"[",
"0",
"]",
"+",
"2",
"*",
"pad",
")",
"r",
"[",
":",
"pad",
"]",
"=",
"a",
"[",
"0",
"]",
"r",
"[",
"pad",
":",
"-",
"pad",
"]",
"=",
"a",
"r",
"[",
"-",
"pad",
":",
"]",
"=",
"a",
"[",
"-",
"1",
"]",
"# Cumsum with shifting trick",
"s",
"=",
"np",
".",
"cumsum",
"(",
"r",
",",
"dtype",
"=",
"float",
")",
"s",
"[",
"length",
":",
"]",
"=",
"s",
"[",
"length",
":",
"]",
"-",
"s",
"[",
":",
"-",
"length",
"]",
"out",
"=",
"s",
"[",
"length",
"-",
"1",
":",
"]",
"/",
"length",
"# Decide what to return",
"if",
"mode",
"==",
"'same'",
":",
"if",
"out",
".",
"shape",
"[",
"0",
"]",
"!=",
"a",
".",
"shape",
"[",
"0",
"]",
":",
"# If size doesn't match, then interpolate.",
"out",
"=",
"(",
"out",
"[",
":",
"-",
"1",
",",
"...",
"]",
"+",
"out",
"[",
"1",
":",
",",
"...",
"]",
")",
"/",
"2",
"return",
"out",
"elif",
"mode",
"==",
"'valid'",
":",
"return",
"out",
"[",
"pad",
":",
"-",
"pad",
"]",
"else",
":",
"# mode=='full' and we used a double pad",
"return",
"out"
] | From ``bruges``
Computes the mean in a moving window. Naive implementation.
Example:
>>> test = np.array([1,9,9,9,9,9,9,2,3,9,2,2,3,1,1,1,1,3,4,9,9,9,8,3])
>>> moving_average(test, 7, mode='same')
[ 4.42857143, 5.57142857, 6.71428571, 7.85714286, 8. ,
7.14285714, 7.14285714, 6.14285714, 5.14285714, 4.28571429,
3.14285714, 3. , 2.71428571, 1.57142857, 1.71428571,
2. , 2.85714286, 4. , 5.14285714, 6.14285714,
6.42857143, 6.42857143, 6.28571429, 5.42857143]
TODO:
Other types of average. | [
"From",
"bruges"
] | python | train |
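The cumsum shifting trick used above, in isolation: every length-L window sum becomes s[i] - s[i - L], so all window means cost O(n) overall.

import numpy as np

a = np.array([1., 2., 3., 4., 5.])
length = 3
s = np.cumsum(a)
s[length:] = s[length:] - s[:-length]
window_means = s[length - 1:] / length
assert np.allclose(window_means, [2., 3., 4.])  # means of [1,2,3],[2,3,4],[3,4,5]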
empirical-org/Quill-NLP-Tools-and-Datasets | utils/qfragment/examples/porcupine/app.py | https://github.com/empirical-org/Quill-NLP-Tools-and-Datasets/blob/f2ff579ddf3a556d9cdc47c5f702422fa06863d9/utils/qfragment/examples/porcupine/app.py#L49-L56 | def list_submissions():
"""List the past submissions with information about them"""
submissions = []
try:
submissions = session.query(Submission).all()
except SQLAlchemyError as e:
session.rollback()
return render_template('list_submissions.html', submissions=submissions) | [
"def",
"list_submissions",
"(",
")",
":",
"submissions",
"=",
"[",
"]",
"try",
":",
"submissions",
"=",
"session",
".",
"query",
"(",
"Submission",
")",
".",
"all",
"(",
")",
"except",
"SQLAlchemyError",
"as",
"e",
":",
"session",
".",
"rollback",
"(",
")",
"return",
"render_template",
"(",
"'list_submissions.html'",
",",
"submissions",
"=",
"submissions",
")"
] | List the past submissions with information about them | [
"List",
"the",
"past",
"submissions",
"with",
"information",
"about",
"them"
] | python | train |
CalebBell/ht | ht/conv_internal.py | https://github.com/CalebBell/ht/blob/3097ef9524c4cf0068ad453c17b10ec9ce551eee/ht/conv_internal.py#L634-L677 | def turbulent_Petukhov_Kirillov_Popov(Re=None, Pr=None, fd=None):
r'''Calculates internal convection Nusselt number for turbulent flows
in pipe according to [2]_ and [3]_ as in [1]_.
.. math::
Nu = \frac{(f/8)RePr}{C+12.7(f/8)^{1/2}(Pr^{2/3}-1)}\\
C = 1.07 + 900/Re - [0.63/(1+10Pr)]
Parameters
----------
Re : float
Reynolds number, [-]
Pr : float
Prandtl number, [-]
fd : float
Darcy friction factor [-]
Returns
-------
Nu : float
Nusselt number, [-]
Notes
-----
Range according to [1]_ is 0.5 < Pr ≤ 10^6 and 4000 ≤ Re ≤ 5*10^6
Examples
--------
>>> turbulent_Petukhov_Kirillov_Popov(Re=1E5, Pr=1.2, fd=0.0185)
250.11935088905105
References
----------
.. [1] Rohsenow, Warren and James Hartnett and Young Cho. Handbook of Heat
Transfer, 3E. New York: McGraw-Hill, 1998.
.. [2] B. S. Petukhov, and V. V. Kirillov, "The Problem of Heat Exchange
in the Turbulent Flow of Liquids in Tubes," (Russian) Teploenergetika,
(4): 63-68, 1958
.. [3] B. S. Petukhov and V. N. Popov, "Theoretical Calculation of Heat
Exchange in Turbulent Flow in Tubes of an Incompressible Fluidwith
Variable Physical Properties," High Temp., (111): 69-83, 1963.
'''
C = 1.07 + 900./Re - (0.63/(1. + 10.*Pr))
return (fd/8.)*Re*Pr/(C + 12.7*(fd/8.)**0.5*(Pr**(2/3.) - 1.)) | [
"def",
"turbulent_Petukhov_Kirillov_Popov",
"(",
"Re",
"=",
"None",
",",
"Pr",
"=",
"None",
",",
"fd",
"=",
"None",
")",
":",
"C",
"=",
"1.07",
"+",
"900.",
"/",
"Re",
"-",
"(",
"0.63",
"/",
"(",
"1.",
"+",
"10.",
"*",
"Pr",
")",
")",
"return",
"(",
"fd",
"/",
"8.",
")",
"*",
"Re",
"*",
"Pr",
"/",
"(",
"C",
"+",
"12.7",
"*",
"(",
"fd",
"/",
"8.",
")",
"**",
"0.5",
"*",
"(",
"Pr",
"**",
"(",
"2",
"/",
"3.",
")",
"-",
"1.",
")",
")"
] | r'''Calculates internal convection Nusselt number for turbulent flows
in pipe according to [2]_ and [3]_ as in [1]_.
.. math::
Nu = \frac{(f/8)RePr}{C+12.7(f/8)^{1/2}(Pr^{2/3}-1)}\\
C = 1.07 + 900/Re - [0.63/(1+10Pr)]
Parameters
----------
Re : float
Reynolds number, [-]
Pr : float
Prandtl number, [-]
fd : float
Darcy friction factor [-]
Returns
-------
Nu : float
Nusselt number, [-]
Notes
-----
Range according to [1]_ is 0.5 < Pr ≤ 10^6 and 4000 ≤ Re ≤ 5*10^6
Examples
--------
>>> turbulent_Petukhov_Kirillov_Popov(Re=1E5, Pr=1.2, fd=0.0185)
250.11935088905105
References
----------
.. [1] Rohsenow, Warren and James Hartnett and Young Cho. Handbook of Heat
Transfer, 3E. New York: McGraw-Hill, 1998.
.. [2] B. S. Petukhov, and V. V. Kirillov, "The Problem of Heat Exchange
in the Turbulent Flow of Liquids in Tubes," (Russian) Teploenergetika,
(4): 63-68, 1958
.. [3] B. S. Petukhov and V. N. Popov, "Theoretical Calculation of Heat
Exchange in Turbulent Flow in Tubes of an Incompressible Fluid with
Variable Physical Properties," High Temp., (111): 69-83, 1963. | [
"r",
"Calculates",
"internal",
"convection",
"Nusselt",
"number",
"for",
"turbulent",
"flows",
"in",
"pipe",
"according",
"to",
"[",
"2",
"]",
"_",
"and",
"[",
"3",
"]",
"_",
"as",
"in",
"[",
"1",
"]",
"_",
"."
] | python | train |
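The correlation above is simple enough to cross-check by hand; the sketch below is a standalone reimplementation of the same equation (not a call into ht) and reproduces the doctest value.

def nu_pkp(Re, Pr, fd):
    # Nu = (f/8)*Re*Pr / (C + 12.7*sqrt(f/8)*(Pr**(2/3) - 1)), with
    # C = 1.07 + 900/Re - 0.63/(1 + 10*Pr), as given in the docstring above.
    C = 1.07 + 900./Re - 0.63/(1. + 10.*Pr)
    return (fd/8.)*Re*Pr/(C + 12.7*(fd/8.)**0.5*(Pr**(2/3.) - 1.))

print(nu_pkp(Re=1E5, Pr=1.2, fd=0.0185))  # 250.119..., matching the example above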
yyuu/botornado | boto/mturk/connection.py | https://github.com/yyuu/botornado/blob/fffb056f5ff2324d1d5c1304014cfb1d899f602e/boto/mturk/connection.py#L515-L523 | def notify_workers(self, worker_ids, subject, message_text):
"""
Send a text message to workers.
"""
params = {'Subject' : subject,
'MessageText': message_text}
self.build_list_params(params, worker_ids, 'WorkerId')
return self._process_request('NotifyWorkers', params) | [
"def",
"notify_workers",
"(",
"self",
",",
"worker_ids",
",",
"subject",
",",
"message_text",
")",
":",
"params",
"=",
"{",
"'Subject'",
":",
"subject",
",",
"'MessageText'",
":",
"message_text",
"}",
"self",
".",
"build_list_params",
"(",
"params",
",",
"worker_ids",
",",
"'WorkerId'",
")",
"return",
"self",
".",
"_process_request",
"(",
"'NotifyWorkers'",
",",
"params",
")"
] | Send a text message to workers. | [
"Send",
"a",
"text",
"message",
"to",
"workers",
"."
] | python | train |
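A minimal usage sketch for notify_workers; the credentials and worker IDs below are placeholders, not values from the source.

from boto.mturk.connection import MTurkConnection

conn = MTurkConnection(aws_access_key_id='...',        # placeholder credentials
                       aws_secret_access_key='...')
# One NotifyWorkers request fans the same message out to several workers.
conn.notify_workers(worker_ids=['A1EXAMPLEWORKERID', 'A2EXAMPLEWORKERID'],
                    subject='Bonus granted',
                    message_text='Thanks for the careful work on our HITs.')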
PythonSanSebastian/docstamp | docstamp/qrcode.py | https://github.com/PythonSanSebastian/docstamp/blob/b43808f2e15351b0b2f0b7eade9c7ef319c9e646/docstamp/qrcode.py#L44-L58 | def _qrcode_to_file(qrcode, out_filepath):
""" Save a `qrcode` object into `out_filepath`.
Parameters
----------
qrcode: qrcode object
out_filepath: str
Path to the output file.
"""
try:
qrcode.save(out_filepath)
except Exception as exc:
raise IOError('Error trying to save QR code file {}.'.format(out_filepath)) from exc
else:
return qrcode | [
"def",
"_qrcode_to_file",
"(",
"qrcode",
",",
"out_filepath",
")",
":",
"try",
":",
"qrcode",
".",
"save",
"(",
"out_filepath",
")",
"except",
"Exception",
"as",
"exc",
":",
"raise",
"IOError",
"(",
"'Error trying to save QR code file {}.'",
".",
"format",
"(",
"out_filepath",
")",
")",
"from",
"exc",
"else",
":",
"return",
"qrcode"
] | Save a `qrcode` object into `out_filepath`.
Parameters
----------
qrcode: qrcode object
out_filepath: str
Path to the output file. | [
"Save",
"a",
"qrcode",
"object",
"into",
"out_filepath",
".",
"Parameters",
"----------",
"qrcode",
":",
"qrcode",
"object"
] | python | test |
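For context, one way a caller might build the qrcode object this helper expects, using the common qrcode package (an assumption; docstamp may construct it differently):

import qrcode

code = qrcode.make('https://example.com/docs/1234')  # PIL-backed image with a .save() method
_qrcode_to_file(code, 'qr.png')                      # raises a chained IOError if saving fails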
maxzheng/localconfig | localconfig/manager.py | https://github.com/maxzheng/localconfig/blob/636087f2489295d9dae2693dda8a86e4daa4ff9d/localconfig/manager.py#L398-L414 | def _set_comment(self, section, comment, key=None):
"""
Set a comment for section or key
:param str section: Section to add comment to
:param str comment: Comment to add
:param str key: Key to add comment to
"""
if '\n' in comment:
comment = '\n# '.join(comment.split('\n'))
comment = '# ' + comment
if key:
self._comments[(section, key)] = comment
else:
self._comments[section] = comment | [
"def",
"_set_comment",
"(",
"self",
",",
"section",
",",
"comment",
",",
"key",
"=",
"None",
")",
":",
"if",
"'\\n'",
"in",
"comment",
":",
"comment",
"=",
"'\\n# '",
".",
"join",
"(",
"comment",
".",
"split",
"(",
"'\\n'",
")",
")",
"comment",
"=",
"'# '",
"+",
"comment",
"if",
"key",
":",
"self",
".",
"_comments",
"[",
"(",
"section",
",",
"key",
")",
"]",
"=",
"comment",
"else",
":",
"self",
".",
"_comments",
"[",
"section",
"]",
"=",
"comment"
] | Set a comment for section or key
:param str section: Section to add comment to
:param str comment: Comment to add
:param str key: Key to add comment to | [
"Set",
"a",
"comment",
"for",
"section",
"or",
"key"
] | python | train |
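The formatting above reduces to prefixing every line of the comment with '# '; a tiny standalone sketch of that transformation:

comment = 'Connection settings\nEdit with care'
formatted = '# ' + '\n# '.join(comment.split('\n'))
print(formatted)
# # Connection settings
# # Edit with care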
astroML/gatspy | gatspy/periodic/template_modeler.py | https://github.com/astroML/gatspy/blob/a8f94082a3f27dfe9cb58165707b883bf28d9223/gatspy/periodic/template_modeler.py#L92-L111 | def _chi2(self, theta, period, tmpid, return_gradient=False):
"""
Compute the chi2 for the given parameters, period, & template
Optionally return the gradient for faster optimization
"""
template = self.templates[tmpid]
phase = (self.t / period - theta[2]) % 1
model = theta[0] + theta[1] * template(phase)
chi2 = (((model - self.y) / self.dy) ** 2).sum()
if return_gradient:
grad = 2 * (model - self.y) / self.dy ** 2
gradient = np.array([np.sum(grad),
np.sum(grad * template(phase)),
-np.sum(grad * theta[1]
* template.derivative(1)(phase))])
return chi2, gradient
else:
return chi2 | [
"def",
"_chi2",
"(",
"self",
",",
"theta",
",",
"period",
",",
"tmpid",
",",
"return_gradient",
"=",
"False",
")",
":",
"template",
"=",
"self",
".",
"templates",
"[",
"tmpid",
"]",
"phase",
"=",
"(",
"self",
".",
"t",
"/",
"period",
"-",
"theta",
"[",
"2",
"]",
")",
"%",
"1",
"model",
"=",
"theta",
"[",
"0",
"]",
"+",
"theta",
"[",
"1",
"]",
"*",
"template",
"(",
"phase",
")",
"chi2",
"=",
"(",
"(",
"(",
"model",
"-",
"self",
".",
"y",
")",
"/",
"self",
".",
"dy",
")",
"**",
"2",
")",
".",
"sum",
"(",
")",
"if",
"return_gradient",
":",
"grad",
"=",
"2",
"*",
"(",
"model",
"-",
"self",
".",
"y",
")",
"/",
"self",
".",
"dy",
"**",
"2",
"gradient",
"=",
"np",
".",
"array",
"(",
"[",
"np",
".",
"sum",
"(",
"grad",
")",
",",
"np",
".",
"sum",
"(",
"grad",
"*",
"template",
"(",
"phase",
")",
")",
",",
"-",
"np",
".",
"sum",
"(",
"grad",
"*",
"theta",
"[",
"1",
"]",
"*",
"template",
".",
"derivative",
"(",
"1",
")",
"(",
"phase",
")",
")",
"]",
")",
"return",
"chi2",
",",
"gradient",
"else",
":",
"return",
"chi2"
] | Compute the chi2 for the given parameters, period, & template
Optionally return the gradient for faster optimization | [
"Compute",
"the",
"chi2",
"for",
"the",
"given",
"parameters",
"period",
"&",
"template"
] | python | train |
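The analytic gradient above is easy to get wrong, so it is worth checking against finite differences. The sketch below is a standalone reconstruction with a made-up spline template and synthetic data, not gatspy code:

import numpy as np
from scipy.interpolate import UnivariateSpline

rng = np.random.RandomState(0)
t = 10 * rng.rand(50)
period = 2.5
grid = np.linspace(0, 1, 100)
template = UnivariateSpline(grid, np.sin(2 * np.pi * grid), s=0)  # toy periodic template
y = 1.0 + 0.5 * template((t / period - 0.1) % 1) + 0.05 * rng.randn(50)
dy = 0.05 * np.ones_like(y)

def chi2(theta):
    phase = (t / period - theta[2]) % 1
    model = theta[0] + theta[1] * template(phase)
    return (((model - y) / dy) ** 2).sum()

def grad(theta):
    # Same expressions as the return_gradient branch above.
    phase = (t / period - theta[2]) % 1
    model = theta[0] + theta[1] * template(phase)
    g = 2 * (model - y) / dy ** 2
    return np.array([g.sum(), (g * template(phase)).sum(),
                     -(g * theta[1] * template.derivative(1)(phase)).sum()])

theta = np.array([0.9, 0.6, 0.12])
eps = 1e-6
num = [(chi2(theta + eps * e) - chi2(theta - eps * e)) / (2 * eps)
       for e in np.eye(3)]
print(np.allclose(num, grad(theta), rtol=1e-4))  # should print True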
ultrabug/py3status | py3status/core.py | https://github.com/ultrabug/py3status/blob/4c105f1b44f7384ca4f7da5f821a47e468c7dee2/py3status/core.py#L455-L468 | def get_user_configured_modules(self):
"""
Get a dict of all available and configured py3status modules
in the user's i3status.conf.
"""
user_modules = {}
if not self.py3_modules:
return user_modules
for module_name, module_info in self.get_user_modules().items():
for module in self.py3_modules:
if module_name == module.split(" ")[0]:
include_path, f_name = module_info
user_modules[module_name] = (include_path, f_name)
return user_modules | [
"def",
"get_user_configured_modules",
"(",
"self",
")",
":",
"user_modules",
"=",
"{",
"}",
"if",
"not",
"self",
".",
"py3_modules",
":",
"return",
"user_modules",
"for",
"module_name",
",",
"module_info",
"in",
"self",
".",
"get_user_modules",
"(",
")",
".",
"items",
"(",
")",
":",
"for",
"module",
"in",
"self",
".",
"py3_modules",
":",
"if",
"module_name",
"==",
"module",
".",
"split",
"(",
"\" \"",
")",
"[",
"0",
"]",
":",
"include_path",
",",
"f_name",
"=",
"module_info",
"user_modules",
"[",
"module_name",
"]",
"=",
"(",
"include_path",
",",
"f_name",
")",
"return",
"user_modules"
] | Get a dict of all available and configured py3status modules
in the user's i3status.conf. | [
"Get",
"a",
"dict",
"of",
"all",
"available",
"and",
"configured",
"py3status",
"modules",
"in",
"the",
"user",
"s",
"i3status",
".",
"conf",
"."
] | python | train |
xflows/rdm | rdm/db/converters.py | https://github.com/xflows/rdm/blob/d984e2a0297e5fa8d799953bbd0dba79b05d403d/rdm/db/converters.py#L304-L314 | def target_Orange_table(self):
'''
Returns the target table as an Orange example table.
:rtype: orange.ExampleTable
'''
table, cls_att = self.db.target_table, self.db.target_att
if not self.db.orng_tables:
return self.convert_table(table, cls_att=cls_att)
else:
return self.db.orng_tables[table] | [
"def",
"target_Orange_table",
"(",
"self",
")",
":",
"table",
",",
"cls_att",
"=",
"self",
".",
"db",
".",
"target_table",
",",
"self",
".",
"db",
".",
"target_att",
"if",
"not",
"self",
".",
"db",
".",
"orng_tables",
":",
"return",
"self",
".",
"convert_table",
"(",
"table",
",",
"cls_att",
"=",
"cls_att",
")",
"else",
":",
"return",
"self",
".",
"db",
".",
"orng_tables",
"[",
"table",
"]"
] | Returns the target table as an Orange example table.
:rtype: orange.ExampleTable | [
"Returns",
"the",
"target",
"table",
"as",
"an",
"Orange",
"example",
"table",
"."
] | python | train |
quantopian/empyrical | empyrical/stats.py | https://github.com/quantopian/empyrical/blob/badbdca75f5b293f28b5e947974894de041d6868/empyrical/stats.py#L937-L960 | def _to_pandas(ob):
"""Convert an array-like to a pandas object.
Parameters
----------
ob : array-like
The object to convert.
Returns
-------
pandas_structure : pd.Series or pd.DataFrame
The correct structure based on the dimensionality of the data.
"""
if isinstance(ob, (pd.Series, pd.DataFrame)):
return ob
if ob.ndim == 1:
return pd.Series(ob)
elif ob.ndim == 2:
return pd.DataFrame(ob)
else:
raise ValueError(
'cannot convert array of dim > 2 to a pandas structure',
) | [
"def",
"_to_pandas",
"(",
"ob",
")",
":",
"if",
"isinstance",
"(",
"ob",
",",
"(",
"pd",
".",
"Series",
",",
"pd",
".",
"DataFrame",
")",
")",
":",
"return",
"ob",
"if",
"ob",
".",
"ndim",
"==",
"1",
":",
"return",
"pd",
".",
"Series",
"(",
"ob",
")",
"elif",
"ob",
".",
"ndim",
"==",
"2",
":",
"return",
"pd",
".",
"DataFrame",
"(",
"ob",
")",
"else",
":",
"raise",
"ValueError",
"(",
"'cannot convert array of dim > 2 to a pandas structure'",
",",
")"
] | Convert an array-like to a pandas object.
Parameters
----------
ob : array-like
The object to convert.
Returns
-------
pandas_structure : pd.Series or pd.DataFrame
The correct structure based on the dimensionality of the data. | [
"Convert",
"an",
"array",
"-",
"like",
"to",
"a",
"pandas",
"object",
"."
] | python | train |
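Dispatch behavior of the helper above, sketched as assumed usage (the function is private to empyrical):

import numpy as np

print(type(_to_pandas(np.arange(3))))        # pandas Series for 1-d input
print(type(_to_pandas(np.ones((2, 2)))))     # pandas DataFrame for 2-d input
# _to_pandas(np.ones((2, 2, 2)))             # ValueError: cannot convert dim > 2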
AkihikoITOH/capybara | capybara/virtualenv/lib/python2.7/site-packages/lxml/html/__init__.py | https://github.com/AkihikoITOH/capybara/blob/e86c2173ea386654f4ae061148e8fbe3f25e715c/capybara/virtualenv/lib/python2.7/site-packages/lxml/html/__init__.py#L918-L953 | def submit_form(form, extra_values=None, open_http=None):
"""
Helper function to submit a form. Returns a file-like object, as from
``urllib.urlopen()``. This object also has a ``.geturl()`` function,
which shows the URL if there were any redirects.
You can use this like::
form = doc.forms[0]
form.inputs['foo'].value = 'bar' # etc
response = form.submit()
doc = parse(response)
doc.make_links_absolute(response.geturl())
To change the HTTP requester, pass a function as ``open_http`` keyword
argument that opens the URL for you. The function must have the following
signature::
open_http(method, URL, values)
The method is one of 'GET' or 'POST', the URL is the target URL as a
string, and the values are a sequence of ``(name, value)`` tuples with the
form data.
"""
values = form.form_values()
if extra_values:
if hasattr(extra_values, 'items'):
extra_values = extra_values.items()
values.extend(extra_values)
if open_http is None:
open_http = open_http_urllib
if form.action:
url = form.action
else:
url = form.base_url
return open_http(form.method, url, values) | [
"def",
"submit_form",
"(",
"form",
",",
"extra_values",
"=",
"None",
",",
"open_http",
"=",
"None",
")",
":",
"values",
"=",
"form",
".",
"form_values",
"(",
")",
"if",
"extra_values",
":",
"if",
"hasattr",
"(",
"extra_values",
",",
"'items'",
")",
":",
"extra_values",
"=",
"extra_values",
".",
"items",
"(",
")",
"values",
".",
"extend",
"(",
"extra_values",
")",
"if",
"open_http",
"is",
"None",
":",
"open_http",
"=",
"open_http_urllib",
"if",
"form",
".",
"action",
":",
"url",
"=",
"form",
".",
"action",
"else",
":",
"url",
"=",
"form",
".",
"base_url",
"return",
"open_http",
"(",
"form",
".",
"method",
",",
"url",
",",
"values",
")"
] | Helper function to submit a form. Returns a file-like object, as from
``urllib.urlopen()``. This object also has a ``.geturl()`` function,
which shows the URL if there were any redirects.
You can use this like::
form = doc.forms[0]
form.inputs['foo'].value = 'bar' # etc
response = form.submit()
doc = parse(response)
doc.make_links_absolute(response.geturl())
To change the HTTP requester, pass a function as ``open_http`` keyword
argument that opens the URL for you. The function must have the following
signature::
open_http(method, URL, values)
The method is one of 'GET' or 'POST', the URL is the target URL as a
string, and the values are a sequence of ``(name, value)`` tuples with the
form data. | [
"Helper",
"function",
"to",
"submit",
"a",
"form",
".",
"Returns",
"a",
"file",
"-",
"like",
"object",
"as",
"from",
"urllib",
".",
"urlopen",
"()",
".",
"This",
"object",
"also",
"has",
"a",
".",
"geturl",
"()",
"function",
"which",
"shows",
"the",
"URL",
"if",
"there",
"were",
"any",
"redirects",
"."
] | python | test |
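The docstring above fixes the open_http contract without giving an implementation; below is one possible requests-based version, offered as an illustration rather than lxml code:

import io
import requests

class _Response(io.BytesIO):
    """File-like body that also answers .geturl(), as submit_form callers expect."""
    def __init__(self, content, url):
        super(_Response, self).__init__(content)
        self._url = url
    def geturl(self):
        return self._url

def open_http_requests(method, url, values):
    # method is 'GET' or 'POST'; values is a sequence of (name, value) tuples.
    if method.upper() == 'GET':
        resp = requests.get(url, params=list(values))
    else:
        resp = requests.post(url, data=list(values))
    return _Response(resp.content, resp.url)

# usage: submit_form(form, open_http=open_http_requests)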
twilio/twilio-python | twilio/rest/messaging/v1/service/alpha_sender.py | https://github.com/twilio/twilio-python/blob/c867895f55dcc29f522e6e8b8868d0d18483132f/twilio/rest/messaging/v1/service/alpha_sender.py#L303-L317 | def _proxy(self):
"""
Generate an instance context for the instance, the context is capable of
performing various actions. All instance actions are proxied to the context
:returns: AlphaSenderContext for this AlphaSenderInstance
:rtype: twilio.rest.messaging.v1.service.alpha_sender.AlphaSenderContext
"""
if self._context is None:
self._context = AlphaSenderContext(
self._version,
service_sid=self._solution['service_sid'],
sid=self._solution['sid'],
)
return self._context | [
"def",
"_proxy",
"(",
"self",
")",
":",
"if",
"self",
".",
"_context",
"is",
"None",
":",
"self",
".",
"_context",
"=",
"AlphaSenderContext",
"(",
"self",
".",
"_version",
",",
"service_sid",
"=",
"self",
".",
"_solution",
"[",
"'service_sid'",
"]",
",",
"sid",
"=",
"self",
".",
"_solution",
"[",
"'sid'",
"]",
",",
")",
"return",
"self",
".",
"_context"
] | Generate an instance context for the instance, the context is capable of
performing various actions. All instance actions are proxied to the context
:returns: AlphaSenderContext for this AlphaSenderInstance
:rtype: twilio.rest.messaging.v1.service.alpha_sender.AlphaSenderContext | [
"Generate",
"an",
"instance",
"context",
"for",
"the",
"instance",
"the",
"context",
"is",
"capable",
"of",
"performing",
"various",
"actions",
".",
"All",
"instance",
"actions",
"are",
"proxied",
"to",
"the",
"context"
] | python | train |
apple/turicreate | src/external/coremltools_wrap/coremltools/deps/protobuf/python/google/protobuf/internal/well_known_types.py | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/external/coremltools_wrap/coremltools/deps/protobuf/python/google/protobuf/internal/well_known_types.py#L674-L684 | def _AddFieldPaths(node, prefix, field_mask):
"""Adds the field paths descended from node to field_mask."""
if not node:
field_mask.paths.append(prefix)
return
for name in sorted(node):
if prefix:
child_path = prefix + '.' + name
else:
child_path = name
_AddFieldPaths(node[name], child_path, field_mask) | [
"def",
"_AddFieldPaths",
"(",
"node",
",",
"prefix",
",",
"field_mask",
")",
":",
"if",
"not",
"node",
":",
"field_mask",
".",
"paths",
".",
"append",
"(",
"prefix",
")",
"return",
"for",
"name",
"in",
"sorted",
"(",
"node",
")",
":",
"if",
"prefix",
":",
"child_path",
"=",
"prefix",
"+",
"'.'",
"+",
"name",
"else",
":",
"child_path",
"=",
"name",
"_AddFieldPaths",
"(",
"node",
"[",
"name",
"]",
",",
"child_path",
",",
"field_mask",
")"
] | Adds the field paths descended from node to field_mask. | [
"Adds",
"the",
"field",
"paths",
"descended",
"from",
"node",
"to",
"field_mask",
"."
] | python | train |
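A short trace of the recursion above on a toy tree; the stand-in mask class only mimics the .paths attribute of a real google.protobuf FieldMask:

class _Mask(object):
    def __init__(self):
        self.paths = []

mask = _Mask()
tree = {'id': {}, 'payload': {'amount': {}, 'currency': {}}}
_AddFieldPaths(tree, '', mask)
print(mask.paths)  # ['id', 'payload.amount', 'payload.currency']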
AguaClara/aguaclara | aguaclara/design/cdc.py | https://github.com/AguaClara/aguaclara/blob/8dd4e734768b166a7fc2b60388a24df2f93783fc/aguaclara/design/cdc.py#L33-L43 | def viscosity_kinematic_alum(conc_alum, temp):
"""Return the dynamic viscosity of water at a given temperature.
If given units, the function will automatically convert to Kelvin.
If not given units, the function will assume Kelvin.
This function assumes that the temperature dependence can be explained
based on the effect on water and that there is no confounding effect from
the coagulant.
"""
nu = (1 + (4.255 * 10**-6) * conc_alum**2.289) * pc.viscosity_kinematic(temp).magnitude
return nu | [
"def",
"viscosity_kinematic_alum",
"(",
"conc_alum",
",",
"temp",
")",
":",
"nu",
"=",
"(",
"1",
"+",
"(",
"4.255",
"*",
"10",
"**",
"-",
"6",
")",
"*",
"conc_alum",
"**",
"2.289",
")",
"*",
"pc",
".",
"viscosity_kinematic",
"(",
"temp",
")",
".",
"magnitude",
"return",
"nu"
] | Return the kinematic viscosity of water at a given temperature.
If given units, the function will automatically convert to Kelvin.
If not given units, the function will assume Kelvin.
This function assumes that the temperature dependence can be explained
based on the effect on water and that there is no confounding effect from
the coagulant. | [
"Return",
"the",
"dynamic",
"viscosity",
"of",
"water",
"at",
"a",
"given",
"temperature",
"."
] | python | train |
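A worked evaluation of the correlation above; the baseline kinematic viscosity of water is an assumed textbook value at 20 degC, not a number taken from aguaclara:

nu_water = 1.004e-6   # m^2/s at 20 degC (assumed reference value)
conc_alum = 60        # in whatever concentration units the correlation expects
multiplier = 1 + (4.255 * 10**-6) * conc_alum**2.289
print(multiplier, multiplier * nu_water)   # ~1.05, ~1.05e-6 m^2/s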
ajvb/webpype | webpype/client.py | https://github.com/ajvb/webpype/blob/cc9ed84c81b98a83925630b417ddb67b7567b677/webpype/client.py#L82-L100 | def execute_from_file(self, url, file_var):
'''
Identical to WebPypeClient.execute(), except this function accepts a
file path or file type instead of a dictionary.
'''
if isinstance(file_var, file):
f = file_var
elif isinstance(file_var, str):
try:
f = open(file_var)
except IOError as e:
raise e
else:
raise TypeError("This function only accepts a 'file' type or file path")
inputs = json.loads(f.read())
resp = self.execute(url, inputs)
return resp | [
"def",
"execute_from_file",
"(",
"self",
",",
"url",
",",
"file_var",
")",
":",
"if",
"isinstance",
"(",
"file_var",
",",
"file",
")",
":",
"f",
"=",
"file_var",
"elif",
"isinstance",
"(",
"file_var",
",",
"str",
")",
":",
"try",
":",
"f",
"=",
"open",
"(",
"file_var",
")",
"except",
"IOError",
",",
"e",
":",
"raise",
"e",
"else",
":",
"raise",
"TypeError",
"(",
"\"This function only accepts a 'file' type or file path\"",
")",
"inputs",
"=",
"json",
".",
"loads",
"(",
"f",
".",
"read",
"(",
")",
")",
"resp",
"=",
"self",
".",
"execute",
"(",
"url",
",",
"inputs",
")",
"return",
"resp"
] | Identical to WebPypeClient.execute(), except this function accepts a
file path or file type instead of a dictionary. | [
"Identical",
"to",
"WebPypeClient",
".",
"execute",
"()",
"except",
"this",
"function",
"accepts",
"a",
"file",
"path",
"or",
"file",
"type",
"instead",
"of",
"a",
"dictionary",
"."
] | python | train |
dturanski/springcloudstream | springcloudstream/tcp/tcp.py | https://github.com/dturanski/springcloudstream/blob/208b542f9eba82e97882d52703af8e965a62a980/springcloudstream/tcp/tcp.py#L97-L112 | def create_handler(cls, message_handler, buffer_size, logger):
"""
Class variables used here since the framework creates an instance for each connection
:param message_handler: the MessageHandler used to process each message.
:param buffer_size: the TCP buffer size.
:param logger: the global logger.
:return: this class.
"""
cls.BUFFER_SIZE = buffer_size
cls.message_handler = message_handler
cls.logger = logger
cls.message_handler.logger = logging.getLogger(message_handler.__class__.__name__)
cls.message_handler.logger.setLevel(logger.level)
return cls | [
"def",
"create_handler",
"(",
"cls",
",",
"message_handler",
",",
"buffer_size",
",",
"logger",
")",
":",
"cls",
".",
"BUFFER_SIZE",
"=",
"buffer_size",
"cls",
".",
"message_handler",
"=",
"message_handler",
"cls",
".",
"logger",
"=",
"logger",
"cls",
".",
"message_handler",
".",
"logger",
"=",
"logging",
".",
"getLogger",
"(",
"message_handler",
".",
"__class__",
".",
"__name__",
")",
"cls",
".",
"message_handler",
".",
"logger",
".",
"setLevel",
"(",
"logger",
".",
"level",
")",
"return",
"cls"
] | Class variables used here since the framework creates an instance for each connection
:param message_handler: the MessageHandler used to process each message.
:param buffer_size: the TCP buffer size.
:param logger: the global logger.
:return: this class. | [
"Class",
"variables",
"used",
"here",
"since",
"the",
"framework",
"creates",
"an",
"instance",
"for",
"each",
"connection"
] | python | train |
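To see why the configuration above lives on the class: socketserver builds a fresh handler instance for every accepted connection from the class it was handed, so per-server state cannot live on an instance. A self-contained sketch of the pattern, with stand-in names (springcloudstream's actual handler class and MessageHandler interface may differ):

import logging
import socketserver

class EchoMessageHandler(object):
    """Stand-in MessageHandler: echoes each received message back."""
    def handle(self, message, logger):
        return message

class StreamHandler(socketserver.StreamRequestHandler):
    """Stand-in for the request-handler class that defines create_handler above."""
    BUFFER_SIZE = 1024
    message_handler = None
    logger = None

    @classmethod
    def create_handler(cls, message_handler, buffer_size, logger):
        # Same pattern as above: the framework instantiates this class once per
        # connection, so per-server configuration has to live on the class.
        cls.BUFFER_SIZE = buffer_size
        cls.message_handler = message_handler
        cls.logger = logger
        return cls

    def handle(self):
        data = self.request.recv(self.BUFFER_SIZE)
        if data:
            self.request.sendall(self.message_handler.handle(data, self.logger))

handler_cls = StreamHandler.create_handler(EchoMessageHandler(), 2048,
                                           logging.getLogger('tcp-server'))
server = socketserver.TCPServer(('127.0.0.1', 9999), handler_cls)
# server.serve_forever()  # each accepted connection builds a new StreamHandler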