repo | path | url | code | code_tokens | docstring | docstring_tokens | language | partition
---|---|---|---|---|---|---|---|---
googleapis/google-cloud-python | bigquery/google/cloud/bigquery/client.py | https://github.com/googleapis/google-cloud-python/blob/85e80125a59cb10f8cb105f25ecc099e4b940b50/bigquery/google/cloud/bigquery/client.py#L293-L309 | def dataset(self, dataset_id, project=None):
    """Construct a reference to a dataset.

    :type dataset_id: str
    :param dataset_id: ID of the dataset.

    :type project: str
    :param project: (Optional) project ID for the dataset (defaults to
                    the project of the client).

    :rtype: :class:`google.cloud.bigquery.dataset.DatasetReference`
    :returns: a new ``DatasetReference`` instance
    """
    if project is None:
        project = self.project
    return DatasetReference(project, dataset_id) | [
"def",
"dataset",
"(",
"self",
",",
"dataset_id",
",",
"project",
"=",
"None",
")",
":",
"if",
"project",
"is",
"None",
":",
"project",
"=",
"self",
".",
"project",
"return",
"DatasetReference",
"(",
"project",
",",
"dataset_id",
")"
] | Construct a reference to a dataset.
:type dataset_id: str
:param dataset_id: ID of the dataset.
:type project: str
:param project: (Optional) project ID for the dataset (defaults to
the project of the client).
:rtype: :class:`google.cloud.bigquery.dataset.DatasetReference`
:returns: a new ``DatasetReference`` instance | [
"Construct",
"a",
"reference",
"to",
"a",
"dataset",
"."
] | python | train |
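A minimal usage sketch for the record above; the client setup and project IDs are illustrative assumptions, not part of the dataset row:

```python
from google.cloud import bigquery

client = bigquery.Client(project="my-project")  # hypothetical project ID
ref = client.dataset("analytics")               # falls back to client.project
other = client.dataset("analytics", project="other-project")  # explicit project wins
print(ref.project, ref.dataset_id)              # -> my-project analytics
```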
honzamach/pydgets | pydgets/widgets.py | https://github.com/honzamach/pydgets/blob/5ca4ce19fc2d9b5f41441fb9163810f8ca502e79/pydgets/widgets.py#L1294-L1308 | def _render_content(self, content, **settings):
    """
    Perform widget rendering, but do not print anything.
    """
    bar_len = int(settings[self.SETTING_BAR_WIDTH])
    if not bar_len:
        bar_len = TERMINAL_WIDTH - 10
    percent = content
    progress = ""
    progress += str(settings[self.SETTING_BAR_CHAR]) * int(bar_len * percent)
    s = {k: settings[k] for k in (self.SETTING_FLAG_PLAIN,)}
    s.update(settings[self.SETTING_BAR_FORMATING])
    progress = self.fmt_text(progress, **s)
    progress += ' ' * int(bar_len - int(bar_len * percent))
    return "{:6.2f}% [{:s}]".format(percent * 100, progress) | [
"def",
"_render_content",
"(",
"self",
",",
"content",
",",
"*",
"*",
"settings",
")",
":",
"bar_len",
"=",
"int",
"(",
"settings",
"[",
"self",
".",
"SETTING_BAR_WIDTH",
"]",
")",
"if",
"not",
"bar_len",
":",
"bar_len",
"=",
"TERMINAL_WIDTH",
"-",
"10",
"percent",
"=",
"content",
"progress",
"=",
"\"\"",
"progress",
"+=",
"str",
"(",
"settings",
"[",
"self",
".",
"SETTING_BAR_CHAR",
"]",
")",
"*",
"int",
"(",
"bar_len",
"*",
"percent",
")",
"s",
"=",
"{",
"k",
":",
"settings",
"[",
"k",
"]",
"for",
"k",
"in",
"(",
"self",
".",
"SETTING_FLAG_PLAIN",
",",
")",
"}",
"s",
".",
"update",
"(",
"settings",
"[",
"self",
".",
"SETTING_BAR_FORMATING",
"]",
")",
"progress",
"=",
"self",
".",
"fmt_text",
"(",
"progress",
",",
"*",
"*",
"s",
")",
"progress",
"+=",
"' '",
"*",
"int",
"(",
"bar_len",
"-",
"int",
"(",
"bar_len",
"*",
"percent",
")",
")",
"return",
"\"{:6.2f}% [{:s}]\"",
".",
"format",
"(",
"percent",
"*",
"100",
",",
"progress",
")"
] | Perform widget rendering, but do not print anything. | [
"Perform",
"widget",
"rendering",
"but",
"do",
"not",
"print",
"anything",
"."
] | python | train |
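The heart of `_render_content` is the fill/pad arithmetic; a self-contained sketch with the widget settings replaced by plain arguments (names and defaults are assumptions):

```python
def render_bar(percent, bar_len=40, bar_char="="):
    # Filled cells first, then pad with spaces so the bar has a fixed width.
    filled = bar_char * int(bar_len * percent)
    padding = " " * int(bar_len - int(bar_len * percent))
    return "{:6.2f}% [{:s}]".format(percent * 100, filled + padding)

print(render_bar(0.42))  # " 42.00% [...]" with 16 of 40 cells filled
```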
alvations/pywsd | pywsd/utils.py | https://github.com/alvations/pywsd/blob/4c12394c8adbcfed71dd912bdbef2e36370821bf/pywsd/utils.py#L29-L34 | def remove_tags(text: str) -> str:
    """ Removes <tags> in angled brackets from text. """
    tags = {i: " " for i in re.findall("(<[^>\n]*>)", text.strip())}
    no_tag_text = reduce(lambda x, kv: x.replace(*kv), tags.items(), text)
    return " ".join(no_tag_text.split()) | [
"def",
"remove_tags",
"(",
"text",
":",
"str",
")",
"->",
"str",
":",
"tags",
"=",
"{",
"i",
":",
"\" \"",
"for",
"i",
"in",
"re",
".",
"findall",
"(",
"\"(<[^>\\n]*>)\"",
",",
"text",
".",
"strip",
"(",
")",
")",
"}",
"no_tag_text",
"=",
"reduce",
"(",
"lambda",
"x",
",",
"kv",
":",
"x",
".",
"replace",
"(",
"*",
"kv",
")",
",",
"tags",
".",
"items",
"(",
")",
",",
"text",
")",
"return",
"\" \"",
".",
"join",
"(",
"no_tag_text",
".",
"split",
"(",
")",
")"
] | Removes <tags> in angled brackets from text. | [
"Removes",
"<tags",
">",
"in",
"angled",
"brackets",
"from",
"text",
"."
] | python | train |
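Expected behaviour of `remove_tags` above, shown on small inputs (at module level the function relies on `import re` and, under Python 3, `from functools import reduce`):

```python
print(remove_tags("<p>Hello <b>world</b></p>"))  # -> 'Hello world'
print(remove_tags("no tags here"))               # -> 'no tags here'
```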
frictionlessdata/tableschema-pandas-py | tableschema_pandas/mapper.py | https://github.com/frictionlessdata/tableschema-pandas-py/blob/ef941dbc12f5d346e9612f8fec1b4b356b8493ca/tableschema_pandas/mapper.py#L156-L176 | def restore_row(self, row, schema, pk):
    """Restore row from Pandas
    """
    result = []
    for field in schema.fields:
        if schema.primary_key and schema.primary_key[0] == field.name:
            if field.type == 'number' and np.isnan(pk):
                pk = None
            if pk and field.type == 'integer':
                pk = int(pk)
            result.append(field.cast_value(pk))
        else:
            value = row[field.name]
            if field.type == 'number' and np.isnan(value):
                value = None
            if value and field.type == 'integer':
                value = int(value)
            elif field.type == 'datetime':
                value = value.to_pydatetime()
            result.append(field.cast_value(value))
    return result | [
"def",
"restore_row",
"(",
"self",
",",
"row",
",",
"schema",
",",
"pk",
")",
":",
"result",
"=",
"[",
"]",
"for",
"field",
"in",
"schema",
".",
"fields",
":",
"if",
"schema",
".",
"primary_key",
"and",
"schema",
".",
"primary_key",
"[",
"0",
"]",
"==",
"field",
".",
"name",
":",
"if",
"field",
".",
"type",
"==",
"'number'",
"and",
"np",
".",
"isnan",
"(",
"pk",
")",
":",
"pk",
"=",
"None",
"if",
"pk",
"and",
"field",
".",
"type",
"==",
"'integer'",
":",
"pk",
"=",
"int",
"(",
"pk",
")",
"result",
".",
"append",
"(",
"field",
".",
"cast_value",
"(",
"pk",
")",
")",
"else",
":",
"value",
"=",
"row",
"[",
"field",
".",
"name",
"]",
"if",
"field",
".",
"type",
"==",
"'number'",
"and",
"np",
".",
"isnan",
"(",
"value",
")",
":",
"value",
"=",
"None",
"if",
"value",
"and",
"field",
".",
"type",
"==",
"'integer'",
":",
"value",
"=",
"int",
"(",
"value",
")",
"elif",
"field",
".",
"type",
"==",
"'datetime'",
":",
"value",
"=",
"value",
".",
"to_pydatetime",
"(",
")",
"result",
".",
"append",
"(",
"field",
".",
"cast_value",
"(",
"value",
")",
")",
"return",
"result"
] | Restore row from Pandas | [
"Restore",
"row",
"from",
"Pandas"
] | python | train |
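The per-cell rules above (NaN becomes None, floats that encode integers are cast back, timestamps converted) in isolation; the field-type strings are assumed to follow Table Schema conventions:

```python
import numpy as np

def normalize_cell(value, field_type):
    # Pandas represents missing numbers as NaN and may widen ints to floats.
    if field_type == 'number' and np.isnan(value):
        return None
    if value and field_type == 'integer':
        return int(value)
    return value

print(normalize_cell(float('nan'), 'number'))  # None
print(normalize_cell(3.0, 'integer'))          # 3
```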
klen/muffin-debugtoolbar | muffin_debugtoolbar/tbtools/tbtools.py | https://github.com/klen/muffin-debugtoolbar/blob/b650b35fbe2035888f6bba5dac3073ef01c94dc6/muffin_debugtoolbar/tbtools/tbtools.py#L231-L256 | def render_full(self, request, lodgeit_url=None):
    """Render the Full HTML page with the traceback info."""
    app = request.app
    root_path = request.app.ps.debugtoolbar.cfg.prefix
    exc = escape(self.exception)
    summary = self.render_summary(include_title=False, request=request)
    token = request.app['debugtoolbar']['pdbt_token']
    vars = {
        'evalex': app.ps.debugtoolbar.cfg.intercept_exc == 'debug' and 'true' or 'false',
        'console': 'console',
        'lodgeit_url': lodgeit_url and escape(lodgeit_url) or '',
        'title': exc,
        'exception': exc,
        'exception_type': escape(self.exception_type),
        'summary': summary,
        'plaintext': self.plaintext,
        'plaintext_cs': re.sub('-{2,}', '-', self.plaintext),
        'traceback_id': self.id,
        'static_path': root_path + 'static/',
        'token': token,
        'root_path': root_path,
        'url': root_path + 'exception?token=%s&tb=%s' % (token, self.id),
    }
    template = app.ps.jinja2.env.get_template('debugtoolbar/exception.html')
    return template.render(app=app, request=request, **vars) | [
"def",
"render_full",
"(",
"self",
",",
"request",
",",
"lodgeit_url",
"=",
"None",
")",
":",
"app",
"=",
"request",
".",
"app",
"root_path",
"=",
"request",
".",
"app",
".",
"ps",
".",
"debugtoolbar",
".",
"cfg",
".",
"prefix",
"exc",
"=",
"escape",
"(",
"self",
".",
"exception",
")",
"summary",
"=",
"self",
".",
"render_summary",
"(",
"include_title",
"=",
"False",
",",
"request",
"=",
"request",
")",
"token",
"=",
"request",
".",
"app",
"[",
"'debugtoolbar'",
"]",
"[",
"'pdbt_token'",
"]",
"vars",
"=",
"{",
"'evalex'",
":",
"app",
".",
"ps",
".",
"debugtoolbar",
".",
"cfg",
".",
"intercept_exc",
"==",
"'debug'",
"and",
"'true'",
"or",
"'false'",
",",
"'console'",
":",
"'console'",
",",
"'lodgeit_url'",
":",
"lodgeit_url",
"and",
"escape",
"(",
"lodgeit_url",
")",
"or",
"''",
",",
"'title'",
":",
"exc",
",",
"'exception'",
":",
"exc",
",",
"'exception_type'",
":",
"escape",
"(",
"self",
".",
"exception_type",
")",
",",
"'summary'",
":",
"summary",
",",
"'plaintext'",
":",
"self",
".",
"plaintext",
",",
"'plaintext_cs'",
":",
"re",
".",
"sub",
"(",
"'-{2,}'",
",",
"'-'",
",",
"self",
".",
"plaintext",
")",
",",
"'traceback_id'",
":",
"self",
".",
"id",
",",
"'static_path'",
":",
"root_path",
"+",
"'static/'",
",",
"'token'",
":",
"token",
",",
"'root_path'",
":",
"root_path",
",",
"'url'",
":",
"root_path",
"+",
"'exception?token=%s&tb=%s'",
"%",
"(",
"token",
",",
"self",
".",
"id",
")",
",",
"}",
"template",
"=",
"app",
".",
"ps",
".",
"jinja2",
".",
"env",
".",
"get_template",
"(",
"'debugtoolbar/exception.html'",
")",
"return",
"template",
".",
"render",
"(",
"app",
"=",
"app",
",",
"request",
"=",
"request",
",",
"*",
"*",
"vars",
")"
] | Render the Full HTML page with the traceback info. | [
"Render",
"the",
"Full",
"HTML",
"page",
"with",
"the",
"traceback",
"info",
"."
] | python | train |
pypa/pipenv | pipenv/vendor/click/globals.py | https://github.com/pypa/pipenv/blob/cae8d76c210b9777e90aab76e9c4b0e53bb19cde/pipenv/vendor/click/globals.py#L39-L48 | def resolve_color_default(color=None):
    """Internal helper to get the default value of the color flag.  If a
    value is passed it's returned unchanged, otherwise it's looked up from
    the current context.
    """
    if color is not None:
        return color
    ctx = get_current_context(silent=True)
    if ctx is not None:
        return ctx.color | [
"def",
"resolve_color_default",
"(",
"color",
"=",
"None",
")",
":",
"if",
"color",
"is",
"not",
"None",
":",
"return",
"color",
"ctx",
"=",
"get_current_context",
"(",
"silent",
"=",
"True",
")",
"if",
"ctx",
"is",
"not",
"None",
":",
"return",
"ctx",
".",
"color"
] | Internal helper to get the default value of the color flag. If a
value is passed it's returned unchanged, otherwise it's looked up from
the current context. | [
"Internal",
"helper",
"to",
"get",
"the",
"default",
"value",
"of",
"the",
"color",
"flag",
".",
"If",
"a",
"value",
"is",
"passed",
"it",
"s",
"returned",
"unchanged",
"otherwise",
"it",
"s",
"looked",
"up",
"from",
"the",
"current",
"context",
"."
] | python | train |
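A small sketch of how the context lookup behaves; the command definition is an assumption for illustration:

```python
import click
from click.globals import resolve_color_default  # vendored import path may differ

@click.command()
@click.option('--color/--no-color', default=None)
def cli(color):
    # Inside a running command a Context is current, so a None flag
    # falls back to ctx.color.
    click.echo("resolved: {}".format(resolve_color_default(color)))

# Outside any context, get_current_context(silent=True) yields None and
# the helper implicitly returns None.
```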
watson-developer-cloud/python-sdk | ibm_watson/visual_recognition_v3.py | https://github.com/watson-developer-cloud/python-sdk/blob/4c2c9df4466fcde88975da9ecd834e6ba95eb353/ibm_watson/visual_recognition_v3.py#L789-L809 | def _from_dict(cls, _dict):
    """Initialize a ClassifiedImage object from a json dictionary."""
    args = {}
    if 'source_url' in _dict:
        args['source_url'] = _dict.get('source_url')
    if 'resolved_url' in _dict:
        args['resolved_url'] = _dict.get('resolved_url')
    if 'image' in _dict:
        args['image'] = _dict.get('image')
    if 'error' in _dict:
        args['error'] = ErrorInfo._from_dict(_dict.get('error'))
    if 'classifiers' in _dict:
        args['classifiers'] = [
            ClassifierResult._from_dict(x)
            for x in (_dict.get('classifiers'))
        ]
    else:
        raise ValueError(
            'Required property \'classifiers\' not present in ClassifiedImage JSON'
        )
    return cls(**args) | [
"def",
"_from_dict",
"(",
"cls",
",",
"_dict",
")",
":",
"args",
"=",
"{",
"}",
"if",
"'source_url'",
"in",
"_dict",
":",
"args",
"[",
"'source_url'",
"]",
"=",
"_dict",
".",
"get",
"(",
"'source_url'",
")",
"if",
"'resolved_url'",
"in",
"_dict",
":",
"args",
"[",
"'resolved_url'",
"]",
"=",
"_dict",
".",
"get",
"(",
"'resolved_url'",
")",
"if",
"'image'",
"in",
"_dict",
":",
"args",
"[",
"'image'",
"]",
"=",
"_dict",
".",
"get",
"(",
"'image'",
")",
"if",
"'error'",
"in",
"_dict",
":",
"args",
"[",
"'error'",
"]",
"=",
"ErrorInfo",
".",
"_from_dict",
"(",
"_dict",
".",
"get",
"(",
"'error'",
")",
")",
"if",
"'classifiers'",
"in",
"_dict",
":",
"args",
"[",
"'classifiers'",
"]",
"=",
"[",
"ClassifierResult",
".",
"_from_dict",
"(",
"x",
")",
"for",
"x",
"in",
"(",
"_dict",
".",
"get",
"(",
"'classifiers'",
")",
")",
"]",
"else",
":",
"raise",
"ValueError",
"(",
"'Required property \\'classifiers\\' not present in ClassifiedImage JSON'",
")",
"return",
"cls",
"(",
"*",
"*",
"args",
")"
] | Initialize a ClassifiedImage object from a json dictionary. | [
"Initialize",
"a",
"ClassifiedImage",
"object",
"from",
"a",
"json",
"dictionary",
"."
] | python | train |
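A plausible round-trip for `_from_dict`; the payload keys mirror the checks above, and the values are invented:

```python
payload = {
    'source_url': 'https://example.com/cat.jpg',  # hypothetical values
    'image': 'cat.jpg',
    'classifiers': [],  # required key -- omitting it raises ValueError
}
image = ClassifiedImage._from_dict(payload)
```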
PrefPy/prefpy | prefpy/mechanismMcmc.py | https://github.com/PrefPy/prefpy/blob/f395ba3782f05684fa5de0cece387a6da9391d02/prefpy/mechanismMcmc.py#L140-L171 | def getRankingBruteForce(self, profile):
    """
    Returns a list that orders all candidates from best to worst when we use brute force to
    compute Bayesian utilities for an election profile. This function assumes that
    getCandScoresMapBruteForce(profile) is implemented for the child Mechanism class. Note that
    the returned list gives no indication of ties between candidates.

    :ivar Profile profile: A Profile object that represents an election profile.
    """

    # We generate a map that associates each score with the candidates that have that score.
    candScoresMapBruteForce = self.getCandScoresMapBruteForce(profile)
    reverseCandScoresMap = dict()
    for key, value in candScoresMapBruteForce.items():
        if value not in reverseCandScoresMap.keys():
            reverseCandScoresMap[value] = [key]
        else:
            reverseCandScoresMap[value].append(key)

    # We sort the scores by either decreasing order or increasing order.
    if self.maximizeCandScore == True:
        sortedCandScores = sorted(reverseCandScoresMap.keys(), reverse=True)
    else:
        sortedCandScores = sorted(reverseCandScoresMap.keys())

    # We put the candidates into our ranking based on the order in which their score appears
    ranking = []
    for candScore in sortedCandScores:
        for cand in reverseCandScoresMap[candScore]:
            ranking.append(cand)
    return ranking | [
"def",
"getRankingBruteForce",
"(",
"self",
",",
"profile",
")",
":",
"# We generate a map that associates each score with the candidates that have that score.",
"candScoresMapBruteForce",
"=",
"self",
".",
"getCandScoresMapBruteForce",
"(",
"profile",
")",
"reverseCandScoresMap",
"=",
"dict",
"(",
")",
"for",
"key",
",",
"value",
"in",
"candScoresMapBruteForce",
".",
"items",
"(",
")",
":",
"if",
"value",
"not",
"in",
"reverseCandScoresMap",
".",
"keys",
"(",
")",
":",
"reverseCandScoresMap",
"[",
"value",
"]",
"=",
"[",
"key",
"]",
"else",
":",
"reverseCandScoresMap",
"[",
"value",
"]",
".",
"append",
"(",
"key",
")",
"# We sort the scores by either decreasing order or increasing order.",
"if",
"self",
".",
"maximizeCandScore",
"==",
"True",
":",
"sortedCandScores",
"=",
"sorted",
"(",
"reverseCandScoresMap",
".",
"keys",
"(",
")",
",",
"reverse",
"=",
"True",
")",
"else",
":",
"sortedCandScores",
"=",
"sorted",
"(",
"reverseCandScoresMap",
".",
"keys",
"(",
")",
")",
"# We put the candidates into our ranking based on the order in which their score appears",
"ranking",
"=",
"[",
"]",
"for",
"candScore",
"in",
"sortedCandScores",
":",
"for",
"cand",
"in",
"reverseCandScoresMap",
"[",
"candScore",
"]",
":",
"ranking",
".",
"append",
"(",
"cand",
")",
"return",
"ranking"
] | Returns a list that orders all candidates from best to worst when we use brute force to
compute Bayesian utilities for an election profile. This function assumes that
getCandScoresMapBruteForce(profile) is implemented for the child Mechanism class. Note that
the returned list gives no indication of ties between candidates.
:ivar Profile profile: A Profile object that represents an election profile. | [
"Returns",
"a",
"list",
"that",
"orders",
"all",
"candidates",
"from",
"best",
"to",
"worst",
"when",
"we",
"use",
"brute",
"force",
"to",
"compute",
"Bayesian",
"utilities",
"for",
"an",
"election",
"profile",
".",
"This",
"function",
"assumes",
"that",
"getCandScoresMapBruteForce",
"(",
"profile",
")",
"is",
"implemented",
"for",
"the",
"child",
"Mechanism",
"class",
".",
"Note",
"that",
"the",
"returned",
"list",
"gives",
"no",
"indication",
"of",
"ties",
"between",
"candidates",
".",
":",
"ivar",
"Profile",
"profile",
":",
"A",
"Profile",
"object",
"that",
"represents",
"an",
"election",
"profile",
"."
] | python | train |
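The invert-group-sort step above, reduced to a standalone function (the candidate/score data is invented):

```python
def ranking_from_scores(cand_scores, maximize=True):
    by_score = {}
    for cand, score in cand_scores.items():
        by_score.setdefault(score, []).append(cand)  # group ties together
    ranking = []
    for score in sorted(by_score, reverse=maximize):
        ranking.extend(by_score[score])              # ties stay adjacent
    return ranking

print(ranking_from_scores({'a': 0.2, 'b': 0.7, 'c': 0.7}))  # ['b', 'c', 'a']
```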
ministryofjustice/money-to-prisoners-common | mtp_common/build_tasks/tasks.py | https://github.com/ministryofjustice/money-to-prisoners-common/blob/33c43a2912cb990d9148da7c8718f480f07d90a1/mtp_common/build_tasks/tasks.py#L131-L139 | def python_dependencies(context: Context, common_path=None):
    """
    Updates python dependencies
    """
    context.pip_command('install', '-r', context.requirements_file)
    if common_path:
        context.pip_command('uninstall', '--yes', 'money-to-prisoners-common')
        context.pip_command('install', '--force-reinstall', '-e', common_path)
        context.shell('rm', '-rf', 'webpack.config.js') | [
"def",
"python_dependencies",
"(",
"context",
":",
"Context",
",",
"common_path",
"=",
"None",
")",
":",
"context",
".",
"pip_command",
"(",
"'install'",
",",
"'-r'",
",",
"context",
".",
"requirements_file",
")",
"if",
"common_path",
":",
"context",
".",
"pip_command",
"(",
"'uninstall'",
",",
"'--yes'",
",",
"'money-to-prisoners-common'",
")",
"context",
".",
"pip_command",
"(",
"'install'",
",",
"'--force-reinstall'",
",",
"'-e'",
",",
"common_path",
")",
"context",
".",
"shell",
"(",
"'rm'",
",",
"'-rf'",
",",
"'webpack.config.js'",
")"
] | Updates python dependencies | [
"Updates",
"python",
"dependencies"
] | python | train |
flo-compbio/xlmhg | xlmhg/result.py | https://github.com/flo-compbio/xlmhg/blob/8e5929ee1dc91b95e343b7a2b1b1d6664c4540a1/xlmhg/result.py#L181-L188 | def escore(self):
    """(property) Returns the E-score associated with the result."""
    hg_pval_thresh = self.escore_pval_thresh or self.pval
    escore_tol = self.escore_tol or mhg_cython.get_default_tol()
    es = mhg_cython.get_xlmhg_escore(
        self.indices, self.N, self.K, self.X, self.L,
        hg_pval_thresh, escore_tol)
    return es | [
"def",
"escore",
"(",
"self",
")",
":",
"hg_pval_thresh",
"=",
"self",
".",
"escore_pval_thresh",
"or",
"self",
".",
"pval",
"escore_tol",
"=",
"self",
".",
"escore_tol",
"or",
"mhg_cython",
".",
"get_default_tol",
"(",
")",
"es",
"=",
"mhg_cython",
".",
"get_xlmhg_escore",
"(",
"self",
".",
"indices",
",",
"self",
".",
"N",
",",
"self",
".",
"K",
",",
"self",
".",
"X",
",",
"self",
".",
"L",
",",
"hg_pval_thresh",
",",
"escore_tol",
")",
"return",
"es"
] | (property) Returns the E-score associated with the result. | [
"(",
"property",
")",
"Returns",
"the",
"E",
"-",
"score",
"associated",
"with",
"the",
"result",
"."
] | python | train |
KelSolaar/Foundations | foundations/exceptions.py | https://github.com/KelSolaar/Foundations/blob/5c141330faf09dad70a12bc321f4c564917d0a91/foundations/exceptions.py#L171-L196 | def extract_locals(trcback):
    """
    Extracts the frames locals of given traceback.

    :param trcback: Traceback.
    :type trcback: Traceback
    :return: Frames locals.
    :rtype: list
    """

    output = []
    stack = extract_stack(get_inner_most_frame(trcback))
    for frame, file_name, line_number, name, context, index in stack:
        args_names, nameless, keyword = extract_arguments(frame)
        arguments, nameless_args, keyword_args, locals = OrderedDict(), [], {}, {}
        for key, data in frame.f_locals.iteritems():
            if key == nameless:
                nameless_args = map(repr, frame.f_locals.get(nameless, ()))
            elif key == keyword:
                keyword_args = dict((arg, repr(value)) for arg, value in frame.f_locals.get(keyword, {}).iteritems())
            elif key in args_names:
                arguments[key] = repr(data)
            else:
                locals[key] = repr(data)
        output.append(((name, file_name, line_number), (arguments, nameless_args, keyword_args, locals)))
    return output | [
"def",
"extract_locals",
"(",
"trcback",
")",
":",
"output",
"=",
"[",
"]",
"stack",
"=",
"extract_stack",
"(",
"get_inner_most_frame",
"(",
"trcback",
")",
")",
"for",
"frame",
",",
"file_name",
",",
"line_number",
",",
"name",
",",
"context",
",",
"index",
"in",
"stack",
":",
"args_names",
",",
"nameless",
",",
"keyword",
"=",
"extract_arguments",
"(",
"frame",
")",
"arguments",
",",
"nameless_args",
",",
"keyword_args",
",",
"locals",
"=",
"OrderedDict",
"(",
")",
",",
"[",
"]",
",",
"{",
"}",
",",
"{",
"}",
"for",
"key",
",",
"data",
"in",
"frame",
".",
"f_locals",
".",
"iteritems",
"(",
")",
":",
"if",
"key",
"==",
"nameless",
":",
"nameless_args",
"=",
"map",
"(",
"repr",
",",
"frame",
".",
"f_locals",
".",
"get",
"(",
"nameless",
",",
"(",
")",
")",
")",
"elif",
"key",
"==",
"keyword",
":",
"keyword_args",
"=",
"dict",
"(",
"(",
"arg",
",",
"repr",
"(",
"value",
")",
")",
"for",
"arg",
",",
"value",
"in",
"frame",
".",
"f_locals",
".",
"get",
"(",
"keyword",
",",
"{",
"}",
")",
".",
"iteritems",
"(",
")",
")",
"elif",
"key",
"in",
"args_names",
":",
"arguments",
"[",
"key",
"]",
"=",
"repr",
"(",
"data",
")",
"else",
":",
"locals",
"[",
"key",
"]",
"=",
"repr",
"(",
"data",
")",
"output",
".",
"append",
"(",
"(",
"(",
"name",
",",
"file_name",
",",
"line_number",
")",
",",
"(",
"arguments",
",",
"nameless_args",
",",
"keyword_args",
",",
"locals",
")",
")",
")",
"return",
"output"
] | Extracts the frames locals of given traceback.
:param trcback: Traceback.
:type trcback: Traceback
:return: Frames locals.
:rtype: list | [
"Extracts",
"the",
"frames",
"locals",
"of",
"given",
"traceback",
"."
] | python | train |
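The per-frame bucketing that `extract_locals` performs, isolated into a small helper (Python 3 spelling; the original targets Python 2, hence `iteritems`):

```python
def classify_locals(f_locals, args_names, nameless, keyword):
    arguments, varargs, kwargs, other = {}, [], {}, {}
    for key, data in f_locals.items():
        if key == nameless:                    # *args bucket
            varargs = [repr(v) for v in data]
        elif key == keyword:                   # **kwargs bucket
            kwargs = {k: repr(v) for k, v in data.items()}
        elif key in args_names:                # named parameters
            arguments[key] = repr(data)
        else:                                  # plain locals
            other[key] = repr(data)
    return arguments, varargs, kwargs, other
```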
Nachtfeuer/pipeline | spline/tools/report/generator.py | https://github.com/Nachtfeuer/pipeline/blob/04ca18c4e95e4349532bb45b768206393e1f2c13/spline/tools/report/generator.py#L26-L45 | def generate_html(store):
    """
    Generating HTML report.

    Args:
        store (Store): report data.

    Returns:
        str: rendered HTML template.
    """
    spline = {
        'version': VERSION,
        'url': 'https://github.com/Nachtfeuer/pipeline',
        'generated': datetime.now().strftime("%A, %d. %B %Y - %I:%M:%S %p")
    }
    html_template_file = os.path.join(os.path.dirname(__file__), 'templates/report.html.j2')
    with open(html_template_file) as handle:
        html_template = handle.read()
    return render(html_template, spline=spline, store=store) | [
"def",
"generate_html",
"(",
"store",
")",
":",
"spline",
"=",
"{",
"'version'",
":",
"VERSION",
",",
"'url'",
":",
"'https://github.com/Nachtfeuer/pipeline'",
",",
"'generated'",
":",
"datetime",
".",
"now",
"(",
")",
".",
"strftime",
"(",
"\"%A, %d. %B %Y - %I:%M:%S %p\"",
")",
"}",
"html_template_file",
"=",
"os",
".",
"path",
".",
"join",
"(",
"os",
".",
"path",
".",
"dirname",
"(",
"__file__",
")",
",",
"'templates/report.html.j2'",
")",
"with",
"open",
"(",
"html_template_file",
")",
"as",
"handle",
":",
"html_template",
"=",
"handle",
".",
"read",
"(",
")",
"return",
"render",
"(",
"html_template",
",",
"spline",
"=",
"spline",
",",
"store",
"=",
"store",
")"
] | Generating HTML report.
Args:
store (Store): report data.
Returns:
str: rendered HTML template. | [
"Generating",
"HTML",
"report",
"."
] | python | train |
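`render` here is spline's own thin templating helper; a rough Jinja2 equivalent of the final call (the template text and keyword handling are assumptions):

```python
from jinja2 import Template

html = Template("<h1>Report, spline {{ spline.version }}</h1>").render(
    spline={'version': '1.0', 'url': '...', 'generated': 'today'},
    store=None)
```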
rackerlabs/simpl | simpl/rest.py | https://github.com/rackerlabs/simpl/blob/60ed3336a931cd6a7a7246e60f26165d9dc7c99c/simpl/rest.py#L169-L175 | def validate_range_values(request, label, kwargs):
    """Ensure value contained in label is a positive integer."""
    value = kwargs.get(label, request.query.get(label))
    if value:
        kwargs[label] = int(value)
        if kwargs[label] < 0 or kwargs[label] > MAX_PAGE_SIZE:
            raise ValueError | [
"def",
"validate_range_values",
"(",
"request",
",",
"label",
",",
"kwargs",
")",
":",
"value",
"=",
"kwargs",
".",
"get",
"(",
"label",
",",
"request",
".",
"query",
".",
"get",
"(",
"label",
")",
")",
"if",
"value",
":",
"kwargs",
"[",
"label",
"]",
"=",
"int",
"(",
"value",
")",
"if",
"kwargs",
"[",
"label",
"]",
"<",
"0",
"or",
"kwargs",
"[",
"label",
"]",
">",
"MAX_PAGE_SIZE",
":",
"raise",
"ValueError"
] | Ensure value contained in label is a positive integer. | [
"Ensure",
"value",
"contained",
"in",
"label",
"is",
"a",
"positive",
"integer",
"."
] | python | train |
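A quick check of the clamping behaviour; the request stub and the `MAX_PAGE_SIZE` value are assumptions:

```python
class FakeRequest:              # minimal bottle-style stand-in
    class query:
        @staticmethod
        def get(label):
            return None

kwargs = {'limit': '50'}
validate_range_values(FakeRequest, 'limit', kwargs)
print(kwargs['limit'])          # 50, coerced to int and range-checked
# '-1' or anything above MAX_PAGE_SIZE raises ValueError instead.
```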
portantier/habu | habu/cli/cmd_hasher.py | https://github.com/portantier/habu/blob/87091e389dc6332fe1b82830c22b2eefc55816f2/habu/cli/cmd_hasher.py#L11-L43 | def cmd_hasher(f, algorithm):
    """Compute various hashes for the input data, that can be a file or a stream.

    Example:

    \b
    $ habu.hasher README.rst
    md5          992a833cd162047daaa6a236b8ac15ae README.rst
    ripemd160    0566f9141e65e57cae93e0e3b70d1d8c2ccb0623 README.rst
    sha1         d7dbfd2c5e2828eb22f776550c826e4166526253 README.rst
    sha256       6bb22d927e1b6307ced616821a1877b6cc35e... README.rst
    sha512       8743f3eb12a11cf3edcc16e400fb14d599b4a... README.rst
    whirlpool    96bcc083242e796992c0f3462f330811f9e8c... README.rst

    You can also specify which algorithm to use. In such case, the output is
    only the value of the calculated hash:

    \b
    $ habu.hasher -a md5 README.rst
    992a833cd162047daaa6a236b8ac15ae README.rst
    """

    data = f.read()

    if not data:
        print("Empty file or string!")
        return 1

    if algorithm:
        print(hasher(data, algorithm)[algorithm], f.name)
    else:
        for algo, result in hasher(data).items():
            print("{:<12} {} {}".format(algo, result, f.name)) | [
"def",
"cmd_hasher",
"(",
"f",
",",
"algorithm",
")",
":",
"data",
"=",
"f",
".",
"read",
"(",
")",
"if",
"not",
"data",
":",
"print",
"(",
"\"Empty file or string!\"",
")",
"return",
"1",
"if",
"algorithm",
":",
"print",
"(",
"hasher",
"(",
"data",
",",
"algorithm",
")",
"[",
"algorithm",
"]",
",",
"f",
".",
"name",
")",
"else",
":",
"for",
"algo",
",",
"result",
"in",
"hasher",
"(",
"data",
")",
".",
"items",
"(",
")",
":",
"print",
"(",
"\"{:<12} {} {}\"",
".",
"format",
"(",
"algo",
",",
"result",
",",
"f",
".",
"name",
")",
")"
] | Compute various hashes for the input data, that can be a file or a stream.
Example:
\b
$ habu.hasher README.rst
md5 992a833cd162047daaa6a236b8ac15ae README.rst
ripemd160 0566f9141e65e57cae93e0e3b70d1d8c2ccb0623 README.rst
sha1 d7dbfd2c5e2828eb22f776550c826e4166526253 README.rst
sha256 6bb22d927e1b6307ced616821a1877b6cc35e... README.rst
sha512 8743f3eb12a11cf3edcc16e400fb14d599b4a... README.rst
whirlpool 96bcc083242e796992c0f3462f330811f9e8c... README.rst
You can also specify which algorithm to use. In such case, the output is
only the value of the calculated hash:
\b
$ habu.hasher -a md5 README.rst
992a833cd162047daaa6a236b8ac15ae README.rst | [
"Compute",
"various",
"hashes",
"for",
"the",
"input",
"data",
"that",
"can",
"be",
"a",
"file",
"or",
"a",
"stream",
"."
] | python | train |
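`hasher()` is defined elsewhere in habu; a plausible hashlib-based equivalent (the exact algorithm list is an assumption):

```python
import hashlib

def hasher(data, algorithm=None):
    # One algorithm if requested, otherwise a fixed set, as {name: hexdigest}.
    algos = [algorithm] if algorithm else ['md5', 'sha1', 'sha256', 'sha512']
    return {a: hashlib.new(a, data).hexdigest() for a in algos}

print(hasher(b'habu', 'md5')['md5'])
```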
DLR-RM/RAFCON | source/rafcon/gui/controllers/graphical_editor_gaphas.py | https://github.com/DLR-RM/RAFCON/blob/24942ef1a904531f49ab8830a1dbb604441be498/source/rafcon/gui/controllers/graphical_editor_gaphas.py#L380-L627 | def state_machine_change_after(self, model, prop_name, info):
    """Called on any change within the state machine

    This method is called when any state, transition, data flow, etc. within the state machine changes. This
    then typically requires a redraw of the graphical editor, to display these changes immediately.

    :param rafcon.gui.models.state_machine.StateMachineModel model: The state machine model
    :param str prop_name: The property that was changed
    :param dict info: Information about the change
    """
    if 'method_name' in info and info['method_name'] == 'root_state_change':
        method_name, model, result, arguments, instance = self._extract_info_data(info['kwargs'])

        if self.model.ongoing_complex_actions:
            return

        # The method causing the change raised an exception, thus nothing was changed
        if (isinstance(result, string_types) and "CRASH" in result) or isinstance(result, Exception):
            return

        # avoid to remove views of elements of states which parent state is destroyed recursively
        if 'remove' in method_name:
            # for remove the model is always a state and in case of remove_state it is the container_state
            # that performs the operation therefore if is_about_to_be_destroyed_recursively is False
            # the child state can be removed and for True ignored because its parent will create a notification
            if model.is_about_to_be_destroyed_recursively:
                return

        # only react to the notification if the model is a model, which has to be drawn
        # if it is a model inside a library state, this is eventually not the case
        if isinstance(model, AbstractStateModel):
            library_root_state = model.state.get_next_upper_library_root_state()
            if library_root_state:
                parent_library_root_state_m = self.model.get_state_model_by_path(library_root_state.get_path())
                if not parent_library_root_state_m.parent.show_content():
                    return

        if method_name == 'state_execution_status':
            state_v = self.canvas.get_view_for_model(model)
            if state_v:  # Children of LibraryStates are not modeled, yet
                self.canvas.request_update(state_v, matrix=False)
        elif method_name == 'add_state':
            new_state = arguments[1]
            new_state_m = model.states[new_state.state_id]
            self.add_state_view_with_meta_data_for_model(new_state_m, model)
            if not self.perform_drag_and_drop:
                self.canvas.wait_for_update()
        elif method_name == 'remove_state':
            state_v = self.canvas.get_view_for_core_element(result)
            if state_v:
                parent_v = self.canvas.get_parent(state_v)
                state_v.remove()
                if parent_v:
                    self.canvas.request_update(parent_v)
                self.canvas.wait_for_update()

        # ----------------------------------
        # TRANSITIONS
        # ----------------------------------
        elif method_name == 'add_transition':
            transitions_models = model.transitions
            transition_id = result
            for transition_m in transitions_models:
                if transition_m.transition.transition_id == transition_id:
                    self.add_transition_view_for_model(transition_m, model)
                    self.canvas.wait_for_update()
                    break
        elif method_name == 'remove_transition':
            transition_v = self.canvas.get_view_for_core_element(result)
            if transition_v:
                state_m = model
                state_v = self.canvas.get_view_for_model(state_m)
                transition_v.remove()
                self.canvas.request_update(state_v, matrix=False)
                self.canvas.wait_for_update()
        elif method_name == 'transition_change':
            transition_m = model
            transition_v = self.canvas.get_view_for_model(transition_m)
            self._reconnect_transition(transition_v, transition_m, transition_m.parent)
            self.canvas.wait_for_update()

        # ----------------------------------
        # DATA FLOW
        # ----------------------------------
        elif method_name == 'add_data_flow':
            data_flow_models = model.data_flows
            data_flow_id = result
            for data_flow_m in data_flow_models:
                if data_flow_m.data_flow.data_flow_id == data_flow_id:
                    self.add_data_flow_view_for_model(data_flow_m, model)
                    self.canvas.wait_for_update()
                    break
        elif method_name == 'remove_data_flow':
            data_flow_v = self.canvas.get_view_for_core_element(result)
            if data_flow_v:
                state_m = model
                state_v = self.canvas.get_view_for_model(state_m)
                self.canvas.request_update(state_v, matrix=False)
                data_flow_v.remove()
                self.canvas.wait_for_update()
        elif method_name == 'data_flow_change':
            data_flow_m = model
            data_flow_v = self.canvas.get_view_for_model(data_flow_m)
            self._reconnect_data_flow(data_flow_v, data_flow_m, data_flow_m.parent)
            self.canvas.wait_for_update()

        # ----------------------------------
        # OUTCOMES
        # ----------------------------------
        elif method_name == 'add_outcome':
            state_m = model
            state_v = self.canvas.get_view_for_model(state_m)
            for outcome_m in state_m.outcomes:
                if outcome_m.outcome.outcome_id == result:
                    state_v.add_outcome(outcome_m)
                    self.canvas.request_update(state_v, matrix=False)
                    self.canvas.wait_for_update()
                    break
        elif method_name == 'remove_outcome':
            state_m = model
            state_v = self.canvas.get_view_for_model(state_m)
            if state_v is None:
                logger.debug("no state_v found for method_name '{}'".format(method_name))
            else:
                outcome_v = self.canvas.get_view_for_core_element(result)
                if outcome_v:
                    state_v.remove_outcome(outcome_v)
                    self.canvas.request_update(state_v, matrix=False)
                    self.canvas.wait_for_update()

        # ----------------------------------
        # DATA PORTS
        # ----------------------------------
        elif method_name == 'add_input_data_port':
            state_m = model
            state_v = self.canvas.get_view_for_model(state_m)
            for input_data_port_m in state_m.input_data_ports:
                if input_data_port_m.data_port.data_port_id == result:
                    state_v.add_input_port(input_data_port_m)
                    self.canvas.request_update(state_v, matrix=False)
                    self.canvas.wait_for_update()
                    break
        elif method_name == 'add_output_data_port':
            state_m = model
            state_v = self.canvas.get_view_for_model(state_m)
            for output_data_port_m in state_m.output_data_ports:
                if output_data_port_m.data_port.data_port_id == result:
                    state_v.add_output_port(output_data_port_m)
                    self.canvas.request_update(state_v, matrix=False)
                    self.canvas.wait_for_update()
                    break
        elif method_name == 'remove_input_data_port':
            state_m = model
            state_v = self.canvas.get_view_for_model(state_m)
            if state_v is None:
                logger.debug("no state_v found for method_name '{}'".format(method_name))
            else:
                input_port_v = self.canvas.get_view_for_core_element(result)
                if input_port_v:
                    state_v.remove_input_port(input_port_v)
                    self.canvas.request_update(state_v, matrix=False)
                    self.canvas.wait_for_update()
        elif method_name == 'remove_output_data_port':
            state_m = model
            state_v = self.canvas.get_view_for_model(state_m)
            if state_v is None:
                logger.debug("no state_v found for method_name '{}'".format(method_name))
            else:
                output_port_v = self.canvas.get_view_for_core_element(result)
                if output_port_v:
                    state_v.remove_output_port(output_port_v)
                    self.canvas.request_update(state_v, matrix=False)
                    self.canvas.wait_for_update()
        elif method_name in ['data_type', 'change_data_type']:
            pass
        elif method_name == 'default_value':
            pass

        # ----------------------------------
        # SCOPED VARIABLES
        # ----------------------------------
        elif method_name == 'add_scoped_variable':
            state_m = model
            state_v = self.canvas.get_view_for_model(state_m)
            for scoped_variable_m in state_m.scoped_variables:
                if scoped_variable_m.scoped_variable.data_port_id == result:
                    state_v.add_scoped_variable(scoped_variable_m)
                    self.canvas.request_update(state_v, matrix=False)
                    self.canvas.wait_for_update()
                    break
        elif method_name == 'remove_scoped_variable':
            state_m = model
            state_v = self.canvas.get_view_for_model(state_m)
            if state_v is None:
                logger.debug("no state_v found for method_name '{}'".format(method_name))
            else:
                scoped_variable_v = self.canvas.get_view_for_core_element(result)
                if scoped_variable_v:
                    state_v.remove_scoped_variable(scoped_variable_v)
                    self.canvas.request_update(state_v, matrix=False)
                    self.canvas.wait_for_update()

        # ----------------------------------
        # STATE MISCELLANEOUS
        # ----------------------------------
        elif method_name == 'name':
            # The name of a state was changed
            if not isinstance(model, AbstractStateModel):
                parent_model = model.parent
            # The name of a port (input, output, scoped var, outcome) was changed
            else:
                parent_model = model
            state_v = self.canvas.get_view_for_model(parent_model)
            if parent_model is model:
                state_v.name_view.name = arguments[1]
                self.canvas.request_update(state_v.name_view, matrix=False)
            else:
                self.canvas.request_update(state_v, matrix=False)
            self.canvas.wait_for_update()
        elif method_name == 'parent':
            pass
        elif method_name == 'description':
            pass
        elif method_name == 'script_text':
            pass
        # TODO handle the following method calls -> for now those are explicit (in the past implicit) ignored
        # TODO -> correct the complex actions which are used in some test (by test calls or by adapting the model)
        elif method_name in ['input_data_ports', 'output_data_ports', 'outcomes',
                             'change_root_state_type', 'change_state_type',
                             'group_states', 'ungroup_state', 'substitute_state']:
            pass
        else:
            known_ignore_list = ['set_input_runtime_value', 'set_use_input_runtime_value',  # from library State
                                 'set_output_runtime_value', 'set_use_output_runtime_value',
                                 'input_data_port_runtime_values', 'use_runtime_value_input_data_ports',
                                 'output_data_port_runtime_values', 'use_runtime_value_output_data_ports',
                                 'semantic_data', 'add_semantic_data', 'remove_semantic_data',
                                 'remove_income']
            if method_name not in known_ignore_list:
                logger.warning("Method {0} not caught in GraphicalViewer, details: {1}".format(method_name, info))

        if method_name in ['add_state', 'add_transition', 'add_data_flow', 'add_outcome', 'add_input_data_port',
                           'add_output_data_port', 'add_scoped_variable', 'data_flow_change', 'transition_change']:
            try:
                self._meta_data_changed(None, model, 'append_to_last_change', True)
            except Exception as e:
                logger.exception('Error while trying to emit meta data signal {0} {1}'.format(e, model)) | [
"def",
"state_machine_change_after",
"(",
"self",
",",
"model",
",",
"prop_name",
",",
"info",
")",
":",
"if",
"'method_name'",
"in",
"info",
"and",
"info",
"[",
"'method_name'",
"]",
"==",
"'root_state_change'",
":",
"method_name",
",",
"model",
",",
"result",
",",
"arguments",
",",
"instance",
"=",
"self",
".",
"_extract_info_data",
"(",
"info",
"[",
"'kwargs'",
"]",
")",
"if",
"self",
".",
"model",
".",
"ongoing_complex_actions",
":",
"return",
"# The method causing the change raised an exception, thus nothing was changed",
"if",
"(",
"isinstance",
"(",
"result",
",",
"string_types",
")",
"and",
"\"CRASH\"",
"in",
"result",
")",
"or",
"isinstance",
"(",
"result",
",",
"Exception",
")",
":",
"return",
"# avoid to remove views of elements of states which parent state is destroyed recursively",
"if",
"'remove'",
"in",
"method_name",
":",
"# for remove the model is always a state and in case of remove_state it is the container_state",
"# that performs the operation therefore if is_about_to_be_destroyed_recursively is False",
"# the child state can be removed and for True ignored because its parent will create a notification",
"if",
"model",
".",
"is_about_to_be_destroyed_recursively",
":",
"return",
"# only react to the notification if the model is a model, which has to be drawn",
"# if it is a model inside a library state, this is eventually not the case",
"if",
"isinstance",
"(",
"model",
",",
"AbstractStateModel",
")",
":",
"library_root_state",
"=",
"model",
".",
"state",
".",
"get_next_upper_library_root_state",
"(",
")",
"if",
"library_root_state",
":",
"parent_library_root_state_m",
"=",
"self",
".",
"model",
".",
"get_state_model_by_path",
"(",
"library_root_state",
".",
"get_path",
"(",
")",
")",
"if",
"not",
"parent_library_root_state_m",
".",
"parent",
".",
"show_content",
"(",
")",
":",
"return",
"if",
"method_name",
"==",
"'state_execution_status'",
":",
"state_v",
"=",
"self",
".",
"canvas",
".",
"get_view_for_model",
"(",
"model",
")",
"if",
"state_v",
":",
"# Children of LibraryStates are not modeled, yet",
"self",
".",
"canvas",
".",
"request_update",
"(",
"state_v",
",",
"matrix",
"=",
"False",
")",
"elif",
"method_name",
"==",
"'add_state'",
":",
"new_state",
"=",
"arguments",
"[",
"1",
"]",
"new_state_m",
"=",
"model",
".",
"states",
"[",
"new_state",
".",
"state_id",
"]",
"self",
".",
"add_state_view_with_meta_data_for_model",
"(",
"new_state_m",
",",
"model",
")",
"if",
"not",
"self",
".",
"perform_drag_and_drop",
":",
"self",
".",
"canvas",
".",
"wait_for_update",
"(",
")",
"elif",
"method_name",
"==",
"'remove_state'",
":",
"state_v",
"=",
"self",
".",
"canvas",
".",
"get_view_for_core_element",
"(",
"result",
")",
"if",
"state_v",
":",
"parent_v",
"=",
"self",
".",
"canvas",
".",
"get_parent",
"(",
"state_v",
")",
"state_v",
".",
"remove",
"(",
")",
"if",
"parent_v",
":",
"self",
".",
"canvas",
".",
"request_update",
"(",
"parent_v",
")",
"self",
".",
"canvas",
".",
"wait_for_update",
"(",
")",
"# ----------------------------------",
"# TRANSITIONS",
"# ----------------------------------",
"elif",
"method_name",
"==",
"'add_transition'",
":",
"transitions_models",
"=",
"model",
".",
"transitions",
"transition_id",
"=",
"result",
"for",
"transition_m",
"in",
"transitions_models",
":",
"if",
"transition_m",
".",
"transition",
".",
"transition_id",
"==",
"transition_id",
":",
"self",
".",
"add_transition_view_for_model",
"(",
"transition_m",
",",
"model",
")",
"self",
".",
"canvas",
".",
"wait_for_update",
"(",
")",
"break",
"elif",
"method_name",
"==",
"'remove_transition'",
":",
"transition_v",
"=",
"self",
".",
"canvas",
".",
"get_view_for_core_element",
"(",
"result",
")",
"if",
"transition_v",
":",
"state_m",
"=",
"model",
"state_v",
"=",
"self",
".",
"canvas",
".",
"get_view_for_model",
"(",
"state_m",
")",
"transition_v",
".",
"remove",
"(",
")",
"self",
".",
"canvas",
".",
"request_update",
"(",
"state_v",
",",
"matrix",
"=",
"False",
")",
"self",
".",
"canvas",
".",
"wait_for_update",
"(",
")",
"elif",
"method_name",
"==",
"'transition_change'",
":",
"transition_m",
"=",
"model",
"transition_v",
"=",
"self",
".",
"canvas",
".",
"get_view_for_model",
"(",
"transition_m",
")",
"self",
".",
"_reconnect_transition",
"(",
"transition_v",
",",
"transition_m",
",",
"transition_m",
".",
"parent",
")",
"self",
".",
"canvas",
".",
"wait_for_update",
"(",
")",
"# ----------------------------------",
"# DATA FLOW",
"# ----------------------------------",
"elif",
"method_name",
"==",
"'add_data_flow'",
":",
"data_flow_models",
"=",
"model",
".",
"data_flows",
"data_flow_id",
"=",
"result",
"for",
"data_flow_m",
"in",
"data_flow_models",
":",
"if",
"data_flow_m",
".",
"data_flow",
".",
"data_flow_id",
"==",
"data_flow_id",
":",
"self",
".",
"add_data_flow_view_for_model",
"(",
"data_flow_m",
",",
"model",
")",
"self",
".",
"canvas",
".",
"wait_for_update",
"(",
")",
"break",
"elif",
"method_name",
"==",
"'remove_data_flow'",
":",
"data_flow_v",
"=",
"self",
".",
"canvas",
".",
"get_view_for_core_element",
"(",
"result",
")",
"if",
"data_flow_v",
":",
"state_m",
"=",
"model",
"state_v",
"=",
"self",
".",
"canvas",
".",
"get_view_for_model",
"(",
"state_m",
")",
"self",
".",
"canvas",
".",
"request_update",
"(",
"state_v",
",",
"matrix",
"=",
"False",
")",
"data_flow_v",
".",
"remove",
"(",
")",
"self",
".",
"canvas",
".",
"wait_for_update",
"(",
")",
"elif",
"method_name",
"==",
"'data_flow_change'",
":",
"data_flow_m",
"=",
"model",
"data_flow_v",
"=",
"self",
".",
"canvas",
".",
"get_view_for_model",
"(",
"data_flow_m",
")",
"self",
".",
"_reconnect_data_flow",
"(",
"data_flow_v",
",",
"data_flow_m",
",",
"data_flow_m",
".",
"parent",
")",
"self",
".",
"canvas",
".",
"wait_for_update",
"(",
")",
"# ----------------------------------",
"# OUTCOMES",
"# ----------------------------------",
"elif",
"method_name",
"==",
"'add_outcome'",
":",
"state_m",
"=",
"model",
"state_v",
"=",
"self",
".",
"canvas",
".",
"get_view_for_model",
"(",
"state_m",
")",
"for",
"outcome_m",
"in",
"state_m",
".",
"outcomes",
":",
"if",
"outcome_m",
".",
"outcome",
".",
"outcome_id",
"==",
"result",
":",
"state_v",
".",
"add_outcome",
"(",
"outcome_m",
")",
"self",
".",
"canvas",
".",
"request_update",
"(",
"state_v",
",",
"matrix",
"=",
"False",
")",
"self",
".",
"canvas",
".",
"wait_for_update",
"(",
")",
"break",
"elif",
"method_name",
"==",
"'remove_outcome'",
":",
"state_m",
"=",
"model",
"state_v",
"=",
"self",
".",
"canvas",
".",
"get_view_for_model",
"(",
"state_m",
")",
"if",
"state_v",
"is",
"None",
":",
"logger",
".",
"debug",
"(",
"\"no state_v found for method_name '{}'\"",
".",
"format",
"(",
"method_name",
")",
")",
"else",
":",
"outcome_v",
"=",
"self",
".",
"canvas",
".",
"get_view_for_core_element",
"(",
"result",
")",
"if",
"outcome_v",
":",
"state_v",
".",
"remove_outcome",
"(",
"outcome_v",
")",
"self",
".",
"canvas",
".",
"request_update",
"(",
"state_v",
",",
"matrix",
"=",
"False",
")",
"self",
".",
"canvas",
".",
"wait_for_update",
"(",
")",
"# ----------------------------------",
"# DATA PORTS",
"# ----------------------------------",
"elif",
"method_name",
"==",
"'add_input_data_port'",
":",
"state_m",
"=",
"model",
"state_v",
"=",
"self",
".",
"canvas",
".",
"get_view_for_model",
"(",
"state_m",
")",
"for",
"input_data_port_m",
"in",
"state_m",
".",
"input_data_ports",
":",
"if",
"input_data_port_m",
".",
"data_port",
".",
"data_port_id",
"==",
"result",
":",
"state_v",
".",
"add_input_port",
"(",
"input_data_port_m",
")",
"self",
".",
"canvas",
".",
"request_update",
"(",
"state_v",
",",
"matrix",
"=",
"False",
")",
"self",
".",
"canvas",
".",
"wait_for_update",
"(",
")",
"break",
"elif",
"method_name",
"==",
"'add_output_data_port'",
":",
"state_m",
"=",
"model",
"state_v",
"=",
"self",
".",
"canvas",
".",
"get_view_for_model",
"(",
"state_m",
")",
"for",
"output_data_port_m",
"in",
"state_m",
".",
"output_data_ports",
":",
"if",
"output_data_port_m",
".",
"data_port",
".",
"data_port_id",
"==",
"result",
":",
"state_v",
".",
"add_output_port",
"(",
"output_data_port_m",
")",
"self",
".",
"canvas",
".",
"request_update",
"(",
"state_v",
",",
"matrix",
"=",
"False",
")",
"self",
".",
"canvas",
".",
"wait_for_update",
"(",
")",
"break",
"elif",
"method_name",
"==",
"'remove_input_data_port'",
":",
"state_m",
"=",
"model",
"state_v",
"=",
"self",
".",
"canvas",
".",
"get_view_for_model",
"(",
"state_m",
")",
"if",
"state_v",
"is",
"None",
":",
"logger",
".",
"debug",
"(",
"\"no state_v found for method_name '{}'\"",
".",
"format",
"(",
"method_name",
")",
")",
"else",
":",
"input_port_v",
"=",
"self",
".",
"canvas",
".",
"get_view_for_core_element",
"(",
"result",
")",
"if",
"input_port_v",
":",
"state_v",
".",
"remove_input_port",
"(",
"input_port_v",
")",
"self",
".",
"canvas",
".",
"request_update",
"(",
"state_v",
",",
"matrix",
"=",
"False",
")",
"self",
".",
"canvas",
".",
"wait_for_update",
"(",
")",
"elif",
"method_name",
"==",
"'remove_output_data_port'",
":",
"state_m",
"=",
"model",
"state_v",
"=",
"self",
".",
"canvas",
".",
"get_view_for_model",
"(",
"state_m",
")",
"if",
"state_v",
"is",
"None",
":",
"logger",
".",
"debug",
"(",
"\"no state_v found for method_name '{}'\"",
".",
"format",
"(",
"method_name",
")",
")",
"else",
":",
"output_port_v",
"=",
"self",
".",
"canvas",
".",
"get_view_for_core_element",
"(",
"result",
")",
"if",
"output_port_v",
":",
"state_v",
".",
"remove_output_port",
"(",
"output_port_v",
")",
"self",
".",
"canvas",
".",
"request_update",
"(",
"state_v",
",",
"matrix",
"=",
"False",
")",
"self",
".",
"canvas",
".",
"wait_for_update",
"(",
")",
"elif",
"method_name",
"in",
"[",
"'data_type'",
",",
"'change_data_type'",
"]",
":",
"pass",
"elif",
"method_name",
"==",
"'default_value'",
":",
"pass",
"# ----------------------------------",
"# SCOPED VARIABLES",
"# ----------------------------------",
"elif",
"method_name",
"==",
"'add_scoped_variable'",
":",
"state_m",
"=",
"model",
"state_v",
"=",
"self",
".",
"canvas",
".",
"get_view_for_model",
"(",
"state_m",
")",
"for",
"scoped_variable_m",
"in",
"state_m",
".",
"scoped_variables",
":",
"if",
"scoped_variable_m",
".",
"scoped_variable",
".",
"data_port_id",
"==",
"result",
":",
"state_v",
".",
"add_scoped_variable",
"(",
"scoped_variable_m",
")",
"self",
".",
"canvas",
".",
"request_update",
"(",
"state_v",
",",
"matrix",
"=",
"False",
")",
"self",
".",
"canvas",
".",
"wait_for_update",
"(",
")",
"break",
"elif",
"method_name",
"==",
"'remove_scoped_variable'",
":",
"state_m",
"=",
"model",
"state_v",
"=",
"self",
".",
"canvas",
".",
"get_view_for_model",
"(",
"state_m",
")",
"if",
"state_v",
"is",
"None",
":",
"logger",
".",
"debug",
"(",
"\"no state_v found for method_name '{}'\"",
".",
"format",
"(",
"method_name",
")",
")",
"else",
":",
"scoped_variable_v",
"=",
"self",
".",
"canvas",
".",
"get_view_for_core_element",
"(",
"result",
")",
"if",
"scoped_variable_v",
":",
"state_v",
".",
"remove_scoped_variable",
"(",
"scoped_variable_v",
")",
"self",
".",
"canvas",
".",
"request_update",
"(",
"state_v",
",",
"matrix",
"=",
"False",
")",
"self",
".",
"canvas",
".",
"wait_for_update",
"(",
")",
"# ----------------------------------",
"# STATE MISCELLANEOUS",
"# ----------------------------------",
"elif",
"method_name",
"==",
"'name'",
":",
"# The name of a state was changed",
"if",
"not",
"isinstance",
"(",
"model",
",",
"AbstractStateModel",
")",
":",
"parent_model",
"=",
"model",
".",
"parent",
"# The name of a port (input, output, scoped var, outcome) was changed",
"else",
":",
"parent_model",
"=",
"model",
"state_v",
"=",
"self",
".",
"canvas",
".",
"get_view_for_model",
"(",
"parent_model",
")",
"if",
"parent_model",
"is",
"model",
":",
"state_v",
".",
"name_view",
".",
"name",
"=",
"arguments",
"[",
"1",
"]",
"self",
".",
"canvas",
".",
"request_update",
"(",
"state_v",
".",
"name_view",
",",
"matrix",
"=",
"False",
")",
"else",
":",
"self",
".",
"canvas",
".",
"request_update",
"(",
"state_v",
",",
"matrix",
"=",
"False",
")",
"self",
".",
"canvas",
".",
"wait_for_update",
"(",
")",
"elif",
"method_name",
"==",
"'parent'",
":",
"pass",
"elif",
"method_name",
"==",
"'description'",
":",
"pass",
"elif",
"method_name",
"==",
"'script_text'",
":",
"pass",
"# TODO handle the following method calls -> for now those are explicit (in the past implicit) ignored",
"# TODO -> correct the complex actions which are used in some test (by test calls or by adapting the model)",
"elif",
"method_name",
"in",
"[",
"'input_data_ports'",
",",
"'output_data_ports'",
",",
"'outcomes'",
",",
"'change_root_state_type'",
",",
"'change_state_type'",
",",
"'group_states'",
",",
"'ungroup_state'",
",",
"'substitute_state'",
"]",
":",
"pass",
"else",
":",
"known_ignore_list",
"=",
"[",
"'set_input_runtime_value'",
",",
"'set_use_input_runtime_value'",
",",
"# from library State",
"'set_output_runtime_value'",
",",
"'set_use_output_runtime_value'",
",",
"'input_data_port_runtime_values'",
",",
"'use_runtime_value_input_data_ports'",
",",
"'output_data_port_runtime_values'",
",",
"'use_runtime_value_output_data_ports'",
",",
"'semantic_data'",
",",
"'add_semantic_data'",
",",
"'remove_semantic_data'",
",",
"'remove_income'",
"]",
"if",
"method_name",
"not",
"in",
"known_ignore_list",
":",
"logger",
".",
"warning",
"(",
"\"Method {0} not caught in GraphicalViewer, details: {1}\"",
".",
"format",
"(",
"method_name",
",",
"info",
")",
")",
"if",
"method_name",
"in",
"[",
"'add_state'",
",",
"'add_transition'",
",",
"'add_data_flow'",
",",
"'add_outcome'",
",",
"'add_input_data_port'",
",",
"'add_output_data_port'",
",",
"'add_scoped_variable'",
",",
"'data_flow_change'",
",",
"'transition_change'",
"]",
":",
"try",
":",
"self",
".",
"_meta_data_changed",
"(",
"None",
",",
"model",
",",
"'append_to_last_change'",
",",
"True",
")",
"except",
"Exception",
"as",
"e",
":",
"logger",
".",
"exception",
"(",
"'Error while trying to emit meta data signal {0} {1}'",
".",
"format",
"(",
"e",
",",
"model",
")",
")"
] | Called on any change within the state machine

This method is called when any state, transition, data flow, etc. within the state machine changes. This
then typically requires a redraw of the graphical editor, to display these changes immediately.

:param rafcon.gui.models.state_machine.StateMachineModel model: The state machine model
:param str prop_name: The property that was changed
:param dict info: Information about the change | [
"Called",
"on",
"any",
"change",
"within",
"the",
"state",
"machine"
] | python | train |
eyeseast/propublica-congress | congress/members.py | https://github.com/eyeseast/propublica-congress/blob/03e519341063c5703080b4723112f1831816c77e/congress/members.py#L51-L60 | def compare(self, first, second, chamber, type='votes', congress=CURRENT_CONGRESS):
    """
    See how often two members voted together in a given Congress.
    Takes two member IDs, a chamber and a Congress number.
    """
    check_chamber(chamber)
    path = "members/{first}/{type}/{second}/{congress}/{chamber}.json"
    path = path.format(first=first, second=second, type=type,
                       congress=congress, chamber=chamber)
    return self.fetch(path) | [
"def",
"compare",
"(",
"self",
",",
"first",
",",
"second",
",",
"chamber",
",",
"type",
"=",
"'votes'",
",",
"congress",
"=",
"CURRENT_CONGRESS",
")",
":",
"check_chamber",
"(",
"chamber",
")",
"path",
"=",
"\"members/{first}/{type}/{second}/{congress}/{chamber}.json\"",
"path",
"=",
"path",
".",
"format",
"(",
"first",
"=",
"first",
",",
"second",
"=",
"second",
",",
"type",
"=",
"type",
",",
"congress",
"=",
"congress",
",",
"chamber",
"=",
"chamber",
")",
"return",
"self",
".",
"fetch",
"(",
"path",
")"
] | See how often two members voted together in a given Congress.
Takes two member IDs, a chamber and a Congress number. | [
"See",
"how",
"often",
"two",
"members",
"voted",
"together",
"in",
"a",
"given",
"Congress",
".",
"Takes",
"two",
"member",
"IDs",
"a",
"chamber",
"and",
"a",
"Congress",
"number",
"."
] | python | train |
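Hedged usage of `compare()`; the member IDs, the API-key variable, and the `client.members` attribute name are assumptions:

```python
from congress import Congress

client = Congress(API_KEY)  # hypothetical API key variable
client.members.compare('A000360', 'B001135', 'senate', congress=114)
# -> fetches members/A000360/votes/B001135/114/senate.json
```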
MillionIntegrals/vel | vel/rl/algo/policy_gradient/trpo.py | https://github.com/MillionIntegrals/vel/blob/e0726e1f63742b728966ccae0c8b825ea0ba491a/vel/rl/algo/policy_gradient/trpo.py#L263-L272 | def create(max_kl, cg_iters, line_search_iters, cg_damping, entropy_coef, vf_iters, discount_factor,
           gae_lambda=1.0, improvement_acceptance_ratio=0.1, max_grad_norm=0.5):
    """ Vel factory function """
    return TrpoPolicyGradient(
        max_kl, int(cg_iters), int(line_search_iters), cg_damping, entropy_coef, vf_iters,
        discount_factor=discount_factor,
        gae_lambda=gae_lambda,
        improvement_acceptance_ratio=improvement_acceptance_ratio,
        max_grad_norm=max_grad_norm
    ) | [
"def",
"create",
"(",
"max_kl",
",",
"cg_iters",
",",
"line_search_iters",
",",
"cg_damping",
",",
"entropy_coef",
",",
"vf_iters",
",",
"discount_factor",
",",
"gae_lambda",
"=",
"1.0",
",",
"improvement_acceptance_ratio",
"=",
"0.1",
",",
"max_grad_norm",
"=",
"0.5",
")",
":",
"return",
"TrpoPolicyGradient",
"(",
"max_kl",
",",
"int",
"(",
"cg_iters",
")",
",",
"int",
"(",
"line_search_iters",
")",
",",
"cg_damping",
",",
"entropy_coef",
",",
"vf_iters",
",",
"discount_factor",
"=",
"discount_factor",
",",
"gae_lambda",
"=",
"gae_lambda",
",",
"improvement_acceptance_ratio",
"=",
"improvement_acceptance_ratio",
",",
"max_grad_norm",
"=",
"max_grad_norm",
")"
] | Vel factory function | [
"Vel",
"factory",
"function"
] | python | train |
dereneaton/ipyrad | ipyrad/analysis/tetrad2.py | https://github.com/dereneaton/ipyrad/blob/5eeb8a178160f45faf71bf47cec4abe998a575d1/ipyrad/analysis/tetrad2.py#L1651-L1689 | def find_clades(trees, names):
    """
    A subfunc of consensus_tree(). Traverses trees to count clade occurrences.
    Names are ordered by names, else they are in the order of the first
    tree.
    """
    ## index names from the first tree
    if not names:
        names = trees[0].get_leaf_names()
    ndict = {j: i for i, j in enumerate(names)}
    namedict = {i: j for i, j in enumerate(names)}

    ## store counts
    clade_counts = defaultdict(int)

    ## count as bitarray clades in each tree
    for tree in trees:
        tree.unroot()
        for node in tree.traverse('postorder'):
            #bits = bitarray('0'*len(tree))
            bits = np.zeros(len(tree), dtype=np.bool_)
            for child in node.iter_leaf_names():
                bits[ndict[child]] = True

            ## if parent is root then mirror flip one child (where bit[0]=0)
            # if not node.is_root():
            #     if node.up.is_root():
            #         if bits[0]:
            #             bits.invert()

            bitstring = "".join([np.binary_repr(i) for i in bits])
            clade_counts[bitstring] += 1

    ## convert to freq
    for key, val in clade_counts.items():
        clade_counts[key] = val / float(len(trees))

    ## return in sorted order
    clade_counts = sorted(clade_counts.items(),
                          key=lambda x: x[1],
                          reverse=True)
    return namedict, clade_counts | [
"def",
"find_clades",
"(",
"trees",
",",
"names",
")",
":",
"## index names from the first tree",
"if",
"not",
"names",
":",
"names",
"=",
"trees",
"[",
"0",
"]",
".",
"get_leaf_names",
"(",
")",
"ndict",
"=",
"{",
"j",
":",
"i",
"for",
"i",
",",
"j",
"in",
"enumerate",
"(",
"names",
")",
"}",
"namedict",
"=",
"{",
"i",
":",
"j",
"for",
"i",
",",
"j",
"in",
"enumerate",
"(",
"names",
")",
"}",
"## store counts",
"clade_counts",
"=",
"defaultdict",
"(",
"int",
")",
"## count as bitarray clades in each tree",
"for",
"tree",
"in",
"trees",
":",
"tree",
".",
"unroot",
"(",
")",
"for",
"node",
"in",
"tree",
".",
"traverse",
"(",
"'postorder'",
")",
":",
"#bits = bitarray('0'*len(tree))",
"bits",
"=",
"np",
".",
"zeros",
"(",
"len",
"(",
"tree",
")",
",",
"dtype",
"=",
"np",
".",
"bool_",
")",
"for",
"child",
"in",
"node",
".",
"iter_leaf_names",
"(",
")",
":",
"bits",
"[",
"ndict",
"[",
"child",
"]",
"]",
"=",
"True",
"## if parent is root then mirror flip one child (where bit[0]=0)",
"# if not node.is_root():",
"# if node.up.is_root():",
"# if bits[0]:",
"# bits.invert()",
"bitstring",
"=",
"\"\"",
".",
"join",
"(",
"[",
"np",
".",
"binary_repr",
"(",
"i",
")",
"for",
"i",
"in",
"bits",
"]",
")",
"clade_counts",
"[",
"bitstring",
"]",
"+=",
"1",
"## convert to freq",
"for",
"key",
",",
"val",
"in",
"clade_counts",
".",
"items",
"(",
")",
":",
"clade_counts",
"[",
"key",
"]",
"=",
"val",
"/",
"float",
"(",
"len",
"(",
"trees",
")",
")",
"## return in sorted order",
"clade_counts",
"=",
"sorted",
"(",
"clade_counts",
".",
"items",
"(",
")",
",",
"key",
"=",
"lambda",
"x",
":",
"x",
"[",
"1",
"]",
",",
"reverse",
"=",
"True",
")",
"return",
"namedict",
",",
"clade_counts"
] | A subfunc of consensus_tree(). Traverses trees to count clade occurrences.
Names are ordered by names, else they are in the order of the first
tree. | [
"A",
"subfunc",
"of",
"consensus_tree",
"()",
".",
"Traverses",
"trees",
"to",
"count",
"clade",
"occurrences",
".",
"Names",
"are",
"ordered",
"by",
"names",
"else",
"they",
"are",
"in",
"the",
"order",
"of",
"the",
"first",
"tree",
"."
] | python | valid |
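Editor's note: a minimal usage sketch for find_clades (not part of the dataset record), assuming ete3-style tree objects as ipyrad uses and with find_clades in scope; the newick strings are hypothetical.

from ete3 import Tree  # assumed dependency; ipyrad trees expose the same API

trees = [Tree("((a,b),(c,d));"), Tree("((a,b),(c,d));"), Tree("((a,c),(b,d));")]
namedict, clade_counts = find_clades(trees, names=["a", "b", "c", "d"])

# clade_counts pairs a tip bitstring with its frequency, most frequent first;
# namedict maps bit positions back to tip names.
for bitstring, freq in clade_counts:
    tips = [namedict[i] for i, bit in enumerate(bitstring) if bit == "1"]
    print(tips, round(freq, 2))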
josiah-wolf-oberholtzer/uqbar | uqbar/sphinx/api.py | https://github.com/josiah-wolf-oberholtzer/uqbar/blob/eca7fefebbbee1e2ae13bf5d6baa838be66b1db6/uqbar/sphinx/api.py#L116-L134 | def setup(app) -> Dict[str, Any]:
"""
Sets up Sphinx extension.
"""
app.add_config_value("uqbar_api_directory_name", "api", "env")
app.add_config_value("uqbar_api_document_empty_modules", False, "env")
app.add_config_value("uqbar_api_document_private_members", False, "env")
app.add_config_value("uqbar_api_document_private_modules", False, "env")
app.add_config_value("uqbar_api_member_documenter_classes", None, "env")
app.add_config_value("uqbar_api_module_documenter_class", None, "env")
app.add_config_value("uqbar_api_root_documenter_class", None, "env")
app.add_config_value("uqbar_api_source_paths", None, "env")
app.add_config_value("uqbar_api_title", "API", "html")
app.connect("builder-inited", on_builder_inited)
return {
"version": uqbar.__version__,
"parallel_read_safe": True,
"parallel_write_safe": True,
} | [
"def",
"setup",
"(",
"app",
")",
"->",
"Dict",
"[",
"str",
",",
"Any",
"]",
":",
"app",
".",
"add_config_value",
"(",
"\"uqbar_api_directory_name\"",
",",
"\"api\"",
",",
"\"env\"",
")",
"app",
".",
"add_config_value",
"(",
"\"uqbar_api_document_empty_modules\"",
",",
"False",
",",
"\"env\"",
")",
"app",
".",
"add_config_value",
"(",
"\"uqbar_api_document_private_members\"",
",",
"False",
",",
"\"env\"",
")",
"app",
".",
"add_config_value",
"(",
"\"uqbar_api_document_private_modules\"",
",",
"False",
",",
"\"env\"",
")",
"app",
".",
"add_config_value",
"(",
"\"uqbar_api_member_documenter_classes\"",
",",
"None",
",",
"\"env\"",
")",
"app",
".",
"add_config_value",
"(",
"\"uqbar_api_module_documenter_class\"",
",",
"None",
",",
"\"env\"",
")",
"app",
".",
"add_config_value",
"(",
"\"uqbar_api_root_documenter_class\"",
",",
"None",
",",
"\"env\"",
")",
"app",
".",
"add_config_value",
"(",
"\"uqbar_api_source_paths\"",
",",
"None",
",",
"\"env\"",
")",
"app",
".",
"add_config_value",
"(",
"\"uqbar_api_title\"",
",",
"\"API\"",
",",
"\"html\"",
")",
"app",
".",
"connect",
"(",
"\"builder-inited\"",
",",
"on_builder_inited",
")",
"return",
"{",
"\"version\"",
":",
"uqbar",
".",
"__version__",
",",
"\"parallel_read_safe\"",
":",
"True",
",",
"\"parallel_write_safe\"",
":",
"True",
",",
"}"
] | Sets up Sphinx extension. | [
"Sets",
"up",
"Sphinx",
"extension",
"."
] | python | train |
SKA-ScienceDataProcessor/integration-prototype | sip/tango_control/tango_subarray/app/subarray_device.py | https://github.com/SKA-ScienceDataProcessor/integration-prototype/blob/8c8006de6ad71dcd44114b0338780738079c87d4/sip/tango_control/tango_subarray/app/subarray_device.py#L16-L20 | def init_device(self):
"""Initialise the device."""
Device.init_device(self)
time.sleep(0.1)
self.set_state(DevState.STANDBY) | [
"def",
"init_device",
"(",
"self",
")",
":",
"Device",
".",
"init_device",
"(",
"self",
")",
"time",
".",
"sleep",
"(",
"0.1",
")",
"self",
".",
"set_state",
"(",
"DevState",
".",
"STANDBY",
")"
] | Initialise the device. | [
"Initialise",
"the",
"device",
"."
] | python | train |
zetaops/zengine | zengine/messaging/views.py | https://github.com/zetaops/zengine/blob/b5bc32d3b37bca799f8985be916f04528ac79e4a/zengine/messaging/views.py#L794-L822 | def edit_message(current):
"""
Edit a message a user owns.
.. code-block:: python
# request:
{
'view':'_zops_edit_message',
'message': {
'body': string, # message text
'key': key
}
}
# response:
{
'status': string, # 'OK' for success
'code': int, # 200 for success
}
"""
current.output = {'status': 'OK', 'code': 200}
in_msg = current.input['message']
try:
msg = Message(current).objects.get(sender_id=current.user_id, key=in_msg['key'])
msg.body = in_msg['body']
msg.save()
except ObjectDoesNotExist:
raise HTTPError(404, "") | [
"def",
"edit_message",
"(",
"current",
")",
":",
"current",
".",
"output",
"=",
"{",
"'status'",
":",
"'OK'",
",",
"'code'",
":",
"200",
"}",
"in_msg",
"=",
"current",
".",
"input",
"[",
"'message'",
"]",
"try",
":",
"msg",
"=",
"Message",
"(",
"current",
")",
".",
"objects",
".",
"get",
"(",
"sender_id",
"=",
"current",
".",
"user_id",
",",
"key",
"=",
"in_msg",
"[",
"'key'",
"]",
")",
"msg",
".",
"body",
"=",
"in_msg",
"[",
"'body'",
"]",
"msg",
".",
"save",
"(",
")",
"except",
"ObjectDoesNotExist",
":",
"raise",
"HTTPError",
"(",
"404",
",",
"\"\"",
")"
] | Edit a message a user owns.
.. code-block:: python
# request:
{
'view':'_zops_edit_message',
'message': {
'body': string, # message text
'key': key
}
}
# response:
{
'status': string, # 'OK' for success
'code': int, # 200 for success
} | [
"Edit",
"a",
"message",
"a",
"user",
"own",
"."
] | python | train |
Atomistica/atomistica | src/python/atomistica/mdcore_io.py | https://github.com/Atomistica/atomistica/blob/5ed79d776c92b91a566be22615bfb304ecc75db7/src/python/atomistica/mdcore_io.py#L164-L190 | def read_cyc(this, fn, conv=1.0):
""" Read the lattice information from a cyc.dat file (i.e., tblmd input file)
"""
f = paropen(fn, "r")
f.readline()
f.readline()
f.readline()
f.readline()
cell = np.array( [ [ 0.0, 0.0, 0.0 ], [ 0.0, 0.0, 0.0 ], [ 0.0, 0.0, 0.0 ] ] )
l = f.readline()
s = map(float, l.split())
cell[0, 0] = s[0]*conv
cell[1, 0] = s[1]*conv
cell[2, 0] = s[2]*conv
l = f.readline()
s = map(float, l.split())
cell[0, 1] = s[0]*conv
cell[1, 1] = s[1]*conv
cell[2, 1] = s[2]*conv
l = f.readline()
s = map(float, l.split())
cell[0, 2] = s[0]*conv
cell[1, 2] = s[1]*conv
cell[2, 2] = s[2]*conv
this.set_cell(cell)
this.set_pbc(True)
f.close() | [
"def",
"read_cyc",
"(",
"this",
",",
"fn",
",",
"conv",
"=",
"1.0",
")",
":",
"f",
"=",
"paropen",
"(",
"fn",
",",
"\"r\"",
")",
"f",
".",
"readline",
"(",
")",
"f",
".",
"readline",
"(",
")",
"f",
".",
"readline",
"(",
")",
"f",
".",
"readline",
"(",
")",
"cell",
"=",
"np",
".",
"array",
"(",
"[",
"[",
"0.0",
",",
"0.0",
",",
"0.0",
"]",
",",
"[",
"0.0",
",",
"0.0",
",",
"0.0",
"]",
",",
"[",
"0.0",
",",
"0.0",
",",
"0.0",
"]",
"]",
")",
"l",
"=",
"f",
".",
"readline",
"(",
")",
"s",
"=",
"map",
"(",
"float",
",",
"l",
".",
"split",
"(",
")",
")",
"cell",
"[",
"0",
",",
"0",
"]",
"=",
"s",
"[",
"0",
"]",
"*",
"conv",
"cell",
"[",
"1",
",",
"0",
"]",
"=",
"s",
"[",
"1",
"]",
"*",
"conv",
"cell",
"[",
"2",
",",
"0",
"]",
"=",
"s",
"[",
"2",
"]",
"*",
"conv",
"l",
"=",
"f",
".",
"readline",
"(",
")",
"s",
"=",
"map",
"(",
"float",
",",
"l",
".",
"split",
"(",
")",
")",
"cell",
"[",
"0",
",",
"1",
"]",
"=",
"s",
"[",
"0",
"]",
"*",
"conv",
"cell",
"[",
"1",
",",
"1",
"]",
"=",
"s",
"[",
"1",
"]",
"*",
"conv",
"cell",
"[",
"2",
",",
"1",
"]",
"=",
"s",
"[",
"2",
"]",
"*",
"conv",
"l",
"=",
"f",
".",
"readline",
"(",
")",
"s",
"=",
"map",
"(",
"float",
",",
"l",
".",
"split",
"(",
")",
")",
"cell",
"[",
"0",
",",
"2",
"]",
"=",
"s",
"[",
"0",
"]",
"*",
"conv",
"cell",
"[",
"1",
",",
"2",
"]",
"=",
"s",
"[",
"1",
"]",
"*",
"conv",
"cell",
"[",
"2",
",",
"2",
"]",
"=",
"s",
"[",
"2",
"]",
"*",
"conv",
"this",
".",
"set_cell",
"(",
"cell",
")",
"this",
".",
"set_pbc",
"(",
"True",
")",
"f",
".",
"close",
"(",
")"
] | Read the lattice information from a cyc.dat file (i.e., tblmd input file) | [
"Read",
"the",
"lattice",
"information",
"from",
"a",
"cyc",
".",
"dat",
"file",
"(",
"i",
".",
"e",
".",
"tblmd",
"input",
"file",
")"
] | python | train |
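Editor's note: the nine scalar assignments in read_cyc fill the 3x3 cell matrix one column per file line. A compact equivalent read, as a hypothetical helper not part of atomistica:

import numpy as np

def read_cell_block(f, conv=1.0):
    # Each of the three file lines holds one *column* of the cell matrix,
    # so load three rows and transpose.
    rows = [[float(x) for x in f.readline().split()] for _ in range(3)]
    return np.array(rows).T * conv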
aouyar/PyMunin | pymunin/plugins/memcachedstats.py | https://github.com/aouyar/PyMunin/blob/4f58a64b6b37c85a84cc7e1e07aafaa0321b249d/pymunin/plugins/memcachedstats.py#L305-L440 | def retrieveVals(self):
"""Retrieve values for graphs."""
if self._stats is None:
serverInfo = MemcachedInfo(self._host, self._port, self._socket_file)
stats = serverInfo.getStats()
else:
stats = self._stats
if stats is None:
raise Exception("Undetermined error accessing stats.")
stats['set_hits'] = stats.get('total_items')
if stats.has_key('cmd_set') and stats.has_key('total_items'):
stats['set_misses'] = stats['cmd_set'] - stats['total_items']
self.saveState(stats)
if self.hasGraph('memcached_connections'):
self.setGraphVal('memcached_connections', 'conn',
stats.get('curr_connections'))
if self.hasGraph('memcached_items'):
self.setGraphVal('memcached_items', 'items',
stats.get('curr_items'))
if self.hasGraph('memcached_memory'):
self.setGraphVal('memcached_memory', 'bytes',
stats.get('bytes'))
if self.hasGraph('memcached_connrate'):
self.setGraphVal('memcached_connrate', 'conn',
stats.get('total_connections'))
if self.hasGraph('memcached_traffic'):
self.setGraphVal('memcached_traffic', 'rxbytes',
stats.get('bytes_read'))
self.setGraphVal('memcached_traffic', 'txbytes',
stats.get('bytes_written'))
if self.hasGraph('memcached_reqrate'):
self.setGraphVal('memcached_reqrate', 'set',
stats.get('cmd_set'))
self.setGraphVal('memcached_reqrate', 'get',
stats.get('cmd_get'))
if self.graphHasField('memcached_reqrate', 'del'):
self.setGraphVal('memcached_reqrate', 'del',
safe_sum([stats.get('delete_hits'),
stats.get('delete_misses')]))
if self.graphHasField('memcached_reqrate', 'cas'):
self.setGraphVal('memcached_reqrate', 'cas',
safe_sum([stats.get('cas_hits'),
stats.get('cas_misses'),
stats.get('cas_badval')]))
if self.graphHasField('memcached_reqrate', 'incr'):
self.setGraphVal('memcached_reqrate', 'incr',
safe_sum([stats.get('incr_hits'),
stats.get('incr_misses')]))
if self.graphHasField('memcached_reqrate', 'decr'):
self.setGraphVal('memcached_reqrate', 'decr',
safe_sum([stats.get('decr_hits'),
stats.get('decr_misses')]))
if self.hasGraph('memcached_statget'):
self.setGraphVal('memcached_statget', 'hit',
stats.get('get_hits'))
self.setGraphVal('memcached_statget', 'miss',
stats.get('get_misses'))
self.setGraphVal('memcached_statget', 'total',
safe_sum([stats.get('get_hits'),
stats.get('get_misses')]))
if self.hasGraph('memcached_statset'):
self.setGraphVal('memcached_statset', 'hit',
stats.get('set_hits'))
self.setGraphVal('memcached_statset', 'miss',
stats.get('set_misses'))
self.setGraphVal('memcached_statset', 'total',
safe_sum([stats.get('set_hits'),
stats.get('set_misses')]))
if self.hasGraph('memcached_statdel'):
self.setGraphVal('memcached_statdel', 'hit',
stats.get('delete_hits'))
self.setGraphVal('memcached_statdel', 'miss',
stats.get('delete_misses'))
self.setGraphVal('memcached_statdel', 'total',
safe_sum([stats.get('delete_hits'),
stats.get('delete_misses')]))
if self.hasGraph('memcached_statcas'):
self.setGraphVal('memcached_statcas', 'hit',
stats.get('cas_hits'))
self.setGraphVal('memcached_statcas', 'miss',
stats.get('cas_misses'))
self.setGraphVal('memcached_statcas', 'badval',
stats.get('cas_badval'))
self.setGraphVal('memcached_statcas', 'total',
safe_sum([stats.get('cas_hits'),
stats.get('cas_misses'),
stats.get('cas_badval')]))
if self.hasGraph('memcached_statincrdecr'):
self.setGraphVal('memcached_statincrdecr', 'incr_hit',
stats.get('incr_hits'))
self.setGraphVal('memcached_statincrdecr', 'decr_hit',
stats.get('decr_hits'))
self.setGraphVal('memcached_statincrdecr', 'incr_miss',
stats.get('incr_misses'))
self.setGraphVal('memcached_statincrdecr', 'decr_miss',
stats.get('decr_misses'))
self.setGraphVal('memcached_statincrdecr', 'total',
safe_sum([stats.get('incr_hits'),
stats.get('decr_hits'),
stats.get('incr_misses'),
stats.get('decr_misses')]))
if self.hasGraph('memcached_statevict'):
self.setGraphVal('memcached_statevict', 'evict',
stats.get('evictions'))
if self.graphHasField('memcached_statevict', 'reclaim'):
self.setGraphVal('memcached_statevict', 'reclaim',
stats.get('reclaimed'))
if self.hasGraph('memcached_statauth'):
self.setGraphVal('memcached_statauth', 'reqs',
stats.get('auth_cmds'))
self.setGraphVal('memcached_statauth', 'errors',
stats.get('auth_errors'))
if self.hasGraph('memcached_hitpct'):
prev_stats = self._prev_stats
for (field_name, field_hits, field_misses) in (
('set', 'set_hits', 'set_misses'),
('get', 'get_hits', 'get_misses'),
('del', 'delete_hits', 'delete_misses'),
('cas', 'cas_hits', 'cas_misses'),
('incr', 'incr_hits', 'incr_misses'),
('decr', 'decr_hits', 'decr_misses')
):
if prev_stats:
if (stats.has_key(field_hits)
and prev_stats.has_key(field_hits)
and stats.has_key(field_misses)
and prev_stats.has_key(field_misses)):
hits = stats[field_hits] - prev_stats[field_hits]
misses = stats[field_misses] - prev_stats[field_misses]
total = hits + misses
if total > 0:
val = 100.0 * hits / total
else:
val = 0
self.setGraphVal('memcached_hitpct', field_name,
round(val, 2)) | [
"def",
"retrieveVals",
"(",
"self",
")",
":",
"if",
"self",
".",
"_stats",
"is",
"None",
":",
"serverInfo",
"=",
"MemcachedInfo",
"(",
"self",
".",
"_host",
",",
"self",
".",
"_port",
",",
"self",
".",
"_socket_file",
")",
"stats",
"=",
"serverInfo",
".",
"getStats",
"(",
")",
"else",
":",
"stats",
"=",
"self",
".",
"_stats",
"if",
"stats",
"is",
"None",
":",
"raise",
"Exception",
"(",
"\"Undetermined error accesing stats.\"",
")",
"stats",
"[",
"'set_hits'",
"]",
"=",
"stats",
".",
"get",
"(",
"'total_items'",
")",
"if",
"stats",
".",
"has_key",
"(",
"'cmd_set'",
")",
"and",
"stats",
".",
"has_key",
"(",
"'total_items'",
")",
":",
"stats",
"[",
"'set_misses'",
"]",
"=",
"stats",
"[",
"'cmd_set'",
"]",
"-",
"stats",
"[",
"'total_items'",
"]",
"self",
".",
"saveState",
"(",
"stats",
")",
"if",
"self",
".",
"hasGraph",
"(",
"'memcached_connections'",
")",
":",
"self",
".",
"setGraphVal",
"(",
"'memcached_connections'",
",",
"'conn'",
",",
"stats",
".",
"get",
"(",
"'curr_connections'",
")",
")",
"if",
"self",
".",
"hasGraph",
"(",
"'memcached_items'",
")",
":",
"self",
".",
"setGraphVal",
"(",
"'memcached_items'",
",",
"'items'",
",",
"stats",
".",
"get",
"(",
"'curr_items'",
")",
")",
"if",
"self",
".",
"hasGraph",
"(",
"'memcached_memory'",
")",
":",
"self",
".",
"setGraphVal",
"(",
"'memcached_memory'",
",",
"'bytes'",
",",
"stats",
".",
"get",
"(",
"'bytes'",
")",
")",
"if",
"self",
".",
"hasGraph",
"(",
"'memcached_connrate'",
")",
":",
"self",
".",
"setGraphVal",
"(",
"'memcached_connrate'",
",",
"'conn'",
",",
"stats",
".",
"get",
"(",
"'total_connections'",
")",
")",
"if",
"self",
".",
"hasGraph",
"(",
"'memcached_traffic'",
")",
":",
"self",
".",
"setGraphVal",
"(",
"'memcached_traffic'",
",",
"'rxbytes'",
",",
"stats",
".",
"get",
"(",
"'bytes_read'",
")",
")",
"self",
".",
"setGraphVal",
"(",
"'memcached_traffic'",
",",
"'txbytes'",
",",
"stats",
".",
"get",
"(",
"'bytes_written'",
")",
")",
"if",
"self",
".",
"hasGraph",
"(",
"'memcached_reqrate'",
")",
":",
"self",
".",
"setGraphVal",
"(",
"'memcached_reqrate'",
",",
"'set'",
",",
"stats",
".",
"get",
"(",
"'cmd_set'",
")",
")",
"self",
".",
"setGraphVal",
"(",
"'memcached_reqrate'",
",",
"'get'",
",",
"stats",
".",
"get",
"(",
"'cmd_get'",
")",
")",
"if",
"self",
".",
"graphHasField",
"(",
"'memcached_reqrate'",
",",
"'del'",
")",
":",
"self",
".",
"setGraphVal",
"(",
"'memcached_reqrate'",
",",
"'del'",
",",
"safe_sum",
"(",
"[",
"stats",
".",
"get",
"(",
"'delete_hits'",
")",
",",
"stats",
".",
"get",
"(",
"'delete_misses'",
")",
"]",
")",
")",
"if",
"self",
".",
"graphHasField",
"(",
"'memcached_reqrate'",
",",
"'cas'",
")",
":",
"self",
".",
"setGraphVal",
"(",
"'memcached_reqrate'",
",",
"'cas'",
",",
"safe_sum",
"(",
"[",
"stats",
".",
"get",
"(",
"'cas_hits'",
")",
",",
"stats",
".",
"get",
"(",
"'cas_misses'",
")",
",",
"stats",
".",
"get",
"(",
"'cas_badval'",
")",
"]",
")",
")",
"if",
"self",
".",
"graphHasField",
"(",
"'memcached_reqrate'",
",",
"'incr'",
")",
":",
"self",
".",
"setGraphVal",
"(",
"'memcached_reqrate'",
",",
"'incr'",
",",
"safe_sum",
"(",
"[",
"stats",
".",
"get",
"(",
"'incr_hits'",
")",
",",
"stats",
".",
"get",
"(",
"'incr_misses'",
")",
"]",
")",
")",
"if",
"self",
".",
"graphHasField",
"(",
"'memcached_reqrate'",
",",
"'decr'",
")",
":",
"self",
".",
"setGraphVal",
"(",
"'memcached_reqrate'",
",",
"'decr'",
",",
"safe_sum",
"(",
"[",
"stats",
".",
"get",
"(",
"'decr_hits'",
")",
",",
"stats",
".",
"get",
"(",
"'decr_misses'",
")",
"]",
")",
")",
"if",
"self",
".",
"hasGraph",
"(",
"'memcached_statget'",
")",
":",
"self",
".",
"setGraphVal",
"(",
"'memcached_statget'",
",",
"'hit'",
",",
"stats",
".",
"get",
"(",
"'get_hits'",
")",
")",
"self",
".",
"setGraphVal",
"(",
"'memcached_statget'",
",",
"'miss'",
",",
"stats",
".",
"get",
"(",
"'get_misses'",
")",
")",
"self",
".",
"setGraphVal",
"(",
"'memcached_statget'",
",",
"'total'",
",",
"safe_sum",
"(",
"[",
"stats",
".",
"get",
"(",
"'get_hits'",
")",
",",
"stats",
".",
"get",
"(",
"'get_misses'",
")",
"]",
")",
")",
"if",
"self",
".",
"hasGraph",
"(",
"'memcached_statset'",
")",
":",
"self",
".",
"setGraphVal",
"(",
"'memcached_statset'",
",",
"'hit'",
",",
"stats",
".",
"get",
"(",
"'set_hits'",
")",
")",
"self",
".",
"setGraphVal",
"(",
"'memcached_statset'",
",",
"'miss'",
",",
"stats",
".",
"get",
"(",
"'set_misses'",
")",
")",
"self",
".",
"setGraphVal",
"(",
"'memcached_statset'",
",",
"'total'",
",",
"safe_sum",
"(",
"[",
"stats",
".",
"get",
"(",
"'set_hits'",
")",
",",
"stats",
".",
"get",
"(",
"'set_misses'",
")",
"]",
")",
")",
"if",
"self",
".",
"hasGraph",
"(",
"'memcached_statdel'",
")",
":",
"self",
".",
"setGraphVal",
"(",
"'memcached_statdel'",
",",
"'hit'",
",",
"stats",
".",
"get",
"(",
"'delete_hits'",
")",
")",
"self",
".",
"setGraphVal",
"(",
"'memcached_statdel'",
",",
"'miss'",
",",
"stats",
".",
"get",
"(",
"'delete_misses'",
")",
")",
"self",
".",
"setGraphVal",
"(",
"'memcached_statdel'",
",",
"'total'",
",",
"safe_sum",
"(",
"[",
"stats",
".",
"get",
"(",
"'delete_hits'",
")",
",",
"stats",
".",
"get",
"(",
"'delete_misses'",
")",
"]",
")",
")",
"if",
"self",
".",
"hasGraph",
"(",
"'memcached_statcas'",
")",
":",
"self",
".",
"setGraphVal",
"(",
"'memcached_statcas'",
",",
"'hit'",
",",
"stats",
".",
"get",
"(",
"'cas_hits'",
")",
")",
"self",
".",
"setGraphVal",
"(",
"'memcached_statcas'",
",",
"'miss'",
",",
"stats",
".",
"get",
"(",
"'cas_misses'",
")",
")",
"self",
".",
"setGraphVal",
"(",
"'memcached_statcas'",
",",
"'badval'",
",",
"stats",
".",
"get",
"(",
"'cas_badval'",
")",
")",
"self",
".",
"setGraphVal",
"(",
"'memcached_statcas'",
",",
"'total'",
",",
"safe_sum",
"(",
"[",
"stats",
".",
"get",
"(",
"'cas_hits'",
")",
",",
"stats",
".",
"get",
"(",
"'cas_misses'",
")",
",",
"stats",
".",
"get",
"(",
"'cas_badval'",
")",
"]",
")",
")",
"if",
"self",
".",
"hasGraph",
"(",
"'memcached_statincrdecr'",
")",
":",
"self",
".",
"setGraphVal",
"(",
"'memcached_statincrdecr'",
",",
"'incr_hit'",
",",
"stats",
".",
"get",
"(",
"'incr_hits'",
")",
")",
"self",
".",
"setGraphVal",
"(",
"'memcached_statincrdecr'",
",",
"'decr_hit'",
",",
"stats",
".",
"get",
"(",
"'decr_hits'",
")",
")",
"self",
".",
"setGraphVal",
"(",
"'memcached_statincrdecr'",
",",
"'incr_miss'",
",",
"stats",
".",
"get",
"(",
"'incr_misses'",
")",
")",
"self",
".",
"setGraphVal",
"(",
"'memcached_statincrdecr'",
",",
"'decr_miss'",
",",
"stats",
".",
"get",
"(",
"'decr_misses'",
")",
")",
"self",
".",
"setGraphVal",
"(",
"'memcached_statincrdecr'",
",",
"'total'",
",",
"safe_sum",
"(",
"[",
"stats",
".",
"get",
"(",
"'incr_hits'",
")",
",",
"stats",
".",
"get",
"(",
"'decr_hits'",
")",
",",
"stats",
".",
"get",
"(",
"'incr_misses'",
")",
",",
"stats",
".",
"get",
"(",
"'decr_misses'",
")",
"]",
")",
")",
"if",
"self",
".",
"hasGraph",
"(",
"'memcached_statevict'",
")",
":",
"self",
".",
"setGraphVal",
"(",
"'memcached_statevict'",
",",
"'evict'",
",",
"stats",
".",
"get",
"(",
"'evictions'",
")",
")",
"if",
"self",
".",
"graphHasField",
"(",
"'memcached_statevict'",
",",
"'reclaim'",
")",
":",
"self",
".",
"setGraphVal",
"(",
"'memcached_statevict'",
",",
"'reclaim'",
",",
"stats",
".",
"get",
"(",
"'reclaimed'",
")",
")",
"if",
"self",
".",
"hasGraph",
"(",
"'memcached_statauth'",
")",
":",
"self",
".",
"setGraphVal",
"(",
"'memcached_statauth'",
",",
"'reqs'",
",",
"stats",
".",
"get",
"(",
"'auth_cmds'",
")",
")",
"self",
".",
"setGraphVal",
"(",
"'memcached_statauth'",
",",
"'errors'",
",",
"stats",
".",
"get",
"(",
"'auth_errors'",
")",
")",
"if",
"self",
".",
"hasGraph",
"(",
"'memcached_hitpct'",
")",
":",
"prev_stats",
"=",
"self",
".",
"_prev_stats",
"for",
"(",
"field_name",
",",
"field_hits",
",",
"field_misses",
")",
"in",
"(",
"(",
"'set'",
",",
"'set_hits'",
",",
"'set_misses'",
")",
",",
"(",
"'get'",
",",
"'get_hits'",
",",
"'get_misses'",
")",
",",
"(",
"'del'",
",",
"'delete_hits'",
",",
"'delete_misses'",
")",
",",
"(",
"'cas'",
",",
"'cas_hits'",
",",
"'cas_misses'",
")",
",",
"(",
"'incr'",
",",
"'incr_hits'",
",",
"'incr_misses'",
")",
",",
"(",
"'decr'",
",",
"'decr_hits'",
",",
"'decr_misses'",
")",
")",
":",
"if",
"prev_stats",
":",
"if",
"(",
"stats",
".",
"has_key",
"(",
"field_hits",
")",
"and",
"prev_stats",
".",
"has_key",
"(",
"field_hits",
")",
"and",
"stats",
".",
"has_key",
"(",
"field_misses",
")",
"and",
"prev_stats",
".",
"has_key",
"(",
"field_misses",
")",
")",
":",
"hits",
"=",
"stats",
"[",
"field_hits",
"]",
"-",
"prev_stats",
"[",
"field_hits",
"]",
"misses",
"=",
"stats",
"[",
"field_misses",
"]",
"-",
"prev_stats",
"[",
"field_misses",
"]",
"total",
"=",
"hits",
"+",
"misses",
"if",
"total",
">",
"0",
":",
"val",
"=",
"100.0",
"*",
"hits",
"/",
"total",
"else",
":",
"val",
"=",
"0",
"self",
".",
"setGraphVal",
"(",
"'memcached_hitpct'",
",",
"field_name",
",",
"round",
"(",
"val",
",",
"2",
")",
")"
] | Retrieve values for graphs. | [
"Retrieve",
"values",
"for",
"graphs",
"."
] | python | train |
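Editor's note: the hit-percentage block at the end of retrieveVals derives rates from deltas between two snapshots of monotonically increasing counters. A self-contained sketch of that pattern; field names mirror memcached's stats but the numbers are invented:

def hit_pct(curr, prev, hits_key, misses_key):
    """Return the hit percentage between two counter snapshots, or None."""
    if not all(k in d for d in (curr, prev) for k in (hits_key, misses_key)):
        return None
    hits = curr[hits_key] - prev[hits_key]
    misses = curr[misses_key] - prev[misses_key]
    total = hits + misses
    return round(100.0 * hits / total, 2) if total > 0 else 0.0

prev = {"get_hits": 100, "get_misses": 20}
curr = {"get_hits": 180, "get_misses": 40}
print(hit_pct(curr, prev, "get_hits", "get_misses"))  # 80.0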
watson-developer-cloud/python-sdk | ibm_watson/assistant_v1.py | https://github.com/watson-developer-cloud/python-sdk/blob/4c2c9df4466fcde88975da9ecd834e6ba95eb353/ibm_watson/assistant_v1.py#L5485-L5492 | def _to_dict(self):
"""Return a json dictionary representing this model."""
_dict = {}
if hasattr(self, 'intents') and self.intents is not None:
_dict['intents'] = [x._to_dict() for x in self.intents]
if hasattr(self, 'pagination') and self.pagination is not None:
_dict['pagination'] = self.pagination._to_dict()
return _dict | [
"def",
"_to_dict",
"(",
"self",
")",
":",
"_dict",
"=",
"{",
"}",
"if",
"hasattr",
"(",
"self",
",",
"'intents'",
")",
"and",
"self",
".",
"intents",
"is",
"not",
"None",
":",
"_dict",
"[",
"'intents'",
"]",
"=",
"[",
"x",
".",
"_to_dict",
"(",
")",
"for",
"x",
"in",
"self",
".",
"intents",
"]",
"if",
"hasattr",
"(",
"self",
",",
"'pagination'",
")",
"and",
"self",
".",
"pagination",
"is",
"not",
"None",
":",
"_dict",
"[",
"'pagination'",
"]",
"=",
"self",
".",
"pagination",
".",
"_to_dict",
"(",
")",
"return",
"_dict"
] | Return a json dictionary representing this model. | [
"Return",
"a",
"json",
"dictionary",
"representing",
"this",
"model",
"."
] | python | train |
michael-lazar/rtv | rtv/packages/praw/__init__.py | https://github.com/michael-lazar/rtv/blob/ccef2af042566ad384977028cf0bde01bc524dda/rtv/packages/praw/__init__.py#L491-L500 | def evict(self, urls):
"""Evict url(s) from the cache.
:param urls: An iterable containing normalized urls.
:returns: The number of items removed from the cache.
"""
if isinstance(urls, six.string_types):
urls = (urls,)
return self.handler.evict(urls) | [
"def",
"evict",
"(",
"self",
",",
"urls",
")",
":",
"if",
"isinstance",
"(",
"urls",
",",
"six",
".",
"string_types",
")",
":",
"urls",
"=",
"(",
"urls",
",",
")",
"return",
"self",
".",
"handler",
".",
"evict",
"(",
"urls",
")"
] | Evict url(s) from the cache.
:param urls: An iterable containing normalized urls.
:returns: The number of items removed from the cache. | [
"Evict",
"url",
"(",
"s",
")",
"from",
"the",
"cache",
"."
] | python | train |
evonove/django-stored-messages | stored_messages/api.py | https://github.com/evonove/django-stored-messages/blob/23b71f952d5d3fd03285f5e700879d05796ef7ba/stored_messages/api.py#L12-L28 | def add_message_for(users, level, message_text, extra_tags='', date=None, url=None, fail_silently=False):
"""
Send a message to a list of users without passing through `django.contrib.messages`
:param users: an iterable containing the recipients of the messages
:param level: message level
:param message_text: the string containing the message
:param extra_tags: like the Django api, a string containing extra tags for the message
:param date: a date, different from the default timezone.now
:param url: an optional url
:param fail_silently: not used at the moment
"""
BackendClass = stored_messages_settings.STORAGE_BACKEND
backend = BackendClass()
m = backend.create_message(level, message_text, extra_tags, date, url)
backend.archive_store(users, m)
backend.inbox_store(users, m) | [
"def",
"add_message_for",
"(",
"users",
",",
"level",
",",
"message_text",
",",
"extra_tags",
"=",
"''",
",",
"date",
"=",
"None",
",",
"url",
"=",
"None",
",",
"fail_silently",
"=",
"False",
")",
":",
"BackendClass",
"=",
"stored_messages_settings",
".",
"STORAGE_BACKEND",
"backend",
"=",
"BackendClass",
"(",
")",
"m",
"=",
"backend",
".",
"create_message",
"(",
"level",
",",
"message_text",
",",
"extra_tags",
",",
"date",
",",
"url",
")",
"backend",
".",
"archive_store",
"(",
"users",
",",
"m",
")",
"backend",
".",
"inbox_store",
"(",
"users",
",",
"m",
")"
] | Send a message to a list of users without passing through `django.contrib.messages`
:param users: an iterable containing the recipients of the messages
:param level: message level
:param message_text: the string containing the message
:param extra_tags: like the Django api, a string containing extra tags for the message
:param date: a date, different from the default timezone.now
:param url: an optional url
:param fail_silently: not used at the moment | [
"Send",
"a",
"message",
"to",
"a",
"list",
"of",
"users",
"without",
"passing",
"through",
"django",
".",
"contrib",
".",
"messages"
] | python | valid |
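Editor's note: a hedged usage sketch for add_message_for inside a Django view; the STORED_ERROR import path and the queryset are assumptions, not taken from the package docs:

from stored_messages import add_message_for
from stored_messages.constants import STORED_ERROR  # assumed import path

def notify_staff(request):
    # Broadcast one stored message to every staff user (hypothetical view).
    from django.contrib.auth import get_user_model
    staff = get_user_model().objects.filter(is_staff=True)
    add_message_for(staff, STORED_ERROR, "Nightly backup failed",
                    extra_tags="ops", url="/admin/backups/")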
etcher-be/emiz | emiz/avwx/__init__.py | https://github.com/etcher-be/emiz/blob/1c3e32711921d7e600e85558ffe5d337956372de/emiz/avwx/__init__.py#L148-L154 | def summary(self): # type: ignore
"""
Condensed summary for each forecast created from translations
"""
if not self.translations:
self.update()
return [summary.taf(trans) for trans in self.translations.forecast] | [
"def",
"summary",
"(",
"self",
")",
":",
"# type: ignore",
"if",
"not",
"self",
".",
"translations",
":",
"self",
".",
"update",
"(",
")",
"return",
"[",
"summary",
".",
"taf",
"(",
"trans",
")",
"for",
"trans",
"in",
"self",
".",
"translations",
".",
"forecast",
"]"
] | Condensed summary for each forecast created from translations | [
"Condensed",
"summary",
"for",
"each",
"forecast",
"created",
"from",
"translations"
] | python | train |
modin-project/modin | modin/pandas/indexing.py | https://github.com/modin-project/modin/blob/5b77d242596560c646b8405340c9ce64acb183cb/modin/pandas/indexing.py#L127-L140 | def _compute_ndim(row_loc, col_loc):
"""Compute the ndim of result from locators
"""
row_scaler = is_scalar(row_loc)
col_scaler = is_scalar(col_loc)
if row_scaler and col_scaler:
ndim = 0
elif row_scaler ^ col_scaler:
ndim = 1
else:
ndim = 2
return ndim | [
"def",
"_compute_ndim",
"(",
"row_loc",
",",
"col_loc",
")",
":",
"row_scaler",
"=",
"is_scalar",
"(",
"row_loc",
")",
"col_scaler",
"=",
"is_scalar",
"(",
"col_loc",
")",
"if",
"row_scaler",
"and",
"col_scaler",
":",
"ndim",
"=",
"0",
"elif",
"row_scaler",
"^",
"col_scaler",
":",
"ndim",
"=",
"1",
"else",
":",
"ndim",
"=",
"2",
"return",
"ndim"
] | Compute the ndim of result from locators | [
"Compute",
"the",
"ndim",
"of",
"result",
"from",
"locators"
] | python | train |
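Editor's note: a quick illustration of the ndim rule above -- 0-d when both locators are scalar, 1-d when exactly one is scalar, 2-d otherwise. The arithmetic one-liner is an editorial equivalent of the if/elif chain:

from pandas.api.types import is_scalar

def compute_ndim(row_loc, col_loc):
    # bools count as 0/1, so this matches the branching logic in the record
    return 2 - is_scalar(row_loc) - is_scalar(col_loc)

print(compute_ndim(0, "a"))            # 0 -> single value
print(compute_ndim(slice(None), "a"))  # 1 -> Series
print(compute_ndim([0, 1], ["a"]))     # 2 -> DataFrame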
sampsyo/confuse | setup.py | https://github.com/sampsyo/confuse/blob/9ff0992e30470f6822824711950e6dd906e253fb/setup.py#L21-L30 | def export_live_eggs(self, env=False):
"""Adds all of the eggs in the current environment to PYTHONPATH."""
path_eggs = [p for p in sys.path if p.endswith('.egg')]
command = self.get_finalized_command("egg_info")
egg_base = path.abspath(command.egg_base)
unique_path_eggs = set(path_eggs + [egg_base])
os.environ['PYTHONPATH'] = ':'.join(unique_path_eggs) | [
"def",
"export_live_eggs",
"(",
"self",
",",
"env",
"=",
"False",
")",
":",
"path_eggs",
"=",
"[",
"p",
"for",
"p",
"in",
"sys",
".",
"path",
"if",
"p",
".",
"endswith",
"(",
"'.egg'",
")",
"]",
"command",
"=",
"self",
".",
"get_finalized_command",
"(",
"\"egg_info\"",
")",
"egg_base",
"=",
"path",
".",
"abspath",
"(",
"command",
".",
"egg_base",
")",
"unique_path_eggs",
"=",
"set",
"(",
"path_eggs",
"+",
"[",
"egg_base",
"]",
")",
"os",
".",
"environ",
"[",
"'PYTHONPATH'",
"]",
"=",
"':'",
".",
"join",
"(",
"unique_path_eggs",
")"
] | Adds all of the eggs in the current environment to PYTHONPATH. | [
"Adds",
"all",
"of",
"the",
"eggs",
"in",
"the",
"current",
"environment",
"to",
"PYTHONPATH",
"."
] | python | train |
volafiled/python-volapi | volapi/volapi.py | https://github.com/volafiled/python-volapi/blob/5f0bc03dbde703264ac6ed494e2050761f688a3e/volapi/volapi.py#L232-L267 | def on_message(self, new_data):
"""Processes incoming messages according to engine-io rules"""
# https://github.com/socketio/engine.io-protocol
LOGGER.debug("new frame [%r]", new_data)
try:
what = int(new_data[0])
data = new_data[1:]
data = data and from_json(data)
if what == 0:
self.ping_interval = float(data["pingInterval"]) / 1000
LOGGER.debug("adjusted ping interval")
return
if what == 1:
LOGGER.debug("received close")
self.reraise(IOError("Connection closed remotely"))
return
if what == 3:
self.__lastpong = time.time()
LOGGER.debug("received a pong")
return
if what == 4:
self._on_frame(data)
return
if what == 6:
LOGGER.debug("received noop")
self.send_message("5")
return
LOGGER.debug("unhandled message: [%d] [%r]", what, data)
except Exception as ex:
self.reraise(ex) | [
"def",
"on_message",
"(",
"self",
",",
"new_data",
")",
":",
"# https://github.com/socketio/engine.io-protocol",
"LOGGER",
".",
"debug",
"(",
"\"new frame [%r]\"",
",",
"new_data",
")",
"try",
":",
"what",
"=",
"int",
"(",
"new_data",
"[",
"0",
"]",
")",
"data",
"=",
"new_data",
"[",
"1",
":",
"]",
"data",
"=",
"data",
"and",
"from_json",
"(",
"data",
")",
"if",
"what",
"==",
"0",
":",
"self",
".",
"ping_interval",
"=",
"float",
"(",
"data",
"[",
"\"pingInterval\"",
"]",
")",
"/",
"1000",
"LOGGER",
".",
"debug",
"(",
"\"adjusted ping interval\"",
")",
"return",
"if",
"what",
"==",
"1",
":",
"LOGGER",
".",
"debug",
"(",
"\"received close\"",
")",
"self",
".",
"reraise",
"(",
"IOError",
"(",
"\"Connection closed remotely\"",
")",
")",
"return",
"if",
"what",
"==",
"3",
":",
"self",
".",
"__lastpong",
"=",
"time",
".",
"time",
"(",
")",
"LOGGER",
".",
"debug",
"(",
"\"received a pong\"",
")",
"return",
"if",
"what",
"==",
"4",
":",
"self",
".",
"_on_frame",
"(",
"data",
")",
"return",
"if",
"what",
"==",
"6",
":",
"LOGGER",
".",
"debug",
"(",
"\"received noop\"",
")",
"self",
".",
"send_message",
"(",
"\"5\"",
")",
"return",
"LOGGER",
".",
"debug",
"(",
"\"unhandled message: [%d] [%r]\"",
",",
"what",
",",
"data",
")",
"except",
"Exception",
"as",
"ex",
":",
"self",
".",
"reraise",
"(",
"ex",
")"
] | Processes incoming messages according to engine-io rules | [
"Processes",
"incoming",
"messages",
"according",
"to",
"engine",
"-",
"io",
"rules"
] | python | train |
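Editor's note: a standalone sketch of the engine.io packet framing that on_message dispatches on. The numeric prefixes (0 open, 1 close, 3 pong, 4 message, 6 noop) follow the protocol cited in the code; the names are editorial, and message payloads are assumed to be JSON as in volapi:

import json

PACKET_TYPES = {0: "open", 1: "close", 2: "ping", 3: "pong",
                4: "message", 5: "upgrade", 6: "noop"}

def parse_engineio_frame(frame):
    # First character selects the packet type; the rest is the payload.
    kind = PACKET_TYPES.get(int(frame[0]), "unknown")
    payload = frame[1:]
    data = json.loads(payload) if payload else None
    return kind, data

print(parse_engineio_frame('0{"pingInterval": 25000}'))  # ('open', {'pingInterval': 25000})
print(parse_engineio_frame("3"))                         # ('pong', None)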
rflamary/POT | ot/dr.py | https://github.com/rflamary/POT/blob/c5108efc7b6702e1af3928bef1032e6b37734d1c/ot/dr.py#L110-L203 | def wda(X, y, p=2, reg=1, k=10, solver=None, maxiter=100, verbose=0, P0=None):
"""
Wasserstein Discriminant Analysis [11]_
The function solves the following optimization problem:
.. math::
P = \\text{arg}\min_P \\frac{\\sum_i W(PX^i,PX^i)}{\\sum_{i,j\\neq i} W(PX^i,PX^j)}
where :
- :math:`P` is a linear projection operator in the Stiefel(p,d) manifold
- :math:`W` is entropic regularized Wasserstein distances
- :math:`X^i` are samples in the dataset corresponding to class i
Parameters
----------
X : numpy.ndarray (n,d)
Training samples
y : np.ndarray (n,)
labels for training samples
p : int, optional
size of dimensionnality reduction
reg : float, optional
Regularization term >0 (entropic regularization)
solver : str, optional
None for steepest descent or 'TrustRegions' for the trust-regions
algorithm; otherwise should be a pymanopt.solvers solver
P0 : numpy.ndarray (d,p)
Initial starting point for projection
verbose : int, optional
Print information along iterations
Returns
-------
P : (d x p) ndarray
Optimal transportation matrix for the given parameters
proj : fun
projection function including mean centering
References
----------
.. [11] Flamary, R., Cuturi, M., Courty, N., & Rakotomamonjy, A. (2016). Wasserstein Discriminant Analysis. arXiv preprint arXiv:1608.08063.
""" # noqa
mx = np.mean(X)
X -= mx.reshape((1, -1))
# data split between classes
d = X.shape[1]
xc = split_classes(X, y)
# compute uniform weights
wc = [np.ones((x.shape[0]), dtype=np.float32) / x.shape[0] for x in xc]
def cost(P):
# wda loss
loss_b = 0
loss_w = 0
for i, xi in enumerate(xc):
xi = np.dot(xi, P)
for j, xj in enumerate(xc[i:]):
xj = np.dot(xj, P)
M = dist(xi, xj)
G = sinkhorn(wc[i], wc[j + i], M, reg, k)
if j == 0:
loss_w += np.sum(G * M)
else:
loss_b += np.sum(G * M)
# loss inverted because we minimize
return loss_w / loss_b
# declare manifold and problem
manifold = Stiefel(d, p)
problem = Problem(manifold=manifold, cost=cost)
# declare solver and solve
if solver is None:
solver = SteepestDescent(maxiter=maxiter, logverbosity=verbose)
elif solver in ['tr', 'TrustRegions']:
solver = TrustRegions(maxiter=maxiter, logverbosity=verbose)
Popt = solver.solve(problem, x=P0)
def proj(X):
return (X - mx.reshape((1, -1))).dot(Popt)
return Popt, proj | [
"def",
"wda",
"(",
"X",
",",
"y",
",",
"p",
"=",
"2",
",",
"reg",
"=",
"1",
",",
"k",
"=",
"10",
",",
"solver",
"=",
"None",
",",
"maxiter",
"=",
"100",
",",
"verbose",
"=",
"0",
",",
"P0",
"=",
"None",
")",
":",
"# noqa",
"mx",
"=",
"np",
".",
"mean",
"(",
"X",
")",
"X",
"-=",
"mx",
".",
"reshape",
"(",
"(",
"1",
",",
"-",
"1",
")",
")",
"# data split between classes",
"d",
"=",
"X",
".",
"shape",
"[",
"1",
"]",
"xc",
"=",
"split_classes",
"(",
"X",
",",
"y",
")",
"# compute uniform weighs",
"wc",
"=",
"[",
"np",
".",
"ones",
"(",
"(",
"x",
".",
"shape",
"[",
"0",
"]",
")",
",",
"dtype",
"=",
"np",
".",
"float32",
")",
"/",
"x",
".",
"shape",
"[",
"0",
"]",
"for",
"x",
"in",
"xc",
"]",
"def",
"cost",
"(",
"P",
")",
":",
"# wda loss",
"loss_b",
"=",
"0",
"loss_w",
"=",
"0",
"for",
"i",
",",
"xi",
"in",
"enumerate",
"(",
"xc",
")",
":",
"xi",
"=",
"np",
".",
"dot",
"(",
"xi",
",",
"P",
")",
"for",
"j",
",",
"xj",
"in",
"enumerate",
"(",
"xc",
"[",
"i",
":",
"]",
")",
":",
"xj",
"=",
"np",
".",
"dot",
"(",
"xj",
",",
"P",
")",
"M",
"=",
"dist",
"(",
"xi",
",",
"xj",
")",
"G",
"=",
"sinkhorn",
"(",
"wc",
"[",
"i",
"]",
",",
"wc",
"[",
"j",
"+",
"i",
"]",
",",
"M",
",",
"reg",
",",
"k",
")",
"if",
"j",
"==",
"0",
":",
"loss_w",
"+=",
"np",
".",
"sum",
"(",
"G",
"*",
"M",
")",
"else",
":",
"loss_b",
"+=",
"np",
".",
"sum",
"(",
"G",
"*",
"M",
")",
"# loss inversed because minimization",
"return",
"loss_w",
"/",
"loss_b",
"# declare manifold and problem",
"manifold",
"=",
"Stiefel",
"(",
"d",
",",
"p",
")",
"problem",
"=",
"Problem",
"(",
"manifold",
"=",
"manifold",
",",
"cost",
"=",
"cost",
")",
"# declare solver and solve",
"if",
"solver",
"is",
"None",
":",
"solver",
"=",
"SteepestDescent",
"(",
"maxiter",
"=",
"maxiter",
",",
"logverbosity",
"=",
"verbose",
")",
"elif",
"solver",
"in",
"[",
"'tr'",
",",
"'TrustRegions'",
"]",
":",
"solver",
"=",
"TrustRegions",
"(",
"maxiter",
"=",
"maxiter",
",",
"logverbosity",
"=",
"verbose",
")",
"Popt",
"=",
"solver",
".",
"solve",
"(",
"problem",
",",
"x",
"=",
"P0",
")",
"def",
"proj",
"(",
"X",
")",
":",
"return",
"(",
"X",
"-",
"mx",
".",
"reshape",
"(",
"(",
"1",
",",
"-",
"1",
")",
")",
")",
".",
"dot",
"(",
"Popt",
")",
"return",
"Popt",
",",
"proj"
] | Wasserstein Discriminant Analysis [11]_
The function solves the following optimization problem:
.. math::
P = \\text{arg}\min_P \\frac{\\sum_i W(PX^i,PX^i)}{\\sum_{i,j\\neq i} W(PX^i,PX^j)}
where :
- :math:`P` is a linear projection operator in the Stiefel(p,d) manifold
- :math:`W` is entropic regularized Wasserstein distances
- :math:`X^i` are samples in the dataset corresponding to class i
Parameters
----------
X : numpy.ndarray (n,d)
Training samples
y : np.ndarray (n,)
labels for training samples
p : int, optional
size of dimensionnality reduction
reg : float, optional
Regularization term >0 (entropic regularization)
solver : str, optional
None for steepest descent or 'TrustRegions' for the trust-regions
algorithm; otherwise should be a pymanopt.solvers solver
P0 : numpy.ndarray (d,p)
Initial starting point for projection
verbose : int, optional
Print information along iterations
Returns
-------
P : (d x p) ndarray
Optimal transportation matrix for the given parameters
proj : fun
projection function including mean centering
References
----------
.. [11] Flamary, R., Cuturi, M., Courty, N., & Rakotomamonjy, A. (2016). Wasserstein Discriminant Analysis. arXiv preprint arXiv:1608.08063. | [
"Wasserstein",
"Discriminant",
"Analysis",
"[",
"11",
"]",
"_"
] | python | train |
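Editor's note: a minimal usage sketch for wda on toy Gaussian blobs, assuming pymanopt and autograd are installed (dependencies of ot.dr); shapes and hyper-parameters are illustrative only:

import numpy as np
from ot.dr import wda  # requires pymanopt + autograd

rng = np.random.RandomState(0)
n, d = 50, 5
X = np.vstack([rng.randn(n, d) + 2, rng.randn(n, d) - 2])
y = np.array([0] * n + [1] * n)

Popt, proj = wda(X, y, p=2, reg=1.0, k=10, maxiter=20)
print(Popt.shape)     # (5, 2): orthonormal columns on the Stiefel manifold
print(proj(X).shape)  # (100, 2): mean-centered, projected samples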
ankitmathur3193/song-cli | song/commands/FileDownload.py | https://github.com/ankitmathur3193/song-cli/blob/ca8ccfe547e9d702313ff6d14e81ae4355989a67/song/commands/FileDownload.py#L27-L74 | def file_download_using_requests(self,url):
'''It will download file specified by url using requests module'''
file_name=url.split('/')[-1]
if os.path.exists(os.path.join(os.getcwd(),file_name)):
print 'File already exists'
return
#print 'Downloading file %s '%file_name
#print 'Downloading from %s'%url
try:
r=requests.get(url,stream=True,timeout=200)
except requests.exceptions.SSLError:
try:
response=requests.get(url,stream=True,verify=False,timeout=200)
except requests.exceptions.RequestException as e:
print e
quit()
except requests.exceptions.RequestException as e:
print e
quit()
chunk_size = 1024
total_size = int(r.headers['Content-Length'])
total_chunks = total_size/chunk_size
file_iterable = r.iter_content(chunk_size = chunk_size)
tqdm_iter = tqdm(iterable = file_iterable,total = total_chunks,unit = 'KB',
leave = False
)
with open(file_name,'wb') as f:
for data in tqdm_iter:
f.write(data)
#total_size=float(r.headers['Content-Length'])/(1024*1024)
'''print 'Total size of file to be downloaded %.2f MB '%total_size
total_downloaded_size=0.0
with open(file_name,'wb') as f:
for chunk in r.iter_content(chunk_size=1*1024*1024):
if chunk:
size_of_chunk=float(len(chunk))/(1024*1024)
total_downloaded_size+=size_of_chunk
print '{0:.0%} Downloaded'.format(total_downloaded_size/total_size)
f.write(chunk)'''
print 'Downloaded file %s '%file_name | [
"def",
"file_download_using_requests",
"(",
"self",
",",
"url",
")",
":",
"file_name",
"=",
"url",
".",
"split",
"(",
"'/'",
")",
"[",
"-",
"1",
"]",
"if",
"os",
".",
"path",
".",
"exists",
"(",
"os",
".",
"path",
".",
"join",
"(",
"os",
".",
"getcwd",
"(",
")",
",",
"file_name",
")",
")",
":",
"print",
"'File already exists'",
"return",
"#print 'Downloading file %s '%file_name",
"#print 'Downloading from %s'%url",
"try",
":",
"r",
"=",
"requests",
".",
"get",
"(",
"url",
",",
"stream",
"=",
"True",
",",
"timeout",
"=",
"200",
")",
"except",
"requests",
".",
"exceptions",
".",
"SSLError",
":",
"try",
":",
"response",
"=",
"requests",
".",
"get",
"(",
"url",
",",
"stream",
"=",
"True",
",",
"verify",
"=",
"False",
",",
"timeout",
"=",
"200",
")",
"except",
"requests",
".",
"exceptions",
".",
"RequestException",
"as",
"e",
":",
"print",
"e",
"quit",
"(",
")",
"except",
"requests",
".",
"exceptions",
".",
"RequestException",
"as",
"e",
":",
"print",
"e",
"quit",
"(",
")",
"chunk_size",
"=",
"1024",
"total_size",
"=",
"int",
"(",
"r",
".",
"headers",
"[",
"'Content-Length'",
"]",
")",
"total_chunks",
"=",
"total_size",
"/",
"chunk_size",
"file_iterable",
"=",
"r",
".",
"iter_content",
"(",
"chunk_size",
"=",
"chunk_size",
")",
"tqdm_iter",
"=",
"tqdm",
"(",
"iterable",
"=",
"file_iterable",
",",
"total",
"=",
"total_chunks",
",",
"unit",
"=",
"'KB'",
",",
"leave",
"=",
"False",
")",
"with",
"open",
"(",
"file_name",
",",
"'wb'",
")",
"as",
"f",
":",
"for",
"data",
"in",
"tqdm_iter",
":",
"f",
".",
"write",
"(",
"data",
")",
"#total_size=float(r.headers['Content-Length'])/(1024*1024)",
"'''print 'Total size of file to be downloaded %.2f MB '%total_size\n\t\ttotal_downloaded_size=0.0\n\t\twith open(file_name,'wb') as f:\n\t\t\tfor chunk in r.iter_content(chunk_size=1*1024*1024):\n\t\t\t\tif chunk:\n\t\t\t\t\tsize_of_chunk=float(len(chunk))/(1024*1024)\n\t\t\t\t\ttotal_downloaded_size+=size_of_chunk\n\t\t\t\t\tprint '{0:.0%} Downloaded'.format(total_downloaded_size/total_size)\n\t\t\t\t\tf.write(chunk)'''",
"print",
"'Downloaded file %s '",
"%",
"file_name"
] | It will download the file specified by url using the requests module | [
"It",
"will",
"download",
"file",
"specified",
"by",
"url",
"using",
"requests",
"module"
] | python | test |
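Editor's note: the record above is Python 2 and carries a commented-out earlier version. A hedged Python 3 rendition of the same streaming-download-with-progress pattern, for readers porting it:

import os
import requests
from tqdm import tqdm

def download(url, chunk_size=1024, timeout=200):
    file_name = url.split("/")[-1]
    if os.path.exists(file_name):
        print("File already exists")
        return
    r = requests.get(url, stream=True, timeout=timeout)
    r.raise_for_status()
    total = int(r.headers.get("Content-Length", 0)) // chunk_size
    with open(file_name, "wb") as f:
        # iterate the body in fixed-size chunks, showing progress per chunk
        for chunk in tqdm(r.iter_content(chunk_size=chunk_size),
                          total=total or None, unit="KB", leave=False):
            f.write(chunk)
    print("Downloaded file %s" % file_name)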
GoogleCloudPlatform/cloud-debug-python | src/googleclouddebugger/capture_collector.py | https://github.com/GoogleCloudPlatform/cloud-debug-python/blob/89ce3782c98b814838a3ecb5479ed3882368cbee/src/googleclouddebugger/capture_collector.py#L360-L383 | def CaptureFrameLocals(self, frame):
"""Captures local variables and arguments of the specified frame.
Args:
frame: frame to capture locals and arguments.
Returns:
(arguments, locals) tuple.
"""
# Capture all local variables (including method arguments).
variables = {n: self.CaptureNamedVariable(n, v, 1,
self.default_capture_limits)
for n, v in six.viewitems(frame.f_locals)}
# Split between locals and arguments (keeping arguments in the right order).
nargs = frame.f_code.co_argcount
if frame.f_code.co_flags & inspect.CO_VARARGS: nargs += 1
if frame.f_code.co_flags & inspect.CO_VARKEYWORDS: nargs += 1
frame_arguments = []
for argname in frame.f_code.co_varnames[:nargs]:
if argname in variables: frame_arguments.append(variables.pop(argname))
return (frame_arguments, list(six.viewvalues(variables))) | [
"def",
"CaptureFrameLocals",
"(",
"self",
",",
"frame",
")",
":",
"# Capture all local variables (including method arguments).",
"variables",
"=",
"{",
"n",
":",
"self",
".",
"CaptureNamedVariable",
"(",
"n",
",",
"v",
",",
"1",
",",
"self",
".",
"default_capture_limits",
")",
"for",
"n",
",",
"v",
"in",
"six",
".",
"viewitems",
"(",
"frame",
".",
"f_locals",
")",
"}",
"# Split between locals and arguments (keeping arguments in the right order).",
"nargs",
"=",
"frame",
".",
"f_code",
".",
"co_argcount",
"if",
"frame",
".",
"f_code",
".",
"co_flags",
"&",
"inspect",
".",
"CO_VARARGS",
":",
"nargs",
"+=",
"1",
"if",
"frame",
".",
"f_code",
".",
"co_flags",
"&",
"inspect",
".",
"CO_VARKEYWORDS",
":",
"nargs",
"+=",
"1",
"frame_arguments",
"=",
"[",
"]",
"for",
"argname",
"in",
"frame",
".",
"f_code",
".",
"co_varnames",
"[",
":",
"nargs",
"]",
":",
"if",
"argname",
"in",
"variables",
":",
"frame_arguments",
".",
"append",
"(",
"variables",
".",
"pop",
"(",
"argname",
")",
")",
"return",
"(",
"frame_arguments",
",",
"list",
"(",
"six",
".",
"viewvalues",
"(",
"variables",
")",
")",
")"
] | Captures local variables and arguments of the specified frame.
Args:
frame: frame to capture locals and arguments.
Returns:
(arguments, locals) tuple. | [
"Captures",
"local",
"variables",
"and",
"arguments",
"of",
"the",
"specified",
"frame",
"."
] | python | train |
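Editor's note: a self-contained illustration of the arguments-vs-locals split performed above, using the same co_argcount / CO_VARARGS / CO_VARKEYWORDS bookkeeping on a live frame:

import inspect
import sys

def demo(a, b, *args, **kwargs):
    local_var = a + b
    frame = sys._getframe()
    nargs = frame.f_code.co_argcount
    if frame.f_code.co_flags & inspect.CO_VARARGS:
        nargs += 1
    if frame.f_code.co_flags & inspect.CO_VARKEYWORDS:
        nargs += 1
    arg_names = frame.f_code.co_varnames[:nargs]  # args stay in order
    return arg_names, sorted(set(frame.f_locals) - set(arg_names))

print(demo(1, 2, 3, x=4))
# (('a', 'b', 'args', 'kwargs'), ['arg_names', 'frame', 'local_var', 'nargs'])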
pyviz/holoviews | holoviews/core/data/interface.py | https://github.com/pyviz/holoviews/blob/ae0dd2f3de448b0ca5e9065aabd6ef8d84c7e655/holoviews/core/data/interface.py#L278-L314 | def select_mask(cls, dataset, selection):
"""
Given a Dataset object and a dictionary with dimension keys and
selection keys (i.e. tuple ranges, slices, sets, lists or literals)
return a boolean mask over the rows in the Dataset object that
have been selected.
"""
mask = np.ones(len(dataset), dtype=np.bool)
for dim, k in selection.items():
if isinstance(k, tuple):
k = slice(*k)
arr = cls.values(dataset, dim)
if isinstance(k, slice):
with warnings.catch_warnings():
warnings.filterwarnings('ignore', r'invalid value encountered')
if k.start is not None:
mask &= k.start <= arr
if k.stop is not None:
mask &= arr < k.stop
elif isinstance(k, (set, list)):
iter_slcs = []
for ik in k:
with warnings.catch_warnings():
warnings.filterwarnings('ignore', r'invalid value encountered')
iter_slcs.append(arr == ik)
mask &= np.logical_or.reduce(iter_slcs)
elif callable(k):
mask &= k(arr)
else:
index_mask = arr == k
if dataset.ndims == 1 and np.sum(index_mask) == 0:
data_index = np.argmin(np.abs(arr - k))
mask = np.zeros(len(dataset), dtype=np.bool)
mask[data_index] = True
else:
mask &= index_mask
return mask | [
"def",
"select_mask",
"(",
"cls",
",",
"dataset",
",",
"selection",
")",
":",
"mask",
"=",
"np",
".",
"ones",
"(",
"len",
"(",
"dataset",
")",
",",
"dtype",
"=",
"np",
".",
"bool",
")",
"for",
"dim",
",",
"k",
"in",
"selection",
".",
"items",
"(",
")",
":",
"if",
"isinstance",
"(",
"k",
",",
"tuple",
")",
":",
"k",
"=",
"slice",
"(",
"*",
"k",
")",
"arr",
"=",
"cls",
".",
"values",
"(",
"dataset",
",",
"dim",
")",
"if",
"isinstance",
"(",
"k",
",",
"slice",
")",
":",
"with",
"warnings",
".",
"catch_warnings",
"(",
")",
":",
"warnings",
".",
"filterwarnings",
"(",
"'ignore'",
",",
"r'invalid value encountered'",
")",
"if",
"k",
".",
"start",
"is",
"not",
"None",
":",
"mask",
"&=",
"k",
".",
"start",
"<=",
"arr",
"if",
"k",
".",
"stop",
"is",
"not",
"None",
":",
"mask",
"&=",
"arr",
"<",
"k",
".",
"stop",
"elif",
"isinstance",
"(",
"k",
",",
"(",
"set",
",",
"list",
")",
")",
":",
"iter_slcs",
"=",
"[",
"]",
"for",
"ik",
"in",
"k",
":",
"with",
"warnings",
".",
"catch_warnings",
"(",
")",
":",
"warnings",
".",
"filterwarnings",
"(",
"'ignore'",
",",
"r'invalid value encountered'",
")",
"iter_slcs",
".",
"append",
"(",
"arr",
"==",
"ik",
")",
"mask",
"&=",
"np",
".",
"logical_or",
".",
"reduce",
"(",
"iter_slcs",
")",
"elif",
"callable",
"(",
"k",
")",
":",
"mask",
"&=",
"k",
"(",
"arr",
")",
"else",
":",
"index_mask",
"=",
"arr",
"==",
"k",
"if",
"dataset",
".",
"ndims",
"==",
"1",
"and",
"np",
".",
"sum",
"(",
"index_mask",
")",
"==",
"0",
":",
"data_index",
"=",
"np",
".",
"argmin",
"(",
"np",
".",
"abs",
"(",
"arr",
"-",
"k",
")",
")",
"mask",
"=",
"np",
".",
"zeros",
"(",
"len",
"(",
"dataset",
")",
",",
"dtype",
"=",
"np",
".",
"bool",
")",
"mask",
"[",
"data_index",
"]",
"=",
"True",
"else",
":",
"mask",
"&=",
"index_mask",
"return",
"mask"
] | Given a Dataset object and a dictionary with dimension keys and
selection keys (i.e. tuple ranges, slices, sets, lists or literals)
return a boolean mask over the rows in the Dataset object that
have been selected. | [
"Given",
"a",
"Dataset",
"object",
"and",
"a",
"dictionary",
"with",
"dimension",
"keys",
"and",
"selection",
"keys",
"(",
"i",
".",
"e",
"tuple",
"ranges",
"slices",
"sets",
"lists",
"or",
"literals",
")",
"return",
"a",
"boolean",
"mask",
"over",
"the",
"rows",
"in",
"the",
"Dataset",
"object",
"that",
"have",
"been",
"selected",
"."
] | python | train |
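Editor's note: a small demonstration of the selection semantics implemented above -- tuples become half-open ranges, lists/sets are OR-ed together, and the running mask is AND-ed across dimensions:

import numpy as np

arr = np.array([0.0, 1.0, 2.0, 3.0, 4.0])
mask = np.ones(len(arr), dtype=bool)

# tuple (1.0, 4.0) -> slice(1.0, 4.0): start <= arr < stop
mask &= (arr >= 1.0) & (arr < 4.0)
# list [1.0, 3.0] -> OR of equality tests
mask &= np.logical_or.reduce([arr == v for v in [1.0, 3.0]])
print(arr[mask])  # [1. 3.]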
gwastro/pycbc | pycbc/psd/analytical.py | https://github.com/gwastro/pycbc/blob/7a64cdd104d263f1b6ea0b01e6841837d05a4cb3/pycbc/psd/analytical.py#L124-L144 | def flat_unity(length, delta_f, low_freq_cutoff):
""" Returns a FrequencySeries of ones above the low_frequency_cutoff.
Parameters
----------
length : int
Length of output FrequencySeries.
delta_f : float
Frequency step for output FrequencySeries.
low_freq_cutoff : int
Low-frequency cutoff for output FrequencySeries.
Returns
-------
FrequencySeries
Returns a FrequencySeries containing the unity PSD model.
"""
fseries = FrequencySeries(numpy.ones(length), delta_f=delta_f)
kmin = int(low_freq_cutoff / fseries.delta_f)
fseries.data[:kmin] = 0
return fseries | [
"def",
"flat_unity",
"(",
"length",
",",
"delta_f",
",",
"low_freq_cutoff",
")",
":",
"fseries",
"=",
"FrequencySeries",
"(",
"numpy",
".",
"ones",
"(",
"length",
")",
",",
"delta_f",
"=",
"delta_f",
")",
"kmin",
"=",
"int",
"(",
"low_freq_cutoff",
"/",
"fseries",
".",
"delta_f",
")",
"fseries",
".",
"data",
"[",
":",
"kmin",
"]",
"=",
"0",
"return",
"fseries"
] | Returns a FrequencySeries of ones above the low_frequency_cutoff.
Parameters
----------
length : int
Length of output FrequencySeries.
delta_f : float
Frequency step for output FrequencySeries.
low_freq_cutoff : int
Low-frequency cutoff for output FrequencySeries.
Returns
-------
FrequencySeries
Returns a FrequencySeries containing the unity PSD model. | [
"Returns",
"a",
"FrequencySeries",
"of",
"ones",
"above",
"the",
"low_frequency_cutoff",
"."
] | python | train |
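Editor's note: a quick check of the unity PSD above -- all ones except the bins below the cutoff; requires pycbc:

from pycbc.psd.analytical import flat_unity

psd = flat_unity(length=8, delta_f=1.0, low_freq_cutoff=3)
print(psd.numpy())  # [0. 0. 0. 1. 1. 1. 1. 1.]  (kmin = int(3 / 1.0) = 3)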
projectatomic/atomic-reactor | atomic_reactor/util.py | https://github.com/projectatomic/atomic-reactor/blob/fd31c01b964097210bf169960d051e5f04019a80/atomic_reactor/util.py#L1584-L1592 | def update_from_dict(self, source):
"""Update records of the digests of images from a dictionary
(no validation is performed)
:param dict source: data
"""
assert isinstance(source, dict)
source_copy = deepcopy(source) # no mutable side effects
self._images_digests.update(source_copy) | [
"def",
"update_from_dict",
"(",
"self",
",",
"source",
")",
":",
"assert",
"isinstance",
"(",
"source",
",",
"dict",
")",
"source_copy",
"=",
"deepcopy",
"(",
"source",
")",
"# no mutable side effects",
"self",
".",
"_images_digests",
".",
"update",
"(",
"source_copy",
")"
] | Update records of the digests of images from a dictionary
(no validation is performed)
:param dict source: data | [
"Update",
"records",
"of",
"the",
"digests",
"of",
"images",
"from",
"a",
"dictionary",
"(",
"no",
"validation",
"is",
"performed",
")"
] | python | train |
internetarchive/brozzler | brozzler/model.py | https://github.com/internetarchive/brozzler/blob/411b3f266a38b9bb942021c0121ebd8e5ca66447/brozzler/model.py#L74-L108 | def new_job(frontier, job_conf):
'''Returns a new Job.'''
validate_conf(job_conf)
job = Job(frontier.rr, {
"conf": job_conf, "status": "ACTIVE",
"started": doublethink.utcnow()})
if "id" in job_conf:
job.id = job_conf["id"]
if "max_claimed_sites" in job_conf:
job.max_claimed_sites = job_conf["max_claimed_sites"]
job.save()
sites = []
pages = []
for seed_conf in job_conf["seeds"]:
merged_conf = merge(seed_conf, job_conf)
merged_conf.pop("seeds")
merged_conf["job_id"] = job.id
merged_conf["seed"] = merged_conf.pop("url")
site = brozzler.Site(frontier.rr, merged_conf)
site.id = str(uuid.uuid4())
sites.append(site)
pages.append(new_seed_page(frontier, site))
# insert in batches to avoid this error
# rethinkdb.errors.ReqlDriverError: Query size (167883036) greater than maximum (134217727) in:
for batch in (pages[i:i+500] for i in range(0, len(pages), 500)):
logging.info('inserting batch of %s pages', len(batch))
result = frontier.rr.table('pages').insert(batch).run()
for batch in (sites[i:i+100] for i in range(0, len(sites), 100)):
logging.info('inserting batch of %s sites', len(batch))
result = frontier.rr.table('sites').insert(batch).run()
logging.info('job %s fully started', job.id)
return job | [
"def",
"new_job",
"(",
"frontier",
",",
"job_conf",
")",
":",
"validate_conf",
"(",
"job_conf",
")",
"job",
"=",
"Job",
"(",
"frontier",
".",
"rr",
",",
"{",
"\"conf\"",
":",
"job_conf",
",",
"\"status\"",
":",
"\"ACTIVE\"",
",",
"\"started\"",
":",
"doublethink",
".",
"utcnow",
"(",
")",
"}",
")",
"if",
"\"id\"",
"in",
"job_conf",
":",
"job",
".",
"id",
"=",
"job_conf",
"[",
"\"id\"",
"]",
"if",
"\"max_claimed_sites\"",
"in",
"job_conf",
":",
"job",
".",
"max_claimed_sites",
"=",
"job_conf",
"[",
"\"max_claimed_sites\"",
"]",
"job",
".",
"save",
"(",
")",
"sites",
"=",
"[",
"]",
"pages",
"=",
"[",
"]",
"for",
"seed_conf",
"in",
"job_conf",
"[",
"\"seeds\"",
"]",
":",
"merged_conf",
"=",
"merge",
"(",
"seed_conf",
",",
"job_conf",
")",
"merged_conf",
".",
"pop",
"(",
"\"seeds\"",
")",
"merged_conf",
"[",
"\"job_id\"",
"]",
"=",
"job",
".",
"id",
"merged_conf",
"[",
"\"seed\"",
"]",
"=",
"merged_conf",
".",
"pop",
"(",
"\"url\"",
")",
"site",
"=",
"brozzler",
".",
"Site",
"(",
"frontier",
".",
"rr",
",",
"merged_conf",
")",
"site",
".",
"id",
"=",
"str",
"(",
"uuid",
".",
"uuid4",
"(",
")",
")",
"sites",
".",
"append",
"(",
"site",
")",
"pages",
".",
"append",
"(",
"new_seed_page",
"(",
"frontier",
",",
"site",
")",
")",
"# insert in batches to avoid this error",
"# rethinkdb.errors.ReqlDriverError: Query size (167883036) greater than maximum (134217727) in:",
"for",
"batch",
"in",
"(",
"pages",
"[",
"i",
":",
"i",
"+",
"500",
"]",
"for",
"i",
"in",
"range",
"(",
"0",
",",
"len",
"(",
"pages",
")",
",",
"500",
")",
")",
":",
"logging",
".",
"info",
"(",
"'inserting batch of %s pages'",
",",
"len",
"(",
"batch",
")",
")",
"result",
"=",
"frontier",
".",
"rr",
".",
"table",
"(",
"'pages'",
")",
".",
"insert",
"(",
"batch",
")",
".",
"run",
"(",
")",
"for",
"batch",
"in",
"(",
"sites",
"[",
"i",
":",
"i",
"+",
"100",
"]",
"for",
"i",
"in",
"range",
"(",
"0",
",",
"len",
"(",
"sites",
")",
",",
"100",
")",
")",
":",
"logging",
".",
"info",
"(",
"'inserting batch of %s sites'",
",",
"len",
"(",
"batch",
")",
")",
"result",
"=",
"frontier",
".",
"rr",
".",
"table",
"(",
"'sites'",
")",
".",
"insert",
"(",
"batch",
")",
".",
"run",
"(",
")",
"logging",
".",
"info",
"(",
"'job %s fully started'",
",",
"job",
".",
"id",
")",
"return",
"job"
] | Returns a new Job. | [
"Returns",
"new",
"Job",
"."
] | python | train |
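Editor's note: a standalone sketch of the batched-insert idiom used above; successive fixed-size slices keep any single query under the backend's size limit:

def batches(items, size):
    # yield items[0:size], items[size:2*size], ... until exhausted
    return (items[i:i + size] for i in range(0, len(items), size))

pages = list(range(1203))
print([len(b) for b in batches(pages, 500)])  # [500, 500, 203]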
threeML/astromodels | astromodels/core/model.py | https://github.com/threeML/astromodels/blob/9aac365a372f77603039533df9a6b694c1e360d5/astromodels/core/model.py#L392-L403 | def remove_independent_variable(self, variable_name):
"""
Remove an independent variable which was added with add_independent_variable
:param variable_name: name of variable to remove
:return:
"""
self._remove_child(variable_name)
# Remove also from the list of independent variables
self._independent_variables.pop(variable_name) | [
"def",
"remove_independent_variable",
"(",
"self",
",",
"variable_name",
")",
":",
"self",
".",
"_remove_child",
"(",
"variable_name",
")",
"# Remove also from the list of independent variables",
"self",
".",
"_independent_variables",
".",
"pop",
"(",
"variable_name",
")"
] | Remove an independent variable which was added with add_independent_variable
:param variable_name: name of variable to remove
:return: | [
"Remove",
"an",
"independent",
"variable",
"which",
"was",
"added",
"with",
"add_independent_variable"
] | python | train |
senaite/senaite.core | bika/lims/content/abstractanalysis.py | https://github.com/senaite/senaite.core/blob/7602ce2ea2f9e81eb34e20ce17b98a3e70713f85/bika/lims/content/abstractanalysis.py#L1123-L1129 | def getAttachmentUIDs(self):
"""Used to populate metadata, so that we don't need full objects of
analyses when working with their attachments.
"""
attachments = self.getAttachment()
uids = [att.UID() for att in attachments]
return uids | [
"def",
"getAttachmentUIDs",
"(",
"self",
")",
":",
"attachments",
"=",
"self",
".",
"getAttachment",
"(",
")",
"uids",
"=",
"[",
"att",
".",
"UID",
"(",
")",
"for",
"att",
"in",
"attachments",
"]",
"return",
"uids"
] | Used to populate metadata, so that we don't need full objects of
analyses when working with their attachments. | [
"Used",
"to",
"populate",
"metadata",
"so",
"that",
"we",
"don",
"t",
"need",
"full",
"objects",
"of",
"analyses",
"when",
"working",
"with",
"their",
"attachments",
"."
] | python | train |
titusjan/argos | argos/utils/masks.py | https://github.com/titusjan/argos/blob/20d0a3cae26c36ea789a5d219c02ca7df21279dd/argos/utils/masks.py#L338-L367 | def maskedEqual(array, missingValue):
""" Mask an array where equal to a given (missing)value.
Unfortunately ma.masked_equal does not work with structured arrays. See:
https://mail.scipy.org/pipermail/numpy-discussion/2011-July/057669.html
If the data is a structured array the mask is applied for every field (i.e. forming a
logical-and). Otherwise ma.masked_equal is called.
"""
if array_is_structured(array):
# Enforce the array to be masked
if not isinstance(array, ma.MaskedArray):
array = ma.MaskedArray(array)
# Set the mask separately per field
for nr, field in enumerate(array.dtype.names):
if hasattr(missingValue, '__len__'):
fieldMissingValue = missingValue[nr]
else:
fieldMissingValue = missingValue
array[field] = ma.masked_equal(array[field], fieldMissingValue)
check_class(array, ma.MaskedArray) # post-condition check
return array
else:
# masked_equal works with missing is None
result = ma.masked_equal(array, missingValue, copy=False)
check_class(result, ma.MaskedArray) # post-condition check
return result | [
"def",
"maskedEqual",
"(",
"array",
",",
"missingValue",
")",
":",
"if",
"array_is_structured",
"(",
"array",
")",
":",
"# Enforce the array to be masked",
"if",
"not",
"isinstance",
"(",
"array",
",",
"ma",
".",
"MaskedArray",
")",
":",
"array",
"=",
"ma",
".",
"MaskedArray",
"(",
"array",
")",
"# Set the mask separately per field",
"for",
"nr",
",",
"field",
"in",
"enumerate",
"(",
"array",
".",
"dtype",
".",
"names",
")",
":",
"if",
"hasattr",
"(",
"missingValue",
",",
"'__len__'",
")",
":",
"fieldMissingValue",
"=",
"missingValue",
"[",
"nr",
"]",
"else",
":",
"fieldMissingValue",
"=",
"missingValue",
"array",
"[",
"field",
"]",
"=",
"ma",
".",
"masked_equal",
"(",
"array",
"[",
"field",
"]",
",",
"fieldMissingValue",
")",
"check_class",
"(",
"array",
",",
"ma",
".",
"MaskedArray",
")",
"# post-condition check",
"return",
"array",
"else",
":",
"# masked_equal works with missing is None",
"result",
"=",
"ma",
".",
"masked_equal",
"(",
"array",
",",
"missingValue",
",",
"copy",
"=",
"False",
")",
"check_class",
"(",
"result",
",",
"ma",
".",
"MaskedArray",
")",
"# post-condition check",
"return",
"result"
] | Mask an array where equal to a given (missing) value.
Unfortunately ma.masked_equal does not work with structured arrays. See:
https://mail.scipy.org/pipermail/numpy-discussion/2011-July/057669.html
If the data is a structured array the mask is applied for every field (i.e. forming a
logical-and). Otherwise ma.masked_equal is called. | [
"Mask",
"an",
"array",
"where",
"equal",
"to",
"a",
"given",
"(",
"missing",
")",
"value",
"."
] | python | train |
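A standalone sketch of the per-field masking idea above, using only NumPy (the argos helpers `array_is_structured` and `check_class` are inlined away):

```python
import numpy as np
import numpy.ma as ma

# ma.masked_equal cannot mask a structured array in one call,
# so each field is masked separately, exactly as in maskedEqual().
data = np.array([(1, -999.0), (2, 3.5)],
                dtype=[("id", "i4"), ("val", "f8")])
masked = ma.MaskedArray(data)
for field in masked.dtype.names:
    masked[field] = ma.masked_equal(masked[field], -999.0)

print(masked["val"])  # [-- 3.5] -> the -999.0 sentinel is now masked
```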
agile-geoscience/striplog | striplog/legend.py | https://github.com/agile-geoscience/striplog/blob/8033b673a151f96c29802b43763e863519a3124c/striplog/legend.py#L637-L682 | def to_csv(self):
"""
Renders a legend as a CSV string.
No arguments.
Returns:
str: The legend as a CSV.
"""
# We can't delegate this to Decor because we need to know the superset
# of all Decor properties. There may be lots of blanks.
header = []
component_header = []
for row in self:
for j in row.__dict__.keys():
if j == '_colour':
j = 'colour'
header.append(j)
for k in row.component.__dict__.keys():
component_header.append(k)
header = set(header)
component_header = set(component_header)
header.remove('component')
header_row = ''
has_colour = False
if 'colour' in header:
header_row += 'colour,'
header.remove('colour')
has_colour = True
for item in header:
header_row += item + ','
for item in component_header:
header_row += 'component ' + item + ','
# Now we have a header row! Phew.
# Next we'll go back over the legend and collect everything.
result = header_row.strip(',') + '\n'
for row in self:
if has_colour:
result += row.__dict__.get('_colour', '') + ','
for item in header:
result += str(row.__dict__.get(item, '')) + ','
for item in component_header:
result += str(row.component.__dict__.get(item, '')) + ','
result += '\n'
return result | [
"def",
"to_csv",
"(",
"self",
")",
":",
"# We can't delegate this to Decor because we need to know the superset",
"# of all Decor properties. There may be lots of blanks.",
"header",
"=",
"[",
"]",
"component_header",
"=",
"[",
"]",
"for",
"row",
"in",
"self",
":",
"for",
"j",
"in",
"row",
".",
"__dict__",
".",
"keys",
"(",
")",
":",
"if",
"j",
"==",
"'_colour'",
":",
"j",
"=",
"'colour'",
"header",
".",
"append",
"(",
"j",
")",
"for",
"k",
"in",
"row",
".",
"component",
".",
"__dict__",
".",
"keys",
"(",
")",
":",
"component_header",
".",
"append",
"(",
"k",
")",
"header",
"=",
"set",
"(",
"header",
")",
"component_header",
"=",
"set",
"(",
"component_header",
")",
"header",
".",
"remove",
"(",
"'component'",
")",
"header_row",
"=",
"''",
"if",
"'colour'",
"in",
"header",
":",
"header_row",
"+=",
"'colour,'",
"header",
".",
"remove",
"(",
"'colour'",
")",
"has_colour",
"=",
"True",
"for",
"item",
"in",
"header",
":",
"header_row",
"+=",
"item",
"+",
"','",
"for",
"item",
"in",
"component_header",
":",
"header_row",
"+=",
"'component '",
"+",
"item",
"+",
"','",
"# Now we have a header row! Phew.",
"# Next we'll go back over the legend and collect everything.",
"result",
"=",
"header_row",
".",
"strip",
"(",
"','",
")",
"+",
"'\\n'",
"for",
"row",
"in",
"self",
":",
"if",
"has_colour",
":",
"result",
"+=",
"row",
".",
"__dict__",
".",
"get",
"(",
"'_colour'",
",",
"''",
")",
"+",
"','",
"for",
"item",
"in",
"header",
":",
"result",
"+=",
"str",
"(",
"row",
".",
"__dict__",
".",
"get",
"(",
"item",
",",
"''",
")",
")",
"+",
"','",
"for",
"item",
"in",
"component_header",
":",
"result",
"+=",
"str",
"(",
"row",
".",
"component",
".",
"__dict__",
".",
"get",
"(",
"item",
",",
"''",
")",
")",
"+",
"','",
"result",
"+=",
"'\\n'",
"return",
"result"
] | Renders a legend as a CSV string.
No arguments.
Returns:
str: The legend as a CSV. | [
"Renders",
"a",
"legend",
"as",
"a",
"CSV",
"string",
"."
] | python | test |
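A hedged usage sketch; `Legend.builtin('NSDOE')` is assumed from striplog's tutorials and may differ by version:

```python
from striplog import Legend

legend = Legend.builtin('NSDOE')   # assumed builtin legend name
print(legend.to_csv())             # header row, then one line per Decor
```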
benedictpaten/sonLib | bioio.py | https://github.com/benedictpaten/sonLib/blob/1decb75bb439b70721ec776f685ce98e25217d26/bioio.py#L199-L213 | def popenCatch(command, stdinString=None):
"""Runs a command and return standard out.
"""
logger.debug("Running the command: %s" % command)
if stdinString != None:
process = subprocess.Popen(command, shell=True,
stdin=subprocess.PIPE, stdout=subprocess.PIPE, bufsize=-1)
output, nothing = process.communicate(stdinString)
else:
process = subprocess.Popen(command, shell=True, stdout=subprocess.PIPE, stderr=sys.stderr, bufsize=-1)
output, nothing = process.communicate() #process.stdout.read().strip()
sts = process.wait()
if sts != 0:
raise RuntimeError("Command: %s with stdin string '%s' exited with non-zero status %i" % (command, stdinString, sts))
return output | [
"def",
"popenCatch",
"(",
"command",
",",
"stdinString",
"=",
"None",
")",
":",
"logger",
".",
"debug",
"(",
"\"Running the command: %s\"",
"%",
"command",
")",
"if",
"stdinString",
"!=",
"None",
":",
"process",
"=",
"subprocess",
".",
"Popen",
"(",
"command",
",",
"shell",
"=",
"True",
",",
"stdin",
"=",
"subprocess",
".",
"PIPE",
",",
"stdout",
"=",
"subprocess",
".",
"PIPE",
",",
"bufsize",
"=",
"-",
"1",
")",
"output",
",",
"nothing",
"=",
"process",
".",
"communicate",
"(",
"stdinString",
")",
"else",
":",
"process",
"=",
"subprocess",
".",
"Popen",
"(",
"command",
",",
"shell",
"=",
"True",
",",
"stdout",
"=",
"subprocess",
".",
"PIPE",
",",
"stderr",
"=",
"sys",
".",
"stderr",
",",
"bufsize",
"=",
"-",
"1",
")",
"output",
",",
"nothing",
"=",
"process",
".",
"communicate",
"(",
")",
"#process.stdout.read().strip()",
"sts",
"=",
"process",
".",
"wait",
"(",
")",
"if",
"sts",
"!=",
"0",
":",
"raise",
"RuntimeError",
"(",
"\"Command: %s with stdin string '%s' exited with non-zero status %i\"",
"%",
"(",
"command",
",",
"stdinString",
",",
"sts",
")",
")",
"return",
"output"
] | Runs a command and returns standard out. | [
"Runs",
"a",
"command",
"and",
"return",
"standard",
"out",
"."
] | python | train |
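Example calls, assuming a POSIX shell with `echo` and `cat` on PATH (the import path is assumed from the repo layout):

```python
from sonLib.bioio import popenCatch   # import path assumed

print(popenCatch("echo hello"))              # -> "hello\n"
print(popenCatch("cat", stdinString="abc"))  # stdin piped through -> "abc"
```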
jwodder/javaproperties | javaproperties/reading.py | https://github.com/jwodder/javaproperties/blob/8b48f040305217ebeb80c98c4354691bbb01429b/javaproperties/reading.py#L38-L66 | def loads(s, object_pairs_hook=dict):
"""
Parse the contents of the string ``s`` as a simple line-oriented
``.properties`` file and return a `dict` of the key-value pairs.
``s`` may be either a text string or bytes string. If it is a bytes
string, its contents are decoded as Latin-1.
By default, the key-value pairs extracted from ``s`` are combined into a
`dict` with later occurrences of a key overriding previous occurrences of
the same key. To change this behavior, pass a callable as the
``object_pairs_hook`` argument; it will be called with one argument, a
generator of ``(key, value)`` pairs representing the key-value entries in
``s`` (including duplicates) in order of occurrence. `loads` will then
return the value returned by ``object_pairs_hook``.
.. versionchanged:: 0.5.0
Invalid ``\\uXXXX`` escape sequences will now cause an
`InvalidUEscapeError` to be raised
:param string s: the string from which to read the ``.properties`` document
:param callable object_pairs_hook: class or function for combining the
key-value pairs
:rtype: `dict` of text strings or the return value of ``object_pairs_hook``
:raises InvalidUEscapeError: if an invalid ``\\uXXXX`` escape sequence
occurs in the input
"""
fp = BytesIO(s) if isinstance(s, binary_type) else StringIO(s)
return load(fp, object_pairs_hook=object_pairs_hook) | [
"def",
"loads",
"(",
"s",
",",
"object_pairs_hook",
"=",
"dict",
")",
":",
"fp",
"=",
"BytesIO",
"(",
"s",
")",
"if",
"isinstance",
"(",
"s",
",",
"binary_type",
")",
"else",
"StringIO",
"(",
"s",
")",
"return",
"load",
"(",
"fp",
",",
"object_pairs_hook",
"=",
"object_pairs_hook",
")"
] | Parse the contents of the string ``s`` as a simple line-oriented
``.properties`` file and return a `dict` of the key-value pairs.
``s`` may be either a text string or bytes string. If it is a bytes
string, its contents are decoded as Latin-1.
By default, the key-value pairs extracted from ``s`` are combined into a
`dict` with later occurrences of a key overriding previous occurrences of
the same key. To change this behavior, pass a callable as the
``object_pairs_hook`` argument; it will be called with one argument, a
generator of ``(key, value)`` pairs representing the key-value entries in
``s`` (including duplicates) in order of occurrence. `loads` will then
return the value returned by ``object_pairs_hook``.
.. versionchanged:: 0.5.0
Invalid ``\\uXXXX`` escape sequences will now cause an
`InvalidUEscapeError` to be raised
:param string s: the string from which to read the ``.properties`` document
:param callable object_pairs_hook: class or function for combining the
key-value pairs
:rtype: `dict` of text strings or the return value of ``object_pairs_hook``
:raises InvalidUEscapeError: if an invalid ``\\uXXXX`` escape sequence
occurs in the input | [
"Parse",
"the",
"contents",
"of",
"the",
"string",
"s",
"as",
"a",
"simple",
"line",
"-",
"oriented",
".",
"properties",
"file",
"and",
"return",
"a",
"dict",
"of",
"the",
"key",
"-",
"value",
"pairs",
"."
] | python | train |
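A usage sketch showing the default dict behaviour versus a custom `object_pairs_hook`:

```python
from collections import OrderedDict
from javaproperties import loads

text = "key = first\nkey: second\n"
print(loads(text))                              # {'key': 'second'} - later entry wins
print(loads(text, object_pairs_hook=list))      # [('key', 'first'), ('key', 'second')]
print(loads(text, object_pairs_hook=OrderedDict))  # ordered, duplicates collapsed
```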
obulpathi/cdn-fastly-python | fastly/__init__.py | https://github.com/obulpathi/cdn-fastly-python/blob/db2564b047e8af4bce72c3b88d6c27d3d0291425/fastly/__init__.py#L965-L968 | def deactivate_version(self, service_id, version_number):
"""Deactivate the current version."""
content = self._fetch("/service/%s/version/%d/deactivate" % (service_id, version_number), method="PUT")
return FastlyVersion(self, content) | [
"def",
"deactivate_version",
"(",
"self",
",",
"service_id",
",",
"version_number",
")",
":",
"content",
"=",
"self",
".",
"_fetch",
"(",
"\"/service/%s/version/%d/deactivate\"",
"%",
"(",
"service_id",
",",
"version_number",
")",
",",
"method",
"=",
"\"PUT\"",
")",
"return",
"FastlyVersion",
"(",
"self",
",",
"content",
")"
] | Deactivate the current version. | [
"Deactivate",
"the",
"current",
"version",
"."
] | python | train |
AltSchool/dynamic-rest | dynamic_rest/viewsets.py | https://github.com/AltSchool/dynamic-rest/blob/5b0338c3dd8bc638d60c3bb92645857c5b89c920/dynamic_rest/viewsets.py#L94-L144 | def initialize_request(self, request, *args, **kargs):
"""
Override DRF initialize_request() method to swap request.GET
(which is aliased by request.query_params) with a mutable instance
of QueryParams, and to convert request MergeDict to a subclass of dict
for consistency (MergeDict is not a subclass of dict)
"""
def handle_encodings(request):
"""
WSGIRequest does not support Unicode values in the query string.
WSGIRequest handling has a history of drifting behavior between
combinations of Python versions, Django versions and DRF versions.
Django changed its QUERY_STRING handling here:
https://goo.gl/WThXo6. DRF 3.4.7 changed its behavior here:
https://goo.gl/0ojIIO.
"""
try:
return QueryParams(request.GET)
except UnicodeEncodeError:
pass
s = request.environ.get('QUERY_STRING', '')
try:
s = s.encode('utf-8')
except UnicodeDecodeError:
pass
return QueryParams(s)
request.GET = handle_encodings(request)
request = super(WithDynamicViewSetMixin, self).initialize_request(
request, *args, **kargs
)
try:
# Django<1.9, DRF<3.2
# MergeDict doesn't have the same API as dict.
# Django has deprecated MergeDict and DRF is moving away from
# using it - thus, we're comfortable replacing it with a QueryDict
# This will allow the data property to have normal dict methods.
from django.utils.datastructures import MergeDict
if isinstance(request._full_data, MergeDict):
data_as_dict = request.data.dicts[0]
for d in request.data.dicts[1:]:
data_as_dict.update(d)
request._full_data = data_as_dict
except:
pass
return request | [
"def",
"initialize_request",
"(",
"self",
",",
"request",
",",
"*",
"args",
",",
"*",
"*",
"kargs",
")",
":",
"def",
"handle_encodings",
"(",
"request",
")",
":",
"\"\"\"\n WSGIRequest does not support Unicode values in the query string.\n WSGIRequest handling has a history of drifting behavior between\n combinations of Python versions, Django versions and DRF versions.\n Django changed its QUERY_STRING handling here:\n https://goo.gl/WThXo6. DRF 3.4.7 changed its behavior here:\n https://goo.gl/0ojIIO.\n \"\"\"",
"try",
":",
"return",
"QueryParams",
"(",
"request",
".",
"GET",
")",
"except",
"UnicodeEncodeError",
":",
"pass",
"s",
"=",
"request",
".",
"environ",
".",
"get",
"(",
"'QUERY_STRING'",
",",
"''",
")",
"try",
":",
"s",
"=",
"s",
".",
"encode",
"(",
"'utf-8'",
")",
"except",
"UnicodeDecodeError",
":",
"pass",
"return",
"QueryParams",
"(",
"s",
")",
"request",
".",
"GET",
"=",
"handle_encodings",
"(",
"request",
")",
"request",
"=",
"super",
"(",
"WithDynamicViewSetMixin",
",",
"self",
")",
".",
"initialize_request",
"(",
"request",
",",
"*",
"args",
",",
"*",
"*",
"kargs",
")",
"try",
":",
"# Django<1.9, DRF<3.2",
"# MergeDict doesn't have the same API as dict.",
"# Django has deprecated MergeDict and DRF is moving away from",
"# using it - thus, were comfortable replacing it with a QueryDict",
"# This will allow the data property to have normal dict methods.",
"from",
"django",
".",
"utils",
".",
"datastructures",
"import",
"MergeDict",
"if",
"isinstance",
"(",
"request",
".",
"_full_data",
",",
"MergeDict",
")",
":",
"data_as_dict",
"=",
"request",
".",
"data",
".",
"dicts",
"[",
"0",
"]",
"for",
"d",
"in",
"request",
".",
"data",
".",
"dicts",
"[",
"1",
":",
"]",
":",
"data_as_dict",
".",
"update",
"(",
"d",
")",
"request",
".",
"_full_data",
"=",
"data_as_dict",
"except",
":",
"pass",
"return",
"request"
] | Override DRF initialize_request() method to swap request.GET
(which is aliased by request.query_params) with a mutable instance
of QueryParams, and to convert request MergeDict to a subclass of dict
for consistency (MergeDict is not a subclass of dict) | [
"Override",
"DRF",
"initialize_request",
"()",
"method",
"to",
"swap",
"request",
".",
"GET",
"(",
"which",
"is",
"aliased",
"by",
"request",
".",
"query_params",
")",
"with",
"a",
"mutable",
"instance",
"of",
"QueryParams",
"and",
"to",
"convert",
"request",
"MergeDict",
"to",
"a",
"subclass",
"of",
"dict",
"for",
"consistency",
"(",
"MergeDict",
"is",
"not",
"a",
"subclass",
"of",
"dict",
")"
] | python | train |
inspirehep/harvesting-kit | harvestingkit/ftp_utils.py | https://github.com/inspirehep/harvesting-kit/blob/33a7f8aa9dade1d863110c6d8b27dfd955cb471f/harvestingkit/ftp_utils.py#L143-L162 | def ls(self, folder=''):
""" Lists the files and folders of a specific directory
default is the current working directory.
:param folder: the folder to be listed.
:type folder: string
:returns: a tuple with the list of files in the folder
and the list of subfolders in the folder.
"""
current_folder = self._ftp.pwd()
self.cd(folder)
contents = []
self._ftp.retrlines('LIST', lambda a: contents.append(a))
files = filter(lambda a: a.split()[0].startswith('-'), contents)
folders = filter(lambda a: a.split()[0].startswith('d'), contents)
files = map(lambda a: ' '.join(a.split()[8:]), files)
folders = map(lambda a: ' '.join(a.split()[8:]), folders)
self._ftp.cwd(current_folder)
return files, folders | [
"def",
"ls",
"(",
"self",
",",
"folder",
"=",
"''",
")",
":",
"current_folder",
"=",
"self",
".",
"_ftp",
".",
"pwd",
"(",
")",
"self",
".",
"cd",
"(",
"folder",
")",
"contents",
"=",
"[",
"]",
"self",
".",
"_ftp",
".",
"retrlines",
"(",
"'LIST'",
",",
"lambda",
"a",
":",
"contents",
".",
"append",
"(",
"a",
")",
")",
"files",
"=",
"filter",
"(",
"lambda",
"a",
":",
"a",
".",
"split",
"(",
")",
"[",
"0",
"]",
".",
"startswith",
"(",
"'-'",
")",
",",
"contents",
")",
"folders",
"=",
"filter",
"(",
"lambda",
"a",
":",
"a",
".",
"split",
"(",
")",
"[",
"0",
"]",
".",
"startswith",
"(",
"'d'",
")",
",",
"contents",
")",
"files",
"=",
"map",
"(",
"lambda",
"a",
":",
"' '",
".",
"join",
"(",
"a",
".",
"split",
"(",
")",
"[",
"8",
":",
"]",
")",
",",
"files",
")",
"folders",
"=",
"map",
"(",
"lambda",
"a",
":",
"' '",
".",
"join",
"(",
"a",
".",
"split",
"(",
")",
"[",
"8",
":",
"]",
")",
",",
"folders",
")",
"self",
".",
"_ftp",
".",
"cwd",
"(",
"current_folder",
")",
"return",
"files",
",",
"folders"
] | Lists the files and folders of a specific directory;
default is the current working directory.
:param folder: the folder to be listed.
:type folder: string
:returns: a tuple with the list of files in the folder
and the list of subfolders in the folder. | [
"Lists",
"the",
"files",
"and",
"folders",
"of",
"a",
"specific",
"directory",
"default",
"is",
"the",
"current",
"working",
"directory",
"."
] | python | valid |
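The LIST parsing above relies on Unix-style listing lines: field 0 carries the type flag, and the name starts at field 9, so spaces in names survive. A standalone sketch:

```python
lines = [
    "-rw-r--r--  1 ftp ftp  1024 Jan 01 12:00 report.xml",
    "drwxr-xr-x  2 ftp ftp  4096 Jan 01 12:00 old issues",
]
files   = [" ".join(l.split()[8:]) for l in lines if l.split()[0].startswith("-")]
folders = [" ".join(l.split()[8:]) for l in lines if l.split()[0].startswith("d")]
print(files)    # ['report.xml']
print(folders)  # ['old issues'] -> the embedded space is preserved
```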
urinieto/msaf | msaf/base.py | https://github.com/urinieto/msaf/blob/9dbb57d77a1310465a65cc40f1641d083ca74385/msaf/base.py#L112-L140 | def estimate_beats(self):
"""Estimates the beats using librosa.
Returns
-------
times: np.array
Times of estimated beats in seconds.
frames: np.array
Frame indices of estimated beats.
"""
# Compute harmonic-percussive source separation if needed
if self._audio_percussive is None:
self._audio_harmonic, self._audio_percussive = self.compute_HPSS()
# Compute beats
tempo, frames = librosa.beat.beat_track(
y=self._audio_percussive, sr=self.sr,
hop_length=self.hop_length)
# To times
times = librosa.frames_to_time(frames, sr=self.sr,
hop_length=self.hop_length)
# TODO: Is this really necessary?
if len(times) > 0 and times[0] == 0:
times = times[1:]
frames = frames[1:]
return times, frames | [
"def",
"estimate_beats",
"(",
"self",
")",
":",
"# Compute harmonic-percussive source separation if needed",
"if",
"self",
".",
"_audio_percussive",
"is",
"None",
":",
"self",
".",
"_audio_harmonic",
",",
"self",
".",
"_audio_percussive",
"=",
"self",
".",
"compute_HPSS",
"(",
")",
"# Compute beats",
"tempo",
",",
"frames",
"=",
"librosa",
".",
"beat",
".",
"beat_track",
"(",
"y",
"=",
"self",
".",
"_audio_percussive",
",",
"sr",
"=",
"self",
".",
"sr",
",",
"hop_length",
"=",
"self",
".",
"hop_length",
")",
"# To times",
"times",
"=",
"librosa",
".",
"frames_to_time",
"(",
"frames",
",",
"sr",
"=",
"self",
".",
"sr",
",",
"hop_length",
"=",
"self",
".",
"hop_length",
")",
"# TODO: Is this really necessary?",
"if",
"len",
"(",
"times",
")",
">",
"0",
"and",
"times",
"[",
"0",
"]",
"==",
"0",
":",
"times",
"=",
"times",
"[",
"1",
":",
"]",
"frames",
"=",
"frames",
"[",
"1",
":",
"]",
"return",
"times",
",",
"frames"
] | Estimates the beats using librosa.
Returns
-------
times: np.array
Times of estimated beats in seconds.
frames: np.array
Frame indices of estimated beats. | [
"Estimates",
"the",
"beats",
"using",
"librosa",
"."
] | python | test |
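The same steps as a standalone librosa sketch (the audio path is hypothetical):

```python
import librosa

y, sr = librosa.load("song.wav")            # hypothetical input file
_, y_percussive = librosa.effects.hpss(y)   # harmonic/percussive separation
tempo, frames = librosa.beat.beat_track(y=y_percussive, sr=sr, hop_length=512)
times = librosa.frames_to_time(frames, sr=sr, hop_length=512)
print(tempo, times[:5])
```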
astrorafael/twisted-mqtt | examples/subscriber.py | https://github.com/astrorafael/twisted-mqtt/blob/5b322f7c2b82a502b1e1b70703ae45f1f668d07d/examples/subscriber.py#L72-L87 | def connectToBroker(self, protocol):
'''
Connect to MQTT broker
'''
self.protocol = protocol
self.protocol.onPublish = self.onPublish
self.protocol.onDisconnection = self.onDisconnection
self.protocol.setWindowSize(3)
try:
yield self.protocol.connect("TwistedMQTT-subs", keepalive=60)
yield self.subscribe()
except Exception as e:
log.error("Connecting to {broker} raised {excp!s}",
broker=BROKER, excp=e)
else:
log.info("Connected and subscribed to {broker}", broker=BROKER) | [
"def",
"connectToBroker",
"(",
"self",
",",
"protocol",
")",
":",
"self",
".",
"protocol",
"=",
"protocol",
"self",
".",
"protocol",
".",
"onPublish",
"=",
"self",
".",
"onPublish",
"self",
".",
"protocol",
".",
"onDisconnection",
"=",
"self",
".",
"onDisconnection",
"self",
".",
"protocol",
".",
"setWindowSize",
"(",
"3",
")",
"try",
":",
"yield",
"self",
".",
"protocol",
".",
"connect",
"(",
"\"TwistedMQTT-subs\"",
",",
"keepalive",
"=",
"60",
")",
"yield",
"self",
".",
"subscribe",
"(",
")",
"except",
"Exception",
"as",
"e",
":",
"log",
".",
"error",
"(",
"\"Connecting to {broker} raised {excp!s}\"",
",",
"broker",
"=",
"BROKER",
",",
"excp",
"=",
"e",
")",
"else",
":",
"log",
".",
"info",
"(",
"\"Connected and subscribed to {broker}\"",
",",
"broker",
"=",
"BROKER",
")"
] | Connect to MQTT broker | [
"Connect",
"to",
"MQTT",
"broker"
] | python | test |
openstack/networking-arista | networking_arista/common/db_lib.py | https://github.com/openstack/networking-arista/blob/07ce6b1fc62ff74308a6eabfc4cc0ee09fb7b0fe/networking_arista/common/db_lib.py#L204-L216 | def get_tenants(tenant_id=None):
"""Returns list of all project/tenant ids that may be relevant on CVX"""
if tenant_id == '':
return []
session = db.get_reader_session()
project_ids = set()
with session.begin():
for m in [models_v2.Network, models_v2.Port]:
q = session.query(m.project_id).filter(m.project_id != '')
if tenant_id:
q = q.filter(m.project_id == tenant_id)
project_ids.update(pid[0] for pid in q.distinct())
return [{'project_id': project_id} for project_id in project_ids] | [
"def",
"get_tenants",
"(",
"tenant_id",
"=",
"None",
")",
":",
"if",
"tenant_id",
"==",
"''",
":",
"return",
"[",
"]",
"session",
"=",
"db",
".",
"get_reader_session",
"(",
")",
"project_ids",
"=",
"set",
"(",
")",
"with",
"session",
".",
"begin",
"(",
")",
":",
"for",
"m",
"in",
"[",
"models_v2",
".",
"Network",
",",
"models_v2",
".",
"Port",
"]",
":",
"q",
"=",
"session",
".",
"query",
"(",
"m",
".",
"project_id",
")",
".",
"filter",
"(",
"m",
".",
"project_id",
"!=",
"''",
")",
"if",
"tenant_id",
":",
"q",
"=",
"q",
".",
"filter",
"(",
"m",
".",
"project_id",
"==",
"tenant_id",
")",
"project_ids",
".",
"update",
"(",
"pid",
"[",
"0",
"]",
"for",
"pid",
"in",
"q",
".",
"distinct",
"(",
")",
")",
"return",
"[",
"{",
"'project_id'",
":",
"project_id",
"}",
"for",
"project_id",
"in",
"project_ids",
"]"
] | Returns list of all project/tenant ids that may be relevant on CVX | [
"Returns",
"list",
"of",
"all",
"project",
"/",
"tenant",
"ids",
"that",
"may",
"be",
"relevant",
"on",
"CVX"
] | python | train |
Skype4Py/Skype4Py | Skype4Py/utils.py | https://github.com/Skype4Py/Skype4Py/blob/c48d83f7034109fe46315d45a066126002c6e0d4/Skype4Py/utils.py#L493-L507 | def _AddEvents(cls, Class):
"""Adds events based on the attributes of the given ``...Events`` class.
:Parameters:
Class : class
An `...Events` class whose methods define events that may occur in the
instances of the current class.
"""
def make_event(event):
return property(lambda self: self._GetDefaultEventHandler(event),
lambda self, Value: self._SetDefaultEventHandler(event, Value))
for event in dir(Class):
if not event.startswith('_'):
setattr(cls, 'On%s' % event, make_event(event))
cls._EventNames.append(event) | [
"def",
"_AddEvents",
"(",
"cls",
",",
"Class",
")",
":",
"def",
"make_event",
"(",
"event",
")",
":",
"return",
"property",
"(",
"lambda",
"self",
":",
"self",
".",
"_GetDefaultEventHandler",
"(",
"event",
")",
",",
"lambda",
"self",
",",
"Value",
":",
"self",
".",
"_SetDefaultEventHandler",
"(",
"event",
",",
"Value",
")",
")",
"for",
"event",
"in",
"dir",
"(",
"Class",
")",
":",
"if",
"not",
"event",
".",
"startswith",
"(",
"'_'",
")",
":",
"setattr",
"(",
"cls",
",",
"'On%s'",
"%",
"event",
",",
"make_event",
"(",
"event",
")",
")",
"cls",
".",
"_EventNames",
".",
"append",
"(",
"event",
")"
] | Adds events based on the attributes of the given ``...Events`` class.
:Parameters:
Class : class
An `...Events` class whose methods define events that may occur in the
instances of the current class. | [
"Adds",
"events",
"based",
"on",
"the",
"attributes",
"of",
"the",
"given",
"...",
"Events",
"class",
".",
":",
"Parameters",
":",
"Class",
":",
"class",
"An",
"...",
"Events",
"class",
"whose",
"methods",
"define",
"events",
"that",
"may",
"occur",
"in",
"the",
"instances",
"of",
"the",
"current",
"class",
"."
] | python | train |
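The inner `make_event` factory exists to freeze `event` per iteration; lambdas created directly in the loop would all close over the final loop value. A minimal standalone illustration:

```python
class Obj(object):
    def __init__(self):
        self._handlers = {}

def make_event(event):   # freezes `event` for this one property
    return property(lambda self: self._handlers.get(event),
                    lambda self, value: self._handlers.__setitem__(event, value))

for event in ("Connect", "Disconnect"):
    setattr(Obj, "On%s" % event, make_event(event))

o = Obj()
o.OnConnect = "handler-a"
print(o.OnConnect)     # 'handler-a'
print(o.OnDisconnect)  # None - an independent slot, not the last loop value
```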
jepegit/cellpy | cellpy/utils/batch_tools/engines.py | https://github.com/jepegit/cellpy/blob/9f4a84cdd11f72cfa02cda8c2d7b5174abbb7370/cellpy/utils/batch_tools/engines.py#L87-L131 | def simple_db_engine(reader=None, srnos=None):
"""engine that gets values from the simple excel 'db'"""
if reader is None:
reader = dbreader.Reader()
logger.debug("No reader provided. Creating one myself.")
info_dict = dict()
info_dict["filenames"] = [reader.get_cell_name(srno) for srno in srnos]
info_dict["masses"] = [reader.get_mass(srno) for srno in srnos]
info_dict["total_masses"] = [reader.get_total_mass(srno) for srno in srnos]
info_dict["loadings"] = [reader.get_loading(srno) for srno in srnos]
info_dict["fixed"] = [reader.inspect_hd5f_fixed(srno) for srno in srnos]
info_dict["labels"] = [reader.get_label(srno) for srno in srnos]
info_dict["cell_type"] = [reader.get_cell_type(srno) for srno in srnos]
info_dict["raw_file_names"] = []
info_dict["cellpy_file_names"] = []
logger.debug("created info-dict")
for key in list(info_dict.keys()):
logger.debug("%s: %s" % (key, str(info_dict[key])))
_groups = [reader.get_group(srno) for srno in srnos]
logger.debug(">\ngroups: %s" % str(_groups))
groups = helper.fix_groups(_groups)
info_dict["groups"] = groups
my_timer_start = time.time()
filename_cache = []
info_dict = helper.find_files(info_dict, filename_cache)
my_timer_end = time.time()
if (my_timer_end - my_timer_start) > 5.0:
logger.info(
"The function _find_files was very slow. "
"Save your info_df so you don't have to run it again!"
)
info_df = pd.DataFrame(info_dict)
info_df = info_df.sort_values(["groups", "filenames"])
info_df = helper.make_unique_groups(info_df)
info_df["labels"] = info_df["filenames"].apply(helper.create_labels)
info_df.set_index("filenames", inplace=True)
return info_df | [
"def",
"simple_db_engine",
"(",
"reader",
"=",
"None",
",",
"srnos",
"=",
"None",
")",
":",
"if",
"reader",
"is",
"None",
":",
"reader",
"=",
"dbreader",
".",
"Reader",
"(",
")",
"logger",
".",
"debug",
"(",
"\"No reader provided. Creating one myself.\"",
")",
"info_dict",
"=",
"dict",
"(",
")",
"info_dict",
"[",
"\"filenames\"",
"]",
"=",
"[",
"reader",
".",
"get_cell_name",
"(",
"srno",
")",
"for",
"srno",
"in",
"srnos",
"]",
"info_dict",
"[",
"\"masses\"",
"]",
"=",
"[",
"reader",
".",
"get_mass",
"(",
"srno",
")",
"for",
"srno",
"in",
"srnos",
"]",
"info_dict",
"[",
"\"total_masses\"",
"]",
"=",
"[",
"reader",
".",
"get_total_mass",
"(",
"srno",
")",
"for",
"srno",
"in",
"srnos",
"]",
"info_dict",
"[",
"\"loadings\"",
"]",
"=",
"[",
"reader",
".",
"get_loading",
"(",
"srno",
")",
"for",
"srno",
"in",
"srnos",
"]",
"info_dict",
"[",
"\"fixed\"",
"]",
"=",
"[",
"reader",
".",
"inspect_hd5f_fixed",
"(",
"srno",
")",
"for",
"srno",
"in",
"srnos",
"]",
"info_dict",
"[",
"\"labels\"",
"]",
"=",
"[",
"reader",
".",
"get_label",
"(",
"srno",
")",
"for",
"srno",
"in",
"srnos",
"]",
"info_dict",
"[",
"\"cell_type\"",
"]",
"=",
"[",
"reader",
".",
"get_cell_type",
"(",
"srno",
")",
"for",
"srno",
"in",
"srnos",
"]",
"info_dict",
"[",
"\"raw_file_names\"",
"]",
"=",
"[",
"]",
"info_dict",
"[",
"\"cellpy_file_names\"",
"]",
"=",
"[",
"]",
"logger",
".",
"debug",
"(",
"\"created info-dict\"",
")",
"for",
"key",
"in",
"list",
"(",
"info_dict",
".",
"keys",
"(",
")",
")",
":",
"logger",
".",
"debug",
"(",
"\"%s: %s\"",
"%",
"(",
"key",
",",
"str",
"(",
"info_dict",
"[",
"key",
"]",
")",
")",
")",
"_groups",
"=",
"[",
"reader",
".",
"get_group",
"(",
"srno",
")",
"for",
"srno",
"in",
"srnos",
"]",
"logger",
".",
"debug",
"(",
"\">\\ngroups: %s\"",
"%",
"str",
"(",
"_groups",
")",
")",
"groups",
"=",
"helper",
".",
"fix_groups",
"(",
"_groups",
")",
"info_dict",
"[",
"\"groups\"",
"]",
"=",
"groups",
"my_timer_start",
"=",
"time",
".",
"time",
"(",
")",
"filename_cache",
"=",
"[",
"]",
"info_dict",
"=",
"helper",
".",
"find_files",
"(",
"info_dict",
",",
"filename_cache",
")",
"my_timer_end",
"=",
"time",
".",
"time",
"(",
")",
"if",
"(",
"my_timer_end",
"-",
"my_timer_start",
")",
">",
"5.0",
":",
"logger",
".",
"info",
"(",
"\"The function _find_files was very slow. \"",
"\"Save your info_df so you don't have to run it again!\"",
")",
"info_df",
"=",
"pd",
".",
"DataFrame",
"(",
"info_dict",
")",
"info_df",
"=",
"info_df",
".",
"sort_values",
"(",
"[",
"\"groups\"",
",",
"\"filenames\"",
"]",
")",
"info_df",
"=",
"helper",
".",
"make_unique_groups",
"(",
"info_df",
")",
"info_df",
"[",
"\"labels\"",
"]",
"=",
"info_df",
"[",
"\"filenames\"",
"]",
".",
"apply",
"(",
"helper",
".",
"create_labels",
")",
"info_df",
".",
"set_index",
"(",
"\"filenames\"",
",",
"inplace",
"=",
"True",
")",
"return",
"info_df"
] | engine that gets values from the simple excel 'db | [
"engine",
"that",
"gets",
"values",
"from",
"the",
"simple",
"excel",
"db"
] | python | train |
django-auth-ldap/django-auth-ldap | django_auth_ldap/backend.py | https://github.com/django-auth-ldap/django-auth-ldap/blob/9ce3c2825527f8faa1793958b041816e63d839af/django_auth_ldap/backend.py#L688-L742 | def _normalize_mirror_settings(self):
"""
Validates the group mirroring settings and converts them as necessary.
"""
def malformed_mirror_groups_except():
return ImproperlyConfigured(
"{} must be a collection of group names".format(
self.settings._name("MIRROR_GROUPS_EXCEPT")
)
)
def malformed_mirror_groups():
return ImproperlyConfigured(
"{} must be True or a collection of group names".format(
self.settings._name("MIRROR_GROUPS")
)
)
mge = self.settings.MIRROR_GROUPS_EXCEPT
mg = self.settings.MIRROR_GROUPS
if mge is not None:
if isinstance(mge, (set, frozenset)):
pass
elif isinstance(mge, (list, tuple)):
mge = self.settings.MIRROR_GROUPS_EXCEPT = frozenset(mge)
else:
raise malformed_mirror_groups_except()
if not all(isinstance(value, str) for value in mge):
raise malformed_mirror_groups_except()
elif mg:
warnings.warn(
ConfigurationWarning(
"Ignoring {} in favor of {}".format(
self.settings._name("MIRROR_GROUPS"),
self.settings._name("MIRROR_GROUPS_EXCEPT"),
)
)
)
mg = self.settings.MIRROR_GROUPS = None
if mg is not None:
if isinstance(mg, (bool, set, frozenset)):
pass
elif isinstance(mg, (list, tuple)):
mg = self.settings.MIRROR_GROUPS = frozenset(mg)
else:
raise malformed_mirror_groups()
if isinstance(mg, (set, frozenset)) and (
not all(isinstance(value, str) for value in mg)
):
raise malformed_mirror_groups() | [
"def",
"_normalize_mirror_settings",
"(",
"self",
")",
":",
"def",
"malformed_mirror_groups_except",
"(",
")",
":",
"return",
"ImproperlyConfigured",
"(",
"\"{} must be a collection of group names\"",
".",
"format",
"(",
"self",
".",
"settings",
".",
"_name",
"(",
"\"MIRROR_GROUPS_EXCEPT\"",
")",
")",
")",
"def",
"malformed_mirror_groups",
"(",
")",
":",
"return",
"ImproperlyConfigured",
"(",
"\"{} must be True or a collection of group names\"",
".",
"format",
"(",
"self",
".",
"settings",
".",
"_name",
"(",
"\"MIRROR_GROUPS\"",
")",
")",
")",
"mge",
"=",
"self",
".",
"settings",
".",
"MIRROR_GROUPS_EXCEPT",
"mg",
"=",
"self",
".",
"settings",
".",
"MIRROR_GROUPS",
"if",
"mge",
"is",
"not",
"None",
":",
"if",
"isinstance",
"(",
"mge",
",",
"(",
"set",
",",
"frozenset",
")",
")",
":",
"pass",
"elif",
"isinstance",
"(",
"mge",
",",
"(",
"list",
",",
"tuple",
")",
")",
":",
"mge",
"=",
"self",
".",
"settings",
".",
"MIRROR_GROUPS_EXCEPT",
"=",
"frozenset",
"(",
"mge",
")",
"else",
":",
"raise",
"malformed_mirror_groups_except",
"(",
")",
"if",
"not",
"all",
"(",
"isinstance",
"(",
"value",
",",
"str",
")",
"for",
"value",
"in",
"mge",
")",
":",
"raise",
"malformed_mirror_groups_except",
"(",
")",
"elif",
"mg",
":",
"warnings",
".",
"warn",
"(",
"ConfigurationWarning",
"(",
"\"Ignoring {} in favor of {}\"",
".",
"format",
"(",
"self",
".",
"settings",
".",
"_name",
"(",
"\"MIRROR_GROUPS\"",
")",
",",
"self",
".",
"settings",
".",
"_name",
"(",
"\"MIRROR_GROUPS_EXCEPT\"",
")",
",",
")",
")",
")",
"mg",
"=",
"self",
".",
"settings",
".",
"MIRROR_GROUPS",
"=",
"None",
"if",
"mg",
"is",
"not",
"None",
":",
"if",
"isinstance",
"(",
"mg",
",",
"(",
"bool",
",",
"set",
",",
"frozenset",
")",
")",
":",
"pass",
"elif",
"isinstance",
"(",
"mg",
",",
"(",
"list",
",",
"tuple",
")",
")",
":",
"mg",
"=",
"self",
".",
"settings",
".",
"MIRROR_GROUPS",
"=",
"frozenset",
"(",
"mg",
")",
"else",
":",
"raise",
"malformed_mirror_groups",
"(",
")",
"if",
"isinstance",
"(",
"mg",
",",
"(",
"set",
",",
"frozenset",
")",
")",
"and",
"(",
"not",
"all",
"(",
"isinstance",
"(",
"value",
",",
"str",
")",
"for",
"value",
"in",
"mg",
")",
")",
":",
"raise",
"malformed_mirror_groups",
"(",
")"
] | Validates the group mirroring settings and converts them as necessary. | [
"Validates",
"the",
"group",
"mirroring",
"settings",
"and",
"converts",
"them",
"as",
"necessary",
"."
] | python | train |
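In settings terms, the shapes this validation accepts look like the sketch below (setting names per django-auth-ldap's documentation; use only one of the `MIRROR_GROUPS` forms at a time):

```python
# Any one of these forms passes _normalize_mirror_settings:
AUTH_LDAP_MIRROR_GROUPS = True                     # mirror every group
AUTH_LDAP_MIRROR_GROUPS = ["staff", "admins"]      # list/tuple -> frozenset
AUTH_LDAP_MIRROR_GROUPS_EXCEPT = {"noisy-group"}   # wins over MIRROR_GROUPS
```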
proycon/pynlpl | pynlpl/formats/sonar.py | https://github.com/proycon/pynlpl/blob/7707f69a91caaa6cde037f0d0379f1d42500a68b/pynlpl/formats/sonar.py#L235-L244 | def validate(self, formats_dir="../formats/"):
"""checks if the document is valid"""
#TODO: download XSD from web
if self.inline:
xmlschema = ElementTree.XMLSchema(ElementTree.parse(StringIO("\n".join(open(formats_dir+"dcoi-dsc.xsd").readlines()))))
xmlschema.assertValid(self.tree)
#return xmlschema.validate(self)
else:
xmlschema = ElementTree.XMLSchema(ElementTree.parse(StringIO("\n".join(open(formats_dir+"dutchsemcor-standalone.xsd").readlines()))))
xmlschema.assertValid(self.tree) | [
"def",
"validate",
"(",
"self",
",",
"formats_dir",
"=",
"\"../formats/\"",
")",
":",
"#TODO: download XSD from web",
"if",
"self",
".",
"inline",
":",
"xmlschema",
"=",
"ElementTree",
".",
"XMLSchema",
"(",
"ElementTree",
".",
"parse",
"(",
"StringIO",
"(",
"\"\\n\"",
".",
"join",
"(",
"open",
"(",
"formats_dir",
"+",
"\"dcoi-dsc.xsd\"",
")",
".",
"readlines",
"(",
")",
")",
")",
")",
")",
"xmlschema",
".",
"assertValid",
"(",
"self",
".",
"tree",
")",
"#return xmlschema.validate(self)",
"else",
":",
"xmlschema",
"=",
"ElementTree",
".",
"XMLSchema",
"(",
"ElementTree",
".",
"parse",
"(",
"StringIO",
"(",
"\"\\n\"",
".",
"join",
"(",
"open",
"(",
"formats_dir",
"+",
"\"dutchsemcor-standalone.xsd\"",
")",
".",
"readlines",
"(",
")",
")",
")",
")",
")",
"xmlschema",
".",
"assertValid",
"(",
"self",
".",
"tree",
")"
] | checks if the document is valid | [
"checks",
"if",
"the",
"document",
"is",
"valid"
] | python | train |
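The `ElementTree` name above is lxml.etree; the generic validation pattern it wraps is (file paths hypothetical):

```python
from lxml import etree

schema = etree.XMLSchema(etree.parse("dcoi-dsc.xsd"))  # hypothetical path
doc = etree.parse("document.xml")                      # hypothetical path
schema.assertValid(doc)        # raises etree.DocumentInvalid on failure
print(schema.validate(doc))    # or: non-raising boolean check
```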
faucamp/python-gsmmodem | gsmmodem/modem.py | https://github.com/faucamp/python-gsmmodem/blob/834c68b1387ca2c91e2210faa8f75526b39723b5/gsmmodem/modem.py#L1088-L1112 | def deleteMultipleStoredSms(self, delFlag=4, memory=None):
""" Deletes all SMS messages that have the specified read status.
The messages are read from the memory set by the "memory" parameter.
The value of the "delFlag" parameter is the same as the "DelFlag" parameter of the +CMGD command:
1: Delete All READ messages
2: Delete All READ and SENT messages
3: Delete All READ, SENT and UNSENT messages
4: Delete All messages (this is the default)
:param delFlag: Controls what type of messages to delete; see description above.
:type delFlag: int
:param memory: The memory type to delete from. If None, use the current default SMS read/delete memory
:type memory: str or None
:param delete: If True, delete returned messages from the device/SIM card
:type delete: bool
:raise ValueError: if "delFlag" is not in range [1,4]
:raise CommandError: if unable to delete the stored messages
"""
if 0 < delFlag <= 4:
self._setSmsMemory(readDelete=memory)
self.write('AT+CMGD=1,{0}'.format(delFlag))
else:
raise ValueError('"delFlag" must be in range [1,4]') | [
"def",
"deleteMultipleStoredSms",
"(",
"self",
",",
"delFlag",
"=",
"4",
",",
"memory",
"=",
"None",
")",
":",
"if",
"0",
"<",
"delFlag",
"<=",
"4",
":",
"self",
".",
"_setSmsMemory",
"(",
"readDelete",
"=",
"memory",
")",
"self",
".",
"write",
"(",
"'AT+CMGD=1,{0}'",
".",
"format",
"(",
"delFlag",
")",
")",
"else",
":",
"raise",
"ValueError",
"(",
"'\"delFlag\" must be in range [1,4]'",
")"
] | Deletes all SMS messages that have the specified read status.
The messages are read from the memory set by the "memory" parameter.
The value of the "delFlag" parameter is the same as the "DelFlag" parameter of the +CMGD command:
1: Delete All READ messages
2: Delete All READ and SENT messages
3: Delete All READ, SENT and UNSENT messages
4: Delete All messages (this is the default)
:param delFlag: Controls what type of messages to delete; see description above.
:type delFlag: int
:param memory: The memory type to delete from. If None, use the current default SMS read/delete memory
:type memory: str or None
:param delete: If True, delete returned messages from the device/SIM card
:type delete: bool
:raise ValueError: if "delFlag" is not in range [1,4]
:raise CommandError: if unable to delete the stored messages | [
"Deletes",
"all",
"SMS",
"messages",
"that",
"have",
"the",
"specified",
"read",
"status",
".",
"The",
"messages",
"are",
"read",
"from",
"the",
"memory",
"set",
"by",
"the",
"memory",
"parameter",
".",
"The",
"value",
"of",
"the",
"delFlag",
"paramater",
"is",
"the",
"same",
"as",
"the",
"DelFlag",
"parameter",
"of",
"the",
"+",
"CMGD",
"command",
":",
"1",
":",
"Delete",
"All",
"READ",
"messages",
"2",
":",
"Delete",
"All",
"READ",
"and",
"SENT",
"messages",
"3",
":",
"Delete",
"All",
"READ",
"SENT",
"and",
"UNSENT",
"messages",
"4",
":",
"Delete",
"All",
"messages",
"(",
"this",
"is",
"the",
"default",
")",
":",
"param",
"delFlag",
":",
"Controls",
"what",
"type",
"of",
"messages",
"to",
"delete",
";",
"see",
"description",
"above",
".",
":",
"type",
"delFlag",
":",
"int",
":",
"param",
"memory",
":",
"The",
"memory",
"type",
"to",
"delete",
"from",
".",
"If",
"None",
"use",
"the",
"current",
"default",
"SMS",
"read",
"/",
"delete",
"memory",
":",
"type",
"memory",
":",
"str",
"or",
"None",
":",
"param",
"delete",
":",
"If",
"True",
"delete",
"returned",
"messages",
"from",
"the",
"device",
"/",
"SIM",
"card",
":",
"type",
"delete",
":",
"bool",
":",
"raise",
"ValueErrror",
":",
"if",
"delFlag",
"is",
"not",
"in",
"range",
"[",
"1",
"4",
"]",
":",
"raise",
"CommandError",
":",
"if",
"unable",
"to",
"delete",
"the",
"stored",
"messages"
] | python | train |
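A usage sketch, assuming a serial modem is attached (port and baud rate are hypothetical; constructor and `connect()` per python-gsmmodem's README):

```python
from gsmmodem.modem import GsmModem

modem = GsmModem('/dev/ttyUSB0', 115200)    # hypothetical port and baud rate
modem.connect()
modem.deleteMultipleStoredSms()             # delFlag=4: delete everything
modem.deleteMultipleStoredSms(delFlag=1)    # only READ messages
modem.deleteMultipleStoredSms(memory='SM')  # target SIM storage explicitly
```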
CalebBell/thermo | thermo/viscosity.py | https://github.com/CalebBell/thermo/blob/3857ed023a3e64fd3039a32d53576c24990ef1c3/thermo/viscosity.py#L1503-L1538 | def calculate(self, T, method):
r'''Method to calculate low-pressure gas viscosity at
temperature `T` with a given method.
This method has no exception handling; see `T_dependent_property`
for that.
Parameters
----------
T : float
Temperature of the gas, [K]
method : str
Name of the method to use
Returns
-------
mu : float
Viscosity of the gas at T and a low pressure, [Pa*s]
'''
if method == GHARAGHEIZI:
mu = Gharagheizi_gas_viscosity(T, self.Tc, self.Pc, self.MW)
elif method == COOLPROP:
mu = CoolProp_T_dependent_property(T, self.CASRN, 'V', 'g')
elif method == DIPPR_PERRY_8E:
mu = EQ102(T, *self.Perrys2_312_coeffs)
elif method == VDI_PPDS:
mu = horner(self.VDI_PPDS_coeffs, T)
elif method == YOON_THODOS:
mu = Yoon_Thodos(T, self.Tc, self.Pc, self.MW)
elif method == STIEL_THODOS:
mu = Stiel_Thodos(T, self.Tc, self.Pc, self.MW)
elif method == LUCAS_GAS:
mu = lucas_gas(T, self.Tc, self.Pc, self.Zc, self.MW, self.dipole, CASRN=self.CASRN)
elif method in self.tabular_data:
mu = self.interpolate(T, method)
return mu | [
"def",
"calculate",
"(",
"self",
",",
"T",
",",
"method",
")",
":",
"if",
"method",
"==",
"GHARAGHEIZI",
":",
"mu",
"=",
"Gharagheizi_gas_viscosity",
"(",
"T",
",",
"self",
".",
"Tc",
",",
"self",
".",
"Pc",
",",
"self",
".",
"MW",
")",
"elif",
"method",
"==",
"COOLPROP",
":",
"mu",
"=",
"CoolProp_T_dependent_property",
"(",
"T",
",",
"self",
".",
"CASRN",
",",
"'V'",
",",
"'g'",
")",
"elif",
"method",
"==",
"DIPPR_PERRY_8E",
":",
"mu",
"=",
"EQ102",
"(",
"T",
",",
"*",
"self",
".",
"Perrys2_312_coeffs",
")",
"elif",
"method",
"==",
"VDI_PPDS",
":",
"mu",
"=",
"horner",
"(",
"self",
".",
"VDI_PPDS_coeffs",
",",
"T",
")",
"elif",
"method",
"==",
"YOON_THODOS",
":",
"mu",
"=",
"Yoon_Thodos",
"(",
"T",
",",
"self",
".",
"Tc",
",",
"self",
".",
"Pc",
",",
"self",
".",
"MW",
")",
"elif",
"method",
"==",
"STIEL_THODOS",
":",
"mu",
"=",
"Stiel_Thodos",
"(",
"T",
",",
"self",
".",
"Tc",
",",
"self",
".",
"Pc",
",",
"self",
".",
"MW",
")",
"elif",
"method",
"==",
"LUCAS_GAS",
":",
"mu",
"=",
"lucas_gas",
"(",
"T",
",",
"self",
".",
"Tc",
",",
"self",
".",
"Pc",
",",
"self",
".",
"Zc",
",",
"self",
".",
"MW",
",",
"self",
".",
"dipole",
",",
"CASRN",
"=",
"self",
".",
"CASRN",
")",
"elif",
"method",
"in",
"self",
".",
"tabular_data",
":",
"mu",
"=",
"self",
".",
"interpolate",
"(",
"T",
",",
"method",
")",
"return",
"mu"
] | r'''Method to calculate low-pressure gas viscosity at
temperature `T` with a given method.
This method has no exception handling; see `T_dependent_property`
for that.
Parameters
----------
T : float
Temperature of the gas, [K]
method : str
Name of the method to use
Returns
-------
mu : float
Viscosity of the gas at T and a low pressure, [Pa*s] | [
"r",
"Method",
"to",
"calculate",
"low",
"-",
"pressure",
"gas",
"viscosity",
"at",
"tempearture",
"T",
"with",
"a",
"given",
"method",
"."
] | python | valid |
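A hedged usage sketch; the constructor keywords are assumed to mirror the attributes referenced above (values here are roughly nitrogen's), and the docstring points to `T_dependent_property` as the exception-handling entry point:

```python
from thermo.viscosity import ViscosityGas

mu_obj = ViscosityGas(CASRN='7727-37-9', MW=28.014, Tc=126.2,
                      Pc=3.394e6, Zc=0.289, dipole=0.0)  # kwargs assumed
print(mu_obj.T_dependent_property(298.15))   # gas viscosity in Pa*s
```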
jasonrbriggs/stomp.py | stomp/transport.py | https://github.com/jasonrbriggs/stomp.py/blob/643843c5fbf25fd24339dd0e69a9411c3d8b94c7/stomp/transport.py#L103-L113 | def start(self):
"""
Start the connection. This should be called after all
listeners have been registered. If this method is not called,
no frames will be received by the connection.
"""
self.running = True
self.attempt_connection()
receiver_thread = self.create_thread_fc(self.__receiver_loop)
receiver_thread.name = "StompReceiver%s" % getattr(receiver_thread, "name", "Thread")
self.notify('connecting') | [
"def",
"start",
"(",
"self",
")",
":",
"self",
".",
"running",
"=",
"True",
"self",
".",
"attempt_connection",
"(",
")",
"receiver_thread",
"=",
"self",
".",
"create_thread_fc",
"(",
"self",
".",
"__receiver_loop",
")",
"receiver_thread",
".",
"name",
"=",
"\"StompReceiver%s\"",
"%",
"getattr",
"(",
"receiver_thread",
",",
"\"name\"",
",",
"\"Thread\"",
")",
"self",
".",
"notify",
"(",
"'connecting'",
")"
] | Start the connection. This should be called after all
listeners have been registered. If this method is not called,
no frames will be received by the connection. | [
"Start",
"the",
"connection",
".",
"This",
"should",
"be",
"called",
"after",
"all",
"listeners",
"have",
"been",
"registered",
".",
"If",
"this",
"method",
"is",
"not",
"called",
"no",
"frames",
"will",
"be",
"received",
"by",
"the",
"connection",
"."
] | python | train |
divio/django-filer | filer/admin/folderadmin.py | https://github.com/divio/django-filer/blob/946629087943d41eff290f07bfdf240b8853dd88/filer/admin/folderadmin.py#L504-L581 | def response_action(self, request, files_queryset, folders_queryset):
"""
Handle an admin action. This is called if a request is POSTed to the
changelist; it returns an HttpResponse if the action was handled, and
None otherwise.
"""
# There can be multiple action forms on the page (at the top
# and bottom of the change list, for example). Get the action
# whose button was pushed.
try:
action_index = int(request.POST.get('index', 0))
except ValueError:
action_index = 0
# Construct the action form.
data = request.POST.copy()
data.pop(helpers.ACTION_CHECKBOX_NAME, None)
data.pop("index", None)
# Use the action whose button was pushed
try:
data.update({'action': data.getlist('action')[action_index]})
except IndexError:
# If we didn't get an action from the chosen form that's invalid
# POST data, so by deleting action it'll fail the validation check
# below. So no need to do anything here
pass
action_form = self.action_form(data, auto_id=None)
action_form.fields['action'].choices = self.get_action_choices(request)
# If the form's valid we can handle the action.
if action_form.is_valid():
action = action_form.cleaned_data['action']
select_across = action_form.cleaned_data['select_across']
func, name, description = self.get_actions(request)[action]
# Get the list of selected PKs. If nothing's selected, we can't
# perform an action on it, so bail. Except we want to perform
# the action explicitly on all objects.
selected = request.POST.getlist(helpers.ACTION_CHECKBOX_NAME)
if not selected and not select_across:
# Reminder that something needs to be selected or nothing
# will happen
msg = _("Items must be selected in order to perform "
"actions on them. No items have been changed.")
self.message_user(request, msg)
return None
if not select_across:
selected_files = []
selected_folders = []
for pk in selected:
if pk[:5] == "file-":
selected_files.append(pk[5:])
else:
selected_folders.append(pk[7:])
# Perform the action only on the selected objects
files_queryset = files_queryset.filter(pk__in=selected_files)
folders_queryset = folders_queryset.filter(
pk__in=selected_folders)
response = func(self, request, files_queryset, folders_queryset)
# Actions may return an HttpResponse, which will be used as the
# response from the POST. If not, we'll be a good little HTTP
# citizen and redirect back to the changelist page.
if isinstance(response, HttpResponse):
return response
else:
return HttpResponseRedirect(request.get_full_path())
else:
msg = _("No action selected.")
self.message_user(request, msg)
return None | [
"def",
"response_action",
"(",
"self",
",",
"request",
",",
"files_queryset",
",",
"folders_queryset",
")",
":",
"# There can be multiple action forms on the page (at the top",
"# and bottom of the change list, for example). Get the action",
"# whose button was pushed.",
"try",
":",
"action_index",
"=",
"int",
"(",
"request",
".",
"POST",
".",
"get",
"(",
"'index'",
",",
"0",
")",
")",
"except",
"ValueError",
":",
"action_index",
"=",
"0",
"# Construct the action form.",
"data",
"=",
"request",
".",
"POST",
".",
"copy",
"(",
")",
"data",
".",
"pop",
"(",
"helpers",
".",
"ACTION_CHECKBOX_NAME",
",",
"None",
")",
"data",
".",
"pop",
"(",
"\"index\"",
",",
"None",
")",
"# Use the action whose button was pushed",
"try",
":",
"data",
".",
"update",
"(",
"{",
"'action'",
":",
"data",
".",
"getlist",
"(",
"'action'",
")",
"[",
"action_index",
"]",
"}",
")",
"except",
"IndexError",
":",
"# If we didn't get an action from the chosen form that's invalid",
"# POST data, so by deleting action it'll fail the validation check",
"# below. So no need to do anything here",
"pass",
"action_form",
"=",
"self",
".",
"action_form",
"(",
"data",
",",
"auto_id",
"=",
"None",
")",
"action_form",
".",
"fields",
"[",
"'action'",
"]",
".",
"choices",
"=",
"self",
".",
"get_action_choices",
"(",
"request",
")",
"# If the form's valid we can handle the action.",
"if",
"action_form",
".",
"is_valid",
"(",
")",
":",
"action",
"=",
"action_form",
".",
"cleaned_data",
"[",
"'action'",
"]",
"select_across",
"=",
"action_form",
".",
"cleaned_data",
"[",
"'select_across'",
"]",
"func",
",",
"name",
",",
"description",
"=",
"self",
".",
"get_actions",
"(",
"request",
")",
"[",
"action",
"]",
"# Get the list of selected PKs. If nothing's selected, we can't",
"# perform an action on it, so bail. Except we want to perform",
"# the action explicitly on all objects.",
"selected",
"=",
"request",
".",
"POST",
".",
"getlist",
"(",
"helpers",
".",
"ACTION_CHECKBOX_NAME",
")",
"if",
"not",
"selected",
"and",
"not",
"select_across",
":",
"# Reminder that something needs to be selected or nothing",
"# will happen",
"msg",
"=",
"_",
"(",
"\"Items must be selected in order to perform \"",
"\"actions on them. No items have been changed.\"",
")",
"self",
".",
"message_user",
"(",
"request",
",",
"msg",
")",
"return",
"None",
"if",
"not",
"select_across",
":",
"selected_files",
"=",
"[",
"]",
"selected_folders",
"=",
"[",
"]",
"for",
"pk",
"in",
"selected",
":",
"if",
"pk",
"[",
":",
"5",
"]",
"==",
"\"file-\"",
":",
"selected_files",
".",
"append",
"(",
"pk",
"[",
"5",
":",
"]",
")",
"else",
":",
"selected_folders",
".",
"append",
"(",
"pk",
"[",
"7",
":",
"]",
")",
"# Perform the action only on the selected objects",
"files_queryset",
"=",
"files_queryset",
".",
"filter",
"(",
"pk__in",
"=",
"selected_files",
")",
"folders_queryset",
"=",
"folders_queryset",
".",
"filter",
"(",
"pk__in",
"=",
"selected_folders",
")",
"response",
"=",
"func",
"(",
"self",
",",
"request",
",",
"files_queryset",
",",
"folders_queryset",
")",
"# Actions may return an HttpResponse, which will be used as the",
"# response from the POST. If not, we'll be a good little HTTP",
"# citizen and redirect back to the changelist page.",
"if",
"isinstance",
"(",
"response",
",",
"HttpResponse",
")",
":",
"return",
"response",
"else",
":",
"return",
"HttpResponseRedirect",
"(",
"request",
".",
"get_full_path",
"(",
")",
")",
"else",
":",
"msg",
"=",
"_",
"(",
"\"No action selected.\"",
")",
"self",
".",
"message_user",
"(",
"request",
",",
"msg",
")",
"return",
"None"
] | Handle an admin action. This is called if a request is POSTed to the
changelist; it returns an HttpResponse if the action was handled, and
None otherwise. | [
"Handle",
"an",
"admin",
"action",
".",
"This",
"is",
"called",
"if",
"a",
"request",
"is",
"POSTed",
"to",
"the",
"changelist",
";",
"it",
"returns",
"an",
"HttpResponse",
"if",
"the",
"action",
"was",
"handled",
"and",
"None",
"otherwise",
"."
] | python | train |
materialsproject/pymatgen | pymatgen/analysis/fragmenter.py | https://github.com/materialsproject/pymatgen/blob/4ca558cf72f8d5f8a1f21dfdfc0181a971c186da/pymatgen/analysis/fragmenter.py#L90-L126 | def _fragment_one_level(self, mol_graphs):
"""
Perform one step of iterative fragmentation on a list of molecule graphs. Loop through the graphs,
then loop through each graph's edges and attempt to remove that edge in order to obtain two
disconnected subgraphs, aka two new fragments. If successful, check to see if the new fragments
are already present in self.unique_fragments, and append them if not. If unsuccessful, we know
that edge belongs to a ring. If we are opening rings, do so with that bond, and then again
check if the resulting fragment is present in self.unique_fragments and add it if it is not.
"""
unique_fragments_on_this_level = []
for mol_graph in mol_graphs:
for edge in mol_graph.graph.edges:
bond = [(edge[0],edge[1])]
try:
fragments = mol_graph.split_molecule_subgraphs(bond, allow_reverse=True)
for fragment in fragments:
found = False
for unique_fragment in self.unique_fragments:
if unique_fragment.isomorphic_to(fragment):
found = True
break
if not found:
self.unique_fragments.append(fragment)
unique_fragments_on_this_level.append(fragment)
except MolGraphSplitError:
if self.open_rings:
fragment = open_ring(mol_graph, bond, self.opt_steps)
found = False
for unique_fragment in self.unique_fragments:
if unique_fragment.isomorphic_to(fragment):
found = True
break
if not found:
self.unique_fragments.append(fragment)
self.unique_fragments_from_ring_openings.append(fragment)
unique_fragments_on_this_level.append(fragment)
return unique_fragments_on_this_level | [
"def",
"_fragment_one_level",
"(",
"self",
",",
"mol_graphs",
")",
":",
"unique_fragments_on_this_level",
"=",
"[",
"]",
"for",
"mol_graph",
"in",
"mol_graphs",
":",
"for",
"edge",
"in",
"mol_graph",
".",
"graph",
".",
"edges",
":",
"bond",
"=",
"[",
"(",
"edge",
"[",
"0",
"]",
",",
"edge",
"[",
"1",
"]",
")",
"]",
"try",
":",
"fragments",
"=",
"mol_graph",
".",
"split_molecule_subgraphs",
"(",
"bond",
",",
"allow_reverse",
"=",
"True",
")",
"for",
"fragment",
"in",
"fragments",
":",
"found",
"=",
"False",
"for",
"unique_fragment",
"in",
"self",
".",
"unique_fragments",
":",
"if",
"unique_fragment",
".",
"isomorphic_to",
"(",
"fragment",
")",
":",
"found",
"=",
"True",
"break",
"if",
"not",
"found",
":",
"self",
".",
"unique_fragments",
".",
"append",
"(",
"fragment",
")",
"unique_fragments_on_this_level",
".",
"append",
"(",
"fragment",
")",
"except",
"MolGraphSplitError",
":",
"if",
"self",
".",
"open_rings",
":",
"fragment",
"=",
"open_ring",
"(",
"mol_graph",
",",
"bond",
",",
"self",
".",
"opt_steps",
")",
"found",
"=",
"False",
"for",
"unique_fragment",
"in",
"self",
".",
"unique_fragments",
":",
"if",
"unique_fragment",
".",
"isomorphic_to",
"(",
"fragment",
")",
":",
"found",
"=",
"True",
"break",
"if",
"not",
"found",
":",
"self",
".",
"unique_fragments",
".",
"append",
"(",
"fragment",
")",
"self",
".",
"unique_fragments_from_ring_openings",
".",
"append",
"(",
"fragment",
")",
"unique_fragments_on_this_level",
".",
"append",
"(",
"fragment",
")",
"return",
"unique_fragments_on_this_level"
] | Perform one step of iterative fragmentation on a list of molecule graphs. Loop through the graphs,
then loop through each graph's edges and attempt to remove that edge in order to obtain two
disconnected subgraphs, aka two new fragments. If successful, check to see if the new fragments
are already present in self.unique_fragments, and append them if not. If unsuccessful, we know
that edge belongs to a ring. If we are opening rings, do so with that bond, and then again
check if the resulting fragment is present in self.unique_fragments and add it if it is not. | [
"Perform",
"one",
"step",
"of",
"iterative",
"fragmentation",
"on",
"a",
"list",
"of",
"molecule",
"graphs",
".",
"Loop",
"through",
"the",
"graphs",
"then",
"loop",
"through",
"each",
"graph",
"s",
"edges",
"and",
"attempt",
"to",
"remove",
"that",
"edge",
"in",
"order",
"to",
"obtain",
"two",
"disconnected",
"subgraphs",
"aka",
"two",
"new",
"fragments",
".",
"If",
"successful",
"check",
"to",
"see",
"if",
"the",
"new",
"fragments",
"are",
"already",
"present",
"in",
"self",
".",
"unique_fragments",
"and",
"append",
"them",
"if",
"not",
".",
"If",
"unsucessful",
"we",
"know",
"that",
"edge",
"belongs",
"to",
"a",
"ring",
".",
"If",
"we",
"are",
"opening",
"rings",
"do",
"so",
"with",
"that",
"bond",
"and",
"then",
"again",
"check",
"if",
"the",
"resulting",
"fragment",
"is",
"present",
"in",
"self",
".",
"unique_fragments",
"and",
"add",
"it",
"if",
"it",
"is",
"not",
"."
] | python | train |
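A hedged usage sketch of the surrounding `Fragmenter` class (file name hypothetical; constructor arguments assumed from pymatgen's docs):

```python
from pymatgen.core.structure import Molecule
from pymatgen.analysis.fragmenter import Fragmenter

mol = Molecule.from_file("ethanol.xyz")                    # hypothetical file
frag = Fragmenter(molecule=mol, depth=1, open_rings=True)  # one fragmentation level
print(len(frag.unique_fragments))
```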
awslabs/sockeye | sockeye/image_captioning/inference.py | https://github.com/awslabs/sockeye/blob/5d64a1ee1ef3cbba17c6d1d94bc061020c43f6ab/sockeye/image_captioning/inference.py#L102-L130 | def translate(self, trans_inputs: List[TranslatorInput]) -> List[TranslatorOutput]:
"""
Batch-translates a list of TranslatorInputs, returns a list of TranslatorOutputs.
Splits oversized sentences to sentence chunks of size less than max_input_length.
:param trans_inputs: List of TranslatorInputs as returned by make_input().
:return: List of translation results.
"""
batch_size = self.max_batch_size
# translate in batch-sized blocks over input chunks
translations = []
for batch_id, batch in enumerate(utils.grouper(trans_inputs, batch_size)):
logger.debug("Translating batch %d", batch_id)
# underfilled batch will be filled to a full batch size with copies of the 1st input
rest = batch_size - len(batch)
if rest > 0:
logger.debug("Extending the last batch to the full batch size (%d)", batch_size)
batch = batch + [batch[0]] * rest
batch_translations = self._translate_nd(*self._get_inference_input(batch))
# truncate to remove filler translations
if rest > 0:
batch_translations = batch_translations[:-rest]
translations.extend(batch_translations)
# Concatenate results
results = [] # type: List[TranslatorOutput]
for trans_input, translation in zip(trans_inputs, translations):
results.append(self._make_result(trans_input, translation))
return results | [
"def",
"translate",
"(",
"self",
",",
"trans_inputs",
":",
"List",
"[",
"TranslatorInput",
"]",
")",
"->",
"List",
"[",
"TranslatorOutput",
"]",
":",
"batch_size",
"=",
"self",
".",
"max_batch_size",
"# translate in batch-sized blocks over input chunks",
"translations",
"=",
"[",
"]",
"for",
"batch_id",
",",
"batch",
"in",
"enumerate",
"(",
"utils",
".",
"grouper",
"(",
"trans_inputs",
",",
"batch_size",
")",
")",
":",
"logger",
".",
"debug",
"(",
"\"Translating batch %d\"",
",",
"batch_id",
")",
"# underfilled batch will be filled to a full batch size with copies of the 1st input",
"rest",
"=",
"batch_size",
"-",
"len",
"(",
"batch",
")",
"if",
"rest",
">",
"0",
":",
"logger",
".",
"debug",
"(",
"\"Extending the last batch to the full batch size (%d)\"",
",",
"batch_size",
")",
"batch",
"=",
"batch",
"+",
"[",
"batch",
"[",
"0",
"]",
"]",
"*",
"rest",
"batch_translations",
"=",
"self",
".",
"_translate_nd",
"(",
"*",
"self",
".",
"_get_inference_input",
"(",
"batch",
")",
")",
"# truncate to remove filler translations",
"if",
"rest",
">",
"0",
":",
"batch_translations",
"=",
"batch_translations",
"[",
":",
"-",
"rest",
"]",
"translations",
".",
"extend",
"(",
"batch_translations",
")",
"# Concatenate results",
"results",
"=",
"[",
"]",
"# type: List[TranslatorOutput]",
"for",
"trans_input",
",",
"translation",
"in",
"zip",
"(",
"trans_inputs",
",",
"translations",
")",
":",
"results",
".",
"append",
"(",
"self",
".",
"_make_result",
"(",
"trans_input",
",",
"translation",
")",
")",
"return",
"results"
] | Batch-translates a list of TranslatorInputs, returns a list of TranslatorOutputs.
Splits oversized sentences to sentence chunks of size less than max_input_length.
:param trans_inputs: List of TranslatorInputs as returned by make_input().
:return: List of translation results. | [
"Batch",
"-",
"translates",
"a",
"list",
"of",
"TranslatorInputs",
"returns",
"a",
"list",
"of",
"TranslatorOutputs",
".",
"Splits",
"oversized",
"sentences",
"to",
"sentence",
"chunks",
"of",
"size",
"less",
"than",
"max_input_length",
"."
] | python | train |
ivanyu/idx2numpy | idx2numpy/converters.py | https://github.com/ivanyu/idx2numpy/blob/9b88698314973226212181d1747dfad6c6974e51/idx2numpy/converters.py#L49-L59 | def convert_from_file(file):
"""
Reads the content of file in IDX format, converts it into numpy.ndarray and
returns it.
file is a file-like object (with read() method) or a file name.
"""
if isinstance(file, six_string_types):
with open(file, 'rb') as f:
return _internal_convert(f)
else:
return _internal_convert(file) | [
"def",
"convert_from_file",
"(",
"file",
")",
":",
"if",
"isinstance",
"(",
"file",
",",
"six_string_types",
")",
":",
"with",
"open",
"(",
"file",
",",
"'rb'",
")",
"as",
"f",
":",
"return",
"_internal_convert",
"(",
"f",
")",
"else",
":",
"return",
"_internal_convert",
"(",
"file",
")"
] | Reads the content of file in IDX format, converts it into numpy.ndarray and
returns it.
file is a file-like object (with read() method) or a file name. | [
"Reads",
"the",
"content",
"of",
"file",
"in",
"IDX",
"format",
"converts",
"it",
"into",
"numpy",
".",
"ndarray",
"and",
"returns",
"it",
".",
"file",
"is",
"a",
"file",
"-",
"like",
"object",
"(",
"with",
"read",
"()",
"method",
")",
"or",
"a",
"file",
"name",
"."
] | python | train |
armstrong/armstrong.dev | armstrong/dev/tasks.py | https://github.com/armstrong/armstrong.dev/blob/6fd8b863038d9e5ebfd52dfe5ce6c85fb441c267/armstrong/dev/tasks.py#L64-L74 | def replaced_by_django_migrations(func, *args, **kwargs):
"""Decorator to preempt South requirement"""
DjangoSettings() # trigger helpful messages if Django is missing
import django
if django.VERSION >= (1, 7):
print("Django 1.7+ has its own migrations system.")
print("Use this instead: `invoke managepy makemigrations`")
sys.exit(1)
return func(*args, **kwargs) | [
"def",
"replaced_by_django_migrations",
"(",
"func",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"DjangoSettings",
"(",
")",
"# trigger helpful messages if Django is missing",
"import",
"django",
"if",
"django",
".",
"VERSION",
">=",
"(",
"1",
",",
"7",
")",
":",
"print",
"(",
"\"Django 1.7+ has its own migrations system.\"",
")",
"print",
"(",
"\"Use this instead: `invoke managepy makemigrations`\"",
")",
"sys",
".",
"exit",
"(",
"1",
")",
"return",
"func",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")"
] | Decorator to preempt South requirement | [
"Decorator",
"to",
"preempt",
"South",
"requirement"
] | python | train |
mozillazg/python-pinyin | pypinyin/core.py | https://github.com/mozillazg/python-pinyin/blob/b44756c852e0d2f50f251e3098cbbfef51774979/pypinyin/core.py#L251-L283 | def slug(hans, style=Style.NORMAL, heteronym=False, separator='-',
errors='default', strict=True):
"""生成 slug 字符串.
:param hans: 汉字
:type hans: unicode or list
:param style: 指定拼音风格,默认是 :py:attr:`~pypinyin.Style.NORMAL` 风格。
更多拼音风格详见 :class:`~pypinyin.Style`
:param heteronym: 是否启用多音字
:param separstor: 两个拼音间的分隔符/连接符
:param errors: 指定如何处理没有拼音的字符,详情请参考
:py:func:`~pypinyin.pinyin`
:param strict: 是否严格遵照《汉语拼音方案》来处理声母和韵母,详见 :ref:`strict`
:return: slug 字符串.
:raise AssertionError: 当传入的字符串不是 unicode 字符时会抛出这个异常
::
>>> import pypinyin
>>> from pypinyin import Style
>>> pypinyin.slug('中国人')
'zhong-guo-ren'
>>> pypinyin.slug('中国人', separator=' ')
'zhong guo ren'
>>> pypinyin.slug('中国人', style=Style.FIRST_LETTER)
'z-g-r'
>>> pypinyin.slug('中国人', style=Style.CYRILLIC)
'чжун1-го2-жэнь2'
"""
return separator.join(chain(*pinyin(hans, style=style, heteronym=heteronym,
errors=errors, strict=strict)
)) | [
"def",
"slug",
"(",
"hans",
",",
"style",
"=",
"Style",
".",
"NORMAL",
",",
"heteronym",
"=",
"False",
",",
"separator",
"=",
"'-'",
",",
"errors",
"=",
"'default'",
",",
"strict",
"=",
"True",
")",
":",
"return",
"separator",
".",
"join",
"(",
"chain",
"(",
"*",
"pinyin",
"(",
"hans",
",",
"style",
"=",
"style",
",",
"heteronym",
"=",
"heteronym",
",",
"errors",
"=",
"errors",
",",
"strict",
"=",
"strict",
")",
")",
")"
] | 生成 slug 字符串.
:param hans: 汉字
:type hans: unicode or list
:param style: 指定拼音风格,默认是 :py:attr:`~pypinyin.Style.NORMAL` 风格。
更多拼音风格详见 :class:`~pypinyin.Style`
:param heteronym: 是否启用多音字
:param separstor: 两个拼音间的分隔符/连接符
:param errors: 指定如何处理没有拼音的字符,详情请参考
:py:func:`~pypinyin.pinyin`
:param strict: 是否严格遵照《汉语拼音方案》来处理声母和韵母,详见 :ref:`strict`
:return: slug 字符串.
:raise AssertionError: 当传入的字符串不是 unicode 字符时会抛出这个异常
::
>>> import pypinyin
>>> from pypinyin import Style
>>> pypinyin.slug('中国人')
'zhong-guo-ren'
>>> pypinyin.slug('中国人', separator=' ')
'zhong guo ren'
>>> pypinyin.slug('中国人', style=Style.FIRST_LETTER)
'z-g-r'
>>> pypinyin.slug('中国人', style=Style.CYRILLIC)
'чжун1-го2-жэнь2' | [
"生成",
"slug",
"字符串",
"."
] | python | train |
NASA-AMMOS/AIT-Core | ait/core/server/stream.py | https://github.com/NASA-AMMOS/AIT-Core/blob/9d85bd9c738e7a6a6fbdff672bea708238b02a3a/ait/core/server/stream.py#L54-L68 | def process(self, input_data, topic=None):
"""
Invokes each handler in sequence.
Publishes final output data.
Params:
input_data: message received by stream
topic: name of plugin or stream message received from,
if applicable
"""
for handler in self.handlers:
output = handler.handle(input_data)
input_data = output
self.publish(input_data) | [
"def",
"process",
"(",
"self",
",",
"input_data",
",",
"topic",
"=",
"None",
")",
":",
"for",
"handler",
"in",
"self",
".",
"handlers",
":",
"output",
"=",
"handler",
".",
"handle",
"(",
"input_data",
")",
"input_data",
"=",
"output",
"self",
".",
"publish",
"(",
"input_data",
")"
] | Invokes each handler in sequence.
Publishes final output data.
Params:
input_data: message received by stream
topic: name of plugin or stream message received from,
if applicable | [
"Invokes",
"each",
"handler",
"in",
"sequence",
".",
"Publishes",
"final",
"output",
"data",
"."
] | python | train |
libtcod/python-tcod | tcod/libtcodpy.py | https://github.com/libtcod/python-tcod/blob/8ba10c5cfb813eaf3e834de971ba2d6acb7838e4/tcod/libtcodpy.py#L762-L780 | def color_lerp(
c1: Tuple[int, int, int], c2: Tuple[int, int, int], a: float
) -> Color:
"""Return the linear interpolation between two colors.
``a`` is the interpolation value, with 0 returing ``c1``,
1 returning ``c2``, and 0.5 returing a color halfway between both.
Args:
c1 (Union[Tuple[int, int, int], Sequence[int]]):
The first color. At a=0.
c2 (Union[Tuple[int, int, int], Sequence[int]]):
The second color. At a=1.
a (float): The interpolation value,
Returns:
Color: The interpolated Color.
"""
return Color._new_from_cdata(lib.TCOD_color_lerp(c1, c2, a)) | [
"def",
"color_lerp",
"(",
"c1",
":",
"Tuple",
"[",
"int",
",",
"int",
",",
"int",
"]",
",",
"c2",
":",
"Tuple",
"[",
"int",
",",
"int",
",",
"int",
"]",
",",
"a",
":",
"float",
")",
"->",
"Color",
":",
"return",
"Color",
".",
"_new_from_cdata",
"(",
"lib",
".",
"TCOD_color_lerp",
"(",
"c1",
",",
"c2",
",",
"a",
")",
")"
] | Return the linear interpolation between two colors.
``a`` is the interpolation value, with 0 returing ``c1``,
1 returning ``c2``, and 0.5 returing a color halfway between both.
Args:
c1 (Union[Tuple[int, int, int], Sequence[int]]):
The first color. At a=0.
c2 (Union[Tuple[int, int, int], Sequence[int]]):
The second color. At a=1.
a (float): The interpolation value,
Returns:
Color: The interpolated Color. | [
"Return",
"the",
"linear",
"interpolation",
"between",
"two",
"colors",
"."
] | python | train |
pypa/pipenv | pipenv/patched/notpip/_internal/cli/cmdoptions.py | https://github.com/pypa/pipenv/blob/cae8d76c210b9777e90aab76e9c4b0e53bb19cde/pipenv/patched/notpip/_internal/cli/cmdoptions.py#L623-L643 | def no_use_pep517_callback(option, opt, value, parser):
"""
Process a value provided for the --no-use-pep517 option.
This is an optparse.Option callback for the no_use_pep517 option.
"""
# Since --no-use-pep517 doesn't accept arguments, the value argument
# will be None if --no-use-pep517 is passed via the command-line.
# However, the value can be non-None if the option is triggered e.g.
# by an environment variable, for example "PIP_NO_USE_PEP517=true".
if value is not None:
msg = """A value was passed for --no-use-pep517,
probably using either the PIP_NO_USE_PEP517 environment variable
or the "no-use-pep517" config file option. Use an appropriate value
of the PIP_USE_PEP517 environment variable or the "use-pep517"
config file option instead.
"""
raise_option_error(parser, option=option, msg=msg)
# Otherwise, --no-use-pep517 was passed via the command-line.
parser.values.use_pep517 = False | [
"def",
"no_use_pep517_callback",
"(",
"option",
",",
"opt",
",",
"value",
",",
"parser",
")",
":",
"# Since --no-use-pep517 doesn't accept arguments, the value argument",
"# will be None if --no-use-pep517 is passed via the command-line.",
"# However, the value can be non-None if the option is triggered e.g.",
"# by an environment variable, for example \"PIP_NO_USE_PEP517=true\".",
"if",
"value",
"is",
"not",
"None",
":",
"msg",
"=",
"\"\"\"A value was passed for --no-use-pep517,\n probably using either the PIP_NO_USE_PEP517 environment variable\n or the \"no-use-pep517\" config file option. Use an appropriate value\n of the PIP_USE_PEP517 environment variable or the \"use-pep517\"\n config file option instead.\n \"\"\"",
"raise_option_error",
"(",
"parser",
",",
"option",
"=",
"option",
",",
"msg",
"=",
"msg",
")",
"# Otherwise, --no-use-pep517 was passed via the command-line.",
"parser",
".",
"values",
".",
"use_pep517",
"=",
"False"
] | Process a value provided for the --no-use-pep517 option.
This is an optparse.Option callback for the no_use_pep517 option. | [
"Process",
"a",
"value",
"provided",
"for",
"the",
"--",
"no",
"-",
"use",
"-",
"pep517",
"option",
"."
] | python | train |
saltstack/salt | salt/modules/state.py | https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/state.py#L527-L576 | def template(tem, queue=False, **kwargs):
'''
Execute the information stored in a template file on the minion.
This function does not ask a master for a SLS file to render but
instead directly processes the file at the provided path on the minion.
CLI Example:
.. code-block:: bash
salt '*' state.template '<Path to template on the minion>'
'''
if 'env' in kwargs:
# "env" is not supported; Use "saltenv".
kwargs.pop('env')
conflict = _check_queue(queue, kwargs)
if conflict is not None:
return conflict
opts = salt.utils.state.get_sls_opts(__opts__, **kwargs)
try:
st_ = salt.state.HighState(opts,
context=__context__,
proxy=__proxy__,
initial_pillar=_get_initial_pillar(opts))
except NameError:
st_ = salt.state.HighState(opts,
context=__context__,
initial_pillar=_get_initial_pillar(opts))
errors = _get_pillar_errors(kwargs, pillar=st_.opts['pillar'])
if errors:
__context__['retcode'] = salt.defaults.exitcodes.EX_PILLAR_FAILURE
raise CommandExecutionError('Pillar failed to render', info=errors)
if not tem.endswith('.sls'):
tem = '{sls}.sls'.format(sls=tem)
high_state, errors = st_.render_state(tem,
kwargs.get('saltenv', ''),
'',
None,
local=True)
if errors:
__context__['retcode'] = salt.defaults.exitcodes.EX_STATE_COMPILER_ERROR
return errors
ret = st_.state.call_high(high_state)
_set_retcode(ret, highstate=high_state)
return ret | [
"def",
"template",
"(",
"tem",
",",
"queue",
"=",
"False",
",",
"*",
"*",
"kwargs",
")",
":",
"if",
"'env'",
"in",
"kwargs",
":",
"# \"env\" is not supported; Use \"saltenv\".",
"kwargs",
".",
"pop",
"(",
"'env'",
")",
"conflict",
"=",
"_check_queue",
"(",
"queue",
",",
"kwargs",
")",
"if",
"conflict",
"is",
"not",
"None",
":",
"return",
"conflict",
"opts",
"=",
"salt",
".",
"utils",
".",
"state",
".",
"get_sls_opts",
"(",
"__opts__",
",",
"*",
"*",
"kwargs",
")",
"try",
":",
"st_",
"=",
"salt",
".",
"state",
".",
"HighState",
"(",
"opts",
",",
"context",
"=",
"__context__",
",",
"proxy",
"=",
"__proxy__",
",",
"initial_pillar",
"=",
"_get_initial_pillar",
"(",
"opts",
")",
")",
"except",
"NameError",
":",
"st_",
"=",
"salt",
".",
"state",
".",
"HighState",
"(",
"opts",
",",
"context",
"=",
"__context__",
",",
"initial_pillar",
"=",
"_get_initial_pillar",
"(",
"opts",
")",
")",
"errors",
"=",
"_get_pillar_errors",
"(",
"kwargs",
",",
"pillar",
"=",
"st_",
".",
"opts",
"[",
"'pillar'",
"]",
")",
"if",
"errors",
":",
"__context__",
"[",
"'retcode'",
"]",
"=",
"salt",
".",
"defaults",
".",
"exitcodes",
".",
"EX_PILLAR_FAILURE",
"raise",
"CommandExecutionError",
"(",
"'Pillar failed to render'",
",",
"info",
"=",
"errors",
")",
"if",
"not",
"tem",
".",
"endswith",
"(",
"'.sls'",
")",
":",
"tem",
"=",
"'{sls}.sls'",
".",
"format",
"(",
"sls",
"=",
"tem",
")",
"high_state",
",",
"errors",
"=",
"st_",
".",
"render_state",
"(",
"tem",
",",
"kwargs",
".",
"get",
"(",
"'saltenv'",
",",
"''",
")",
",",
"''",
",",
"None",
",",
"local",
"=",
"True",
")",
"if",
"errors",
":",
"__context__",
"[",
"'retcode'",
"]",
"=",
"salt",
".",
"defaults",
".",
"exitcodes",
".",
"EX_STATE_COMPILER_ERROR",
"return",
"errors",
"ret",
"=",
"st_",
".",
"state",
".",
"call_high",
"(",
"high_state",
")",
"_set_retcode",
"(",
"ret",
",",
"highstate",
"=",
"high_state",
")",
"return",
"ret"
] | Execute the information stored in a template file on the minion.
This function does not ask a master for a SLS file to render but
instead directly processes the file at the provided path on the minion.
CLI Example:
.. code-block:: bash
salt '*' state.template '<Path to template on the minion>' | [
"Execute",
"the",
"information",
"stored",
"in",
"a",
"template",
"file",
"on",
"the",
"minion",
"."
] | python | train |
tensorflow/probability | tensorflow_probability/python/distributions/mixture_same_family.py | https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/distributions/mixture_same_family.py#L566-L583 | def _prevent_2nd_derivative(x):
"""Disables computation of the second derivatives for a tensor.
NB: you need to apply a non-identity function to the output tensor for the
exception to be raised.
Arguments:
x: A tensor.
Returns:
A tensor with the same value and the same derivative as x, but that raises
LookupError when trying to compute the second derivatives.
"""
def grad(dy):
return array_ops.prevent_gradient(
dy, message="Second derivative is not implemented.")
return tf.identity(x), grad | [
"def",
"_prevent_2nd_derivative",
"(",
"x",
")",
":",
"def",
"grad",
"(",
"dy",
")",
":",
"return",
"array_ops",
".",
"prevent_gradient",
"(",
"dy",
",",
"message",
"=",
"\"Second derivative is not implemented.\"",
")",
"return",
"tf",
".",
"identity",
"(",
"x",
")",
",",
"grad"
] | Disables computation of the second derivatives for a tensor.
NB: you need to apply a non-identity function to the output tensor for the
exception to be raised.
Arguments:
x: A tensor.
Returns:
A tensor with the same value and the same derivative as x, but that raises
LookupError when trying to compute the second derivatives. | [
"Disables",
"computation",
"of",
"the",
"second",
"derivatives",
"for",
"a",
"tensor",
"."
] | python | test |
python-rope/rope | rope/base/oi/type_hinting/utils.py | https://github.com/python-rope/rope/blob/1c9f9cd5964b099a99a9111e998f0dc728860688/rope/base/oi/type_hinting/utils.py#L77-L95 | def resolve_type(type_name, pyobject):
"""
:type type_name: str
:type pyobject: rope.base.pyobjects.PyDefinedObject | rope.base.pyobjects.PyObject
:rtype: rope.base.pyobjects.PyDefinedObject | rope.base.pyobjects.PyObject or None
"""
if '.' not in type_name:
try:
return pyobject.get_module().get_scope().get_name(type_name).get_object()
except Exception:
pass
else:
mod_name, attr_name = type_name.rsplit('.', 1)
try:
mod_finder = ScopeNameFinder(pyobject.get_module())
mod = mod_finder._find_module(mod_name).get_object()
return mod.get_attribute(attr_name).get_object()
except Exception:
pass | [
"def",
"resolve_type",
"(",
"type_name",
",",
"pyobject",
")",
":",
"if",
"'.'",
"not",
"in",
"type_name",
":",
"try",
":",
"return",
"pyobject",
".",
"get_module",
"(",
")",
".",
"get_scope",
"(",
")",
".",
"get_name",
"(",
"type_name",
")",
".",
"get_object",
"(",
")",
"except",
"Exception",
":",
"pass",
"else",
":",
"mod_name",
",",
"attr_name",
"=",
"type_name",
".",
"rsplit",
"(",
"'.'",
",",
"1",
")",
"try",
":",
"mod_finder",
"=",
"ScopeNameFinder",
"(",
"pyobject",
".",
"get_module",
"(",
")",
")",
"mod",
"=",
"mod_finder",
".",
"_find_module",
"(",
"mod_name",
")",
".",
"get_object",
"(",
")",
"return",
"mod",
".",
"get_attribute",
"(",
"attr_name",
")",
".",
"get_object",
"(",
")",
"except",
"Exception",
":",
"pass"
] | :type type_name: str
:type pyobject: rope.base.pyobjects.PyDefinedObject | rope.base.pyobjects.PyObject
:rtype: rope.base.pyobjects.PyDefinedObject | rope.base.pyobjects.PyObject or None | [
":",
"type",
"type_name",
":",
"str",
":",
"type",
"pyobject",
":",
"rope",
".",
"base",
".",
"pyobjects",
".",
"PyDefinedObject",
"|",
"rope",
".",
"base",
".",
"pyobjects",
".",
"PyObject",
":",
"rtype",
":",
"rope",
".",
"base",
".",
"pyobjects",
".",
"PyDefinedObject",
"|",
"rope",
".",
"base",
".",
"pyobjects",
".",
"PyObject",
"or",
"None"
] | python | train |
PythonCharmers/python-future | src/future/backports/http/server.py | https://github.com/PythonCharmers/python-future/blob/c423752879acc05eebc29b0bb9909327bd5c7308/src/future/backports/http/server.py#L688-L727 | def send_head(self):
"""Common code for GET and HEAD commands.
This sends the response code and MIME headers.
Return value is either a file object (which has to be copied
to the outputfile by the caller unless the command was HEAD,
and must be closed by the caller under all circumstances), or
None, in which case the caller has nothing further to do.
"""
path = self.translate_path(self.path)
f = None
if os.path.isdir(path):
if not self.path.endswith('/'):
# redirect browser - doing basically what apache does
self.send_response(301)
self.send_header("Location", self.path + "/")
self.end_headers()
return None
for index in "index.html", "index.htm":
index = os.path.join(path, index)
if os.path.exists(index):
path = index
break
else:
return self.list_directory(path)
ctype = self.guess_type(path)
try:
f = open(path, 'rb')
except IOError:
self.send_error(404, "File not found")
return None
self.send_response(200)
self.send_header("Content-type", ctype)
fs = os.fstat(f.fileno())
self.send_header("Content-Length", str(fs[6]))
self.send_header("Last-Modified", self.date_time_string(fs.st_mtime))
self.end_headers()
return f | [
"def",
"send_head",
"(",
"self",
")",
":",
"path",
"=",
"self",
".",
"translate_path",
"(",
"self",
".",
"path",
")",
"f",
"=",
"None",
"if",
"os",
".",
"path",
".",
"isdir",
"(",
"path",
")",
":",
"if",
"not",
"self",
".",
"path",
".",
"endswith",
"(",
"'/'",
")",
":",
"# redirect browser - doing basically what apache does",
"self",
".",
"send_response",
"(",
"301",
")",
"self",
".",
"send_header",
"(",
"\"Location\"",
",",
"self",
".",
"path",
"+",
"\"/\"",
")",
"self",
".",
"end_headers",
"(",
")",
"return",
"None",
"for",
"index",
"in",
"\"index.html\"",
",",
"\"index.htm\"",
":",
"index",
"=",
"os",
".",
"path",
".",
"join",
"(",
"path",
",",
"index",
")",
"if",
"os",
".",
"path",
".",
"exists",
"(",
"index",
")",
":",
"path",
"=",
"index",
"break",
"else",
":",
"return",
"self",
".",
"list_directory",
"(",
"path",
")",
"ctype",
"=",
"self",
".",
"guess_type",
"(",
"path",
")",
"try",
":",
"f",
"=",
"open",
"(",
"path",
",",
"'rb'",
")",
"except",
"IOError",
":",
"self",
".",
"send_error",
"(",
"404",
",",
"\"File not found\"",
")",
"return",
"None",
"self",
".",
"send_response",
"(",
"200",
")",
"self",
".",
"send_header",
"(",
"\"Content-type\"",
",",
"ctype",
")",
"fs",
"=",
"os",
".",
"fstat",
"(",
"f",
".",
"fileno",
"(",
")",
")",
"self",
".",
"send_header",
"(",
"\"Content-Length\"",
",",
"str",
"(",
"fs",
"[",
"6",
"]",
")",
")",
"self",
".",
"send_header",
"(",
"\"Last-Modified\"",
",",
"self",
".",
"date_time_string",
"(",
"fs",
".",
"st_mtime",
")",
")",
"self",
".",
"end_headers",
"(",
")",
"return",
"f"
] | Common code for GET and HEAD commands.
This sends the response code and MIME headers.
Return value is either a file object (which has to be copied
to the outputfile by the caller unless the command was HEAD,
and must be closed by the caller under all circumstances), or
None, in which case the caller has nothing further to do. | [
"Common",
"code",
"for",
"GET",
"and",
"HEAD",
"commands",
"."
] | python | train |
amadeus4dev/amadeus-python | amadeus/shopping/_hotel_offer.py | https://github.com/amadeus4dev/amadeus-python/blob/afb93667d2cd486ddc7f4a7f29f222f04453a44a/amadeus/shopping/_hotel_offer.py#L9-L21 | def get(self, **params):
'''
Returns details for a specific offer.
.. code-block:: python
amadeus.shopping.hotel_offer('XXX').get
:rtype: amadeus.Response
:raises amadeus.ResponseError: if the request could not be completed
'''
return self.client.get('/v2/shopping/hotel-offers/{0}'
.format(self.offer_id), **params) | [
"def",
"get",
"(",
"self",
",",
"*",
"*",
"params",
")",
":",
"return",
"self",
".",
"client",
".",
"get",
"(",
"'/v2/shopping/hotel-offers/{0}'",
".",
"format",
"(",
"self",
".",
"offer_id",
")",
",",
"*",
"*",
"params",
")"
] | Returns details for a specific offer.
.. code-block:: python
amadeus.shopping.hotel_offer('XXX').get
:rtype: amadeus.Response
:raises amadeus.ResponseError: if the request could not be completed | [
"Returns",
"details",
"for",
"a",
"specific",
"offer",
"."
] | python | train |
alex-sherman/unsync | examples/mixing_methods.py | https://github.com/alex-sherman/unsync/blob/a52a0b04980dcaf6dc2fd734aa9d7be9d8960bbe/examples/mixing_methods.py#L25-L32 | async def result_processor(tasks):
"""An async result aggregator that combines all the results
This gets executed in unsync.loop and unsync.thread"""
output = {}
for task in tasks:
num, res = await task
output[num] = res
return output | [
"async",
"def",
"result_processor",
"(",
"tasks",
")",
":",
"output",
"=",
"{",
"}",
"for",
"task",
"in",
"tasks",
":",
"num",
",",
"res",
"=",
"await",
"task",
"output",
"[",
"num",
"]",
"=",
"res",
"return",
"output"
] | An async result aggregator that combines all the results
This gets executed in unsync.loop and unsync.thread | [
"An",
"async",
"result",
"aggregator",
"that",
"combines",
"all",
"the",
"results",
"This",
"gets",
"executed",
"in",
"unsync",
".",
"loop",
"and",
"unsync",
".",
"thread"
] | python | train |
materialsproject/pymatgen | pymatgen/analysis/magnetism/analyzer.py | https://github.com/materialsproject/pymatgen/blob/4ca558cf72f8d5f8a1f21dfdfc0181a971c186da/pymatgen/analysis/magnetism/analyzer.py#L538-L559 | def get_exchange_group_info(self, symprec=1e-2, angle_tolerance=5.0):
"""
Returns the information on the symmetry of the Hamiltonian
describing the exchange energy of the system, taking into
account relative direction of magnetic moments but not their
absolute direction.
This is not strictly accurate (e.g. some/many atoms will
have zero magnetic moments), but defining symmetry this
way is a useful way of keeping track of distinct magnetic
orderings within pymatgen.
:param symprec: same as SpacegroupAnalyzer
:param angle_tolerance: same as SpacegroupAnalyzer
:return: spacegroup_symbol, international_number
"""
structure = self.get_structure_with_spin()
return structure.get_space_group_info(
symprec=symprec, angle_tolerance=angle_tolerance
) | [
"def",
"get_exchange_group_info",
"(",
"self",
",",
"symprec",
"=",
"1e-2",
",",
"angle_tolerance",
"=",
"5.0",
")",
":",
"structure",
"=",
"self",
".",
"get_structure_with_spin",
"(",
")",
"return",
"structure",
".",
"get_space_group_info",
"(",
"symprec",
"=",
"symprec",
",",
"angle_tolerance",
"=",
"angle_tolerance",
")"
] | Returns the information on the symmetry of the Hamiltonian
describing the exchange energy of the system, taking into
account relative direction of magnetic moments but not their
absolute direction.
This is not strictly accurate (e.g. some/many atoms will
have zero magnetic moments), but defining symmetry this
way is a useful way of keeping track of distinct magnetic
orderings within pymatgen.
:param symprec: same as SpacegroupAnalyzer
:param angle_tolerance: same as SpacegroupAnalyzer
:return: spacegroup_symbol, international_number | [
"Returns",
"the",
"information",
"on",
"the",
"symmetry",
"of",
"the",
"Hamiltonian",
"describing",
"the",
"exchange",
"energy",
"of",
"the",
"system",
"taking",
"into",
"account",
"relative",
"direction",
"of",
"magnetic",
"moments",
"but",
"not",
"their",
"absolute",
"direction",
"."
] | python | train |
pantsbuild/pants | contrib/buildgen/src/python/pants/contrib/buildgen/build_file_manipulator.py | https://github.com/pantsbuild/pants/blob/b72e650da0df685824ffdcc71988b8c282d0962d/contrib/buildgen/src/python/pants/contrib/buildgen/build_file_manipulator.py#L400-L405 | def build_file_lines(self):
"""Like `target_lines`, the entire BUILD file's lines after dependency manipulation."""
build_file_lines = self._build_file_source_lines[:]
target_begin, target_end = self._target_interval
build_file_lines[target_begin:target_end] = self.target_lines()
return build_file_lines | [
"def",
"build_file_lines",
"(",
"self",
")",
":",
"build_file_lines",
"=",
"self",
".",
"_build_file_source_lines",
"[",
":",
"]",
"target_begin",
",",
"target_end",
"=",
"self",
".",
"_target_interval",
"build_file_lines",
"[",
"target_begin",
":",
"target_end",
"]",
"=",
"self",
".",
"target_lines",
"(",
")",
"return",
"build_file_lines"
] | Like `target_lines`, the entire BUILD file's lines after dependency manipulation. | [
"Like",
"target_lines",
"the",
"entire",
"BUILD",
"file",
"s",
"lines",
"after",
"dependency",
"manipulation",
"."
] | python | train |
quantumlib/Cirq | dev_tools/incremental_coverage.py | https://github.com/quantumlib/Cirq/blob/0827da80dd7880e5b923eb69407e980ed9bc0bd2/dev_tools/incremental_coverage.py#L48-L105 | def diff_to_new_interesting_lines(unified_diff_lines: List[str]
) -> Dict[int, str]:
"""
Extracts a set of 'interesting' lines out of a GNU unified diff format.
Format:
gnu.org/software/diffutils/manual/html_node/Detailed-Unified.html
@@ from-line-numbers to-line-numbers @@
line-from-either-file
...
@@ start,count start,count @@
line-from-either-file
...
@@ single start,count @@
line-from-either-file
...
Examples:
Deleted line (5 is the deleted LOC, 7 is the guessed would-have-been loc
in the updated file given other changes pushing the line around):
@@ 5 7,0 @@
- content-of-line
Added line:
@@ 5,0 7 @@
+ content-of-line
Modified chunk:
@@ 10,15 11,5 @@
- removed-line
+ added-line
...
Args:
unified_diff_lines: Lines of output from git diff.
Returns:
A dictionary of "touched lines", with key equal to the line number and
value equal to the reason the line was touched. Includes added lines
and lines near changes (including removals).
"""
interesting_lines = dict()
for diff_line in unified_diff_lines:
# Parse the 'new file' range parts of the unified diff.
if not diff_line.startswith('@@ '):
continue
change = diff_line[3:diff_line.index(' @@', 3)]
new = change.split(' ')[1]
start = int(new.split(',')[0])
count = 1 if ',' not in new else int(new.split(',')[1])
# The lines before and after a deletion should still be covered.
if count == 0:
for i in range(start, start + 2):
interesting_lines[i] = 'is near a removal'
else:
for i in range(start, start + count):
interesting_lines[i] = 'is new or changed'
return interesting_lines | [
"def",
"diff_to_new_interesting_lines",
"(",
"unified_diff_lines",
":",
"List",
"[",
"str",
"]",
")",
"->",
"Dict",
"[",
"int",
",",
"str",
"]",
":",
"interesting_lines",
"=",
"dict",
"(",
")",
"for",
"diff_line",
"in",
"unified_diff_lines",
":",
"# Parse the 'new file' range parts of the unified diff.",
"if",
"not",
"diff_line",
".",
"startswith",
"(",
"'@@ '",
")",
":",
"continue",
"change",
"=",
"diff_line",
"[",
"3",
":",
"diff_line",
".",
"index",
"(",
"' @@'",
",",
"3",
")",
"]",
"new",
"=",
"change",
".",
"split",
"(",
"' '",
")",
"[",
"1",
"]",
"start",
"=",
"int",
"(",
"new",
".",
"split",
"(",
"','",
")",
"[",
"0",
"]",
")",
"count",
"=",
"1",
"if",
"','",
"not",
"in",
"new",
"else",
"int",
"(",
"new",
".",
"split",
"(",
"','",
")",
"[",
"1",
"]",
")",
"# The lines before and after a deletion should still be covered.",
"if",
"count",
"==",
"0",
":",
"for",
"i",
"in",
"range",
"(",
"start",
",",
"start",
"+",
"2",
")",
":",
"interesting_lines",
"[",
"i",
"]",
"=",
"'is near a removal'",
"else",
":",
"for",
"i",
"in",
"range",
"(",
"start",
",",
"start",
"+",
"count",
")",
":",
"interesting_lines",
"[",
"i",
"]",
"=",
"'is new or changed'",
"return",
"interesting_lines"
] | Extracts a set of 'interesting' lines out of a GNU unified diff format.
Format:
gnu.org/software/diffutils/manual/html_node/Detailed-Unified.html
@@ from-line-numbers to-line-numbers @@
line-from-either-file
...
@@ start,count start,count @@
line-from-either-file
...
@@ single start,count @@
line-from-either-file
...
Examples:
Deleted line (5 is the deleted LOC, 7 is the guessed would-have-been loc
in the updated file given other changes pushing the line around):
@@ 5 7,0 @@
- content-of-line
Added line:
@@ 5,0 7 @@
+ content-of-line
Modified chunk:
@@ 10,15 11,5 @@
- removed-line
+ added-line
...
Args:
unified_diff_lines: Lines of output from git diff.
Returns:
A dictionary of "touched lines", with key equal to the line number and
value equal to the reason the line was touched. Includes added lines
and lines near changes (including removals). | [
"Extracts",
"a",
"set",
"of",
"interesting",
"lines",
"out",
"of",
"a",
"GNU",
"unified",
"diff",
"format",
"."
] | python | train |
ThreatConnect-Inc/tcex | tcex/tcex_data_filter.py | https://github.com/ThreatConnect-Inc/tcex/blob/dd4d7a1ef723af1561687120191886b9a2fd4b47/tcex/tcex_data_filter.py#L28-L57 | def _build_indexes(self):
"""Build indexes from data for fast filtering of data.
Building indexes of data when possible. This is only supported when dealing with a
List of Dictionaries with String values.
"""
if isinstance(self._data, list):
for d in self._data:
if not isinstance(d, dict):
err = u'Cannot build index for non Dict type.'
self._tcex.log.error(err)
raise RuntimeError(err)
data_obj = DataObj(d)
self._master_index.setdefault(id(data_obj), data_obj)
for key, value in d.items():
# bcs - update this
# if not isinstance(value, (types.StringType, float, int)):
# TODO: This is not Python 3 ready
if not isinstance(value, (float, int, str)):
# For comparison operators the value needs to be a StringType
self._tcex.log.debug(u'Can only build index String Types.')
continue
self._indexes.setdefault(key, {}).setdefault(value, []).append(data_obj)
else:
err = u'Only *List* data type is currently supported'
self._tcex.log.error(err)
raise RuntimeError(err) | [
"def",
"_build_indexes",
"(",
"self",
")",
":",
"if",
"isinstance",
"(",
"self",
".",
"_data",
",",
"list",
")",
":",
"for",
"d",
"in",
"self",
".",
"_data",
":",
"if",
"not",
"isinstance",
"(",
"d",
",",
"dict",
")",
":",
"err",
"=",
"u'Cannot build index for non Dict type.'",
"self",
".",
"_tcex",
".",
"log",
".",
"error",
"(",
"err",
")",
"raise",
"RuntimeError",
"(",
"err",
")",
"data_obj",
"=",
"DataObj",
"(",
"d",
")",
"self",
".",
"_master_index",
".",
"setdefault",
"(",
"id",
"(",
"data_obj",
")",
",",
"data_obj",
")",
"for",
"key",
",",
"value",
"in",
"d",
".",
"items",
"(",
")",
":",
"# bcs - update this",
"# if not isinstance(value, (types.StringType, float, int)):",
"# TODO: This is not Python 3 ready",
"if",
"not",
"isinstance",
"(",
"value",
",",
"(",
"float",
",",
"int",
",",
"str",
")",
")",
":",
"# For comparison operators the value needs to be a StringType",
"self",
".",
"_tcex",
".",
"log",
".",
"debug",
"(",
"u'Can only build index String Types.'",
")",
"continue",
"self",
".",
"_indexes",
".",
"setdefault",
"(",
"key",
",",
"{",
"}",
")",
".",
"setdefault",
"(",
"value",
",",
"[",
"]",
")",
".",
"append",
"(",
"data_obj",
")",
"else",
":",
"err",
"=",
"u'Only *List* data type is currently supported'",
"self",
".",
"_tcex",
".",
"log",
".",
"error",
"(",
"err",
")",
"raise",
"RuntimeError",
"(",
"err",
")"
] | Build indexes from data for fast filtering of data.
Building indexes of data when possible. This is only supported when dealing with a
List of Dictionaries with String values. | [
"Build",
"indexes",
"from",
"data",
"for",
"fast",
"filtering",
"of",
"data",
"."
] | python | train |
saltstack/salt | salt/runners/ddns.py | https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/runners/ddns.py#L187-L229 | def add_host(zone, name, ttl, ip, keyname, keyfile, nameserver, timeout,
port=53, keyalgorithm='hmac-md5'):
'''
Create both A and PTR (reverse) records for a host.
CLI Example:
.. code-block:: bash
salt-run ddns.add_host domain.com my-test-vm 3600 10.20.30.40 my-tsig-key /etc/salt/tsig.keyring 10.0.0.1 5
'''
res = []
if zone in name:
name = name.replace(zone, '').rstrip('.')
fqdn = '{0}.{1}'.format(name, zone)
ret = create(zone, name, ttl, 'A', ip, keyname, keyfile, nameserver,
timeout, port, keyalgorithm)
res.append(ret[fqdn])
parts = ip.split('.')[::-1]
i = len(parts)
popped = []
# Iterate over possible reverse zones
while i > 1:
p = parts.pop(0)
i -= 1
popped.append(p)
zone = '{0}.{1}'.format('.'.join(parts), 'in-addr.arpa.')
name = '.'.join(popped)
rev_fqdn = '{0}.{1}'.format(name, zone)
ret = create(zone, name, ttl, 'PTR', "{0}.".format(fqdn), keyname,
keyfile, nameserver, timeout, port, keyalgorithm)
if "Created" in ret[rev_fqdn]:
res.append(ret[rev_fqdn])
return {fqdn: res}
res.append(ret[rev_fqdn])
return {fqdn: res} | [
"def",
"add_host",
"(",
"zone",
",",
"name",
",",
"ttl",
",",
"ip",
",",
"keyname",
",",
"keyfile",
",",
"nameserver",
",",
"timeout",
",",
"port",
"=",
"53",
",",
"keyalgorithm",
"=",
"'hmac-md5'",
")",
":",
"res",
"=",
"[",
"]",
"if",
"zone",
"in",
"name",
":",
"name",
"=",
"name",
".",
"replace",
"(",
"zone",
",",
"''",
")",
".",
"rstrip",
"(",
"'.'",
")",
"fqdn",
"=",
"'{0}.{1}'",
".",
"format",
"(",
"name",
",",
"zone",
")",
"ret",
"=",
"create",
"(",
"zone",
",",
"name",
",",
"ttl",
",",
"'A'",
",",
"ip",
",",
"keyname",
",",
"keyfile",
",",
"nameserver",
",",
"timeout",
",",
"port",
",",
"keyalgorithm",
")",
"res",
".",
"append",
"(",
"ret",
"[",
"fqdn",
"]",
")",
"parts",
"=",
"ip",
".",
"split",
"(",
"'.'",
")",
"[",
":",
":",
"-",
"1",
"]",
"i",
"=",
"len",
"(",
"parts",
")",
"popped",
"=",
"[",
"]",
"# Iterate over possible reverse zones",
"while",
"i",
">",
"1",
":",
"p",
"=",
"parts",
".",
"pop",
"(",
"0",
")",
"i",
"-=",
"1",
"popped",
".",
"append",
"(",
"p",
")",
"zone",
"=",
"'{0}.{1}'",
".",
"format",
"(",
"'.'",
".",
"join",
"(",
"parts",
")",
",",
"'in-addr.arpa.'",
")",
"name",
"=",
"'.'",
".",
"join",
"(",
"popped",
")",
"rev_fqdn",
"=",
"'{0}.{1}'",
".",
"format",
"(",
"name",
",",
"zone",
")",
"ret",
"=",
"create",
"(",
"zone",
",",
"name",
",",
"ttl",
",",
"'PTR'",
",",
"\"{0}.\"",
".",
"format",
"(",
"fqdn",
")",
",",
"keyname",
",",
"keyfile",
",",
"nameserver",
",",
"timeout",
",",
"port",
",",
"keyalgorithm",
")",
"if",
"\"Created\"",
"in",
"ret",
"[",
"rev_fqdn",
"]",
":",
"res",
".",
"append",
"(",
"ret",
"[",
"rev_fqdn",
"]",
")",
"return",
"{",
"fqdn",
":",
"res",
"}",
"res",
".",
"append",
"(",
"ret",
"[",
"rev_fqdn",
"]",
")",
"return",
"{",
"fqdn",
":",
"res",
"}"
] | Create both A and PTR (reverse) records for a host.
CLI Example:
.. code-block:: bash
salt-run ddns.add_host domain.com my-test-vm 3600 10.20.30.40 my-tsig-key /etc/salt/tsig.keyring 10.0.0.1 5 | [
"Create",
"both",
"A",
"and",
"PTR",
"(",
"reverse",
")",
"records",
"for",
"a",
"host",
"."
] | python | train |
sosy-lab/benchexec | benchexec/tablegenerator/columns.py | https://github.com/sosy-lab/benchexec/blob/44428f67f41384c03aea13e7e25f884764653617/benchexec/tablegenerator/columns.py#L348-L364 | def get_column_type(column, column_values):
"""
Returns the type of the given column based on its row values on the given RunSetResult.
@param column: the column to return the correct ColumnType for
@param column_values: the column values to consider
@return: a tuple of a type object describing the column - the concrete ColumnType is stored in the attribute 'type',
the display unit of the column, which may be None,
the source unit of the column, which may be None,
and the scale factor to convert from the source unit to the display unit.
If no scaling is necessary for conversion, this value is 1.
"""
try:
return _get_column_type_heur(column, column_values)
except util.TableDefinitionError as e:
logging.error("Column type couldn't be determined: {}".format(e.message))
return ColumnType.text, None, None, 1 | [
"def",
"get_column_type",
"(",
"column",
",",
"column_values",
")",
":",
"try",
":",
"return",
"_get_column_type_heur",
"(",
"column",
",",
"column_values",
")",
"except",
"util",
".",
"TableDefinitionError",
"as",
"e",
":",
"logging",
".",
"error",
"(",
"\"Column type couldn't be determined: {}\"",
".",
"format",
"(",
"e",
".",
"message",
")",
")",
"return",
"ColumnType",
".",
"text",
",",
"None",
",",
"None",
",",
"1"
] | Returns the type of the given column based on its row values on the given RunSetResult.
@param column: the column to return the correct ColumnType for
@param column_values: the column values to consider
@return: a tuple of a type object describing the column - the concrete ColumnType is stored in the attribute 'type',
the display unit of the column, which may be None,
the source unit of the column, which may be None,
and the scale factor to convert from the source unit to the display unit.
If no scaling is necessary for conversion, this value is 1. | [
"Returns",
"the",
"type",
"of",
"the",
"given",
"column",
"based",
"on",
"its",
"row",
"values",
"on",
"the",
"given",
"RunSetResult",
"."
] | python | train |
Microsoft/LightGBM | python-package/lightgbm/basic.py | https://github.com/Microsoft/LightGBM/blob/8d2ec69f4f685b0ab1c4624d59ee2d3287bb3147/python-package/lightgbm/basic.py#L234-L252 | def c_int_array(data):
"""Get pointer of int numpy array / list."""
if is_1d_list(data):
data = np.array(data, copy=False)
if is_numpy_1d_array(data):
data = convert_from_sliced_object(data)
assert data.flags.c_contiguous
if data.dtype == np.int32:
ptr_data = data.ctypes.data_as(ctypes.POINTER(ctypes.c_int32))
type_data = C_API_DTYPE_INT32
elif data.dtype == np.int64:
ptr_data = data.ctypes.data_as(ctypes.POINTER(ctypes.c_int64))
type_data = C_API_DTYPE_INT64
else:
raise TypeError("Expected np.int32 or np.int64, met type({})"
.format(data.dtype))
else:
raise TypeError("Unknown type({})".format(type(data).__name__))
return (ptr_data, type_data, data) | [
"def",
"c_int_array",
"(",
"data",
")",
":",
"if",
"is_1d_list",
"(",
"data",
")",
":",
"data",
"=",
"np",
".",
"array",
"(",
"data",
",",
"copy",
"=",
"False",
")",
"if",
"is_numpy_1d_array",
"(",
"data",
")",
":",
"data",
"=",
"convert_from_sliced_object",
"(",
"data",
")",
"assert",
"data",
".",
"flags",
".",
"c_contiguous",
"if",
"data",
".",
"dtype",
"==",
"np",
".",
"int32",
":",
"ptr_data",
"=",
"data",
".",
"ctypes",
".",
"data_as",
"(",
"ctypes",
".",
"POINTER",
"(",
"ctypes",
".",
"c_int32",
")",
")",
"type_data",
"=",
"C_API_DTYPE_INT32",
"elif",
"data",
".",
"dtype",
"==",
"np",
".",
"int64",
":",
"ptr_data",
"=",
"data",
".",
"ctypes",
".",
"data_as",
"(",
"ctypes",
".",
"POINTER",
"(",
"ctypes",
".",
"c_int64",
")",
")",
"type_data",
"=",
"C_API_DTYPE_INT64",
"else",
":",
"raise",
"TypeError",
"(",
"\"Expected np.int32 or np.int64, met type({})\"",
".",
"format",
"(",
"data",
".",
"dtype",
")",
")",
"else",
":",
"raise",
"TypeError",
"(",
"\"Unknown type({})\"",
".",
"format",
"(",
"type",
"(",
"data",
")",
".",
"__name__",
")",
")",
"return",
"(",
"ptr_data",
",",
"type_data",
",",
"data",
")"
] | Get pointer of int numpy array / list. | [
"Get",
"pointer",
"of",
"int",
"numpy",
"array",
"/",
"list",
"."
] | python | train |
datajoint/datajoint-python | datajoint/declare.py | https://github.com/datajoint/datajoint-python/blob/4f29bb154a7ed2b8b64b4d3a9c8be4c16b39621c/datajoint/declare.py#L255-L325 | def compile_attribute(line, in_key, foreign_key_sql):
"""
Convert attribute definition from DataJoint format to SQL
:param line: attribution line
:param in_key: set to True if attribute is in primary key set
:param foreign_key_sql:
:returns: (name, sql, is_external) -- attribute name and sql code for its declaration
"""
try:
match = attribute_parser.parseString(line+'#', parseAll=True)
except pp.ParseException as err:
raise DataJointError('Declaration error in position {pos} in line:\n {line}\n{msg}'.format(
line=err.args[0], pos=err.args[1], msg=err.args[2]))
match['comment'] = match['comment'].rstrip('#')
if 'default' not in match:
match['default'] = ''
match = {k: v.strip() for k, v in match.items()}
match['nullable'] = match['default'].lower() == 'null'
accepted_datatype = r'time|date|year|enum|(var)?char|float|real|double|decimal|numeric|' \
r'(tiny|small|medium|big)?int|bool|' \
r'(tiny|small|medium|long)?blob|external|attach'
if re.match(accepted_datatype, match['type'], re.I) is None:
raise DataJointError('DataJoint does not support datatype "{type}"'.format(**match))
literals = ['CURRENT_TIMESTAMP'] # not to be enclosed in quotes
if match['nullable']:
if in_key:
raise DataJointError('Primary key attributes cannot be nullable in line %s' % line)
match['default'] = 'DEFAULT NULL' # nullable attributes default to null
else:
if match['default']:
quote = match['default'].upper() not in literals and match['default'][0] not in '"\''
match['default'] = ('NOT NULL DEFAULT ' +
('"%s"' if quote else "%s") % match['default'])
else:
match['default'] = 'NOT NULL'
match['comment'] = match['comment'].replace('"', '\\"') # escape double quotes in comment
is_external = match['type'].startswith('external')
is_attachment = match['type'].startswith('attachment')
if not is_external:
sql = ('`{name}` {type} {default}' + (' COMMENT "{comment}"' if match['comment'] else '')).format(**match)
else:
# process externally stored attribute
if in_key:
raise DataJointError('External attributes cannot be primary in:\n%s' % line)
store_name = match['type'].split('-')
if store_name[0] != 'external':
raise DataJointError('External store types must be specified as "external" or "external-<name>"')
store_name = '-'.join(store_name[1:])
if store_name != '' and not store_name.isidentifier():
raise DataJointError(
'The external store name `{type}` is invalid. Make like a python identifier.'.format(**match))
if len(store_name) > STORE_NAME_LENGTH:
raise DataJointError(
'The external store name `{type}` is too long. Must be <={max_len} characters.'.format(
max_len=STORE_NAME_LENGTH, **match))
if not match['default'] in ('DEFAULT NULL', 'NOT NULL'):
raise DataJointError('The only acceptable default value for an external field is null in:\n%s' % line)
if match['type'] not in config:
raise DataJointError('The external store `{type}` is not configured.'.format(**match))
# append external configuration name to the end of the comment
sql = '`{name}` {hash_type} {default} COMMENT ":{type}:{comment}"'.format(
hash_type=HASH_DATA_TYPE, **match)
foreign_key_sql.append(
"FOREIGN KEY (`{name}`) REFERENCES {{external_table}} (`hash`) "
"ON UPDATE RESTRICT ON DELETE RESTRICT".format(**match))
return match['name'], sql, is_external | [
"def",
"compile_attribute",
"(",
"line",
",",
"in_key",
",",
"foreign_key_sql",
")",
":",
"try",
":",
"match",
"=",
"attribute_parser",
".",
"parseString",
"(",
"line",
"+",
"'#'",
",",
"parseAll",
"=",
"True",
")",
"except",
"pp",
".",
"ParseException",
"as",
"err",
":",
"raise",
"DataJointError",
"(",
"'Declaration error in position {pos} in line:\\n {line}\\n{msg}'",
".",
"format",
"(",
"line",
"=",
"err",
".",
"args",
"[",
"0",
"]",
",",
"pos",
"=",
"err",
".",
"args",
"[",
"1",
"]",
",",
"msg",
"=",
"err",
".",
"args",
"[",
"2",
"]",
")",
")",
"match",
"[",
"'comment'",
"]",
"=",
"match",
"[",
"'comment'",
"]",
".",
"rstrip",
"(",
"'#'",
")",
"if",
"'default'",
"not",
"in",
"match",
":",
"match",
"[",
"'default'",
"]",
"=",
"''",
"match",
"=",
"{",
"k",
":",
"v",
".",
"strip",
"(",
")",
"for",
"k",
",",
"v",
"in",
"match",
".",
"items",
"(",
")",
"}",
"match",
"[",
"'nullable'",
"]",
"=",
"match",
"[",
"'default'",
"]",
".",
"lower",
"(",
")",
"==",
"'null'",
"accepted_datatype",
"=",
"r'time|date|year|enum|(var)?char|float|real|double|decimal|numeric|'",
"r'(tiny|small|medium|big)?int|bool|'",
"r'(tiny|small|medium|long)?blob|external|attach'",
"if",
"re",
".",
"match",
"(",
"accepted_datatype",
",",
"match",
"[",
"'type'",
"]",
",",
"re",
".",
"I",
")",
"is",
"None",
":",
"raise",
"DataJointError",
"(",
"'DataJoint does not support datatype \"{type}\"'",
".",
"format",
"(",
"*",
"*",
"match",
")",
")",
"literals",
"=",
"[",
"'CURRENT_TIMESTAMP'",
"]",
"# not to be enclosed in quotes",
"if",
"match",
"[",
"'nullable'",
"]",
":",
"if",
"in_key",
":",
"raise",
"DataJointError",
"(",
"'Primary key attributes cannot be nullable in line %s'",
"%",
"line",
")",
"match",
"[",
"'default'",
"]",
"=",
"'DEFAULT NULL'",
"# nullable attributes default to null",
"else",
":",
"if",
"match",
"[",
"'default'",
"]",
":",
"quote",
"=",
"match",
"[",
"'default'",
"]",
".",
"upper",
"(",
")",
"not",
"in",
"literals",
"and",
"match",
"[",
"'default'",
"]",
"[",
"0",
"]",
"not",
"in",
"'\"\\''",
"match",
"[",
"'default'",
"]",
"=",
"(",
"'NOT NULL DEFAULT '",
"+",
"(",
"'\"%s\"'",
"if",
"quote",
"else",
"\"%s\"",
")",
"%",
"match",
"[",
"'default'",
"]",
")",
"else",
":",
"match",
"[",
"'default'",
"]",
"=",
"'NOT NULL'",
"match",
"[",
"'comment'",
"]",
"=",
"match",
"[",
"'comment'",
"]",
".",
"replace",
"(",
"'\"'",
",",
"'\\\\\"'",
")",
"# escape double quotes in comment",
"is_external",
"=",
"match",
"[",
"'type'",
"]",
".",
"startswith",
"(",
"'external'",
")",
"is_attachment",
"=",
"match",
"[",
"'type'",
"]",
".",
"startswith",
"(",
"'attachment'",
")",
"if",
"not",
"is_external",
":",
"sql",
"=",
"(",
"'`{name}` {type} {default}'",
"+",
"(",
"' COMMENT \"{comment}\"'",
"if",
"match",
"[",
"'comment'",
"]",
"else",
"''",
")",
")",
".",
"format",
"(",
"*",
"*",
"match",
")",
"else",
":",
"# process externally stored attribute",
"if",
"in_key",
":",
"raise",
"DataJointError",
"(",
"'External attributes cannot be primary in:\\n%s'",
"%",
"line",
")",
"store_name",
"=",
"match",
"[",
"'type'",
"]",
".",
"split",
"(",
"'-'",
")",
"if",
"store_name",
"[",
"0",
"]",
"!=",
"'external'",
":",
"raise",
"DataJointError",
"(",
"'External store types must be specified as \"external\" or \"external-<name>\"'",
")",
"store_name",
"=",
"'-'",
".",
"join",
"(",
"store_name",
"[",
"1",
":",
"]",
")",
"if",
"store_name",
"!=",
"''",
"and",
"not",
"store_name",
".",
"isidentifier",
"(",
")",
":",
"raise",
"DataJointError",
"(",
"'The external store name `{type}` is invalid. Make like a python identifier.'",
".",
"format",
"(",
"*",
"*",
"match",
")",
")",
"if",
"len",
"(",
"store_name",
")",
">",
"STORE_NAME_LENGTH",
":",
"raise",
"DataJointError",
"(",
"'The external store name `{type}` is too long. Must be <={max_len} characters.'",
".",
"format",
"(",
"max_len",
"=",
"STORE_NAME_LENGTH",
",",
"*",
"*",
"match",
")",
")",
"if",
"not",
"match",
"[",
"'default'",
"]",
"in",
"(",
"'DEFAULT NULL'",
",",
"'NOT NULL'",
")",
":",
"raise",
"DataJointError",
"(",
"'The only acceptable default value for an external field is null in:\\n%s'",
"%",
"line",
")",
"if",
"match",
"[",
"'type'",
"]",
"not",
"in",
"config",
":",
"raise",
"DataJointError",
"(",
"'The external store `{type}` is not configured.'",
".",
"format",
"(",
"*",
"*",
"match",
")",
")",
"# append external configuration name to the end of the comment",
"sql",
"=",
"'`{name}` {hash_type} {default} COMMENT \":{type}:{comment}\"'",
".",
"format",
"(",
"hash_type",
"=",
"HASH_DATA_TYPE",
",",
"*",
"*",
"match",
")",
"foreign_key_sql",
".",
"append",
"(",
"\"FOREIGN KEY (`{name}`) REFERENCES {{external_table}} (`hash`) \"",
"\"ON UPDATE RESTRICT ON DELETE RESTRICT\"",
".",
"format",
"(",
"*",
"*",
"match",
")",
")",
"return",
"match",
"[",
"'name'",
"]",
",",
"sql",
",",
"is_external"
] | Convert attribute definition from DataJoint format to SQL
:param line: attribution line
:param in_key: set to True if attribute is in primary key set
:param foreign_key_sql:
:returns: (name, sql, is_external) -- attribute name and sql code for its declaration | [
"Convert",
"attribute",
"definition",
"from",
"DataJoint",
"format",
"to",
"SQL"
] | python | train |
bloomberg/bqplot | bqplot/pyplot.py | https://github.com/bloomberg/bqplot/blob/8eb8b163abe9ee6306f6918067e2f36c1caef2ef/bqplot/pyplot.py#L871-L897 | def bin(sample, options={}, **kwargs):
"""Draw a histogram in the current context figure.
Parameters
----------
sample: numpy.ndarray, 1d
The sample for which the histogram must be generated.
options: dict (default: {})
Options for the scales to be created. If a scale labeled 'x'
is required for that mark, options['x'] contains optional keyword
arguments for the constructor of the corresponding scale type.
axes_options: dict (default: {})
Options for the axes to be created. If an axis labeled 'x' is
required for that mark, axes_options['x'] contains optional
keyword arguments for the constructor of the corresponding axis type.
"""
kwargs['sample'] = sample
scales = kwargs.pop('scales', {})
for xy in ['x', 'y']:
if xy not in scales:
dimension = _get_attribute_dimension(xy, Bars)
if dimension in _context['scales']:
scales[xy] = _context['scales'][dimension]
else:
scales[xy] = LinearScale(**options.get(xy, {}))
_context['scales'][dimension] = scales[xy]
kwargs['scales'] = scales
return _draw_mark(Bins, options=options, **kwargs) | [
"def",
"bin",
"(",
"sample",
",",
"options",
"=",
"{",
"}",
",",
"*",
"*",
"kwargs",
")",
":",
"kwargs",
"[",
"'sample'",
"]",
"=",
"sample",
"scales",
"=",
"kwargs",
".",
"pop",
"(",
"'scales'",
",",
"{",
"}",
")",
"for",
"xy",
"in",
"[",
"'x'",
",",
"'y'",
"]",
":",
"if",
"xy",
"not",
"in",
"scales",
":",
"dimension",
"=",
"_get_attribute_dimension",
"(",
"xy",
",",
"Bars",
")",
"if",
"dimension",
"in",
"_context",
"[",
"'scales'",
"]",
":",
"scales",
"[",
"xy",
"]",
"=",
"_context",
"[",
"'scales'",
"]",
"[",
"dimension",
"]",
"else",
":",
"scales",
"[",
"xy",
"]",
"=",
"LinearScale",
"(",
"*",
"*",
"options",
".",
"get",
"(",
"xy",
",",
"{",
"}",
")",
")",
"_context",
"[",
"'scales'",
"]",
"[",
"dimension",
"]",
"=",
"scales",
"[",
"xy",
"]",
"kwargs",
"[",
"'scales'",
"]",
"=",
"scales",
"return",
"_draw_mark",
"(",
"Bins",
",",
"options",
"=",
"options",
",",
"*",
"*",
"kwargs",
")"
] | Draw a histogram in the current context figure.
Parameters
----------
sample: numpy.ndarray, 1d
The sample for which the histogram must be generated.
options: dict (default: {})
Options for the scales to be created. If a scale labeled 'x'
is required for that mark, options['x'] contains optional keyword
arguments for the constructor of the corresponding scale type.
axes_options: dict (default: {})
Options for the axes to be created. If an axis labeled 'x' is
required for that mark, axes_options['x'] contains optional
keyword arguments for the constructor of the corresponding axis type. | [
"Draw",
"a",
"histogram",
"in",
"the",
"current",
"context",
"figure",
".",
"Parameters",
"----------",
"sample",
":",
"numpy",
".",
"ndarray",
"1d",
"The",
"sample",
"for",
"which",
"the",
"histogram",
"must",
"be",
"generated",
".",
"options",
":",
"dict",
"(",
"default",
":",
"{}",
")",
"Options",
"for",
"the",
"scales",
"to",
"be",
"created",
".",
"If",
"a",
"scale",
"labeled",
"x",
"is",
"required",
"for",
"that",
"mark",
"options",
"[",
"x",
"]",
"contains",
"optional",
"keyword",
"arguments",
"for",
"the",
"constructor",
"of",
"the",
"corresponding",
"scale",
"type",
".",
"axes_options",
":",
"dict",
"(",
"default",
":",
"{}",
")",
"Options",
"for",
"the",
"axes",
"to",
"be",
"created",
".",
"If",
"an",
"axis",
"labeled",
"x",
"is",
"required",
"for",
"that",
"mark",
"axes_options",
"[",
"x",
"]",
"contains",
"optional",
"keyword",
"arguments",
"for",
"the",
"constructor",
"of",
"the",
"corresponding",
"axis",
"type",
"."
] | python | train |
ska-sa/katcp-python | katcp/fake_clients.py | https://github.com/ska-sa/katcp-python/blob/9127c826a1d030c53b84d0e95743e20e5c5ea153/katcp/fake_clients.py#L16-L59 | def fake_KATCP_client_resource_factory(
KATCPClientResourceClass, fake_options, resource_spec, *args, **kwargs):
"""Create a fake KATCPClientResource-like class and a fake-manager
Parameters
----------
KATCPClientResourceClass : class
Subclass of :class:`katcp.resource_client.KATCPClientResource`
fake_options : dict
Options for the faking process. Keys:
allow_any_request : bool, default False
(TODO not implemented behaves as if it were True)
resource_spec, *args, **kwargs : passed to KATCPClientResourceClass
A subclass of the passed-in KATCPClientResourceClass is created that replaces the
internal InspecingClient instances with fakes using fake_inspecting_client_factory()
based on the InspectingClient class used by KATCPClientResourceClass.
Returns
-------
(fake_katcp_client_resource, fake_katcp_client_resource_manager):
fake_katcp_client_resource : instance of faked subclass of KATCPClientResourceClass
fake_katcp_client_resource_manager : :class:`FakeKATCPClientResourceManager` instance
Bound to the `fake_katcp_client_resource` instance.
"""
# TODO Implement allow_any_request functionality. When True, any unknown request (even
# if there is no fake implementation) should succeed
allow_any_request = fake_options.get('allow_any_request', False)
class FakeKATCPClientResource(KATCPClientResourceClass):
def inspecting_client_factory(self, host, port, ioloop_set_to):
real_instance = (super(FakeKATCPClientResource, self)
.inspecting_client_factory(host, port, ioloop_set_to) )
fic, fic_manager = fake_inspecting_client_factory(
real_instance.__class__, fake_options, host, port,
ioloop=ioloop_set_to, auto_reconnect=self.auto_reconnect)
self.fake_inspecting_client_manager = fic_manager
return fic
fkcr = FakeKATCPClientResource(resource_spec, *args, **kwargs)
fkcr_manager = FakeKATCPClientResourceManager(fkcr)
return (fkcr, fkcr_manager) | [
"def",
"fake_KATCP_client_resource_factory",
"(",
"KATCPClientResourceClass",
",",
"fake_options",
",",
"resource_spec",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"# TODO Implement allow_any_request functionality. When True, any unknown request (even",
"# if there is no fake implementation) should succeed",
"allow_any_request",
"=",
"fake_options",
".",
"get",
"(",
"'allow_any_request'",
",",
"False",
")",
"class",
"FakeKATCPClientResource",
"(",
"KATCPClientResourceClass",
")",
":",
"def",
"inspecting_client_factory",
"(",
"self",
",",
"host",
",",
"port",
",",
"ioloop_set_to",
")",
":",
"real_instance",
"=",
"(",
"super",
"(",
"FakeKATCPClientResource",
",",
"self",
")",
".",
"inspecting_client_factory",
"(",
"host",
",",
"port",
",",
"ioloop_set_to",
")",
")",
"fic",
",",
"fic_manager",
"=",
"fake_inspecting_client_factory",
"(",
"real_instance",
".",
"__class__",
",",
"fake_options",
",",
"host",
",",
"port",
",",
"ioloop",
"=",
"ioloop_set_to",
",",
"auto_reconnect",
"=",
"self",
".",
"auto_reconnect",
")",
"self",
".",
"fake_inspecting_client_manager",
"=",
"fic_manager",
"return",
"fic",
"fkcr",
"=",
"FakeKATCPClientResource",
"(",
"resource_spec",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
"fkcr_manager",
"=",
"FakeKATCPClientResourceManager",
"(",
"fkcr",
")",
"return",
"(",
"fkcr",
",",
"fkcr_manager",
")"
] | Create a fake KATCPClientResource-like class and a fake-manager
Parameters
----------
KATCPClientResourceClass : class
Subclass of :class:`katcp.resource_client.KATCPClientResource`
fake_options : dict
Options for the faking process. Keys:
allow_any_request : bool, default False
(TODO not implemented behaves as if it were True)
resource_spec, *args, **kwargs : passed to KATCPClientResourceClass
A subclass of the passed-in KATCPClientResourceClass is created that replaces the
internal InspecingClient instances with fakes using fake_inspecting_client_factory()
based on the InspectingClient class used by KATCPClientResourceClass.
Returns
-------
(fake_katcp_client_resource, fake_katcp_client_resource_manager):
fake_katcp_client_resource : instance of faked subclass of KATCPClientResourceClass
fake_katcp_client_resource_manager : :class:`FakeKATCPClientResourceManager` instance
Bound to the `fake_katcp_client_resource` instance. | [
"Create",
"a",
"fake",
"KATCPClientResource",
"-",
"like",
"class",
"and",
"a",
"fake",
"-",
"manager"
] | python | train |
linkedin/luminol | src/luminol/correlator.py | https://github.com/linkedin/luminol/blob/42e4ab969b774ff98f902d064cb041556017f635/src/luminol/correlator.py#L72-L90 | def _get_algorithm_and_params(self, algorithm_name, algorithm_params):
"""
Get the specific algorithm and merge the algorithm params.
:param str algorithm_name: name of the algorithm to use.
:param dict algorithm_params: additional params for the specific algorithm.
"""
algorithm_name = algorithm_name or CORRELATOR_ALGORITHM
try:
self.algorithm = correlator_algorithms[algorithm_name]
except KeyError:
raise exceptions.AlgorithmNotFound('luminol.Correlator: ' + str(algorithm_name) + ' not found.')
# Merge parameters.
if algorithm_params:
if not isinstance(algorithm_params, dict):
raise exceptions.InvalidDataFormat('luminol.Correlator: algorithm_params passed is not a dictionary.')
else:
# self.algorithm_params = dict(algorithm_params.items() + self.algorithm_params.items())
self.algorithm_params = self.algorithm_params.copy()
self.algorithm_params.update(algorithm_params) | [
"def",
"_get_algorithm_and_params",
"(",
"self",
",",
"algorithm_name",
",",
"algorithm_params",
")",
":",
"algorithm_name",
"=",
"algorithm_name",
"or",
"CORRELATOR_ALGORITHM",
"try",
":",
"self",
".",
"algorithm",
"=",
"correlator_algorithms",
"[",
"algorithm_name",
"]",
"except",
"KeyError",
":",
"raise",
"exceptions",
".",
"AlgorithmNotFound",
"(",
"'luminol.Correlator: '",
"+",
"str",
"(",
"algorithm_name",
")",
"+",
"' not found.'",
")",
"# Merge parameters.",
"if",
"algorithm_params",
":",
"if",
"not",
"isinstance",
"(",
"algorithm_params",
",",
"dict",
")",
":",
"raise",
"exceptions",
".",
"InvalidDataFormat",
"(",
"'luminol.Correlator: algorithm_params passed is not a dictionary.'",
")",
"else",
":",
"# self.algorithm_params = dict(algorithm_params.items() + self.algorithm_params.items())",
"self",
".",
"algorithm_params",
"=",
"self",
".",
"algorithm_params",
".",
"copy",
"(",
")",
"self",
".",
"algorithm_params",
".",
"update",
"(",
"algorithm_params",
")"
] | Get the specific algorithm and merge the algorithm params.
:param str algorithm_name: name of the algorithm to use.
:param dict algorithm_params: additional params for the specific algorithm. | [
"Get",
"the",
"specific",
"algorithm",
"and",
"merge",
"the",
"algorithm",
"params",
".",
":",
"param",
"str",
"algorithm",
":",
"name",
"of",
"the",
"algorithm",
"to",
"use",
".",
":",
"param",
"dict",
"algorithm_params",
":",
"additional",
"params",
"for",
"the",
"specific",
"algorithm",
"."
] | python | train |
summa-tx/riemann | riemann/encoding/base58.py | https://github.com/summa-tx/riemann/blob/04ae336dfd4007ceaed748daadc91cc32fa278ec/riemann/encoding/base58.py#L42-L55 | def decode(s, checksum=True):
"""Convert base58 to binary using BASE58_ALPHABET."""
v, prefix = to_long(
BASE58_BASE, lambda c: BASE58_LOOKUP[c], s.encode("utf8"))
data = from_long(v, prefix, 256, lambda x: x)
if checksum:
data, the_hash = data[:-4], data[-4:]
if utils.hash256(data)[:4] == the_hash:
return data
raise ValueError("hashed base58 has bad checksum %s" % s)
return data | [
"def",
"decode",
"(",
"s",
",",
"checksum",
"=",
"True",
")",
":",
"v",
",",
"prefix",
"=",
"to_long",
"(",
"BASE58_BASE",
",",
"lambda",
"c",
":",
"BASE58_LOOKUP",
"[",
"c",
"]",
",",
"s",
".",
"encode",
"(",
"\"utf8\"",
")",
")",
"data",
"=",
"from_long",
"(",
"v",
",",
"prefix",
",",
"256",
",",
"lambda",
"x",
":",
"x",
")",
"if",
"checksum",
":",
"data",
",",
"the_hash",
"=",
"data",
"[",
":",
"-",
"4",
"]",
",",
"data",
"[",
"-",
"4",
":",
"]",
"if",
"utils",
".",
"hash256",
"(",
"data",
")",
"[",
":",
"4",
"]",
"==",
"the_hash",
":",
"return",
"data",
"raise",
"ValueError",
"(",
"\"hashed base58 has bad checksum %s\"",
"%",
"s",
")",
"return",
"data"
] | Convert base58 to binary using BASE58_ALPHABET. | [
"Convert",
"base58",
"to",
"binary",
"using",
"BASE58_ALPHABET",
"."
] | python | train |
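The checksum branch above is standard base58check: the trailing four bytes must equal the first four bytes of a double SHA-256 of the payload (what `utils.hash256` computes). A self-contained sketch of that verification using only the standard library; the alphabet is the conventional Bitcoin one, assumed to match this module's BASE58_ALPHABET.

    import hashlib

    ALPHABET = '123456789ABCDEFGHJKLMNPQRSTUVWXYZabcdefghijkmnopqrstuvwxyz'

    def b58check_decode(s):
        # Interpret the base58 string as one big integer.
        n = 0
        for ch in s:
            n = n * 58 + ALPHABET.index(ch)
        body = n.to_bytes((n.bit_length() + 7) // 8, 'big')
        # Each leading '1' in the string encodes a leading zero byte.
        pad = len(s) - len(s.lstrip('1'))
        raw = b'\x00' * pad + body
        data, checksum = raw[:-4], raw[-4:]
        digest = hashlib.sha256(hashlib.sha256(data).digest()).digest()  # hash256
        if digest[:4] != checksum:
            raise ValueError('hashed base58 has bad checksum %s' % s)
        return data

    payload = b58check_decode('1A1zP1eP5QGefi2DMPTfTL5SLmv7DivfNa')  # genesis address
    assert payload[0] == 0x00   # version byte for a P2PKH mainnet address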
ANTsX/ANTsPy | ants/learn/decomposition.py | https://github.com/ANTsX/ANTsPy/blob/638020af2cdfc5ff4bdb9809ffe67aa505727a3b/ants/learn/decomposition.py#L19-L248 | def sparse_decom2(inmatrix,
inmask=(None, None),
sparseness=(0.01, 0.01),
nvecs=3,
its=20,
cthresh=(0,0),
statdir=None,
perms=0,
uselong=0,
z=0,
smooth=0,
robust=0,
mycoption=0,
initialization_list=[],
initialization_list2=[],
ell1=10,
prior_weight=0,
verbose=False,
rejector=0,
max_based=False,
version=1):
"""
Decomposes two matrices into paired sparse eigenvectors to
maximize canonical correlation - aka Sparse CCA.
Note: we do not scale the matrices internally. We leave
scaling choices to the user.
ANTsR function: `sparseDecom2`
Arguments
---------
inmatrix : 2-tuple of ndarrays
input as inmatrix=(mat1,mat2). n by p input matrix and n by q input matrix , spatial variable lies along columns.
inmask : 2-tuple of ANTsImage types (optional - one or both)
optional pair of image masks
sparseness : tuple
a pair of float values, e.g. (0.01, 0.1), enforces an unsigned 99 percent and 90 percent sparse solution for each respective view
nvecs : integer
number of eigenvector pairs
its : integer
number of iterations, 10 or 20 usually sufficient
cthresh : 2-tuple
cluster threshold pair
statdir : string (optional)
temporary directory if you want to look at full output
perms : integer
number of permutations. Setting permutations greater than 0 will estimate significance per vector empirically. For small datasets, these may be conservative. P-values depend on how one scales the input matrices.
uselong : boolean
enforce solutions of both views to be the same - requires matrices to be the same size
z : float
subject space (low-dimensional space) sparseness value
smooth : float
smooth the data (only available when mask is used)
robust : boolean
rank transform input matrices
mycoption : integer
enforce 1 - spatial orthogonality, 2 - low-dimensional orthogonality or 0 - both
initialization_list : list
initialization for first view
initialization_list2 : list
initialization for 2nd view
ell1 : float
gradient descent parameter, if negative then l0 otherwise use l1
prior_weight : scalar
Scalar value weight on prior between 0 (prior is weak) and 1 (prior is strong). Only engaged if initialization is used
verbose : boolean
activates verbose output to screen
rejector : scalar
rejects small correlation solutions
max_based : boolean
whether to choose max-based thresholding
Returns
-------
dict w/ following key/value pairs:
`projections` : ndarray
X projections
`projections2` : ndarray
Y projections
`eig1` : ndarray
X components
`eig2` : ndarray
Y components
`summary` : pd.DataFrame
first column is canonical correlations,
second column is p-values (these are `None` unless perms > 0)
Example
-------
>>> import numpy as np
>>> import ants
>>> mat = np.random.randn(20, 100)
>>> mat2 = np.random.randn(20, 90)
>>> mydecom = ants.sparse_decom2(inmatrix = (mat,mat2),
sparseness=(0.1,0.3), nvecs=3,
its=3, perms=0)
"""
if inmatrix[0].shape[0] != inmatrix[1].shape[0]:
raise ValueError('Matrices must have same number of rows (samples)')
idim = 3
if isinstance(inmask[0], iio.ANTsImage):
maskx = inmask[0].clone('float')
idim = inmask[0].dimension
hasmaskx = 1
elif isinstance(inmask[0], np.ndarray):
maskx = core.from_numpy(inmask[0], pixeltype='float')
idim = inmask[0].ndim
hasmaskx = 1
else:
maskx = core.make_image([1]*idim, pixeltype='float')
hasmaskx = -1
if isinstance(inmask[1], iio.ANTsImage):
masky = inmask[1].clone('float')
idim = inmask[1].dimension
hasmasky = 1
elif isinstance(inmask[1], np.ndarray):
masky = core.from_numpy(inmask[1], pixeltype='float')
idim = inmask[1].ndim
hasmasky = 1
else:
masky = core.make_image([1]*idim, pixeltype='float')
hasmasky = -1
inmask = [maskx, masky]
if robust > 0:
raise NotImplementedError('robust > 0 not currently implemented')
else:
input_matrices = inmatrix
if idim == 2:
if version == 1:
sccancpp_fn = utils.get_lib_fn('sccanCpp2D')
elif version == 2:
sccancpp_fn = utils.get_lib_fn('sccanCpp2DV2')
input_matrices = (input_matrices[0].tolist(), input_matrices[1].tolist())
elif idim ==3:
if version == 1:
sccancpp_fn = utils.get_lib_fn('sccanCpp3D')
elif version == 2:
sccancpp_fn = utils.get_lib_fn('sccanCpp3DV2')
input_matrices = (input_matrices[0].tolist(), input_matrices[1].tolist())
outval = sccancpp_fn(input_matrices[0], input_matrices[1],
inmask[0].pointer, inmask[1].pointer,
hasmaskx, hasmasky,
sparseness[0], sparseness[1],
nvecs, its,
cthresh[0], cthresh[1],
z, smooth,
initialization_list, initialization_list2,
ell1, verbose,
prior_weight, mycoption, max_based)
p1 = np.dot(input_matrices[0], outval['eig1'].T)
p2 = np.dot(input_matrices[1], outval['eig2'].T)
outcorrs = np.array([pearsonr(p1[:,i],p2[:,i])[0] for i in range(p1.shape[1])])
if prior_weight < 1e-10:
myord = np.argsort(np.abs(outcorrs))[::-1]
outcorrs = outcorrs[myord]
p1 = p1[:, myord]
p2 = p2[:, myord]
outval['eig1'] = outval['eig1'][myord,:]
outval['eig2'] = outval['eig2'][myord,:]
cca_summary = np.vstack((outcorrs,[None]*len(outcorrs))).T
if perms > 0:
cca_summary[:,1] = 0
nsubs = input_matrices[0].shape[0]
for permer in range(perms):
m1 = input_matrices[0][np.random.permutation(nsubs),:]
m2 = input_matrices[1][np.random.permutation(nsubs),:]
outvalperm = sccancpp_fn(m1, m2,
inmask[0].pointer, inmask[1].pointer,
hasmaskx, hasmasky,
sparseness[0], sparseness[1],
nvecs, its,
cthresh[0], cthresh[1],
z, smooth,
initialization_list, initialization_list2,
ell1, verbose,
prior_weight, mycoption, max_based)
p1perm = np.dot(m1, outvalperm['eig1'].T)
p2perm = np.dot(m2, outvalperm['eig2'].T)
outcorrsperm = np.array([pearsonr(p1perm[:,i],p2perm[:,i])[0] for i in range(p1perm.shape[1])])
if prior_weight < 1e-10:
myord = np.argsort(np.abs(outcorrsperm))[::-1]
outcorrsperm = outcorrsperm[myord]
counter = np.abs(cca_summary[:,0]) < np.abs(outcorrsperm)
counter = counter.astype('int')
cca_summary[:,1] = cca_summary[:,1] + counter
cca_summary[:,1] = cca_summary[:,1] / float(perms)
return {'projections': p1,
'projections2': p2,
'eig1': outval['eig1'].T,
'eig2': outval['eig2'].T,
'summary': pd.DataFrame(cca_summary,columns=['corrs','pvalues'])} | [
"def",
"sparse_decom2",
"(",
"inmatrix",
",",
"inmask",
"=",
"(",
"None",
",",
"None",
")",
",",
"sparseness",
"=",
"(",
"0.01",
",",
"0.01",
")",
",",
"nvecs",
"=",
"3",
",",
"its",
"=",
"20",
",",
"cthresh",
"=",
"(",
"0",
",",
"0",
")",
",",
"statdir",
"=",
"None",
",",
"perms",
"=",
"0",
",",
"uselong",
"=",
"0",
",",
"z",
"=",
"0",
",",
"smooth",
"=",
"0",
",",
"robust",
"=",
"0",
",",
"mycoption",
"=",
"0",
",",
"initialization_list",
"=",
"[",
"]",
",",
"initialization_list2",
"=",
"[",
"]",
",",
"ell1",
"=",
"10",
",",
"prior_weight",
"=",
"0",
",",
"verbose",
"=",
"False",
",",
"rejector",
"=",
"0",
",",
"max_based",
"=",
"False",
",",
"version",
"=",
"1",
")",
":",
"if",
"inmatrix",
"[",
"0",
"]",
".",
"shape",
"[",
"0",
"]",
"!=",
"inmatrix",
"[",
"1",
"]",
".",
"shape",
"[",
"0",
"]",
":",
"raise",
"ValueError",
"(",
"'Matrices must have same number of rows (samples)'",
")",
"idim",
"=",
"3",
"if",
"isinstance",
"(",
"inmask",
"[",
"0",
"]",
",",
"iio",
".",
"ANTsImage",
")",
":",
"maskx",
"=",
"inmask",
"[",
"0",
"]",
".",
"clone",
"(",
"'float'",
")",
"idim",
"=",
"inmask",
"[",
"0",
"]",
".",
"dimension",
"hasmaskx",
"=",
"1",
"elif",
"isinstance",
"(",
"inmask",
"[",
"0",
"]",
",",
"np",
".",
"ndarray",
")",
":",
"maskx",
"=",
"core",
".",
"from_numpy",
"(",
"inmask",
"[",
"0",
"]",
",",
"pixeltype",
"=",
"'float'",
")",
"idim",
"=",
"inmask",
"[",
"0",
"]",
".",
"ndim",
"hasmaskx",
"=",
"1",
"else",
":",
"maskx",
"=",
"core",
".",
"make_image",
"(",
"[",
"1",
"]",
"*",
"idim",
",",
"pixeltype",
"=",
"'float'",
")",
"hasmaskx",
"=",
"-",
"1",
"if",
"isinstance",
"(",
"inmask",
"[",
"1",
"]",
",",
"iio",
".",
"ANTsImage",
")",
":",
"masky",
"=",
"inmask",
"[",
"1",
"]",
".",
"clone",
"(",
"'float'",
")",
"idim",
"=",
"inmask",
"[",
"1",
"]",
".",
"dimension",
"hasmasky",
"=",
"1",
"elif",
"isinstance",
"(",
"inmask",
"[",
"1",
"]",
",",
"np",
".",
"ndarray",
")",
":",
"masky",
"=",
"core",
".",
"from_numpy",
"(",
"inmask",
"[",
"1",
"]",
",",
"pixeltype",
"=",
"'float'",
")",
"idim",
"=",
"inmask",
"[",
"1",
"]",
".",
"ndim",
"hasmasky",
"=",
"1",
"else",
":",
"masky",
"=",
"core",
".",
"make_image",
"(",
"[",
"1",
"]",
"*",
"idim",
",",
"pixeltype",
"=",
"'float'",
")",
"hasmasky",
"=",
"-",
"1",
"inmask",
"=",
"[",
"maskx",
",",
"masky",
"]",
"if",
"robust",
">",
"0",
":",
"raise",
"NotImplementedError",
"(",
"'robust > 0 not currently implemented'",
")",
"else",
":",
"input_matrices",
"=",
"inmatrix",
"if",
"idim",
"==",
"2",
":",
"if",
"version",
"==",
"1",
":",
"sccancpp_fn",
"=",
"utils",
".",
"get_lib_fn",
"(",
"'sccanCpp2D'",
")",
"elif",
"version",
"==",
"2",
":",
"sccancpp_fn",
"=",
"utils",
".",
"get_lib_fn",
"(",
"'sccanCpp2DV2'",
")",
"input_matrices",
"=",
"(",
"input_matrices",
"[",
"0",
"]",
".",
"tolist",
"(",
")",
",",
"input_matrices",
"[",
"1",
"]",
".",
"tolist",
"(",
")",
")",
"elif",
"idim",
"==",
"3",
":",
"if",
"version",
"==",
"1",
":",
"sccancpp_fn",
"=",
"utils",
".",
"get_lib_fn",
"(",
"'sccanCpp3D'",
")",
"elif",
"version",
"==",
"2",
":",
"sccancpp_fn",
"=",
"utils",
".",
"get_lib_fn",
"(",
"'sccanCpp3DV2'",
")",
"input_matrices",
"=",
"(",
"input_matrices",
"[",
"0",
"]",
".",
"tolist",
"(",
")",
",",
"input_matrices",
"[",
"1",
"]",
".",
"tolist",
"(",
")",
")",
"outval",
"=",
"sccancpp_fn",
"(",
"input_matrices",
"[",
"0",
"]",
",",
"input_matrices",
"[",
"1",
"]",
",",
"inmask",
"[",
"0",
"]",
".",
"pointer",
",",
"inmask",
"[",
"1",
"]",
".",
"pointer",
",",
"hasmaskx",
",",
"hasmasky",
",",
"sparseness",
"[",
"0",
"]",
",",
"sparseness",
"[",
"1",
"]",
",",
"nvecs",
",",
"its",
",",
"cthresh",
"[",
"0",
"]",
",",
"cthresh",
"[",
"1",
"]",
",",
"z",
",",
"smooth",
",",
"initialization_list",
",",
"initialization_list2",
",",
"ell1",
",",
"verbose",
",",
"prior_weight",
",",
"mycoption",
",",
"max_based",
")",
"p1",
"=",
"np",
".",
"dot",
"(",
"input_matrices",
"[",
"0",
"]",
",",
"outval",
"[",
"'eig1'",
"]",
".",
"T",
")",
"p2",
"=",
"np",
".",
"dot",
"(",
"input_matrices",
"[",
"1",
"]",
",",
"outval",
"[",
"'eig2'",
"]",
".",
"T",
")",
"outcorrs",
"=",
"np",
".",
"array",
"(",
"[",
"pearsonr",
"(",
"p1",
"[",
":",
",",
"i",
"]",
",",
"p2",
"[",
":",
",",
"i",
"]",
")",
"[",
"0",
"]",
"for",
"i",
"in",
"range",
"(",
"p1",
".",
"shape",
"[",
"1",
"]",
")",
"]",
")",
"if",
"prior_weight",
"<",
"1e-10",
":",
"myord",
"=",
"np",
".",
"argsort",
"(",
"np",
".",
"abs",
"(",
"outcorrs",
")",
")",
"[",
":",
":",
"-",
"1",
"]",
"outcorrs",
"=",
"outcorrs",
"[",
"myord",
"]",
"p1",
"=",
"p1",
"[",
":",
",",
"myord",
"]",
"p2",
"=",
"p2",
"[",
":",
",",
"myord",
"]",
"outval",
"[",
"'eig1'",
"]",
"=",
"outval",
"[",
"'eig1'",
"]",
"[",
"myord",
",",
":",
"]",
"outval",
"[",
"'eig2'",
"]",
"=",
"outval",
"[",
"'eig2'",
"]",
"[",
"myord",
",",
":",
"]",
"cca_summary",
"=",
"np",
".",
"vstack",
"(",
"(",
"outcorrs",
",",
"[",
"None",
"]",
"*",
"len",
"(",
"outcorrs",
")",
")",
")",
".",
"T",
"if",
"perms",
">",
"0",
":",
"cca_summary",
"[",
":",
",",
"1",
"]",
"=",
"0",
"nsubs",
"=",
"input_matrices",
"[",
"0",
"]",
".",
"shape",
"[",
"0",
"]",
"for",
"permer",
"in",
"range",
"(",
"perms",
")",
":",
"m1",
"=",
"input_matrices",
"[",
"0",
"]",
"[",
"np",
".",
"random",
".",
"permutation",
"(",
"nsubs",
")",
",",
":",
"]",
"m2",
"=",
"input_matrices",
"[",
"1",
"]",
"[",
"np",
".",
"random",
".",
"permutation",
"(",
"nsubs",
")",
",",
":",
"]",
"outvalperm",
"=",
"sccancpp_fn",
"(",
"m1",
",",
"m2",
",",
"inmask",
"[",
"0",
"]",
".",
"pointer",
",",
"inmask",
"[",
"1",
"]",
".",
"pointer",
",",
"hasmaskx",
",",
"hasmasky",
",",
"sparseness",
"[",
"0",
"]",
",",
"sparseness",
"[",
"1",
"]",
",",
"nvecs",
",",
"its",
",",
"cthresh",
"[",
"0",
"]",
",",
"cthresh",
"[",
"1",
"]",
",",
"z",
",",
"smooth",
",",
"initialization_list",
",",
"initialization_list2",
",",
"ell1",
",",
"verbose",
",",
"prior_weight",
",",
"mycoption",
",",
"max_based",
")",
"p1perm",
"=",
"np",
".",
"dot",
"(",
"m1",
",",
"outvalperm",
"[",
"'eig1'",
"]",
".",
"T",
")",
"p2perm",
"=",
"np",
".",
"dot",
"(",
"m2",
",",
"outvalperm",
"[",
"'eig2'",
"]",
".",
"T",
")",
"outcorrsperm",
"=",
"np",
".",
"array",
"(",
"[",
"pearsonr",
"(",
"p1perm",
"[",
":",
",",
"i",
"]",
",",
"p2perm",
"[",
":",
",",
"i",
"]",
")",
"[",
"0",
"]",
"for",
"i",
"in",
"range",
"(",
"p1perm",
".",
"shape",
"[",
"1",
"]",
")",
"]",
")",
"if",
"prior_weight",
"<",
"1e-10",
":",
"myord",
"=",
"np",
".",
"argsort",
"(",
"np",
".",
"abs",
"(",
"outcorrsperm",
")",
")",
"[",
":",
":",
"-",
"1",
"]",
"outcorrsperm",
"=",
"outcorrsperm",
"[",
"myord",
"]",
"counter",
"=",
"np",
".",
"abs",
"(",
"cca_summary",
"[",
":",
",",
"0",
"]",
")",
"<",
"np",
".",
"abs",
"(",
"outcorrsperm",
")",
"counter",
"=",
"counter",
".",
"astype",
"(",
"'int'",
")",
"cca_summary",
"[",
":",
",",
"1",
"]",
"=",
"cca_summary",
"[",
":",
",",
"1",
"]",
"+",
"counter",
"cca_summary",
"[",
":",
",",
"1",
"]",
"=",
"cca_summary",
"[",
":",
",",
"1",
"]",
"/",
"float",
"(",
"perms",
")",
"return",
"{",
"'projections'",
":",
"p1",
",",
"'projections2'",
":",
"p2",
",",
"'eig1'",
":",
"outval",
"[",
"'eig1'",
"]",
".",
"T",
",",
"'eig2'",
":",
"outval",
"[",
"'eig2'",
"]",
".",
"T",
",",
"'summary'",
":",
"pd",
".",
"DataFrame",
"(",
"cca_summary",
",",
"columns",
"=",
"[",
"'corrs'",
",",
"'pvalues'",
"]",
")",
"}"
] | Decomposes two matrices into paired sparse eigenvectors to
maximize canonical correlation - aka Sparse CCA.
Note: we do not scale the matrices internally. We leave
scaling choices to the user.
ANTsR function: `sparseDecom2`
Arguments
---------
inmatrix : 2-tuple of ndarrays
input as inmatrix=(mat1,mat2). n by p input matrix and n by q input matrix , spatial variable lies along columns.
inmask : 2-tuple of ANTsImage types (optional - one or both)
optional pair of image masks
sparseness : tuple
a pair of float values, e.g. (0.01, 0.1), enforces an unsigned 99 percent and 90 percent sparse solution for each respective view
nvecs : integer
number of eigenvector pairs
its : integer
number of iterations, 10 or 20 usually sufficient
cthresh : 2-tuple
cluster threshold pair
statdir : string (optional)
temporary directory if you want to look at full output
perms : integer
number of permutations. Setting permutations greater than 0 will estimate significance per vector empirically. For small datasets, these may be conservative. P-values depend on how one scales the input matrices.
uselong : boolean
enforce solutions of both views to be the same - requires matrices to be the same size
z : float
subject space (low-dimensional space) sparseness value
smooth : float
smooth the data (only available when mask is used)
robust : boolean
rank transform input matrices
mycoption : integer
enforce 1 - spatial orthogonality, 2 - low-dimensional orthogonality or 0 - both
initialization_list : list
initialization for first view
initialization_list2 : list
initialization for 2nd view
ell1 : float
gradient descent parameter, if negative then l0 otherwise use l1
prior_weight : scalar
Scalar value weight on prior between 0 (prior is weak) and 1 (prior is strong). Only engaged if initialization is used
verbose : boolean
activates verbose output to screen
rejector : scalar
rejects small correlation solutions
max_based : boolean
whether to choose max-based thresholding
Returns
-------
dict w/ following key/value pairs:
`projections` : ndarray
X projections
`projections2` : ndarray
Y projections
`eig1` : ndarray
X components
`eig2` : ndarray
Y components
`summary` : pd.DataFrame
first column is canonical correlations,
second column is p-values (these are `None` unless perms > 0)
Example
-------
>>> import numpy as np
>>> import ants
>>> mat = np.random.randn(20, 100)
>>> mat2 = np.random.randn(20, 90)
>>> mydecom = ants.sparse_decom2(inmatrix = (mat,mat2),
sparseness=(0.1,0.3), nvecs=3,
its=3, perms=0) | [
"Decomposes",
"two",
"matrices",
"into",
"paired",
"sparse",
"eigenevectors",
"to",
"maximize",
"canonical",
"correlation",
"-",
"aka",
"Sparse",
"CCA",
".",
"Note",
":",
"we",
"do",
"not",
"scale",
"the",
"matrices",
"internally",
".",
"We",
"leave",
"scaling",
"choices",
"to",
"the",
"user",
"."
] | python | train |
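The permutation loop above estimates a p-value per component as the fraction of row-shuffled datasets whose canonical correlation beats the observed one. The same idea stripped down to a single correlation, in plain NumPy (no ANTs bindings; sample sizes are arbitrary):

    import numpy as np

    rng = np.random.default_rng(0)
    n = 50
    x = rng.standard_normal(n)
    y = 0.6 * x + 0.8 * rng.standard_normal(n)       # a genuinely correlated pair

    corr = lambda a, b: abs(np.corrcoef(a, b)[0, 1])
    observed = corr(x, y)

    perms = 1000
    # Shuffling rows of one view breaks the pairing; count how often the
    # shuffled correlation matches or beats the observed one.
    exceed = sum(corr(x, y[rng.permutation(n)]) >= observed for _ in range(perms))
    print(observed, exceed / perms)                  # high corr -> p-value near 0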
adrn/gala | gala/potential/frame/builtin/transformations.py | https://github.com/adrn/gala/blob/ea95575a0df1581bb4b0986aebd6eea8438ab7eb/gala/potential/frame/builtin/transformations.py#L10-L29 | def rodrigues_axis_angle_rotate(x, vec, theta):
"""
Rotate the input vector or set of vectors `x` around the axis
`vec` by the angle `theta`.
Parameters
----------
x : array_like
The vector or array of vectors to transform. Must have shape
"""
x = np.array(x).T
vec = np.array(vec).T
theta = np.array(theta).T[...,None]
out = np.cos(theta)*x + np.sin(theta)*np.cross(vec, x) + \
(1 - np.cos(theta)) * (vec * x).sum(axis=-1)[...,None] * vec
return out.T | [
"def",
"rodrigues_axis_angle_rotate",
"(",
"x",
",",
"vec",
",",
"theta",
")",
":",
"x",
"=",
"np",
".",
"array",
"(",
"x",
")",
".",
"T",
"vec",
"=",
"np",
".",
"array",
"(",
"vec",
")",
".",
"T",
"theta",
"=",
"np",
".",
"array",
"(",
"theta",
")",
".",
"T",
"[",
"...",
",",
"None",
"]",
"out",
"=",
"np",
".",
"cos",
"(",
"theta",
")",
"*",
"x",
"+",
"np",
".",
"sin",
"(",
"theta",
")",
"*",
"np",
".",
"cross",
"(",
"vec",
",",
"x",
")",
"+",
"(",
"1",
"-",
"np",
".",
"cos",
"(",
"theta",
")",
")",
"*",
"(",
"vec",
"*",
"x",
")",
".",
"sum",
"(",
"axis",
"=",
"-",
"1",
")",
"[",
"...",
",",
"None",
"]",
"*",
"vec",
"return",
"out",
".",
"T"
] | Rotate the input vector or set of vectors `x` around the axis
`vec` by the angle `theta`.
Parameters
----------
x : array_like
The vector or array of vectors to transform. Must have shape | [
"Rotated",
"the",
"input",
"vector",
"or",
"set",
"of",
"vectors",
"x",
"around",
"the",
"axis",
"vec",
"by",
"the",
"angle",
"theta",
"."
] | python | train |
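A quick numerical check of the Rodrigues formula as implemented: rotating the x unit vector a quarter turn about the z axis lands on the y axis (assumes `rodrigues_axis_angle_rotate` and NumPy are importable).

    import numpy as np

    x_hat = [1.0, 0.0, 0.0]          # vector to rotate
    z_hat = [0.0, 0.0, 1.0]          # unit rotation axis

    rotated = rodrigues_axis_angle_rotate(x_hat, z_hat, np.pi / 2)
    # cos(90)*x + sin(90)*(z x x) + (1 - cos(90))*(z.x)*z = (0, 1, 0)
    assert np.allclose(rotated, [0.0, 1.0, 0.0])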
fboender/ansible-cmdb | lib/mako/_ast_util.py | https://github.com/fboender/ansible-cmdb/blob/ebd960ac10684e8c9ec2b12751bba2c4c9504ab7/lib/mako/_ast_util.py#L107-L122 | def dump(node):
"""
A very verbose representation of the node passed. This is useful for
debugging purposes.
"""
def _format(node):
if isinstance(node, AST):
return '%s(%s)' % (node.__class__.__name__,
', '.join('%s=%s' % (a, _format(b))
for a, b in iter_fields(node)))
elif isinstance(node, list):
return '[%s]' % ', '.join(_format(x) for x in node)
return repr(node)
if not isinstance(node, AST):
raise TypeError('expected AST, got %r' % node.__class__.__name__)
return _format(node) | [
"def",
"dump",
"(",
"node",
")",
":",
"def",
"_format",
"(",
"node",
")",
":",
"if",
"isinstance",
"(",
"node",
",",
"AST",
")",
":",
"return",
"'%s(%s)'",
"%",
"(",
"node",
".",
"__class__",
".",
"__name__",
",",
"', '",
".",
"join",
"(",
"'%s=%s'",
"%",
"(",
"a",
",",
"_format",
"(",
"b",
")",
")",
"for",
"a",
",",
"b",
"in",
"iter_fields",
"(",
"node",
")",
")",
")",
"elif",
"isinstance",
"(",
"node",
",",
"list",
")",
":",
"return",
"'[%s]'",
"%",
"', '",
".",
"join",
"(",
"_format",
"(",
"x",
")",
"for",
"x",
"in",
"node",
")",
"return",
"repr",
"(",
"node",
")",
"if",
"not",
"isinstance",
"(",
"node",
",",
"AST",
")",
":",
"raise",
"TypeError",
"(",
"'expected AST, got %r'",
"%",
"node",
".",
"__class__",
".",
"__name__",
")",
"return",
"_format",
"(",
"node",
")"
] | A very verbose representation of the node passed. This is useful for
debugging purposes. | [
"A",
"very",
"verbose",
"representation",
"of",
"the",
"node",
"passed",
".",
"This",
"is",
"useful",
"for",
"debugging",
"purposes",
"."
] | python | train |
scanny/python-pptx | pptx/chart/axis.py | https://github.com/scanny/python-pptx/blob/d6ab8234f8b03953d2f831ff9394b1852db34130/pptx/chart/axis.py#L164-L172 | def minor_tick_mark(self):
"""
Read/write :ref:`XlTickMark` value specifying the type of minor tick
mark for this axis.
"""
minorTickMark = self._element.minorTickMark
if minorTickMark is None:
return XL_TICK_MARK.CROSS
return minorTickMark.val | [
"def",
"minor_tick_mark",
"(",
"self",
")",
":",
"minorTickMark",
"=",
"self",
".",
"_element",
".",
"minorTickMark",
"if",
"minorTickMark",
"is",
"None",
":",
"return",
"XL_TICK_MARK",
".",
"CROSS",
"return",
"minorTickMark",
".",
"val"
] | Read/write :ref:`XlTickMark` value specifying the type of minor tick
mark for this axis. | [
"Read",
"/",
"write",
":",
"ref",
":",
"XlTickMark",
"value",
"specifying",
"the",
"type",
"of",
"minor",
"tick",
"mark",
"for",
"this",
"axis",
"."
] | python | train |
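Typical read/write use against a chart's value axis (standard python-pptx objects; the file name and slide layout here are illustrative):

    from pptx import Presentation
    from pptx.enum.chart import XL_TICK_MARK

    prs = Presentation('deck.pptx')               # any deck whose first shape is a chart
    axis = prs.slides[0].shapes[0].chart.value_axis

    print(axis.minor_tick_mark)                   # XL_TICK_MARK.CROSS when unset
    axis.minor_tick_mark = XL_TICK_MARK.OUTSIDE   # setter writes c:minorTickMark/@val
    prs.save('deck.pptx')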
EwilDawe/typy | typy/mouse.py | https://github.com/EwilDawe/typy/blob/0349e7176567a4dbef318e75d9b3d6868950a1a9/typy/mouse.py#L43-L69 | def move_arc(x, y, r, speed = 1, orientation = True):
# Note: cursor coordinates are rounded to whole pixels, so the traced arc is only pixel-accurate
"""
Moves the cursor in an arc of radius r to (x, y) at a certain speed
:param x: target x-ordinate
:param y: target y-ordinate
:param r: radius
:param speed: pixel traversal rate
:param orientation: direction of arc
:return: None
"""
_x, _y = win32api.GetCursorPos()
c_len = (r**2 - (((x - _x)/2)**2 + ((y - _y)/2)**2))**0.5
t = (c_len**2/((y - _y)**2 + (x - _x)**2))**0.5
t = t if orientation else -t
centre = ((_x + x)/2 + t*(_y - y), (_y + y)/2 + t*(x - _x))
if any(isinstance(ordinate, complex) for ordinate in centre):
raise ValueError("Radius too low - minimum: {}".format(((x - _x)**2 + (y - _y)**2)**0.5/2))
theta = math.atan2(_y - centre[1], _x - centre[0])
end = math.atan2(y - centre[1], x - centre[0])
while theta < end:
move(*list(map(round, (centre[0] + r*math.cos(theta), centre[1] + r*math.sin(theta)))))
theta += speed/100
time.sleep(0.01)
move(x, y) | [
"def",
"move_arc",
"(",
"x",
",",
"y",
",",
"r",
",",
"speed",
"=",
"1",
",",
"orientation",
"=",
"True",
")",
":",
"# WARNING: This function currently contains inaccuracy likely due to the rounding of trigonometric functions",
"_x",
",",
"_y",
"=",
"win32api",
".",
"GetCursorPos",
"(",
")",
"c_len",
"=",
"(",
"r",
"**",
"2",
"-",
"(",
"(",
"(",
"x",
"-",
"_x",
")",
"/",
"2",
")",
"**",
"2",
"+",
"(",
"(",
"y",
"-",
"_y",
")",
"/",
"2",
")",
"**",
"2",
")",
")",
"**",
"0.5",
"t",
"=",
"(",
"c_len",
"**",
"2",
"/",
"(",
"(",
"y",
"-",
"_y",
")",
"**",
"2",
"+",
"(",
"x",
"-",
"_x",
")",
"**",
"2",
")",
")",
"**",
"0.5",
"t",
"=",
"t",
"if",
"orientation",
"else",
"-",
"t",
"centre",
"=",
"(",
"(",
"_x",
"+",
"x",
")",
"/",
"2",
"+",
"t",
"*",
"(",
"_x",
"-",
"x",
")",
",",
"(",
"_y",
"+",
"y",
")",
"/",
"2",
"+",
"t",
"*",
"(",
"y",
"-",
"_y",
")",
")",
"if",
"any",
"(",
"isinstance",
"(",
"ordinate",
",",
"complex",
")",
"for",
"ordinate",
"in",
"centre",
")",
":",
"raise",
"ValueError",
"(",
"\"Radius too low - minimum: {}\"",
".",
"format",
"(",
"(",
"(",
"x",
"-",
"_x",
")",
"**",
"2",
"+",
"(",
"y",
"-",
"_y",
")",
"**",
"2",
")",
"**",
"0.5",
"/",
"2",
")",
")",
"theta",
"=",
"math",
".",
"atan2",
"(",
"_y",
"-",
"centre",
"[",
"1",
"]",
",",
"_x",
"-",
"centre",
"[",
"0",
"]",
")",
"end",
"=",
"math",
".",
"atan2",
"(",
"y",
"-",
"centre",
"[",
"1",
"]",
",",
"x",
"-",
"centre",
"[",
"0",
"]",
")",
"while",
"theta",
"<",
"end",
":",
"move",
"(",
"*",
"list",
"(",
"map",
"(",
"round",
",",
"(",
"centre",
"[",
"0",
"]",
"+",
"r",
"*",
"math",
".",
"cos",
"(",
"theta",
")",
",",
"centre",
"[",
"1",
"]",
"+",
"r",
"*",
"math",
".",
"sin",
"(",
"theta",
")",
")",
")",
")",
")",
"theta",
"+=",
"speed",
"/",
"100",
"time",
".",
"sleep",
"(",
"0.01",
")",
"move",
"(",
"x",
",",
"y",
")"
] | Moves the cursor in an arc of radius r to (x, y) at a certain speed
:param x: target x-ordinate
:param y: target y-ordinate
:param r: radius
:param speed: pixel traversal rate
:param orientation: direction of arc
:return: None | [
"Moves",
"the",
"cursor",
"in",
"an",
"arc",
"of",
"radius",
"r",
"to",
"(",
"x",
"y",
")",
"at",
"a",
"certain",
"speed"
] | python | train |
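A self-contained check of the centre construction used above (pure math, no cursor movement): offsetting the chord midpoint along the perpendicular bisector by sqrt(r^2 - (half chord)^2) puts both endpoints exactly r from the centre.

    import math

    def arc_centre(x0, y0, x1, y1, r, orientation=True):
        c_len = (r**2 - (((x1 - x0) / 2)**2 + ((y1 - y0) / 2)**2))**0.5
        t = (c_len**2 / ((y1 - y0)**2 + (x1 - x0)**2))**0.5
        t = t if orientation else -t
        # Perpendicular to the chord direction (dx, dy) is (-dy, dx).
        return ((x0 + x1) / 2 + t * (y0 - y1), (y0 + y1) / 2 + t * (x1 - x0))

    cx, cy = arc_centre(0, 0, 100, 0, 60)
    assert math.isclose(math.hypot(cx - 0, cy - 0), 60)     # start on the circle
    assert math.isclose(math.hypot(cx - 100, cy - 0), 60)   # end on the circle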
mblayman/httpony | httpony/application.py | https://github.com/mblayman/httpony/blob/5af404d647a8dac8a043b64ea09882589b3b5247/httpony/application.py#L15-L55 | def make_app():
"""Make a WSGI app that has all the HTTPie pieces baked in."""
env = Environment()
# STDIN is ignored because HTTPony runs a server that doesn't care.
# Additionally, it is needed or else pytest blows up.
args = parser.parse_args(args=['/', '--ignore-stdin'], env=env)
args.output_options = 'HB' # Output only requests.
server = 'HTTPony/{0}'.format(__version__)
def application(environ, start_response):
# The WSGI server puts content length and type in the environment
# even when not provided with the request. Drop them if they are empty.
if environ.get('CONTENT_LENGTH') == '':
del environ['CONTENT_LENGTH']
if environ.get('CONTENT_TYPE') == '':
del environ['CONTENT_TYPE']
wrequest = WerkzeugRequest(environ)
data = wrequest.get_data()
request = Request(
method=wrequest.method,
url=wrequest.url,
headers=wrequest.headers,
data=data,
)
prepared = request.prepare()
stream = streams.build_output_stream(
args, env, prepared, response=None,
output_options=args.output_options)
streams.write_stream(stream, env.stdout, env.stdout_isatty)
# When there is data in the request, give the next one breathing room.
if data:
print("\n", file=env.stdout)
# Make dreams come true.
response = Response(headers={'Server': server})
return response(environ, start_response)
return application | [
"def",
"make_app",
"(",
")",
":",
"env",
"=",
"Environment",
"(",
")",
"# STDIN is ignored because HTTPony runs a server that doesn't care.",
"# Additionally, it is needed or else pytest blows up.",
"args",
"=",
"parser",
".",
"parse_args",
"(",
"args",
"=",
"[",
"'/'",
",",
"'--ignore-stdin'",
"]",
",",
"env",
"=",
"env",
")",
"args",
".",
"output_options",
"=",
"'HB'",
"# Output only requests.",
"server",
"=",
"'HTTPony/{0}'",
".",
"format",
"(",
"__version__",
")",
"def",
"application",
"(",
"environ",
",",
"start_response",
")",
":",
"# The WSGI server puts content length and type in the environment",
"# even when not provided with the request. Drop them if they are empty.",
"if",
"environ",
".",
"get",
"(",
"'CONTENT_LENGTH'",
")",
"==",
"''",
":",
"del",
"environ",
"[",
"'CONTENT_LENGTH'",
"]",
"if",
"environ",
".",
"get",
"(",
"'CONTENT_TYPE'",
")",
"==",
"''",
":",
"del",
"environ",
"[",
"'CONTENT_TYPE'",
"]",
"wrequest",
"=",
"WerkzeugRequest",
"(",
"environ",
")",
"data",
"=",
"wrequest",
".",
"get_data",
"(",
")",
"request",
"=",
"Request",
"(",
"method",
"=",
"wrequest",
".",
"method",
",",
"url",
"=",
"wrequest",
".",
"url",
",",
"headers",
"=",
"wrequest",
".",
"headers",
",",
"data",
"=",
"data",
",",
")",
"prepared",
"=",
"request",
".",
"prepare",
"(",
")",
"stream",
"=",
"streams",
".",
"build_output_stream",
"(",
"args",
",",
"env",
",",
"prepared",
",",
"response",
"=",
"None",
",",
"output_options",
"=",
"args",
".",
"output_options",
")",
"streams",
".",
"write_stream",
"(",
"stream",
",",
"env",
".",
"stdout",
",",
"env",
".",
"stdout_isatty",
")",
"# When there is data in the request, give the next one breathing room.",
"if",
"data",
":",
"print",
"(",
"\"\\n\"",
",",
"file",
"=",
"env",
".",
"stdout",
")",
"# Make dreams come true.",
"response",
"=",
"Response",
"(",
"headers",
"=",
"{",
"'Server'",
":",
"server",
"}",
")",
"return",
"response",
"(",
"environ",
",",
"start_response",
")",
"return",
"application"
] | Make a WSGI app that has all the HTTPie pieces baked in. | [
"Make",
"a",
"WSGI",
"app",
"that",
"has",
"all",
"the",
"HTTPie",
"pieces",
"baked",
"in",
"."
] | python | test |
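The returned callable is a plain WSGI application, so the standard library alone can serve it (host and port below are arbitrary):

    from wsgiref.simple_server import make_server

    app = make_app()
    with make_server('127.0.0.1', 8000, app) as httpd:
        # Each incoming request is pretty-printed, HTTPie-style, on stdout.
        httpd.serve_forever()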
merll/docker-fabric | dockerfabric/apiclient.py | https://github.com/merll/docker-fabric/blob/785d84e40e17265b667d8b11a6e30d8e6b2bf8d4/dockerfabric/apiclient.py#L147-L156 | def build(self, tag, **kwargs):
"""
Identical to :meth:`dockermap.client.base.DockerClientWrapper.build` with additional logging.
"""
self.push_log("Building image '{0}'.".format(tag))
set_raise_on_error(kwargs)
try:
return super(DockerFabricClient, self).build(tag, **kwargs)
except DockerStatusError as e:
error(e.message) | [
"def",
"build",
"(",
"self",
",",
"tag",
",",
"*",
"*",
"kwargs",
")",
":",
"self",
".",
"push_log",
"(",
"\"Building image '{0}'.\"",
".",
"format",
"(",
"tag",
")",
")",
"set_raise_on_error",
"(",
"kwargs",
")",
"try",
":",
"return",
"super",
"(",
"DockerFabricClient",
",",
"self",
")",
".",
"build",
"(",
"tag",
",",
"*",
"*",
"kwargs",
")",
"except",
"DockerStatusError",
"as",
"e",
":",
"error",
"(",
"e",
".",
"message",
")"
] | Identical to :meth:`dockermap.client.base.DockerClientWrapper.build` with additional logging. | [
"Identical",
"to",
":",
"meth",
":",
"dockermap",
".",
"client",
".",
"base",
".",
"DockerClientWrapper",
".",
"build",
"with",
"additional",
"logging",
"."
] | python | train |
plivo/plivohelper-python | plivohelper.py | https://github.com/plivo/plivohelper-python/blob/a2f706d69e2138fbb973f792041341f662072d26/plivohelper.py#L244-L249 | def schedule_play(self, call_params):
"""REST Schedule playing something on a call Helper
"""
path = '/' + self.api_version + '/SchedulePlay/'
method = 'POST'
return self.request(path, method, call_params) | [
"def",
"schedule_play",
"(",
"self",
",",
"call_params",
")",
":",
"path",
"=",
"'/'",
"+",
"self",
".",
"api_version",
"+",
"'/SchedulePlay/'",
"method",
"=",
"'POST'",
"return",
"self",
".",
"request",
"(",
"path",
",",
"method",
",",
"call_params",
")"
] | REST Schedule playing something on a call Helper | [
"REST",
"Schedule",
"playing",
"something",
"on",
"a",
"call",
"Helper"
] | python | valid |
gem/oq-engine | openquake/hazardlib/gsim/boore_1997.py | https://github.com/gem/oq-engine/blob/8294553a0b8aba33fd96437a35065d03547d0040/openquake/hazardlib/gsim/boore_1997.py#L121-L141 | def _compute_style_of_faulting_term(self, rup, C):
"""
Computes the coefficient to scale for reverse or strike-slip events
Fault type (Strike-slip, Normal, Thrust/reverse) is
derived from rake angle.
Rake angles within 30 of horizontal are strike-slip,
angles from 30 to 150 are reverse, and angles from
-30 to -150 are normal. See paragraph 'Predictor Variables'
page 103.
Note that 'Unspecified' case is used to refer to all other rake
angles.
"""
if np.abs(rup.rake) <= 30.0 or (180.0 - np.abs(rup.rake)) <= 30.0:
# strike-slip
return C['B1ss']
elif rup.rake > 30.0 and rup.rake < 150.0:
# reverse
return C['B1rv']
else:
# unspecified (also includes Normal faulting!)
return C['B1all'] | [
"def",
"_compute_style_of_faulting_term",
"(",
"self",
",",
"rup",
",",
"C",
")",
":",
"if",
"np",
".",
"abs",
"(",
"rup",
".",
"rake",
")",
"<=",
"30.0",
"or",
"(",
"180.0",
"-",
"np",
".",
"abs",
"(",
"rup",
".",
"rake",
")",
")",
"<=",
"30.0",
":",
"# strike-slip",
"return",
"C",
"[",
"'B1ss'",
"]",
"elif",
"rup",
".",
"rake",
">",
"30.0",
"and",
"rup",
".",
"rake",
"<",
"150.0",
":",
"# reverse",
"return",
"C",
"[",
"'B1rv'",
"]",
"else",
":",
"# unspecified (also includes Normal faulting!)",
"return",
"C",
"[",
"'B1all'",
"]"
] | Computes the coefficient to scale for reverse or strike-slip events
Fault type (Strike-slip, Normal, Thrust/reverse) is
derived from rake angle.
Rake angles within 30 of horizontal are strike-slip,
angles from 30 to 150 are reverse, and angles from
-30 to -150 are normal. See paragraph 'Predictor Variables'
page 103.
Note that 'Unspecified' case is used to refer to all other rake
angles. | [
"Computes",
"the",
"coefficient",
"to",
"scale",
"for",
"reverse",
"or",
"strike",
"-",
"slip",
"events",
"Fault",
"type",
"(",
"Strike",
"-",
"slip",
"Normal",
"Thrust",
"/",
"reverse",
")",
"is",
"derived",
"from",
"rake",
"angle",
".",
"Rakes",
"angles",
"within",
"30",
"of",
"horizontal",
"are",
"strike",
"-",
"slip",
"angles",
"from",
"30",
"to",
"150",
"are",
"reverse",
"and",
"angles",
"from",
"-",
"30",
"to",
"-",
"150",
"are",
"normal",
".",
"See",
"paragraph",
"Predictor",
"Variables",
"pag",
"103",
".",
"Note",
"that",
"Unspecified",
"case",
"is",
"used",
"to",
"refer",
"to",
"all",
"other",
"rake",
"angles",
"."
] | python | train |
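The rake bucketing described in the docstring, written out as a standalone helper (key names follow the coefficient table used above):

    import numpy as np

    def faulting_key(rake):
        # Strike-slip: within 30 degrees of horizontal (rake near 0 or +/-180).
        if np.abs(rake) <= 30.0 or (180.0 - np.abs(rake)) <= 30.0:
            return 'B1ss'
        # Reverse: rake between 30 and 150 degrees.
        if 30.0 < rake < 150.0:
            return 'B1rv'
        # Everything else, normal faulting included, falls in 'unspecified'.
        return 'B1all'

    assert faulting_key(0.0) == 'B1ss'      # pure strike-slip
    assert faulting_key(90.0) == 'B1rv'     # pure reverse/thrust
    assert faulting_key(-90.0) == 'B1all'   # pure normal -> unspecified bucket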
Infinidat/infi.clickhouse_orm | src/infi/clickhouse_orm/models.py | https://github.com/Infinidat/infi.clickhouse_orm/blob/595f2023e334e3925a5c3fbfdd6083a5992a7169/src/infi/clickhouse_orm/models.py#L207-L228 | def from_tsv(cls, line, field_names, timezone_in_use=pytz.utc, database=None):
'''
Create a model instance from a tab-separated line. The line may or may not include a newline.
The `field_names` list must match the fields defined in the model, but does not have to include all of them.
- `line`: the TSV-formatted data.
- `field_names`: names of the model fields in the data.
- `timezone_in_use`: the timezone to use when parsing dates and datetimes.
- `database`: if given, sets the database that this instance belongs to.
'''
from six import next
values = iter(parse_tsv(line))
kwargs = {}
for name in field_names:
field = getattr(cls, name)
kwargs[name] = field.to_python(next(values), timezone_in_use)
obj = cls(**kwargs)
if database is not None:
obj.set_database(database)
return obj | [
"def",
"from_tsv",
"(",
"cls",
",",
"line",
",",
"field_names",
",",
"timezone_in_use",
"=",
"pytz",
".",
"utc",
",",
"database",
"=",
"None",
")",
":",
"from",
"six",
"import",
"next",
"values",
"=",
"iter",
"(",
"parse_tsv",
"(",
"line",
")",
")",
"kwargs",
"=",
"{",
"}",
"for",
"name",
"in",
"field_names",
":",
"field",
"=",
"getattr",
"(",
"cls",
",",
"name",
")",
"kwargs",
"[",
"name",
"]",
"=",
"field",
".",
"to_python",
"(",
"next",
"(",
"values",
")",
",",
"timezone_in_use",
")",
"obj",
"=",
"cls",
"(",
"*",
"*",
"kwargs",
")",
"if",
"database",
"is",
"not",
"None",
":",
"obj",
".",
"set_database",
"(",
"database",
")",
"return",
"obj"
] | Create a model instance from a tab-separated line. The line may or may not include a newline.
The `field_names` list must match the fields defined in the model, but does not have to include all of them.
- `line`: the TSV-formatted data.
- `field_names`: names of the model fields in the data.
- `timezone_in_use`: the timezone to use when parsing dates and datetimes.
- `database`: if given, sets the database that this instance belongs to. | [
"Create",
"a",
"model",
"instance",
"from",
"a",
"tab",
"-",
"separated",
"line",
".",
"The",
"line",
"may",
"or",
"may",
"not",
"include",
"a",
"newline",
".",
"The",
"field_names",
"list",
"must",
"match",
"the",
"fields",
"defined",
"in",
"the",
"model",
"but",
"does",
"not",
"have",
"to",
"include",
"all",
"of",
"them",
"."
] | python | train |
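A sketch of typical use with a made-up model; only `from_tsv` itself comes from the listing above, the Model subclass and its fields are illustrative:

    from infi.clickhouse_orm import models, fields, engines

    class Point(models.Model):
        date = fields.DateField()
        x = fields.Float32Field()
        engine = engines.Memory()

    # One tab-separated line; column order is given by field_names.
    p = Point.from_tsv('2019-01-01\t3.5', field_names=('date', 'x'))
    print(p.date, p.x)   # 2019-01-01 3.5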
brutasse/rache | rache/__init__.py | https://github.com/brutasse/rache/blob/fa9cf073376a8c731a13924b84fb8422a771a4ab/rache/__init__.py#L48-L87 | def schedule_job(job_id, schedule_in, connection=None, **kwargs):
"""Schedules a job.
:param job_id: unique identifier for this job
:param schedule_in: number of seconds from now in which to schedule the
job or timedelta object.
:param **kwargs: parameters to attach to the job, key-value structure.
>>> schedule_job('http://example.com/test', schedule_in=10, num_retries=10)
"""
if not isinstance(schedule_in, int): # assumed to be a timedelta
schedule_in = schedule_in.days * 3600 * 24 + schedule_in.seconds
schedule_at = int(time.time()) + schedule_in
if connection is None:
connection = r
if 'id' in kwargs:
raise RuntimeError("'id' is a reserved key for the job ID")
with connection.pipeline() as pipe:
if schedule_at is not None:
args = (schedule_at, job_id)
if isinstance(connection, redis.Redis):
# StrictRedis and Redis don't use the same zadd argument order
args = (job_id, schedule_at)
pipe.zadd(REDIS_KEY, *args)
delete = []
hmset = {}
for key, value in kwargs.items():
if value is None:
delete.append(key)
else:
hmset[key] = value
if hmset:
pipe.hmset(job_key(job_id), hmset)
if len(delete) > 0:
pipe.hdel(job_key(job_id), *delete)
pipe.execute() | [
"def",
"schedule_job",
"(",
"job_id",
",",
"schedule_in",
",",
"connection",
"=",
"None",
",",
"*",
"*",
"kwargs",
")",
":",
"if",
"not",
"isinstance",
"(",
"schedule_in",
",",
"int",
")",
":",
"# assumed to be a timedelta",
"schedule_in",
"=",
"schedule_in",
".",
"days",
"*",
"3600",
"*",
"24",
"+",
"schedule_in",
".",
"seconds",
"schedule_at",
"=",
"int",
"(",
"time",
".",
"time",
"(",
")",
")",
"+",
"schedule_in",
"if",
"connection",
"is",
"None",
":",
"connection",
"=",
"r",
"if",
"'id'",
"in",
"kwargs",
":",
"raise",
"RuntimeError",
"(",
"\"'id' is a reserved key for the job ID\"",
")",
"with",
"connection",
".",
"pipeline",
"(",
")",
"as",
"pipe",
":",
"if",
"schedule_at",
"is",
"not",
"None",
":",
"args",
"=",
"(",
"schedule_at",
",",
"job_id",
")",
"if",
"isinstance",
"(",
"connection",
",",
"redis",
".",
"Redis",
")",
":",
"# StrictRedis or Redis don't have the same argument order",
"args",
"=",
"(",
"job_id",
",",
"schedule_at",
")",
"pipe",
".",
"zadd",
"(",
"REDIS_KEY",
",",
"*",
"args",
")",
"delete",
"=",
"[",
"]",
"hmset",
"=",
"{",
"}",
"for",
"key",
",",
"value",
"in",
"kwargs",
".",
"items",
"(",
")",
":",
"if",
"value",
"is",
"None",
":",
"delete",
".",
"append",
"(",
"key",
")",
"else",
":",
"hmset",
"[",
"key",
"]",
"=",
"value",
"if",
"hmset",
":",
"pipe",
".",
"hmset",
"(",
"job_key",
"(",
"job_id",
")",
",",
"hmset",
")",
"if",
"len",
"(",
"delete",
")",
">",
"0",
":",
"pipe",
".",
"hdel",
"(",
"job_key",
"(",
"job_id",
")",
",",
"*",
"delete",
")",
"pipe",
".",
"execute",
"(",
")"
] | Schedules a job.
:param job_id: unique identifier for this job
:param schedule_in: number of seconds from now in which to schedule the
job or timedelta object.
:param **kwargs: parameters to attach to the job, key-value structure.
>>> schedule_job('http://example.com/test', schedule_in=10, num_retries=10) | [
"Schedules",
"a",
"job",
"."
] | python | train |
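The timedelta branch above is a plain seconds conversion (sub-second precision is dropped); for whole-second deltas it agrees with `total_seconds()`:

    from datetime import timedelta

    delta = timedelta(days=2, hours=3, seconds=30)
    schedule_in = delta.days * 3600 * 24 + delta.seconds   # as in schedule_job
    assert schedule_in == int(delta.total_seconds()) == 183630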
UCBerkeleySETI/blimpy | blimpy/file_wrapper.py | https://github.com/UCBerkeleySETI/blimpy/blob/b8822d3e3e911944370d84371a91fa0c29e9772e/blimpy/file_wrapper.py#L147-L158 | def _calc_selection_shape(self):
"""Calculate shape of data of interest.
"""
#Check how many integrations requested
n_ints = int(self.t_stop - self.t_start)
#Check how many frequency channels requested
n_chan = int(np.round((self.f_stop - self.f_start) / abs(self.header[b'foff'])))
selection_shape = (n_ints,int(self.header[b'nifs']),n_chan)
return selection_shape | [
"def",
"_calc_selection_shape",
"(",
"self",
")",
":",
"#Check how many integrations requested",
"n_ints",
"=",
"int",
"(",
"self",
".",
"t_stop",
"-",
"self",
".",
"t_start",
")",
"#Check how many frequency channels requested",
"n_chan",
"=",
"int",
"(",
"np",
".",
"round",
"(",
"(",
"self",
".",
"f_stop",
"-",
"self",
".",
"f_start",
")",
"/",
"abs",
"(",
"self",
".",
"header",
"[",
"b'foff'",
"]",
")",
")",
")",
"selection_shape",
"=",
"(",
"n_ints",
",",
"int",
"(",
"self",
".",
"header",
"[",
"b'nifs'",
"]",
")",
",",
"n_chan",
")",
"return",
"selection_shape"
] | Calculate shape of data of interest. | [
"Calculate",
"shape",
"of",
"data",
"of",
"interest",
"."
] | python | test |
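Worked example of the shape arithmetic above (header values are illustrative): 60 integrations of a single-IF file spanning 1 MHz at 2.9 kHz channel width select a (60, 1, 345) block.

    import numpy as np

    t_start, t_stop = 0, 60              # integrations of interest
    f_start, f_stop = 1420.0, 1421.0     # MHz
    foff, nifs = -0.0029, 1              # channel width (MHz) and IF count

    n_ints = int(t_stop - t_start)
    n_chan = int(np.round((f_stop - f_start) / abs(foff)))
    print((n_ints, nifs, n_chan))        # (60, 1, 345)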
rodluger/everest | everest/missions/k2/utils.py | https://github.com/rodluger/everest/blob/6779591f9f8b3556847e2fbf761bdfac7520eaea/everest/missions/k2/utils.py#L545-L615 | def GetHiResImage(ID):
'''
Queries the Palomar Observatory Sky Survey II catalog to
obtain a higher resolution optical image of the star with EPIC number
:py:obj:`ID`.
'''
# Get the TPF info
client = kplr.API()
star = client.k2_star(ID)
k2ra = star.k2_ra
k2dec = star.k2_dec
tpf = star.get_target_pixel_files()[0]
with tpf.open() as f:
k2wcs = WCS(f[2].header)
shape = np.array(f[1].data.field('FLUX'), dtype='float64')[0].shape
# Get the POSS URL
hou = int(k2ra * 24 / 360.)
min = int(60 * (k2ra * 24 / 360. - hou))
sec = 60 * (60 * (k2ra * 24 / 360. - hou) - min)
ra = '%02d+%02d+%.2f' % (hou, min, sec)
sgn = '' if np.sign(k2dec) >= 0 else '-'
deg = int(np.abs(k2dec))
min = int(60 * (np.abs(k2dec) - deg))
sec = 3600 * (np.abs(k2dec) - deg - min / 60)
dec = '%s%02d+%02d+%.1f' % (sgn, deg, min, sec)
url = 'https://archive.stsci.edu/cgi-bin/dss_search?v=poss2ukstu_red&' + \
'r=%s&d=%s&e=J2000&h=3&w=3&f=fits&c=none&fov=NONE&v3=' % (ra, dec)
# Query the server
r = urllib.request.Request(url)
handler = urllib.request.urlopen(r)
code = handler.getcode()
if int(code) != 200:
# Unavailable
return None
data = handler.read()
# Atomically write to a temp file
f = NamedTemporaryFile("wb", delete=False)
f.write(data)
f.flush()
os.fsync(f.fileno())
f.close()
# Now open the POSS fits file
with pyfits.open(f.name) as ff:
img = ff[0].data
# Map POSS pixels onto K2 pixels
xy = np.empty((img.shape[0] * img.shape[1], 2))
z = np.empty(img.shape[0] * img.shape[1])
pwcs = WCS(f.name)
k = 0
for i in range(img.shape[0]):
for j in range(img.shape[1]):
ra, dec = pwcs.all_pix2world(float(j), float(i), 0)
xy[k] = k2wcs.all_world2pix(ra, dec, 0)
z[k] = img[i, j]
k += 1
# Resample
grid_x, grid_y = np.mgrid[-0.5:shape[1] - 0.5:0.1, -0.5:shape[0] - 0.5:0.1]
resampled = griddata(xy, z, (grid_x, grid_y), method='cubic')
# Rotate to align with K2 image. Not sure why, but it is necessary
resampled = np.rot90(resampled)
return resampled | [
"def",
"GetHiResImage",
"(",
"ID",
")",
":",
"# Get the TPF info",
"client",
"=",
"kplr",
".",
"API",
"(",
")",
"star",
"=",
"client",
".",
"k2_star",
"(",
"ID",
")",
"k2ra",
"=",
"star",
".",
"k2_ra",
"k2dec",
"=",
"star",
".",
"k2_dec",
"tpf",
"=",
"star",
".",
"get_target_pixel_files",
"(",
")",
"[",
"0",
"]",
"with",
"tpf",
".",
"open",
"(",
")",
"as",
"f",
":",
"k2wcs",
"=",
"WCS",
"(",
"f",
"[",
"2",
"]",
".",
"header",
")",
"shape",
"=",
"np",
".",
"array",
"(",
"f",
"[",
"1",
"]",
".",
"data",
".",
"field",
"(",
"'FLUX'",
")",
",",
"dtype",
"=",
"'float64'",
")",
"[",
"0",
"]",
".",
"shape",
"# Get the POSS URL",
"hou",
"=",
"int",
"(",
"k2ra",
"*",
"24",
"/",
"360.",
")",
"min",
"=",
"int",
"(",
"60",
"*",
"(",
"k2ra",
"*",
"24",
"/",
"360.",
"-",
"hou",
")",
")",
"sec",
"=",
"60",
"*",
"(",
"60",
"*",
"(",
"k2ra",
"*",
"24",
"/",
"360.",
"-",
"hou",
")",
"-",
"min",
")",
"ra",
"=",
"'%02d+%02d+%.2f'",
"%",
"(",
"hou",
",",
"min",
",",
"sec",
")",
"sgn",
"=",
"''",
"if",
"np",
".",
"sign",
"(",
"k2dec",
")",
">=",
"0",
"else",
"'-'",
"deg",
"=",
"int",
"(",
"np",
".",
"abs",
"(",
"k2dec",
")",
")",
"min",
"=",
"int",
"(",
"60",
"*",
"(",
"np",
".",
"abs",
"(",
"k2dec",
")",
"-",
"deg",
")",
")",
"sec",
"=",
"3600",
"*",
"(",
"np",
".",
"abs",
"(",
"k2dec",
")",
"-",
"deg",
"-",
"min",
"/",
"60",
")",
"dec",
"=",
"'%s%02d+%02d+%.1f'",
"%",
"(",
"sgn",
",",
"deg",
",",
"min",
",",
"sec",
")",
"url",
"=",
"'https://archive.stsci.edu/cgi-bin/dss_search?v=poss2ukstu_red&'",
"+",
"'r=%s&d=%s&e=J2000&h=3&w=3&f=fits&c=none&fov=NONE&v3='",
"%",
"(",
"ra",
",",
"dec",
")",
"# Query the server",
"r",
"=",
"urllib",
".",
"request",
".",
"Request",
"(",
"url",
")",
"handler",
"=",
"urllib",
".",
"request",
".",
"urlopen",
"(",
"r",
")",
"code",
"=",
"handler",
".",
"getcode",
"(",
")",
"if",
"int",
"(",
"code",
")",
"!=",
"200",
":",
"# Unavailable",
"return",
"None",
"data",
"=",
"handler",
".",
"read",
"(",
")",
"# Atomically write to a temp file",
"f",
"=",
"NamedTemporaryFile",
"(",
"\"wb\"",
",",
"delete",
"=",
"False",
")",
"f",
".",
"write",
"(",
"data",
")",
"f",
".",
"flush",
"(",
")",
"os",
".",
"fsync",
"(",
"f",
".",
"fileno",
"(",
")",
")",
"f",
".",
"close",
"(",
")",
"# Now open the POSS fits file",
"with",
"pyfits",
".",
"open",
"(",
"f",
".",
"name",
")",
"as",
"ff",
":",
"img",
"=",
"ff",
"[",
"0",
"]",
".",
"data",
"# Map POSS pixels onto K2 pixels",
"xy",
"=",
"np",
".",
"empty",
"(",
"(",
"img",
".",
"shape",
"[",
"0",
"]",
"*",
"img",
".",
"shape",
"[",
"1",
"]",
",",
"2",
")",
")",
"z",
"=",
"np",
".",
"empty",
"(",
"img",
".",
"shape",
"[",
"0",
"]",
"*",
"img",
".",
"shape",
"[",
"1",
"]",
")",
"pwcs",
"=",
"WCS",
"(",
"f",
".",
"name",
")",
"k",
"=",
"0",
"for",
"i",
"in",
"range",
"(",
"img",
".",
"shape",
"[",
"0",
"]",
")",
":",
"for",
"j",
"in",
"range",
"(",
"img",
".",
"shape",
"[",
"1",
"]",
")",
":",
"ra",
",",
"dec",
"=",
"pwcs",
".",
"all_pix2world",
"(",
"float",
"(",
"j",
")",
",",
"float",
"(",
"i",
")",
",",
"0",
")",
"xy",
"[",
"k",
"]",
"=",
"k2wcs",
".",
"all_world2pix",
"(",
"ra",
",",
"dec",
",",
"0",
")",
"z",
"[",
"k",
"]",
"=",
"img",
"[",
"i",
",",
"j",
"]",
"k",
"+=",
"1",
"# Resample",
"grid_x",
",",
"grid_y",
"=",
"np",
".",
"mgrid",
"[",
"-",
"0.5",
":",
"shape",
"[",
"1",
"]",
"-",
"0.5",
":",
"0.1",
",",
"-",
"0.5",
":",
"shape",
"[",
"0",
"]",
"-",
"0.5",
":",
"0.1",
"]",
"resampled",
"=",
"griddata",
"(",
"xy",
",",
"z",
",",
"(",
"grid_x",
",",
"grid_y",
")",
",",
"method",
"=",
"'cubic'",
")",
"# Rotate to align with K2 image. Not sure why, but it is necessary",
"resampled",
"=",
"np",
".",
"rot90",
"(",
"resampled",
")",
"return",
"resampled"
] | Queries the Palomar Observatory Sky Survey II catalog to
obtain a higher resolution optical image of the star with EPIC number
:py:obj:`ID`. | [
"Queries",
"the",
"Palomar",
"Observatory",
"Sky",
"Survey",
"II",
"catalog",
"to",
"obtain",
"a",
"higher",
"resolution",
"optical",
"image",
"of",
"the",
"star",
"with",
"EPIC",
"number",
":",
"py",
":",
"obj",
":",
"ID",
"."
] | python | train |
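The RA/Dec handling above turns decimal degrees into the '+'-separated sexagesimal strings the DSS cut-out service expects; that step in isolation (input coordinates are arbitrary):

    def poss_coords(ra_deg, dec_deg):
        # Right ascension: degrees -> hours, minutes, seconds.
        hours = ra_deg * 24.0 / 360.0
        hou = int(hours)
        mins = int(60 * (hours - hou))
        sec = 60 * (60 * (hours - hou) - mins)
        ra = '%02d+%02d+%.2f' % (hou, mins, sec)
        # Declination: sign, then degrees, arcminutes, arcseconds.
        sgn = '' if dec_deg >= 0 else '-'
        d = abs(dec_deg)
        deg = int(d)
        mins = int(60 * (d - deg))
        sec = 3600 * (d - deg - mins / 60)
        dec = '%s%02d+%02d+%.1f' % (sgn, deg, mins, sec)
        return ra, dec

    assert poss_coords(180.0, -45.5) == ('12+00+0.00', '-45+30+0.0')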
OpenHydrology/floodestimation | floodestimation/analysis.py | https://github.com/OpenHydrology/floodestimation/blob/782da7c5abd1348923129efe89fb70003ebb088c/floodestimation/analysis.py#L973-L981 | def _solve_location_param(self):
"""
We're lazy here and simply iterate to find the location parameter such that growth_curve(0.5)=1.
"""
params = copy.copy(self.params)
del params['loc']
f = lambda location: self.distr_f.ppf(0.5, loc=location, **params) - 1
return optimize.brentq(f, -10, 10) | [
"def",
"_solve_location_param",
"(",
"self",
")",
":",
"params",
"=",
"copy",
".",
"copy",
"(",
"self",
".",
"params",
")",
"del",
"params",
"[",
"'loc'",
"]",
"f",
"=",
"lambda",
"location",
":",
"self",
".",
"distr_f",
".",
"ppf",
"(",
"0.5",
",",
"loc",
"=",
"location",
",",
"*",
"*",
"params",
")",
"-",
"1",
"return",
"optimize",
".",
"brentq",
"(",
"f",
",",
"-",
"10",
",",
"10",
")"
] | We're lazy here and simply iterate to find the location parameter such that growth_curve(0.5)=1. | [
"We",
"re",
"lazy",
"here",
"and",
"simply",
"iterate",
"to",
"find",
"the",
"location",
"parameter",
"such",
"that",
"growth_curve",
"(",
"0",
".",
"5",
")",
"=",
"1",
"."
] | python | train |
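The same trick in isolation: Brent's method finds the location that pins a distribution's median at exactly 1. Here with a Gumbel growth curve and an arbitrary scale (standard SciPy, not the package's distribution wrapper):

    from scipy import optimize
    from scipy.stats import gumbel_r

    scale = 0.3
    f = lambda loc: gumbel_r.ppf(0.5, loc=loc, scale=scale) - 1
    loc = optimize.brentq(f, -10, 10)       # root in the bracket [-10, 10]

    assert abs(gumbel_r.ppf(0.5, loc=loc, scale=scale) - 1) < 1e-12
    print(loc)                              # 1 + scale*ln(ln 2) ~= 0.8900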
awslabs/serverless-application-model | samtranslator/sdk/parameter.py | https://github.com/awslabs/serverless-application-model/blob/cccb0c96b5c91e53355ebc07e542467303a5eedd/samtranslator/sdk/parameter.py#L19-L59 | def add_default_parameter_values(self, sam_template):
"""
Method to read default values for template parameters and merge with user supplied values.
Example:
If the template contains the following parameters defined
Parameters:
Param1:
Type: String
Default: default_value
Param2:
Type: String
Default: default_value
And, the user explicitly provided the following parameter values:
{
Param2: "new value"
}
then, this method will grab default value for Param1 and return the following result:
{
Param1: "default_value",
Param2: "new value"
}
:param dict sam_template: SAM template
:param dict parameter_values: Dictionary of parameter values provided by the user
:return dict: Merged parameter values
"""
parameter_definition = sam_template.get("Parameters", None)
if not parameter_definition or not isinstance(parameter_definition, dict):
return self.parameter_values
for param_name, value in parameter_definition.items():
if param_name not in self.parameter_values and isinstance(value, dict) and "Default" in value:
self.parameter_values[param_name] = value["Default"] | [
"def",
"add_default_parameter_values",
"(",
"self",
",",
"sam_template",
")",
":",
"parameter_definition",
"=",
"sam_template",
".",
"get",
"(",
"\"Parameters\"",
",",
"None",
")",
"if",
"not",
"parameter_definition",
"or",
"not",
"isinstance",
"(",
"parameter_definition",
",",
"dict",
")",
":",
"return",
"self",
".",
"parameter_values",
"for",
"param_name",
",",
"value",
"in",
"parameter_definition",
".",
"items",
"(",
")",
":",
"if",
"param_name",
"not",
"in",
"self",
".",
"parameter_values",
"and",
"isinstance",
"(",
"value",
",",
"dict",
")",
"and",
"\"Default\"",
"in",
"value",
":",
"self",
".",
"parameter_values",
"[",
"param_name",
"]",
"=",
"value",
"[",
"\"Default\"",
"]"
] | Method to read default values for template parameters and merge with user supplied values.
Example:
If the template contains the following parameters defined
Parameters:
Param1:
Type: String
Default: default_value
Param2:
Type: String
Default: default_value
And, the user explicitly provided the following parameter values:
{
Param2: "new value"
}
then, this method will grab default value for Param1 and return the following result:
{
Param1: "default_value",
Param2: "new value"
}
:param dict sam_template: SAM template
:param dict parameter_values: Dictionary of parameter values provided by the user
:return dict: Merged parameter values | [
"Method",
"to",
"read",
"default",
"values",
"for",
"template",
"parameters",
"and",
"merge",
"with",
"user",
"supplied",
"values",
"."
] | python | train |
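The merge rule in isolation (template snippet and values are made up): defaults fill only the gaps, explicit user values always win, and parameters without a Default are left alone.

    template = {'Parameters': {
        'Param1': {'Type': 'String', 'Default': 'default_value'},
        'Param2': {'Type': 'String', 'Default': 'default_value'},
        'Param3': {'Type': 'String'},            # no Default -> skipped
    }}
    parameter_values = {'Param2': 'new value'}

    for name, defn in template['Parameters'].items():
        if name not in parameter_values and isinstance(defn, dict) and 'Default' in defn:
            parameter_values[name] = defn['Default']

    assert parameter_values == {'Param1': 'default_value', 'Param2': 'new value'}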
tensorflow/cleverhans | cleverhans/model_zoo/deep_k_nearest_neighbors/dknn.py | https://github.com/tensorflow/cleverhans/blob/97488e215760547b81afc53f5e5de8ba7da5bd98/cleverhans/model_zoo/deep_k_nearest_neighbors/dknn.py#L170-L190 | def nonconformity(self, knns_labels):
"""
Given a dictionary of nb_data x nb_classes dimension, compute the nonconformity of
each candidate label for each data point: i.e. the number of knns whose label is
different from the candidate label.
"""
nb_data = knns_labels[self.layers[0]].shape[0]
knns_not_in_class = np.zeros((nb_data, self.nb_classes), dtype=np.int32)
for i in range(nb_data):
# Compute number of nearest neighbors per class
knns_in_class = np.zeros(
(len(self.layers), self.nb_classes), dtype=np.int32)
for layer_id, layer in enumerate(self.layers):
knns_in_class[layer_id, :] = np.bincount(
knns_labels[layer][i], minlength=self.nb_classes)
# Compute number of knns in other class than class_id
for class_id in range(self.nb_classes):
knns_not_in_class[i, class_id] = np.sum(
knns_in_class) - np.sum(knns_in_class[:, class_id])
return knns_not_in_class | [
"def",
"nonconformity",
"(",
"self",
",",
"knns_labels",
")",
":",
"nb_data",
"=",
"knns_labels",
"[",
"self",
".",
"layers",
"[",
"0",
"]",
"]",
".",
"shape",
"[",
"0",
"]",
"knns_not_in_class",
"=",
"np",
".",
"zeros",
"(",
"(",
"nb_data",
",",
"self",
".",
"nb_classes",
")",
",",
"dtype",
"=",
"np",
".",
"int32",
")",
"for",
"i",
"in",
"range",
"(",
"nb_data",
")",
":",
"# Compute number of nearest neighbors per class",
"knns_in_class",
"=",
"np",
".",
"zeros",
"(",
"(",
"len",
"(",
"self",
".",
"layers",
")",
",",
"self",
".",
"nb_classes",
")",
",",
"dtype",
"=",
"np",
".",
"int32",
")",
"for",
"layer_id",
",",
"layer",
"in",
"enumerate",
"(",
"self",
".",
"layers",
")",
":",
"knns_in_class",
"[",
"layer_id",
",",
":",
"]",
"=",
"np",
".",
"bincount",
"(",
"knns_labels",
"[",
"layer",
"]",
"[",
"i",
"]",
",",
"minlength",
"=",
"self",
".",
"nb_classes",
")",
"# Compute number of knns in other class than class_id",
"for",
"class_id",
"in",
"range",
"(",
"self",
".",
"nb_classes",
")",
":",
"knns_not_in_class",
"[",
"i",
",",
"class_id",
"]",
"=",
"np",
".",
"sum",
"(",
"knns_in_class",
")",
"-",
"np",
".",
"sum",
"(",
"knns_in_class",
"[",
":",
",",
"class_id",
"]",
")",
"return",
"knns_not_in_class"
] | Given a dictionary of nb_data x nb_classes dimension, compute the nonconformity of
each candidate label for each data point: i.e. the number of knns whose label is
different from the candidate label. | [
"Given",
"an",
"dictionary",
"of",
"nb_data",
"x",
"nb_classes",
"dimension",
"compute",
"the",
"nonconformity",
"of",
"each",
"candidate",
"label",
"for",
"each",
"data",
"point",
":",
"i",
".",
"e",
".",
"the",
"number",
"of",
"knns",
"whose",
"label",
"is",
"different",
"from",
"the",
"candidate",
"label",
"."
] | python | train |
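A worked example of the counting for a single data point: three layers, three classes, five neighbours per layer, so the nonconformity of a candidate label is how many of the 15 neighbours disagree with it (neighbour labels below are made up):

    import numpy as np

    nb_classes = 3
    knns_labels = {                       # labels of the 5 NNs at each layer
        'layer1': np.array([0, 0, 1, 0, 2]),
        'layer2': np.array([0, 1, 1, 0, 0]),
        'layer3': np.array([0, 0, 0, 2, 0]),
    }

    knns_in_class = np.stack([np.bincount(v, minlength=nb_classes)
                              for v in knns_labels.values()])
    nonconformity = knns_in_class.sum() - knns_in_class.sum(axis=0)
    print(nonconformity)                  # [ 5 12 13] -> label 0 conforms best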
Rediker-Software/doac | doac/contrib/rest_framework/authentication.py | https://github.com/Rediker-Software/doac/blob/398fdd64452e4ff8662297b0381926addd77505a/doac/contrib/rest_framework/authentication.py#L6-L25 | def authenticate(self, request):
"""
Send the request through the authentication middleware that
is provided with DOAC and grab the user and token from it.
"""
from doac.middleware import AuthenticationMiddleware
try:
response = AuthenticationMiddleware().process_request(request)
except:
raise exceptions.AuthenticationFailed("Invalid handler")
if not hasattr(request, "user") or not request.user.is_authenticated():
return None
if not hasattr(request, "access_token"):
raise exceptions.AuthenticationFailed("Access token was not valid")
return request.user, request.access_token | [
"def",
"authenticate",
"(",
"self",
",",
"request",
")",
":",
"from",
"doac",
".",
"middleware",
"import",
"AuthenticationMiddleware",
"try",
":",
"response",
"=",
"AuthenticationMiddleware",
"(",
")",
".",
"process_request",
"(",
"request",
")",
"except",
":",
"raise",
"exceptions",
".",
"AuthenticationFailed",
"(",
"\"Invalid handler\"",
")",
"if",
"not",
"hasattr",
"(",
"request",
",",
"\"user\"",
")",
"or",
"not",
"request",
".",
"user",
".",
"is_authenticated",
"(",
")",
":",
"return",
"None",
"if",
"not",
"hasattr",
"(",
"request",
",",
"\"access_token\"",
")",
":",
"raise",
"exceptions",
".",
"AuthenticationFailed",
"(",
"\"Access token was not valid\"",
")",
"return",
"request",
".",
"user",
",",
"request",
".",
"access_token"
] | Send the request through the authentication middleware that
is provided with DOAC and grab the user and token from it. | [
"Send",
"the",
"request",
"through",
"the",
"authentication",
"middleware",
"that",
"is",
"provided",
"with",
"DOAC",
"and",
"grab",
"the",
"user",
"and",
"token",
"from",
"it",
"."
] | python | train |