Dataset columns (name, type, value statistics):

    repo              string    length 7 to 54
    path              string    length 4 to 192
    url               string    length 87 to 284
    code              string    length 78 to 104k
    code_tokens       sequence
    docstring         string    length 1 to 46.9k
    docstring_tokens  sequence
    language          string    1 distinct value
    partition         string    3 distinct values
googleapis/google-cloud-python
bigquery/google/cloud/bigquery/client.py
https://github.com/googleapis/google-cloud-python/blob/85e80125a59cb10f8cb105f25ecc099e4b940b50/bigquery/google/cloud/bigquery/client.py#L293-L309
def dataset(self, dataset_id, project=None):
    """Construct a reference to a dataset.

    :type dataset_id: str
    :param dataset_id: ID of the dataset.

    :type project: str
    :param project:
        (Optional) project ID for the dataset (defaults to
        the project of the client).

    :rtype: :class:`google.cloud.bigquery.dataset.DatasetReference`
    :returns: a new ``DatasetReference`` instance
    """
    if project is None:
        project = self.project

    return DatasetReference(project, dataset_id)
Construct a reference to a dataset. :type dataset_id: str :param dataset_id: ID of the dataset. :type project: str :param project: (Optional) project ID for the dataset (defaults to the project of the client). :rtype: :class:`google.cloud.bigquery.dataset.DatasetReference` :returns: a new ``DatasetReference`` instance
python
train
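A quick usage sketch for the dataset helper above; the client construction and the project/dataset IDs are illustrative assumptions, not part of the record:

from google.cloud import bigquery

client = bigquery.Client(project="my-project")  # hypothetical project ID
dataset_ref = client.dataset("analytics")  # DatasetReference("my-project", "analytics")
other_ref = client.dataset("logs", project="other-project")  # explicit project overrides the client's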
honzamach/pydgets
pydgets/widgets.py
https://github.com/honzamach/pydgets/blob/5ca4ce19fc2d9b5f41441fb9163810f8ca502e79/pydgets/widgets.py#L1294-L1308
def _render_content(self, content, **settings):
    """
    Perform widget rendering, but do not print anything.
    """
    bar_len = int(settings[self.SETTING_BAR_WIDTH])
    if not bar_len:
        bar_len = TERMINAL_WIDTH - 10
    percent = content
    progress = ""
    progress += str(settings[self.SETTING_BAR_CHAR]) * int(bar_len * percent)
    s = {k: settings[k] for k in (self.SETTING_FLAG_PLAIN,)}
    s.update(settings[self.SETTING_BAR_FORMATING])
    progress = self.fmt_text(progress, **s)
    progress += ' ' * int(bar_len - int(bar_len * percent))
    return "{:6.2f}% [{:s}]".format(percent * 100, progress)
Perform widget rendering, but do not print anything.
python
train
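The heart of _render_content above is the filled/padded split of the bar; a self-contained sketch of that arithmetic (the fill count and format string mirror the record, everything else is simplified and the defaults are invented):

def render_bar(percent, bar_len=40, bar_char="="):
    # Fill int(bar_len * percent) cells, pad the rest with spaces.
    filled = int(bar_len * percent)
    bar = bar_char * filled + " " * (bar_len - filled)
    return "{:6.2f}% [{:s}]".format(percent * 100, bar)

print(render_bar(0.375))  # " 37.50% [===============...]" (15 of 40 cells filled)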
alvations/pywsd
pywsd/utils.py
https://github.com/alvations/pywsd/blob/4c12394c8adbcfed71dd912bdbef2e36370821bf/pywsd/utils.py#L29-L34
def remove_tags(text: str) -> str:
    """ Removes <tags> in angled brackets from text. """
    tags = {i:" " for i in re.findall("(<[^>\n]*>)",text.strip())}
    no_tag_text = reduce(lambda x, kv:x.replace(*kv), tags.iteritems(), text)
    return " ".join(no_tag_text.split())
Removes <tags> in angled brackets from text.
python
train
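One bug worth flagging in the remove_tags record above: it carries Python 3 type annotations but calls tags.iteritems(), which only exists in Python 2 (and reduce must be imported from functools in Python 3). A Python 3 sketch of the same approach:

import re
from functools import reduce

def remove_tags_py3(text: str) -> str:
    # Same algorithm, with dict.items() replacing the Python 2-only iteritems().
    tags = {i: " " for i in re.findall(r"(<[^>\n]*>)", text.strip())}
    no_tag_text = reduce(lambda x, kv: x.replace(*kv), tags.items(), text)
    return " ".join(no_tag_text.split())

print(remove_tags_py3("a <b>bold</b> word"))  # a bold word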
frictionlessdata/tableschema-pandas-py
tableschema_pandas/mapper.py
https://github.com/frictionlessdata/tableschema-pandas-py/blob/ef941dbc12f5d346e9612f8fec1b4b356b8493ca/tableschema_pandas/mapper.py#L156-L176
def restore_row(self, row, schema, pk):
    """Restore row from Pandas
    """
    result = []
    for field in schema.fields:
        if schema.primary_key and schema.primary_key[0] == field.name:
            if field.type == 'number' and np.isnan(pk):
                pk = None
            if pk and field.type == 'integer':
                pk = int(pk)
            result.append(field.cast_value(pk))
        else:
            value = row[field.name]
            if field.type == 'number' and np.isnan(value):
                value = None
            if value and field.type == 'integer':
                value = int(value)
            elif field.type == 'datetime':
                value = value.to_pydatetime()
            result.append(field.cast_value(value))
    return result
Restore row from Pandas
python
train
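A simplified, standalone sketch of the per-field restoration logic in restore_row above (the schema/field machinery is stripped away; only the NaN-to-None and float-to-int casts remain):

import numpy as np

def restore_value(value, field_type):
    # pandas stores missing numbers as NaN and integer columns as floats;
    # undo both before handing the value to the schema cast.
    if field_type == 'number' and isinstance(value, float) and np.isnan(value):
        return None
    if value and field_type == 'integer':
        return int(value)
    return value

print(restore_value(float('nan'), 'number'))  # None
print(restore_value(3.0, 'integer'))          # 3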
klen/muffin-debugtoolbar
muffin_debugtoolbar/tbtools/tbtools.py
https://github.com/klen/muffin-debugtoolbar/blob/b650b35fbe2035888f6bba5dac3073ef01c94dc6/muffin_debugtoolbar/tbtools/tbtools.py#L231-L256
def render_full(self, request, lodgeit_url=None):
    """Render the Full HTML page with the traceback info."""
    app = request.app
    root_path = request.app.ps.debugtoolbar.cfg.prefix
    exc = escape(self.exception)
    summary = self.render_summary(include_title=False, request=request)
    token = request.app['debugtoolbar']['pdbt_token']
    vars = {
        'evalex': app.ps.debugtoolbar.cfg.intercept_exc == 'debug' and 'true' or 'false',
        'console': 'console',
        'lodgeit_url': lodgeit_url and escape(lodgeit_url) or '',
        'title': exc,
        'exception': exc,
        'exception_type': escape(self.exception_type),
        'summary': summary,
        'plaintext': self.plaintext,
        'plaintext_cs': re.sub('-{2,}', '-', self.plaintext),
        'traceback_id': self.id,
        'static_path': root_path + 'static/',
        'token': token,
        'root_path': root_path,
        'url': root_path + 'exception?token=%s&tb=%s' % (token, self.id),
    }
    template = app.ps.jinja2.env.get_template('debugtoolbar/exception.html')
    return template.render(app=app, request=request, **vars)
Render the Full HTML page with the traceback info.
python
train
pypa/pipenv
pipenv/vendor/click/globals.py
https://github.com/pypa/pipenv/blob/cae8d76c210b9777e90aab76e9c4b0e53bb19cde/pipenv/vendor/click/globals.py#L39-L48
def resolve_color_default(color=None):
    """"Internal helper to get the default value of the color flag.  If a
    value is passed it's returned unchanged, otherwise it's looked up from
    the current context.
    """
    if color is not None:
        return color
    ctx = get_current_context(silent=True)
    if ctx is not None:
        return ctx.color
Internal helper to get the default value of the color flag. If a value is passed it's returned unchanged, otherwise it's looked up from the current context.
python
train
watson-developer-cloud/python-sdk
ibm_watson/visual_recognition_v3.py
https://github.com/watson-developer-cloud/python-sdk/blob/4c2c9df4466fcde88975da9ecd834e6ba95eb353/ibm_watson/visual_recognition_v3.py#L789-L809
def _from_dict(cls, _dict):
    """Initialize a ClassifiedImage object from a json dictionary."""
    args = {}
    if 'source_url' in _dict:
        args['source_url'] = _dict.get('source_url')
    if 'resolved_url' in _dict:
        args['resolved_url'] = _dict.get('resolved_url')
    if 'image' in _dict:
        args['image'] = _dict.get('image')
    if 'error' in _dict:
        args['error'] = ErrorInfo._from_dict(_dict.get('error'))
    if 'classifiers' in _dict:
        args['classifiers'] = [
            ClassifierResult._from_dict(x)
            for x in (_dict.get('classifiers'))
        ]
    else:
        raise ValueError(
            'Required property \'classifiers\' not present in ClassifiedImage JSON'
        )
    return cls(**args)
Initialize a ClassifiedImage object from a json dictionary.
python
train
PrefPy/prefpy
prefpy/mechanismMcmc.py
https://github.com/PrefPy/prefpy/blob/f395ba3782f05684fa5de0cece387a6da9391d02/prefpy/mechanismMcmc.py#L140-L171
def getRankingBruteForce(self, profile):
    """
    Returns a list that orders all candidates from best to worst when we use brute
    force to compute Bayesian utilities for an election profile. This function assumes
    that getCandScoresMapBruteForce(profile) is implemented for the child Mechanism
    class. Note that the returned list gives no indication of ties between candidates.

    :ivar Profile profile: A Profile object that represents an election profile.
    """

    # We generate a map that associates each score with the candidates that have that score.
    candScoresMapBruteForce = self.getCandScoresMapBruteForce(profile)
    reverseCandScoresMap = dict()
    for key, value in candScoresMapBruteForce.items():
        if value not in reverseCandScoresMap.keys():
            reverseCandScoresMap[value] = [key]
        else:
            reverseCandScoresMap[value].append(key)

    # We sort the scores by either decreasing order or increasing order.
    if self.maximizeCandScore == True:
        sortedCandScores = sorted(reverseCandScoresMap.keys(), reverse=True)
    else:
        sortedCandScores = sorted(reverseCandScoresMap.keys())

    # We put the candidates into our ranking based on the order in which their score appears
    ranking = []
    for candScore in sortedCandScores:
        for cand in reverseCandScoresMap[candScore]:
            ranking.append(cand)

    return ranking
Returns a list that orders all candidates from best to worst when we use brute force to compute Bayesian utilities for an election profile. This function assumes that getCandScoresMapBruteForce(profile) is implemented for the child Mechanism class. Note that the returned list gives no indication of ties between candidates. :ivar Profile profile: A Profile object that represents an election profile.
python
train
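The record above inverts a candidate-to-score map and flattens it back out in score order; a minimal standalone version of that pattern (toy scores invented for the example):

cand_scores = {'a': 0.9, 'b': 0.4, 'c': 0.9}  # candidate -> Bayesian utility

reverse_map = {}
for cand, score in cand_scores.items():
    reverse_map.setdefault(score, []).append(cand)

# maximizeCandScore == True corresponds to reverse=True here
ranking = [cand for score in sorted(reverse_map, reverse=True)
                for cand in reverse_map[score]]
print(ranking)  # ['a', 'c', 'b']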
ministryofjustice/money-to-prisoners-common
mtp_common/build_tasks/tasks.py
https://github.com/ministryofjustice/money-to-prisoners-common/blob/33c43a2912cb990d9148da7c8718f480f07d90a1/mtp_common/build_tasks/tasks.py#L131-L139
def python_dependencies(context: Context, common_path=None):
    """
    Updates python dependencies
    """
    context.pip_command('install', '-r', context.requirements_file)
    if common_path:
        context.pip_command('uninstall', '--yes', 'money-to-prisoners-common')
        context.pip_command('install', '--force-reinstall', '-e', common_path)
        context.shell('rm', '-rf', 'webpack.config.js')
Updates python dependencies
python
train
flo-compbio/xlmhg
xlmhg/result.py
https://github.com/flo-compbio/xlmhg/blob/8e5929ee1dc91b95e343b7a2b1b1d6664c4540a1/xlmhg/result.py#L181-L188
def escore(self):
    """(property) Returns the E-score associated with the result."""
    hg_pval_thresh = self.escore_pval_thresh or self.pval
    escore_tol = self.escore_tol or mhg_cython.get_default_tol()
    es = mhg_cython.get_xlmhg_escore(
        self.indices, self.N, self.K, self.X, self.L,
        hg_pval_thresh, escore_tol)
    return es
(property) Returns the E-score associated with the result.
python
train
KelSolaar/Foundations
foundations/exceptions.py
https://github.com/KelSolaar/Foundations/blob/5c141330faf09dad70a12bc321f4c564917d0a91/foundations/exceptions.py#L171-L196
def extract_locals(trcback):
    """
    Extracts the frames locals of given traceback.

    :param trcback: Traceback.
    :type trcback: Traceback
    :return: Frames locals.
    :rtype: list
    """

    output = []
    stack = extract_stack(get_inner_most_frame(trcback))
    for frame, file_name, line_number, name, context, index in stack:
        args_names, nameless, keyword = extract_arguments(frame)
        arguments, nameless_args, keyword_args, locals = OrderedDict(), [], {}, {}
        for key, data in frame.f_locals.iteritems():
            if key == nameless:
                nameless_args = map(repr, frame.f_locals.get(nameless, ()))
            elif key == keyword:
                keyword_args = dict((arg, repr(value)) for arg, value in
                                    frame.f_locals.get(keyword, {}).iteritems())
            elif key in args_names:
                arguments[key] = repr(data)
            else:
                locals[key] = repr(data)
        output.append(((name, file_name, line_number),
                       (arguments, nameless_args, keyword_args, locals)))
    return output
Extracts the frames locals of given traceback. :param trcback: Traceback. :type trcback: Traceback :return: Frames locals. :rtype: list
python
train
Nachtfeuer/pipeline
spline/tools/report/generator.py
https://github.com/Nachtfeuer/pipeline/blob/04ca18c4e95e4349532bb45b768206393e1f2c13/spline/tools/report/generator.py#L26-L45
def generate_html(store):
    """
    Generating HTML report.

    Args:
        store (Store): report data.

    Returns:
        str: rendered HTML template.
    """
    spline = {
        'version': VERSION,
        'url': 'https://github.com/Nachtfeuer/pipeline',
        'generated': datetime.now().strftime("%A, %d. %B %Y - %I:%M:%S %p")
    }
    html_template_file = os.path.join(os.path.dirname(__file__), 'templates/report.html.j2')
    with open(html_template_file) as handle:
        html_template = handle.read()
    return render(html_template, spline=spline, store=store)
Generating HTML report. Args: store (Store): report data. Returns: str: rendered HTML template.
python
train
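generate_html above delegates to spline's own render helper; the same data handoff with plain jinja2, using an invented one-line template string just to show the flow:

from datetime import datetime
from jinja2 import Template

template = Template("<h1>Report</h1><p>generated {{ spline.generated }}</p>")
spline = {'generated': datetime.now().strftime("%A, %d. %B %Y - %I:%M:%S %p")}
print(template.render(spline=spline))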
rackerlabs/simpl
simpl/rest.py
https://github.com/rackerlabs/simpl/blob/60ed3336a931cd6a7a7246e60f26165d9dc7c99c/simpl/rest.py#L169-L175
def validate_range_values(request, label, kwargs):
    """Ensure value contained in label is a positive integer."""
    value = kwargs.get(label, request.query.get(label))
    if value:
        kwargs[label] = int(value)
        if kwargs[label] < 0 or kwargs[label] > MAX_PAGE_SIZE:
            raise ValueError
Ensure value contained in label is a positive integer.
python
train
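A standalone sketch of the bounds check in validate_range_values above (the MAX_PAGE_SIZE value is an assumption; the record leaves it at module scope):

MAX_PAGE_SIZE = 10000  # assumed limit

def validate_range(value, max_value=MAX_PAGE_SIZE):
    # Same check as the record: coerce to int, reject values outside [0, max_value].
    value = int(value)
    if value < 0 or value > max_value:
        raise ValueError("out of range: %r" % value)
    return value

print(validate_range("250"))  # 250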
portantier/habu
habu/cli/cmd_hasher.py
https://github.com/portantier/habu/blob/87091e389dc6332fe1b82830c22b2eefc55816f2/habu/cli/cmd_hasher.py#L11-L43
def cmd_hasher(f, algorithm):
    """Compute various hashes for the input data, that can be a file or a stream.

    Example:

    \b
    $ habu.hasher README.rst
    md5          992a833cd162047daaa6a236b8ac15ae README.rst
    ripemd160    0566f9141e65e57cae93e0e3b70d1d8c2ccb0623 README.rst
    sha1         d7dbfd2c5e2828eb22f776550c826e4166526253 README.rst
    sha256       6bb22d927e1b6307ced616821a1877b6cc35e... README.rst
    sha512       8743f3eb12a11cf3edcc16e400fb14d599b4a... README.rst
    whirlpool    96bcc083242e796992c0f3462f330811f9e8c... README.rst

    You can also specify which algorithm to use. In such case, the output is
    only the value of the calculated hash:

    \b
    $ habu.hasher -a md5 README.rst
    992a833cd162047daaa6a236b8ac15ae README.rst
    """

    data = f.read()

    if not data:
        print("Empty file or string!")
        return 1

    if algorithm:
        print(hasher(data, algorithm)[algorithm], f.name)
    else:
        for algo, result in hasher(data).items():
            print("{:<12} {} {}".format(algo, result, f.name))
Compute various hashes for the input data, that can be a file or a stream. Example: \b $ habu.hasher README.rst md5 992a833cd162047daaa6a236b8ac15ae README.rst ripemd160 0566f9141e65e57cae93e0e3b70d1d8c2ccb0623 README.rst sha1 d7dbfd2c5e2828eb22f776550c826e4166526253 README.rst sha256 6bb22d927e1b6307ced616821a1877b6cc35e... README.rst sha512 8743f3eb12a11cf3edcc16e400fb14d599b4a... README.rst whirlpool 96bcc083242e796992c0f3462f330811f9e8c... README.rst You can also specify which algorithm to use. In such case, the output is only the value of the calculated hash: \b $ habu.hasher -a md5 README.rst 992a833cd162047daaa6a236b8ac15ae README.rst
python
train
DLR-RM/RAFCON
source/rafcon/gui/controllers/graphical_editor_gaphas.py
https://github.com/DLR-RM/RAFCON/blob/24942ef1a904531f49ab8830a1dbb604441be498/source/rafcon/gui/controllers/graphical_editor_gaphas.py#L380-L627
def state_machine_change_after(self, model, prop_name, info):
    """Called on any change within th state machine

    This method is called, when any state, transition, data flow, etc. within the state
    machine changes. This then typically requires a redraw of the graphical editor, to
    display these changes immediately.

    :param rafcon.gui.models.state_machine.StateMachineModel model: The state machine model
    :param str prop_name: The property that was changed
    :param dict info: Information about the change
    """
    if 'method_name' in info and info['method_name'] == 'root_state_change':
        method_name, model, result, arguments, instance = self._extract_info_data(info['kwargs'])

        if self.model.ongoing_complex_actions:
            return

        # The method causing the change raised an exception, thus nothing was changed
        if (isinstance(result, string_types) and "CRASH" in result) or isinstance(result, Exception):
            return

        # avoid to remove views of elements of states which parent state is destroyed recursively
        if 'remove' in method_name:
            # for remove the model is always a state and in case of remove_state it is the container_state
            # that performs the operation therefore if is_about_to_be_destroyed_recursively is False
            # the child state can be removed and for True ignored because its parent will create a notification
            if model.is_about_to_be_destroyed_recursively:
                return

        # only react to the notification if the model is a model, which has to be drawn
        # if it is a model inside a library state, this is eventually not the case
        if isinstance(model, AbstractStateModel):
            library_root_state = model.state.get_next_upper_library_root_state()
            if library_root_state:
                parent_library_root_state_m = self.model.get_state_model_by_path(library_root_state.get_path())
                if not parent_library_root_state_m.parent.show_content():
                    return

        if method_name == 'state_execution_status':
            state_v = self.canvas.get_view_for_model(model)
            if state_v:  # Children of LibraryStates are not modeled, yet
                self.canvas.request_update(state_v, matrix=False)
        elif method_name == 'add_state':
            new_state = arguments[1]
            new_state_m = model.states[new_state.state_id]
            self.add_state_view_with_meta_data_for_model(new_state_m, model)
            if not self.perform_drag_and_drop:
                self.canvas.wait_for_update()
        elif method_name == 'remove_state':
            state_v = self.canvas.get_view_for_core_element(result)
            if state_v:
                parent_v = self.canvas.get_parent(state_v)
                state_v.remove()
                if parent_v:
                    self.canvas.request_update(parent_v)
                self.canvas.wait_for_update()

        # ----------------------------------
        # TRANSITIONS
        # ----------------------------------
        elif method_name == 'add_transition':
            transitions_models = model.transitions
            transition_id = result
            for transition_m in transitions_models:
                if transition_m.transition.transition_id == transition_id:
                    self.add_transition_view_for_model(transition_m, model)
                    self.canvas.wait_for_update()
                    break
        elif method_name == 'remove_transition':
            transition_v = self.canvas.get_view_for_core_element(result)
            if transition_v:
                state_m = model
                state_v = self.canvas.get_view_for_model(state_m)
                transition_v.remove()
                self.canvas.request_update(state_v, matrix=False)
                self.canvas.wait_for_update()
        elif method_name == 'transition_change':
            transition_m = model
            transition_v = self.canvas.get_view_for_model(transition_m)
            self._reconnect_transition(transition_v, transition_m, transition_m.parent)
            self.canvas.wait_for_update()

        # ----------------------------------
        # DATA FLOW
        # ----------------------------------
        elif method_name == 'add_data_flow':
            data_flow_models = model.data_flows
            data_flow_id = result
            for data_flow_m in data_flow_models:
                if data_flow_m.data_flow.data_flow_id == data_flow_id:
                    self.add_data_flow_view_for_model(data_flow_m, model)
                    self.canvas.wait_for_update()
                    break
        elif method_name == 'remove_data_flow':
            data_flow_v = self.canvas.get_view_for_core_element(result)
            if data_flow_v:
                state_m = model
                state_v = self.canvas.get_view_for_model(state_m)
                self.canvas.request_update(state_v, matrix=False)
                data_flow_v.remove()
                self.canvas.wait_for_update()
        elif method_name == 'data_flow_change':
            data_flow_m = model
            data_flow_v = self.canvas.get_view_for_model(data_flow_m)
            self._reconnect_data_flow(data_flow_v, data_flow_m, data_flow_m.parent)
            self.canvas.wait_for_update()

        # ----------------------------------
        # OUTCOMES
        # ----------------------------------
        elif method_name == 'add_outcome':
            state_m = model
            state_v = self.canvas.get_view_for_model(state_m)
            for outcome_m in state_m.outcomes:
                if outcome_m.outcome.outcome_id == result:
                    state_v.add_outcome(outcome_m)
                    self.canvas.request_update(state_v, matrix=False)
                    self.canvas.wait_for_update()
                    break
        elif method_name == 'remove_outcome':
            state_m = model
            state_v = self.canvas.get_view_for_model(state_m)
            if state_v is None:
                logger.debug("no state_v found for method_name '{}'".format(method_name))
            else:
                outcome_v = self.canvas.get_view_for_core_element(result)
                if outcome_v:
                    state_v.remove_outcome(outcome_v)
                    self.canvas.request_update(state_v, matrix=False)
                    self.canvas.wait_for_update()

        # ----------------------------------
        # DATA PORTS
        # ----------------------------------
        elif method_name == 'add_input_data_port':
            state_m = model
            state_v = self.canvas.get_view_for_model(state_m)
            for input_data_port_m in state_m.input_data_ports:
                if input_data_port_m.data_port.data_port_id == result:
                    state_v.add_input_port(input_data_port_m)
                    self.canvas.request_update(state_v, matrix=False)
                    self.canvas.wait_for_update()
                    break
        elif method_name == 'add_output_data_port':
            state_m = model
            state_v = self.canvas.get_view_for_model(state_m)
            for output_data_port_m in state_m.output_data_ports:
                if output_data_port_m.data_port.data_port_id == result:
                    state_v.add_output_port(output_data_port_m)
                    self.canvas.request_update(state_v, matrix=False)
                    self.canvas.wait_for_update()
                    break
        elif method_name == 'remove_input_data_port':
            state_m = model
            state_v = self.canvas.get_view_for_model(state_m)
            if state_v is None:
                logger.debug("no state_v found for method_name '{}'".format(method_name))
            else:
                input_port_v = self.canvas.get_view_for_core_element(result)
                if input_port_v:
                    state_v.remove_input_port(input_port_v)
                    self.canvas.request_update(state_v, matrix=False)
                    self.canvas.wait_for_update()
        elif method_name == 'remove_output_data_port':
            state_m = model
            state_v = self.canvas.get_view_for_model(state_m)
            if state_v is None:
                logger.debug("no state_v found for method_name '{}'".format(method_name))
            else:
                output_port_v = self.canvas.get_view_for_core_element(result)
                if output_port_v:
                    state_v.remove_output_port(output_port_v)
                    self.canvas.request_update(state_v, matrix=False)
                    self.canvas.wait_for_update()
        elif method_name in ['data_type', 'change_data_type']:
            pass
        elif method_name == 'default_value':
            pass

        # ----------------------------------
        # SCOPED VARIABLES
        # ----------------------------------
        elif method_name == 'add_scoped_variable':
            state_m = model
            state_v = self.canvas.get_view_for_model(state_m)
            for scoped_variable_m in state_m.scoped_variables:
                if scoped_variable_m.scoped_variable.data_port_id == result:
                    state_v.add_scoped_variable(scoped_variable_m)
                    self.canvas.request_update(state_v, matrix=False)
                    self.canvas.wait_for_update()
                    break
        elif method_name == 'remove_scoped_variable':
            state_m = model
            state_v = self.canvas.get_view_for_model(state_m)
            if state_v is None:
                logger.debug("no state_v found for method_name '{}'".format(method_name))
            else:
                scoped_variable_v = self.canvas.get_view_for_core_element(result)
                if scoped_variable_v:
                    state_v.remove_scoped_variable(scoped_variable_v)
                    self.canvas.request_update(state_v, matrix=False)
                    self.canvas.wait_for_update()

        # ----------------------------------
        # STATE MISCELLANEOUS
        # ----------------------------------
        elif method_name == 'name':
            # The name of a state was changed
            if not isinstance(model, AbstractStateModel):
                parent_model = model.parent
            # The name of a port (input, output, scoped var, outcome) was changed
            else:
                parent_model = model
            state_v = self.canvas.get_view_for_model(parent_model)
            if parent_model is model:
                state_v.name_view.name = arguments[1]
                self.canvas.request_update(state_v.name_view, matrix=False)
            else:
                self.canvas.request_update(state_v, matrix=False)
            self.canvas.wait_for_update()
        elif method_name == 'parent':
            pass
        elif method_name == 'description':
            pass
        elif method_name == 'script_text':
            pass
        # TODO handle the following method calls -> for now those are explicit (in the past implicit) ignored
        # TODO -> correct the complex actions which are used in some test (by test calls or by adapting the model)
        elif method_name in ['input_data_ports', 'output_data_ports', 'outcomes',
                             'change_root_state_type', 'change_state_type',
                             'group_states', 'ungroup_state', 'substitute_state']:
            pass
        else:
            known_ignore_list = ['set_input_runtime_value', 'set_use_input_runtime_value',  # from library State
                                 'set_output_runtime_value', 'set_use_output_runtime_value',
                                 'input_data_port_runtime_values', 'use_runtime_value_input_data_ports',
                                 'output_data_port_runtime_values', 'use_runtime_value_output_data_ports',
                                 'semantic_data', 'add_semantic_data', 'remove_semantic_data', 'remove_income']
            if method_name not in known_ignore_list:
                logger.warning("Method {0} not caught in GraphicalViewer, details: {1}".format(method_name, info))

        if method_name in ['add_state', 'add_transition', 'add_data_flow', 'add_outcome', 'add_input_data_port',
                           'add_output_data_port', 'add_scoped_variable', 'data_flow_change', 'transition_change']:
            try:
                self._meta_data_changed(None, model, 'append_to_last_change', True)
            except Exception as e:
                logger.exception('Error while trying to emit meta data signal {0} {1}'.format(e, model))
Called on any change within th state machine This method is called, when any state, transition, data flow, etc. within the state machine changes. This then typically requires a redraw of the graphical editor, to display these changes immediately. :param rafcon.gui.models.state_machine.StateMachineModel model: The state machine model :param str prop_name: The property that was changed :param dict info: Information about the change
python
train
eyeseast/propublica-congress
congress/members.py
https://github.com/eyeseast/propublica-congress/blob/03e519341063c5703080b4723112f1831816c77e/congress/members.py#L51-L60
def compare(self, first, second, chamber, type='votes', congress=CURRENT_CONGRESS):
    """
    See how often two members voted together in a given Congress.
    Takes two member IDs, a chamber and a Congress number.
    """
    check_chamber(chamber)
    path = "members/{first}/{type}/{second}/{congress}/{chamber}.json"
    path = path.format(first=first, second=second, type=type,
                       congress=congress, chamber=chamber)
    return self.fetch(path)
See how often two members voted together in a given Congress. Takes two member IDs, a chamber and a Congress number.
python
train
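A hypothetical call against the compare record above, assuming the package's usual Congress client wrapper; the API key and member IDs are placeholders:

from congress import Congress

congress = Congress("YOUR_API_KEY")
# first, second and chamber are positional; type and congress keep their defaults
result = congress.members.compare("A000360", "B001135", "senate")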
MillionIntegrals/vel
vel/rl/algo/policy_gradient/trpo.py
https://github.com/MillionIntegrals/vel/blob/e0726e1f63742b728966ccae0c8b825ea0ba491a/vel/rl/algo/policy_gradient/trpo.py#L263-L272
def create(max_kl, cg_iters, line_search_iters, cg_damping, entropy_coef, vf_iters, discount_factor,
           gae_lambda=1.0, improvement_acceptance_ratio=0.1, max_grad_norm=0.5):
    """ Vel factory function """
    return TrpoPolicyGradient(
        max_kl, int(cg_iters), int(line_search_iters), cg_damping, entropy_coef, vf_iters,
        discount_factor=discount_factor,
        gae_lambda=gae_lambda,
        improvement_acceptance_ratio=improvement_acceptance_ratio,
        max_grad_norm=max_grad_norm
    )
Vel factory function
python
train
dereneaton/ipyrad
ipyrad/analysis/tetrad2.py
https://github.com/dereneaton/ipyrad/blob/5eeb8a178160f45faf71bf47cec4abe998a575d1/ipyrad/analysis/tetrad2.py#L1651-L1689
def find_clades(trees, names):
    """
    A subfunc of consensus_tree(). Traverses trees to count clade
    occurrences. Names are ordered by names, else they are in
    the order of the first tree.
    """
    ## index names from the first tree
    if not names:
        names = trees[0].get_leaf_names()
    ndict = {j: i for i, j in enumerate(names)}
    namedict = {i: j for i, j in enumerate(names)}

    ## store counts
    clade_counts = defaultdict(int)

    ## count as bitarray clades in each tree
    for tree in trees:
        tree.unroot()
        for node in tree.traverse('postorder'):
            #bits = bitarray('0'*len(tree))
            bits = np.zeros(len(tree), dtype=np.bool_)
            for child in node.iter_leaf_names():
                bits[ndict[child]] = True

            ## if parent is root then mirror flip one child (where bit[0]=0)
            # if not node.is_root():
            #     if node.up.is_root():
            #         if bits[0]:
            #             bits.invert()

            bitstring = "".join([np.binary_repr(i) for i in bits])
            clade_counts[bitstring] += 1

    ## convert to freq
    for key, val in clade_counts.items():
        clade_counts[key] = val / float(len(trees))

    ## return in sorted order
    clade_counts = sorted(clade_counts.items(), key=lambda x: x[1], reverse=True)
    return namedict, clade_counts
A subfunc of consensus_tree(). Traverses trees to count clade occurrences. Names are ordered by names, else they are in the order of the first tree.
python
valid
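The clade keys in find_clades above are strings of '0'/'1' built from a boolean leaf mask; a minimal reproduction of that encoding for a single clade (the leaf names are invented):

from collections import defaultdict
import numpy as np

names = ['a', 'b', 'c', 'd']
ndict = {name: i for i, name in enumerate(names)}

bits = np.zeros(len(names), dtype=np.bool_)
for leaf in ('a', 'c'):          # the clade {a, c}
    bits[ndict[leaf]] = True
bitstring = "".join(np.binary_repr(i) for i in bits)

clade_counts = defaultdict(int)
clade_counts[bitstring] += 1
print(bitstring)  # 1010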
josiah-wolf-oberholtzer/uqbar
uqbar/sphinx/api.py
https://github.com/josiah-wolf-oberholtzer/uqbar/blob/eca7fefebbbee1e2ae13bf5d6baa838be66b1db6/uqbar/sphinx/api.py#L116-L134
def setup(app) -> Dict[str, Any]:
    """
    Sets up Sphinx extension.
    """
    app.add_config_value("uqbar_api_directory_name", "api", "env")
    app.add_config_value("uqbar_api_document_empty_modules", False, "env")
    app.add_config_value("uqbar_api_document_private_members", False, "env")
    app.add_config_value("uqbar_api_document_private_modules", False, "env")
    app.add_config_value("uqbar_api_member_documenter_classes", None, "env")
    app.add_config_value("uqbar_api_module_documenter_class", None, "env")
    app.add_config_value("uqbar_api_root_documenter_class", None, "env")
    app.add_config_value("uqbar_api_source_paths", None, "env")
    app.add_config_value("uqbar_api_title", "API", "html")
    app.connect("builder-inited", on_builder_inited)
    return {
        "version": uqbar.__version__,
        "parallel_read_safe": True,
        "parallel_write_safe": True,
    }
[ "def", "setup", "(", "app", ")", "->", "Dict", "[", "str", ",", "Any", "]", ":", "app", ".", "add_config_value", "(", "\"uqbar_api_directory_name\"", ",", "\"api\"", ",", "\"env\"", ")", "app", ".", "add_config_value", "(", "\"uqbar_api_document_empty_modules\"", ",", "False", ",", "\"env\"", ")", "app", ".", "add_config_value", "(", "\"uqbar_api_document_private_members\"", ",", "False", ",", "\"env\"", ")", "app", ".", "add_config_value", "(", "\"uqbar_api_document_private_modules\"", ",", "False", ",", "\"env\"", ")", "app", ".", "add_config_value", "(", "\"uqbar_api_member_documenter_classes\"", ",", "None", ",", "\"env\"", ")", "app", ".", "add_config_value", "(", "\"uqbar_api_module_documenter_class\"", ",", "None", ",", "\"env\"", ")", "app", ".", "add_config_value", "(", "\"uqbar_api_root_documenter_class\"", ",", "None", ",", "\"env\"", ")", "app", ".", "add_config_value", "(", "\"uqbar_api_source_paths\"", ",", "None", ",", "\"env\"", ")", "app", ".", "add_config_value", "(", "\"uqbar_api_title\"", ",", "\"API\"", ",", "\"html\"", ")", "app", ".", "connect", "(", "\"builder-inited\"", ",", "on_builder_inited", ")", "return", "{", "\"version\"", ":", "uqbar", ".", "__version__", ",", "\"parallel_read_safe\"", ":", "True", ",", "\"parallel_write_safe\"", ":", "True", ",", "}" ]
Sets up Sphinx extension.
[ "Sets", "up", "Sphinx", "extension", "." ]
python
train
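Given the config values setup() registers above, wiring the extension into a project is mostly conf.py bookkeeping. A hypothetical configuration sketch (the project title and package name are made up):

# conf.py -- hypothetical Sphinx project configuration
extensions = ["uqbar.sphinx.api"]

# These names correspond to the add_config_value() calls registered above.
uqbar_api_title = "My Project API"          # used at HTML render time
uqbar_api_source_paths = ["my_project"]     # assumed package name
uqbar_api_document_private_members = False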
SKA-ScienceDataProcessor/integration-prototype
sip/tango_control/tango_subarray/app/subarray_device.py
https://github.com/SKA-ScienceDataProcessor/integration-prototype/blob/8c8006de6ad71dcd44114b0338780738079c87d4/sip/tango_control/tango_subarray/app/subarray_device.py#L16-L20
def init_device(self): """Initialise the device.""" Device.init_device(self) time.sleep(0.1) self.set_state(DevState.STANDBY)
[ "def", "init_device", "(", "self", ")", ":", "Device", ".", "init_device", "(", "self", ")", "time", ".", "sleep", "(", "0.1", ")", "self", ".", "set_state", "(", "DevState", ".", "STANDBY", ")" ]
Initialise the device.
[ "Initialise", "the", "device", "." ]
python
train
zetaops/zengine
zengine/messaging/views.py
https://github.com/zetaops/zengine/blob/b5bc32d3b37bca799f8985be916f04528ac79e4a/zengine/messaging/views.py#L794-L822
def edit_message(current): """ Edit a message a user own. .. code-block:: python # request: { 'view':'_zops_edit_message', 'message': { 'body': string, # message text 'key': key } } # response: { 'status': string, # 'OK' for success 'code': int, # 200 for success } """ current.output = {'status': 'OK', 'code': 200} in_msg = current.input['message'] try: msg = Message(current).objects.get(sender_id=current.user_id, key=in_msg['key']) msg.body = in_msg['body'] msg.save() except ObjectDoesNotExist: raise HTTPError(404, "")
[ "def", "edit_message", "(", "current", ")", ":", "current", ".", "output", "=", "{", "'status'", ":", "'OK'", ",", "'code'", ":", "200", "}", "in_msg", "=", "current", ".", "input", "[", "'message'", "]", "try", ":", "msg", "=", "Message", "(", "current", ")", ".", "objects", ".", "get", "(", "sender_id", "=", "current", ".", "user_id", ",", "key", "=", "in_msg", "[", "'key'", "]", ")", "msg", ".", "body", "=", "in_msg", "[", "'body'", "]", "msg", ".", "save", "(", ")", "except", "ObjectDoesNotExist", ":", "raise", "HTTPError", "(", "404", ",", "\"\"", ")" ]
Edit a message a user owns.

        .. code-block:: python

            # request:
                {
                'view':'_zops_edit_message',
                'message': {
                    'body': string,     # message text
                    'key': key
                    }
                }

            # response:
                {
                'status': string,   # 'OK' for success
                'code': int,        # 200 for success
                }
[ "Edit", "a", "message", "a", "user", "owns", "." ]
python
train
Atomistica/atomistica
src/python/atomistica/mdcore_io.py
https://github.com/Atomistica/atomistica/blob/5ed79d776c92b91a566be22615bfb304ecc75db7/src/python/atomistica/mdcore_io.py#L164-L190
def read_cyc(this, fn, conv=1.0): """ Read the lattice information from a cyc.dat file (i.e., tblmd input file) """ f = paropen(fn, "r") f.readline() f.readline() f.readline() f.readline() cell = np.array( [ [ 0.0, 0.0, 0.0 ], [ 0.0, 0.0, 0.0 ], [ 0.0, 0.0, 0.0 ] ] ) l = f.readline() s = map(float, l.split()) cell[0, 0] = s[0]*conv cell[1, 0] = s[1]*conv cell[2, 0] = s[2]*conv l = f.readline() s = map(float, l.split()) cell[0, 1] = s[0]*conv cell[1, 1] = s[1]*conv cell[2, 1] = s[2]*conv l = f.readline() s = map(float, l.split()) cell[0, 2] = s[0]*conv cell[1, 2] = s[1]*conv cell[2, 2] = s[2]*conv this.set_cell(cell) this.set_pbc(True) f.close()
[ "def", "read_cyc", "(", "this", ",", "fn", ",", "conv", "=", "1.0", ")", ":", "f", "=", "paropen", "(", "fn", ",", "\"r\"", ")", "f", ".", "readline", "(", ")", "f", ".", "readline", "(", ")", "f", ".", "readline", "(", ")", "f", ".", "readline", "(", ")", "cell", "=", "np", ".", "array", "(", "[", "[", "0.0", ",", "0.0", ",", "0.0", "]", ",", "[", "0.0", ",", "0.0", ",", "0.0", "]", ",", "[", "0.0", ",", "0.0", ",", "0.0", "]", "]", ")", "l", "=", "f", ".", "readline", "(", ")", "s", "=", "map", "(", "float", ",", "l", ".", "split", "(", ")", ")", "cell", "[", "0", ",", "0", "]", "=", "s", "[", "0", "]", "*", "conv", "cell", "[", "1", ",", "0", "]", "=", "s", "[", "1", "]", "*", "conv", "cell", "[", "2", ",", "0", "]", "=", "s", "[", "2", "]", "*", "conv", "l", "=", "f", ".", "readline", "(", ")", "s", "=", "map", "(", "float", ",", "l", ".", "split", "(", ")", ")", "cell", "[", "0", ",", "1", "]", "=", "s", "[", "0", "]", "*", "conv", "cell", "[", "1", ",", "1", "]", "=", "s", "[", "1", "]", "*", "conv", "cell", "[", "2", ",", "1", "]", "=", "s", "[", "2", "]", "*", "conv", "l", "=", "f", ".", "readline", "(", ")", "s", "=", "map", "(", "float", ",", "l", ".", "split", "(", ")", ")", "cell", "[", "0", ",", "2", "]", "=", "s", "[", "0", "]", "*", "conv", "cell", "[", "1", ",", "2", "]", "=", "s", "[", "1", "]", "*", "conv", "cell", "[", "2", ",", "2", "]", "=", "s", "[", "2", "]", "*", "conv", "this", ".", "set_cell", "(", "cell", ")", "this", ".", "set_pbc", "(", "True", ")", "f", ".", "close", "(", ")" ]
Read the lattice information from a cyc.dat file (i.e., tblmd input file)
[ "Read", "the", "lattice", "information", "from", "a", "cyc", ".", "dat", "file", "(", "i", ".", "e", ".", "tblmd", "input", "file", ")" ]
python
train
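read_cyc() above is Python 2 code: under Python 3 the map(float, ...) calls return lazy iterators, so the s[0] indexing would raise a TypeError. A version-agnostic sketch of the same column-major parse:

# Python 3-safe version of the lattice-vector reads (column-major fill).
import numpy as np

def read_vector(line, conv=1.0):
    s = [float(x) for x in line.split()]  # a list, not a lazy map object
    return np.array(s[:3]) * conv

cell = np.zeros((3, 3))
lines = ["1.0 0.0 0.0", "0.0 1.0 0.0", "0.0 0.0 1.0"]  # stand-in for file rows
for col, line in enumerate(lines):
    cell[:, col] = read_vector(line)
print(cell)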
aouyar/PyMunin
pymunin/plugins/memcachedstats.py
https://github.com/aouyar/PyMunin/blob/4f58a64b6b37c85a84cc7e1e07aafaa0321b249d/pymunin/plugins/memcachedstats.py#L305-L440
def retrieveVals(self): """Retrieve values for graphs.""" if self._stats is None: serverInfo = MemcachedInfo(self._host, self._port, self._socket_file) stats = serverInfo.getStats() else: stats = self._stats if stats is None: raise Exception("Undetermined error accesing stats.") stats['set_hits'] = stats.get('total_items') if stats.has_key('cmd_set') and stats.has_key('total_items'): stats['set_misses'] = stats['cmd_set'] - stats['total_items'] self.saveState(stats) if self.hasGraph('memcached_connections'): self.setGraphVal('memcached_connections', 'conn', stats.get('curr_connections')) if self.hasGraph('memcached_items'): self.setGraphVal('memcached_items', 'items', stats.get('curr_items')) if self.hasGraph('memcached_memory'): self.setGraphVal('memcached_memory', 'bytes', stats.get('bytes')) if self.hasGraph('memcached_connrate'): self.setGraphVal('memcached_connrate', 'conn', stats.get('total_connections')) if self.hasGraph('memcached_traffic'): self.setGraphVal('memcached_traffic', 'rxbytes', stats.get('bytes_read')) self.setGraphVal('memcached_traffic', 'txbytes', stats.get('bytes_written')) if self.hasGraph('memcached_reqrate'): self.setGraphVal('memcached_reqrate', 'set', stats.get('cmd_set')) self.setGraphVal('memcached_reqrate', 'get', stats.get('cmd_get')) if self.graphHasField('memcached_reqrate', 'del'): self.setGraphVal('memcached_reqrate', 'del', safe_sum([stats.get('delete_hits'), stats.get('delete_misses')])) if self.graphHasField('memcached_reqrate', 'cas'): self.setGraphVal('memcached_reqrate', 'cas', safe_sum([stats.get('cas_hits'), stats.get('cas_misses'), stats.get('cas_badval')])) if self.graphHasField('memcached_reqrate', 'incr'): self.setGraphVal('memcached_reqrate', 'incr', safe_sum([stats.get('incr_hits'), stats.get('incr_misses')])) if self.graphHasField('memcached_reqrate', 'decr'): self.setGraphVal('memcached_reqrate', 'decr', safe_sum([stats.get('decr_hits'), stats.get('decr_misses')])) if self.hasGraph('memcached_statget'): self.setGraphVal('memcached_statget', 'hit', stats.get('get_hits')) self.setGraphVal('memcached_statget', 'miss', stats.get('get_misses')) self.setGraphVal('memcached_statget', 'total', safe_sum([stats.get('get_hits'), stats.get('get_misses')])) if self.hasGraph('memcached_statset'): self.setGraphVal('memcached_statset', 'hit', stats.get('set_hits')) self.setGraphVal('memcached_statset', 'miss', stats.get('set_misses')) self.setGraphVal('memcached_statset', 'total', safe_sum([stats.get('set_hits'), stats.get('set_misses')])) if self.hasGraph('memcached_statdel'): self.setGraphVal('memcached_statdel', 'hit', stats.get('delete_hits')) self.setGraphVal('memcached_statdel', 'miss', stats.get('delete_misses')) self.setGraphVal('memcached_statdel', 'total', safe_sum([stats.get('delete_hits'), stats.get('delete_misses')])) if self.hasGraph('memcached_statcas'): self.setGraphVal('memcached_statcas', 'hit', stats.get('cas_hits')) self.setGraphVal('memcached_statcas', 'miss', stats.get('cas_misses')) self.setGraphVal('memcached_statcas', 'badval', stats.get('cas_badval')) self.setGraphVal('memcached_statcas', 'total', safe_sum([stats.get('cas_hits'), stats.get('cas_misses'), stats.get('cas_badval')])) if self.hasGraph('memcached_statincrdecr'): self.setGraphVal('memcached_statincrdecr', 'incr_hit', stats.get('incr_hits')) self.setGraphVal('memcached_statincrdecr', 'decr_hit', stats.get('decr_hits')) self.setGraphVal('memcached_statincrdecr', 'incr_miss', stats.get('incr_misses')) self.setGraphVal('memcached_statincrdecr', 'decr_miss', 
stats.get('decr_misses')) self.setGraphVal('memcached_statincrdecr', 'total', safe_sum([stats.get('incr_hits'), stats.get('decr_hits'), stats.get('incr_misses'), stats.get('decr_misses')])) if self.hasGraph('memcached_statevict'): self.setGraphVal('memcached_statevict', 'evict', stats.get('evictions')) if self.graphHasField('memcached_statevict', 'reclaim'): self.setGraphVal('memcached_statevict', 'reclaim', stats.get('reclaimed')) if self.hasGraph('memcached_statauth'): self.setGraphVal('memcached_statauth', 'reqs', stats.get('auth_cmds')) self.setGraphVal('memcached_statauth', 'errors', stats.get('auth_errors')) if self.hasGraph('memcached_hitpct'): prev_stats = self._prev_stats for (field_name, field_hits, field_misses) in ( ('set', 'set_hits', 'set_misses'), ('get', 'get_hits', 'get_misses'), ('del', 'delete_hits', 'delete_misses'), ('cas', 'cas_hits', 'cas_misses'), ('incr', 'incr_hits', 'incr_misses'), ('decr', 'decr_hits', 'decr_misses') ): if prev_stats: if (stats.has_key(field_hits) and prev_stats.has_key(field_hits) and stats.has_key(field_misses) and prev_stats.has_key(field_misses)): hits = stats[field_hits] - prev_stats[field_hits] misses = stats[field_misses] - prev_stats[field_misses] total = hits + misses if total > 0: val = 100.0 * hits / total else: val = 0 self.setGraphVal('memcached_hitpct', field_name, round(val, 2))
[ "def", "retrieveVals", "(", "self", ")", ":", "if", "self", ".", "_stats", "is", "None", ":", "serverInfo", "=", "MemcachedInfo", "(", "self", ".", "_host", ",", "self", ".", "_port", ",", "self", ".", "_socket_file", ")", "stats", "=", "serverInfo", ".", "getStats", "(", ")", "else", ":", "stats", "=", "self", ".", "_stats", "if", "stats", "is", "None", ":", "raise", "Exception", "(", "\"Undetermined error accesing stats.\"", ")", "stats", "[", "'set_hits'", "]", "=", "stats", ".", "get", "(", "'total_items'", ")", "if", "stats", ".", "has_key", "(", "'cmd_set'", ")", "and", "stats", ".", "has_key", "(", "'total_items'", ")", ":", "stats", "[", "'set_misses'", "]", "=", "stats", "[", "'cmd_set'", "]", "-", "stats", "[", "'total_items'", "]", "self", ".", "saveState", "(", "stats", ")", "if", "self", ".", "hasGraph", "(", "'memcached_connections'", ")", ":", "self", ".", "setGraphVal", "(", "'memcached_connections'", ",", "'conn'", ",", "stats", ".", "get", "(", "'curr_connections'", ")", ")", "if", "self", ".", "hasGraph", "(", "'memcached_items'", ")", ":", "self", ".", "setGraphVal", "(", "'memcached_items'", ",", "'items'", ",", "stats", ".", "get", "(", "'curr_items'", ")", ")", "if", "self", ".", "hasGraph", "(", "'memcached_memory'", ")", ":", "self", ".", "setGraphVal", "(", "'memcached_memory'", ",", "'bytes'", ",", "stats", ".", "get", "(", "'bytes'", ")", ")", "if", "self", ".", "hasGraph", "(", "'memcached_connrate'", ")", ":", "self", ".", "setGraphVal", "(", "'memcached_connrate'", ",", "'conn'", ",", "stats", ".", "get", "(", "'total_connections'", ")", ")", "if", "self", ".", "hasGraph", "(", "'memcached_traffic'", ")", ":", "self", ".", "setGraphVal", "(", "'memcached_traffic'", ",", "'rxbytes'", ",", "stats", ".", "get", "(", "'bytes_read'", ")", ")", "self", ".", "setGraphVal", "(", "'memcached_traffic'", ",", "'txbytes'", ",", "stats", ".", "get", "(", "'bytes_written'", ")", ")", "if", "self", ".", "hasGraph", "(", "'memcached_reqrate'", ")", ":", "self", ".", "setGraphVal", "(", "'memcached_reqrate'", ",", "'set'", ",", "stats", ".", "get", "(", "'cmd_set'", ")", ")", "self", ".", "setGraphVal", "(", "'memcached_reqrate'", ",", "'get'", ",", "stats", ".", "get", "(", "'cmd_get'", ")", ")", "if", "self", ".", "graphHasField", "(", "'memcached_reqrate'", ",", "'del'", ")", ":", "self", ".", "setGraphVal", "(", "'memcached_reqrate'", ",", "'del'", ",", "safe_sum", "(", "[", "stats", ".", "get", "(", "'delete_hits'", ")", ",", "stats", ".", "get", "(", "'delete_misses'", ")", "]", ")", ")", "if", "self", ".", "graphHasField", "(", "'memcached_reqrate'", ",", "'cas'", ")", ":", "self", ".", "setGraphVal", "(", "'memcached_reqrate'", ",", "'cas'", ",", "safe_sum", "(", "[", "stats", ".", "get", "(", "'cas_hits'", ")", ",", "stats", ".", "get", "(", "'cas_misses'", ")", ",", "stats", ".", "get", "(", "'cas_badval'", ")", "]", ")", ")", "if", "self", ".", "graphHasField", "(", "'memcached_reqrate'", ",", "'incr'", ")", ":", "self", ".", "setGraphVal", "(", "'memcached_reqrate'", ",", "'incr'", ",", "safe_sum", "(", "[", "stats", ".", "get", "(", "'incr_hits'", ")", ",", "stats", ".", "get", "(", "'incr_misses'", ")", "]", ")", ")", "if", "self", ".", "graphHasField", "(", "'memcached_reqrate'", ",", "'decr'", ")", ":", "self", ".", "setGraphVal", "(", "'memcached_reqrate'", ",", "'decr'", ",", "safe_sum", "(", "[", "stats", ".", "get", "(", "'decr_hits'", ")", ",", "stats", ".", "get", "(", "'decr_misses'", ")", "]", ")", ")", "if", "self", ".", "hasGraph", "(", 
"'memcached_statget'", ")", ":", "self", ".", "setGraphVal", "(", "'memcached_statget'", ",", "'hit'", ",", "stats", ".", "get", "(", "'get_hits'", ")", ")", "self", ".", "setGraphVal", "(", "'memcached_statget'", ",", "'miss'", ",", "stats", ".", "get", "(", "'get_misses'", ")", ")", "self", ".", "setGraphVal", "(", "'memcached_statget'", ",", "'total'", ",", "safe_sum", "(", "[", "stats", ".", "get", "(", "'get_hits'", ")", ",", "stats", ".", "get", "(", "'get_misses'", ")", "]", ")", ")", "if", "self", ".", "hasGraph", "(", "'memcached_statset'", ")", ":", "self", ".", "setGraphVal", "(", "'memcached_statset'", ",", "'hit'", ",", "stats", ".", "get", "(", "'set_hits'", ")", ")", "self", ".", "setGraphVal", "(", "'memcached_statset'", ",", "'miss'", ",", "stats", ".", "get", "(", "'set_misses'", ")", ")", "self", ".", "setGraphVal", "(", "'memcached_statset'", ",", "'total'", ",", "safe_sum", "(", "[", "stats", ".", "get", "(", "'set_hits'", ")", ",", "stats", ".", "get", "(", "'set_misses'", ")", "]", ")", ")", "if", "self", ".", "hasGraph", "(", "'memcached_statdel'", ")", ":", "self", ".", "setGraphVal", "(", "'memcached_statdel'", ",", "'hit'", ",", "stats", ".", "get", "(", "'delete_hits'", ")", ")", "self", ".", "setGraphVal", "(", "'memcached_statdel'", ",", "'miss'", ",", "stats", ".", "get", "(", "'delete_misses'", ")", ")", "self", ".", "setGraphVal", "(", "'memcached_statdel'", ",", "'total'", ",", "safe_sum", "(", "[", "stats", ".", "get", "(", "'delete_hits'", ")", ",", "stats", ".", "get", "(", "'delete_misses'", ")", "]", ")", ")", "if", "self", ".", "hasGraph", "(", "'memcached_statcas'", ")", ":", "self", ".", "setGraphVal", "(", "'memcached_statcas'", ",", "'hit'", ",", "stats", ".", "get", "(", "'cas_hits'", ")", ")", "self", ".", "setGraphVal", "(", "'memcached_statcas'", ",", "'miss'", ",", "stats", ".", "get", "(", "'cas_misses'", ")", ")", "self", ".", "setGraphVal", "(", "'memcached_statcas'", ",", "'badval'", ",", "stats", ".", "get", "(", "'cas_badval'", ")", ")", "self", ".", "setGraphVal", "(", "'memcached_statcas'", ",", "'total'", ",", "safe_sum", "(", "[", "stats", ".", "get", "(", "'cas_hits'", ")", ",", "stats", ".", "get", "(", "'cas_misses'", ")", ",", "stats", ".", "get", "(", "'cas_badval'", ")", "]", ")", ")", "if", "self", ".", "hasGraph", "(", "'memcached_statincrdecr'", ")", ":", "self", ".", "setGraphVal", "(", "'memcached_statincrdecr'", ",", "'incr_hit'", ",", "stats", ".", "get", "(", "'incr_hits'", ")", ")", "self", ".", "setGraphVal", "(", "'memcached_statincrdecr'", ",", "'decr_hit'", ",", "stats", ".", "get", "(", "'decr_hits'", ")", ")", "self", ".", "setGraphVal", "(", "'memcached_statincrdecr'", ",", "'incr_miss'", ",", "stats", ".", "get", "(", "'incr_misses'", ")", ")", "self", ".", "setGraphVal", "(", "'memcached_statincrdecr'", ",", "'decr_miss'", ",", "stats", ".", "get", "(", "'decr_misses'", ")", ")", "self", ".", "setGraphVal", "(", "'memcached_statincrdecr'", ",", "'total'", ",", "safe_sum", "(", "[", "stats", ".", "get", "(", "'incr_hits'", ")", ",", "stats", ".", "get", "(", "'decr_hits'", ")", ",", "stats", ".", "get", "(", "'incr_misses'", ")", ",", "stats", ".", "get", "(", "'decr_misses'", ")", "]", ")", ")", "if", "self", ".", "hasGraph", "(", "'memcached_statevict'", ")", ":", "self", ".", "setGraphVal", "(", "'memcached_statevict'", ",", "'evict'", ",", "stats", ".", "get", "(", "'evictions'", ")", ")", "if", "self", ".", "graphHasField", "(", "'memcached_statevict'", ",", "'reclaim'", ")", ":", "self", ".", 
"setGraphVal", "(", "'memcached_statevict'", ",", "'reclaim'", ",", "stats", ".", "get", "(", "'reclaimed'", ")", ")", "if", "self", ".", "hasGraph", "(", "'memcached_statauth'", ")", ":", "self", ".", "setGraphVal", "(", "'memcached_statauth'", ",", "'reqs'", ",", "stats", ".", "get", "(", "'auth_cmds'", ")", ")", "self", ".", "setGraphVal", "(", "'memcached_statauth'", ",", "'errors'", ",", "stats", ".", "get", "(", "'auth_errors'", ")", ")", "if", "self", ".", "hasGraph", "(", "'memcached_hitpct'", ")", ":", "prev_stats", "=", "self", ".", "_prev_stats", "for", "(", "field_name", ",", "field_hits", ",", "field_misses", ")", "in", "(", "(", "'set'", ",", "'set_hits'", ",", "'set_misses'", ")", ",", "(", "'get'", ",", "'get_hits'", ",", "'get_misses'", ")", ",", "(", "'del'", ",", "'delete_hits'", ",", "'delete_misses'", ")", ",", "(", "'cas'", ",", "'cas_hits'", ",", "'cas_misses'", ")", ",", "(", "'incr'", ",", "'incr_hits'", ",", "'incr_misses'", ")", ",", "(", "'decr'", ",", "'decr_hits'", ",", "'decr_misses'", ")", ")", ":", "if", "prev_stats", ":", "if", "(", "stats", ".", "has_key", "(", "field_hits", ")", "and", "prev_stats", ".", "has_key", "(", "field_hits", ")", "and", "stats", ".", "has_key", "(", "field_misses", ")", "and", "prev_stats", ".", "has_key", "(", "field_misses", ")", ")", ":", "hits", "=", "stats", "[", "field_hits", "]", "-", "prev_stats", "[", "field_hits", "]", "misses", "=", "stats", "[", "field_misses", "]", "-", "prev_stats", "[", "field_misses", "]", "total", "=", "hits", "+", "misses", "if", "total", ">", "0", ":", "val", "=", "100.0", "*", "hits", "/", "total", "else", ":", "val", "=", "0", "self", ".", "setGraphVal", "(", "'memcached_hitpct'", ",", "field_name", ",", "round", "(", "val", ",", "2", ")", ")" ]
Retrieve values for graphs.
[ "Retrieve", "values", "for", "graphs", "." ]
python
train
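The subtlest part of retrieveVals() is the hit-percentage block: memcached counters are cumulative, so it diffs against the previous poll before forming a ratio. The delta logic in isolation, with made-up counter snapshots:

prev_stats = {"get_hits": 900, "get_misses": 100}
stats = {"get_hits": 980, "get_misses": 120}   # next poll of cumulative counters

hits = stats["get_hits"] - prev_stats["get_hits"]         # 80 new hits
misses = stats["get_misses"] - prev_stats["get_misses"]   # 20 new misses
total = hits + misses
pct = 100.0 * hits / total if total > 0 else 0
print(round(pct, 2))  # 80.0 -- hit rate over the polling interval, not lifetime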
watson-developer-cloud/python-sdk
ibm_watson/assistant_v1.py
https://github.com/watson-developer-cloud/python-sdk/blob/4c2c9df4466fcde88975da9ecd834e6ba95eb353/ibm_watson/assistant_v1.py#L5485-L5492
def _to_dict(self): """Return a json dictionary representing this model.""" _dict = {} if hasattr(self, 'intents') and self.intents is not None: _dict['intents'] = [x._to_dict() for x in self.intents] if hasattr(self, 'pagination') and self.pagination is not None: _dict['pagination'] = self.pagination._to_dict() return _dict
[ "def", "_to_dict", "(", "self", ")", ":", "_dict", "=", "{", "}", "if", "hasattr", "(", "self", ",", "'intents'", ")", "and", "self", ".", "intents", "is", "not", "None", ":", "_dict", "[", "'intents'", "]", "=", "[", "x", ".", "_to_dict", "(", ")", "for", "x", "in", "self", ".", "intents", "]", "if", "hasattr", "(", "self", ",", "'pagination'", ")", "and", "self", ".", "pagination", "is", "not", "None", ":", "_dict", "[", "'pagination'", "]", "=", "self", ".", "pagination", ".", "_to_dict", "(", ")", "return", "_dict" ]
Return a json dictionary representing this model.
[ "Return", "a", "json", "dictionary", "representing", "this", "model", "." ]
python
train
michael-lazar/rtv
rtv/packages/praw/__init__.py
https://github.com/michael-lazar/rtv/blob/ccef2af042566ad384977028cf0bde01bc524dda/rtv/packages/praw/__init__.py#L491-L500
def evict(self, urls): """Evict url(s) from the cache. :param urls: An iterable containing normalized urls. :returns: The number of items removed from the cache. """ if isinstance(urls, six.string_types): urls = (urls,) return self.handler.evict(urls)
[ "def", "evict", "(", "self", ",", "urls", ")", ":", "if", "isinstance", "(", "urls", ",", "six", ".", "string_types", ")", ":", "urls", "=", "(", "urls", ",", ")", "return", "self", ".", "handler", ".", "evict", "(", "urls", ")" ]
Evict url(s) from the cache. :param urls: An iterable containing normalized urls. :returns: The number of items removed from the cache.
[ "Evict", "url", "(", "s", ")", "from", "the", "cache", "." ]
python
train
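evict() above normalizes a bare string into a 1-tuple so the cache handler always sees an iterable (a plain str is itself iterable, which would otherwise "evict" one entry per character). The same idiom, self-contained, with handler.evict stubbed by len:

def evict(urls):
    # Mirror of the method above; the original uses six.string_types for py2.
    if isinstance(urls, str):
        urls = (urls,)
    return len(urls)   # stand-in for self.handler.evict(urls)

print(evict("https://a.example/"))                          # 1
print(evict(["https://a.example/", "https://b.example/"]))  # 2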
evonove/django-stored-messages
stored_messages/api.py
https://github.com/evonove/django-stored-messages/blob/23b71f952d5d3fd03285f5e700879d05796ef7ba/stored_messages/api.py#L12-L28
def add_message_for(users, level, message_text, extra_tags='', date=None, url=None, fail_silently=False): """ Send a message to a list of users without passing through `django.contrib.messages` :param users: an iterable containing the recipients of the messages :param level: message level :param message_text: the string containing the message :param extra_tags: like the Django api, a string containing extra tags for the message :param date: a date, different than the default timezone.now :param url: an optional url :param fail_silently: not used at the moment """ BackendClass = stored_messages_settings.STORAGE_BACKEND backend = BackendClass() m = backend.create_message(level, message_text, extra_tags, date, url) backend.archive_store(users, m) backend.inbox_store(users, m)
[ "def", "add_message_for", "(", "users", ",", "level", ",", "message_text", ",", "extra_tags", "=", "''", ",", "date", "=", "None", ",", "url", "=", "None", ",", "fail_silently", "=", "False", ")", ":", "BackendClass", "=", "stored_messages_settings", ".", "STORAGE_BACKEND", "backend", "=", "BackendClass", "(", ")", "m", "=", "backend", ".", "create_message", "(", "level", ",", "message_text", ",", "extra_tags", ",", "date", ",", "url", ")", "backend", ".", "archive_store", "(", "users", ",", "m", ")", "backend", ".", "inbox_store", "(", "users", ",", "m", ")" ]
Send a message to a list of users without passing through `django.contrib.messages` :param users: an iterable containing the recipients of the messages :param level: message level :param message_text: the string containing the message :param extra_tags: like the Django API, a string containing extra tags for the message :param date: a date, different from the default timezone.now :param url: an optional url :param fail_silently: not used at the moment
[ "Send", "a", "message", "to", "a", "list", "of", "users", "without", "passing", "through", "django", ".", "contrib", ".", "messages" ]
python
valid
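add_message_for() is three backend calls: create the message once, then fan it out to every recipient's archive and inbox. A minimal in-memory sketch of that sequence (the MemoryBackend class is a stand-in for illustration, not part of stored_messages):

class MemoryBackend:
    def __init__(self):
        self.archive, self.inbox = [], []
    def create_message(self, level, text, extra_tags, date, url):
        return {"level": level, "text": text, "url": url}
    def archive_store(self, users, m):
        self.archive += [(u, m) for u in users]
    def inbox_store(self, users, m):
        self.inbox += [(u, m) for u in users]

backend = MemoryBackend()
m = backend.create_message(20, "Maintenance tonight", "", None, "/status/")
backend.archive_store(["alice", "bob"], m)  # every recipient keeps an archive copy
backend.inbox_store(["alice", "bob"], m)    # ...and an inbox copy until read
print(len(backend.inbox))  # 2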
etcher-be/emiz
emiz/avwx/__init__.py
https://github.com/etcher-be/emiz/blob/1c3e32711921d7e600e85558ffe5d337956372de/emiz/avwx/__init__.py#L148-L154
def summary(self): # type: ignore """ Condensed summary for each forecast created from translations """ if not self.translations: self.update() return [summary.taf(trans) for trans in self.translations.forecast]
[ "def", "summary", "(", "self", ")", ":", "# type: ignore", "if", "not", "self", ".", "translations", ":", "self", ".", "update", "(", ")", "return", "[", "summary", ".", "taf", "(", "trans", ")", "for", "trans", "in", "self", ".", "translations", ".", "forecast", "]" ]
Condensed summary for each forecast created from translations
[ "Condensed", "summary", "for", "each", "forecast", "created", "from", "translations" ]
python
train
modin-project/modin
modin/pandas/indexing.py
https://github.com/modin-project/modin/blob/5b77d242596560c646b8405340c9ce64acb183cb/modin/pandas/indexing.py#L127-L140
def _compute_ndim(row_loc, col_loc): """Compute the ndim of result from locators """ row_scaler = is_scalar(row_loc) col_scaler = is_scalar(col_loc) if row_scaler and col_scaler: ndim = 0 elif row_scaler ^ col_scaler: ndim = 1 else: ndim = 2 return ndim
[ "def", "_compute_ndim", "(", "row_loc", ",", "col_loc", ")", ":", "row_scaler", "=", "is_scalar", "(", "row_loc", ")", "col_scaler", "=", "is_scalar", "(", "col_loc", ")", "if", "row_scaler", "and", "col_scaler", ":", "ndim", "=", "0", "elif", "row_scaler", "^", "col_scaler", ":", "ndim", "=", "1", "else", ":", "ndim", "=", "2", "return", "ndim" ]
Compute the ndim of the result from locators
[ "Compute", "the", "ndim", "of", "the", "result", "from", "locators" ]
python
train
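The XOR in _compute_ndim() is what makes the mixed case work: exactly one scalar locator collapses exactly one axis. A quick truth-table check using pandas' public is_scalar:

from pandas.api.types import is_scalar

def compute_ndim(row_loc, col_loc):
    row_scaler = is_scalar(row_loc)
    col_scaler = is_scalar(col_loc)
    if row_scaler and col_scaler:
        return 0              # df.loc[1, "a"]          -> scalar
    if row_scaler ^ col_scaler:
        return 1              # df.loc[1, ["a", "b"]]   -> Series
    return 2                  # df.loc[[1], ["a", "b"]] -> DataFrame

print(compute_ndim(1, "a"), compute_ndim(1, ["a"]), compute_ndim([1], ["a"]))
# 0 1 2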
sampsyo/confuse
setup.py
https://github.com/sampsyo/confuse/blob/9ff0992e30470f6822824711950e6dd906e253fb/setup.py#L21-L30
def export_live_eggs(self, env=False): """Adds all of the eggs in the current environment to PYTHONPATH.""" path_eggs = [p for p in sys.path if p.endswith('.egg')] command = self.get_finalized_command("egg_info") egg_base = path.abspath(command.egg_base) unique_path_eggs = set(path_eggs + [egg_base]) os.environ['PYTHONPATH'] = ':'.join(unique_path_eggs)
[ "def", "export_live_eggs", "(", "self", ",", "env", "=", "False", ")", ":", "path_eggs", "=", "[", "p", "for", "p", "in", "sys", ".", "path", "if", "p", ".", "endswith", "(", "'.egg'", ")", "]", "command", "=", "self", ".", "get_finalized_command", "(", "\"egg_info\"", ")", "egg_base", "=", "path", ".", "abspath", "(", "command", ".", "egg_base", ")", "unique_path_eggs", "=", "set", "(", "path_eggs", "+", "[", "egg_base", "]", ")", "os", ".", "environ", "[", "'PYTHONPATH'", "]", "=", "':'", ".", "join", "(", "unique_path_eggs", ")" ]
Adds all of the eggs in the current environment to PYTHONPATH.
[ "Adds", "all", "of", "the", "eggs", "in", "the", "current", "environment", "to", "PYTHONPATH", "." ]
python
train
volafiled/python-volapi
volapi/volapi.py
https://github.com/volafiled/python-volapi/blob/5f0bc03dbde703264ac6ed494e2050761f688a3e/volapi/volapi.py#L232-L267
def on_message(self, new_data): """Processes incoming messages according to engine-io rules""" # https://github.com/socketio/engine.io-protocol LOGGER.debug("new frame [%r]", new_data) try: what = int(new_data[0]) data = new_data[1:] data = data and from_json(data) if what == 0: self.ping_interval = float(data["pingInterval"]) / 1000 LOGGER.debug("adjusted ping interval") return if what == 1: LOGGER.debug("received close") self.reraise(IOError("Connection closed remotely")) return if what == 3: self.__lastpong = time.time() LOGGER.debug("received a pong") return if what == 4: self._on_frame(data) return if what == 6: LOGGER.debug("received noop") self.send_message("5") return LOGGER.debug("unhandled message: [%d] [%r]", what, data) except Exception as ex: self.reraise(ex)
[ "def", "on_message", "(", "self", ",", "new_data", ")", ":", "# https://github.com/socketio/engine.io-protocol", "LOGGER", ".", "debug", "(", "\"new frame [%r]\"", ",", "new_data", ")", "try", ":", "what", "=", "int", "(", "new_data", "[", "0", "]", ")", "data", "=", "new_data", "[", "1", ":", "]", "data", "=", "data", "and", "from_json", "(", "data", ")", "if", "what", "==", "0", ":", "self", ".", "ping_interval", "=", "float", "(", "data", "[", "\"pingInterval\"", "]", ")", "/", "1000", "LOGGER", ".", "debug", "(", "\"adjusted ping interval\"", ")", "return", "if", "what", "==", "1", ":", "LOGGER", ".", "debug", "(", "\"received close\"", ")", "self", ".", "reraise", "(", "IOError", "(", "\"Connection closed remotely\"", ")", ")", "return", "if", "what", "==", "3", ":", "self", ".", "__lastpong", "=", "time", ".", "time", "(", ")", "LOGGER", ".", "debug", "(", "\"received a pong\"", ")", "return", "if", "what", "==", "4", ":", "self", ".", "_on_frame", "(", "data", ")", "return", "if", "what", "==", "6", ":", "LOGGER", ".", "debug", "(", "\"received noop\"", ")", "self", ".", "send_message", "(", "\"5\"", ")", "return", "LOGGER", ".", "debug", "(", "\"unhandled message: [%d] [%r]\"", ",", "what", ",", "data", ")", "except", "Exception", "as", "ex", ":", "self", ".", "reraise", "(", "ex", ")" ]
Processes incoming messages according to engine-io rules
[ "Processes", "incoming", "messages", "according", "to", "engine", "-", "io", "rules" ]
python
train
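The leading integer in each frame is the engine.io packet type (0 open, 1 close, 3 pong, 4 message, 6 noop). A toy dispatcher over the same wire format, with hand-written frames:

import json

def handle(frame):
    what, payload = int(frame[0]), frame[1:]
    data = json.loads(payload) if payload else None
    if what == 0:       # open: server announces the ping interval in ms
        return ("ping_interval", float(data["pingInterval"]) / 1000)
    if what == 3:       # pong: record liveness
        return ("pong", None)
    if what == 4:       # actual application message
        return ("message", data)
    if what == 6:       # noop: reply with an upgrade frame ("5")
        return ("send", "5")
    return ("unhandled", what)

print(handle('0{"pingInterval": 25000}'))  # ('ping_interval', 25.0)
print(handle('4{"hello": "world"}'))       # ('message', {'hello': 'world'})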
rflamary/POT
ot/dr.py
https://github.com/rflamary/POT/blob/c5108efc7b6702e1af3928bef1032e6b37734d1c/ot/dr.py#L110-L203
def wda(X, y, p=2, reg=1, k=10, solver=None, maxiter=100, verbose=0, P0=None): """ Wasserstein Discriminant Analysis [11]_ The function solves the following optimization problem: .. math:: P = \\text{arg}\min_P \\frac{\\sum_i W(PX^i,PX^i)}{\\sum_{i,j\\neq i} W(PX^i,PX^j)} where : - :math:`P` is a linear projection operator in the Stiefel(p,d) manifold - :math:`W` is entropic regularized Wasserstein distances - :math:`X^i` are samples in the dataset corresponding to class i Parameters ---------- X : numpy.ndarray (n,d) Training samples y : np.ndarray (n,) labels for training samples p : int, optional size of dimensionnality reduction reg : float, optional Regularization term >0 (entropic regularization) solver : str, optional None for steepest decsent or 'TrustRegions' for trust regions algorithm else shoudl be a pymanopt.solvers P0 : numpy.ndarray (d,p) Initial starting point for projection verbose : int, optional Print information along iterations Returns ------- P : (d x p) ndarray Optimal transportation matrix for the given parameters proj : fun projection function including mean centering References ---------- .. [11] Flamary, R., Cuturi, M., Courty, N., & Rakotomamonjy, A. (2016). Wasserstein Discriminant Analysis. arXiv preprint arXiv:1608.08063. """ # noqa mx = np.mean(X) X -= mx.reshape((1, -1)) # data split between classes d = X.shape[1] xc = split_classes(X, y) # compute uniform weighs wc = [np.ones((x.shape[0]), dtype=np.float32) / x.shape[0] for x in xc] def cost(P): # wda loss loss_b = 0 loss_w = 0 for i, xi in enumerate(xc): xi = np.dot(xi, P) for j, xj in enumerate(xc[i:]): xj = np.dot(xj, P) M = dist(xi, xj) G = sinkhorn(wc[i], wc[j + i], M, reg, k) if j == 0: loss_w += np.sum(G * M) else: loss_b += np.sum(G * M) # loss inversed because minimization return loss_w / loss_b # declare manifold and problem manifold = Stiefel(d, p) problem = Problem(manifold=manifold, cost=cost) # declare solver and solve if solver is None: solver = SteepestDescent(maxiter=maxiter, logverbosity=verbose) elif solver in ['tr', 'TrustRegions']: solver = TrustRegions(maxiter=maxiter, logverbosity=verbose) Popt = solver.solve(problem, x=P0) def proj(X): return (X - mx.reshape((1, -1))).dot(Popt) return Popt, proj
[ "def", "wda", "(", "X", ",", "y", ",", "p", "=", "2", ",", "reg", "=", "1", ",", "k", "=", "10", ",", "solver", "=", "None", ",", "maxiter", "=", "100", ",", "verbose", "=", "0", ",", "P0", "=", "None", ")", ":", "# noqa", "mx", "=", "np", ".", "mean", "(", "X", ")", "X", "-=", "mx", ".", "reshape", "(", "(", "1", ",", "-", "1", ")", ")", "# data split between classes", "d", "=", "X", ".", "shape", "[", "1", "]", "xc", "=", "split_classes", "(", "X", ",", "y", ")", "# compute uniform weighs", "wc", "=", "[", "np", ".", "ones", "(", "(", "x", ".", "shape", "[", "0", "]", ")", ",", "dtype", "=", "np", ".", "float32", ")", "/", "x", ".", "shape", "[", "0", "]", "for", "x", "in", "xc", "]", "def", "cost", "(", "P", ")", ":", "# wda loss", "loss_b", "=", "0", "loss_w", "=", "0", "for", "i", ",", "xi", "in", "enumerate", "(", "xc", ")", ":", "xi", "=", "np", ".", "dot", "(", "xi", ",", "P", ")", "for", "j", ",", "xj", "in", "enumerate", "(", "xc", "[", "i", ":", "]", ")", ":", "xj", "=", "np", ".", "dot", "(", "xj", ",", "P", ")", "M", "=", "dist", "(", "xi", ",", "xj", ")", "G", "=", "sinkhorn", "(", "wc", "[", "i", "]", ",", "wc", "[", "j", "+", "i", "]", ",", "M", ",", "reg", ",", "k", ")", "if", "j", "==", "0", ":", "loss_w", "+=", "np", ".", "sum", "(", "G", "*", "M", ")", "else", ":", "loss_b", "+=", "np", ".", "sum", "(", "G", "*", "M", ")", "# loss inversed because minimization", "return", "loss_w", "/", "loss_b", "# declare manifold and problem", "manifold", "=", "Stiefel", "(", "d", ",", "p", ")", "problem", "=", "Problem", "(", "manifold", "=", "manifold", ",", "cost", "=", "cost", ")", "# declare solver and solve", "if", "solver", "is", "None", ":", "solver", "=", "SteepestDescent", "(", "maxiter", "=", "maxiter", ",", "logverbosity", "=", "verbose", ")", "elif", "solver", "in", "[", "'tr'", ",", "'TrustRegions'", "]", ":", "solver", "=", "TrustRegions", "(", "maxiter", "=", "maxiter", ",", "logverbosity", "=", "verbose", ")", "Popt", "=", "solver", ".", "solve", "(", "problem", ",", "x", "=", "P0", ")", "def", "proj", "(", "X", ")", ":", "return", "(", "X", "-", "mx", ".", "reshape", "(", "(", "1", ",", "-", "1", ")", ")", ")", ".", "dot", "(", "Popt", ")", "return", "Popt", ",", "proj" ]
Wasserstein Discriminant Analysis [11]_

    The function solves the following optimization problem:

    .. math::

        P = \\text{arg}\min_P \\frac{\\sum_i W(PX^i,PX^i)}{\\sum_{i,j\\neq i} W(PX^i,PX^j)}

    where :

    - :math:`P` is a linear projection operator in the Stiefel(p,d) manifold
    - :math:`W` is the entropic regularized Wasserstein distance
    - :math:`X^i` are samples in the dataset corresponding to class i

    Parameters
    ----------
    X : numpy.ndarray (n,d)
        Training samples
    y : np.ndarray (n,)
        Labels for training samples
    p : int, optional
        Size of the dimensionality reduction
    reg : float, optional
        Regularization term >0 (entropic regularization)
    solver : str, optional
        None for steepest descent or 'TrustRegions' for the trust regions
        algorithm; otherwise it should be a pymanopt.solvers instance
    P0 : numpy.ndarray (d,p)
        Initial starting point for the projection
    verbose : int, optional
        Print information along iterations

    Returns
    -------
    P : (d x p) ndarray
        Optimal projection operator for the given parameters
    proj : fun
        Projection function, including mean centering

    References
    ----------
    .. [11] Flamary, R., Cuturi, M., Courty, N., & Rakotomamonjy, A. (2016).
        Wasserstein Discriminant Analysis. arXiv preprint arXiv:1608.08063.
[ "Wasserstein", "Discriminant", "Analysis", "[", "11", "]", "_" ]
python
train
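A hedged usage sketch for the record above, on synthetic data; the wda call itself is commented out because it needs POT built with pymanopt and autograd at runtime:

import numpy as np
# from ot.dr import wda   # requires POT with pymanopt + autograd installed

np.random.seed(0)
n = 100
X = np.random.randn(n, 10)        # two 10-D Gaussian classes...
y = np.arange(n) % 2
X[y == 1, 0] += 4.0               # ...separable only along the first axis

# wda centers X in place (X -= mx), hence the copy:
# Pwda, proj = wda(X.copy(), y, p=2, reg=1.0, k=10, maxiter=50)
# Xp = proj(X)   # samples projected onto the learned 2-D subspace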
ankitmathur3193/song-cli
song/commands/FileDownload.py
https://github.com/ankitmathur3193/song-cli/blob/ca8ccfe547e9d702313ff6d14e81ae4355989a67/song/commands/FileDownload.py#L27-L74
def file_download_using_requests(self,url): '''It will download file specified by url using requests module''' file_name=url.split('/')[-1] if os.path.exists(os.path.join(os.getcwd(),file_name)): print 'File already exists' return #print 'Downloading file %s '%file_name #print 'Downloading from %s'%url try: r=requests.get(url,stream=True,timeout=200) except requests.exceptions.SSLError: try: response=requests.get(url,stream=True,verify=False,timeout=200) except requests.exceptions.RequestException as e: print e quit() except requests.exceptions.RequestException as e: print e quit() chunk_size = 1024 total_size = int(r.headers['Content-Length']) total_chunks = total_size/chunk_size file_iterable = r.iter_content(chunk_size = chunk_size) tqdm_iter = tqdm(iterable = file_iterable,total = total_chunks,unit = 'KB', leave = False ) with open(file_name,'wb') as f: for data in tqdm_iter: f.write(data) #total_size=float(r.headers['Content-Length'])/(1024*1024) '''print 'Total size of file to be downloaded %.2f MB '%total_size total_downloaded_size=0.0 with open(file_name,'wb') as f: for chunk in r.iter_content(chunk_size=1*1024*1024): if chunk: size_of_chunk=float(len(chunk))/(1024*1024) total_downloaded_size+=size_of_chunk print '{0:.0%} Downloaded'.format(total_downloaded_size/total_size) f.write(chunk)''' print 'Downloaded file %s '%file_name
[ "def", "file_download_using_requests", "(", "self", ",", "url", ")", ":", "file_name", "=", "url", ".", "split", "(", "'/'", ")", "[", "-", "1", "]", "if", "os", ".", "path", ".", "exists", "(", "os", ".", "path", ".", "join", "(", "os", ".", "getcwd", "(", ")", ",", "file_name", ")", ")", ":", "print", "'File already exists'", "return", "#print 'Downloading file %s '%file_name", "#print 'Downloading from %s'%url", "try", ":", "r", "=", "requests", ".", "get", "(", "url", ",", "stream", "=", "True", ",", "timeout", "=", "200", ")", "except", "requests", ".", "exceptions", ".", "SSLError", ":", "try", ":", "response", "=", "requests", ".", "get", "(", "url", ",", "stream", "=", "True", ",", "verify", "=", "False", ",", "timeout", "=", "200", ")", "except", "requests", ".", "exceptions", ".", "RequestException", "as", "e", ":", "print", "e", "quit", "(", ")", "except", "requests", ".", "exceptions", ".", "RequestException", "as", "e", ":", "print", "e", "quit", "(", ")", "chunk_size", "=", "1024", "total_size", "=", "int", "(", "r", ".", "headers", "[", "'Content-Length'", "]", ")", "total_chunks", "=", "total_size", "/", "chunk_size", "file_iterable", "=", "r", ".", "iter_content", "(", "chunk_size", "=", "chunk_size", ")", "tqdm_iter", "=", "tqdm", "(", "iterable", "=", "file_iterable", ",", "total", "=", "total_chunks", ",", "unit", "=", "'KB'", ",", "leave", "=", "False", ")", "with", "open", "(", "file_name", ",", "'wb'", ")", "as", "f", ":", "for", "data", "in", "tqdm_iter", ":", "f", ".", "write", "(", "data", ")", "#total_size=float(r.headers['Content-Length'])/(1024*1024)", "'''print 'Total size of file to be downloaded %.2f MB '%total_size\n\t\ttotal_downloaded_size=0.0\n\t\twith open(file_name,'wb') as f:\n\t\t\tfor chunk in r.iter_content(chunk_size=1*1024*1024):\n\t\t\t\tif chunk:\n\t\t\t\t\tsize_of_chunk=float(len(chunk))/(1024*1024)\n\t\t\t\t\ttotal_downloaded_size+=size_of_chunk\n\t\t\t\t\tprint '{0:.0%} Downloaded'.format(total_downloaded_size/total_size)\n\t\t\t\t\tf.write(chunk)'''", "print", "'Downloaded file %s '", "%", "file_name" ]
It will download the file specified by url using the requests module
[ "It", "will", "download", "the", "file", "specified", "by", "url", "using", "the", "requests", "module" ]
python
test
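Beyond the Python 2 prints, note that the SSLError fallback above assigns response while the rest of the function keeps reading r -- a latent NameError. A hedged Python 3 rewrite of the happy path:

import os
import requests
from tqdm import tqdm

def download(url, timeout=200):
    file_name = url.split("/")[-1]
    if os.path.exists(file_name):
        print("File already exists")
        return
    r = requests.get(url, stream=True, timeout=timeout)
    r.raise_for_status()
    total = int(r.headers.get("Content-Length", 0))
    with open(file_name, "wb") as f, tqdm(total=total // 1024, unit="KB",
                                          leave=False) as bar:
        for chunk in r.iter_content(chunk_size=1024):
            f.write(chunk)
            bar.update(1)
    print("Downloaded file %s" % file_name)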
GoogleCloudPlatform/cloud-debug-python
src/googleclouddebugger/capture_collector.py
https://github.com/GoogleCloudPlatform/cloud-debug-python/blob/89ce3782c98b814838a3ecb5479ed3882368cbee/src/googleclouddebugger/capture_collector.py#L360-L383
def CaptureFrameLocals(self, frame): """Captures local variables and arguments of the specified frame. Args: frame: frame to capture locals and arguments. Returns: (arguments, locals) tuple. """ # Capture all local variables (including method arguments). variables = {n: self.CaptureNamedVariable(n, v, 1, self.default_capture_limits) for n, v in six.viewitems(frame.f_locals)} # Split between locals and arguments (keeping arguments in the right order). nargs = frame.f_code.co_argcount if frame.f_code.co_flags & inspect.CO_VARARGS: nargs += 1 if frame.f_code.co_flags & inspect.CO_VARKEYWORDS: nargs += 1 frame_arguments = [] for argname in frame.f_code.co_varnames[:nargs]: if argname in variables: frame_arguments.append(variables.pop(argname)) return (frame_arguments, list(six.viewvalues(variables)))
[ "def", "CaptureFrameLocals", "(", "self", ",", "frame", ")", ":", "# Capture all local variables (including method arguments).", "variables", "=", "{", "n", ":", "self", ".", "CaptureNamedVariable", "(", "n", ",", "v", ",", "1", ",", "self", ".", "default_capture_limits", ")", "for", "n", ",", "v", "in", "six", ".", "viewitems", "(", "frame", ".", "f_locals", ")", "}", "# Split between locals and arguments (keeping arguments in the right order).", "nargs", "=", "frame", ".", "f_code", ".", "co_argcount", "if", "frame", ".", "f_code", ".", "co_flags", "&", "inspect", ".", "CO_VARARGS", ":", "nargs", "+=", "1", "if", "frame", ".", "f_code", ".", "co_flags", "&", "inspect", ".", "CO_VARKEYWORDS", ":", "nargs", "+=", "1", "frame_arguments", "=", "[", "]", "for", "argname", "in", "frame", ".", "f_code", ".", "co_varnames", "[", ":", "nargs", "]", ":", "if", "argname", "in", "variables", ":", "frame_arguments", ".", "append", "(", "variables", ".", "pop", "(", "argname", ")", ")", "return", "(", "frame_arguments", ",", "list", "(", "six", ".", "viewvalues", "(", "variables", ")", ")", ")" ]
Captures local variables and arguments of the specified frame. Args: frame: frame to capture locals and arguments. Returns: (arguments, locals) tuple.
[ "Captures", "local", "variables", "and", "arguments", "of", "the", "specified", "frame", "." ]
python
train
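The nargs arithmetic in CaptureFrameLocals() leans on CPython's layout of co_varnames: positional arguments first, then *args, then **kwargs, then plain locals. A self-contained check of that ordering:

import inspect

def sample(a, b, *args, **kwargs):
    local_x = 1
    return inspect.currentframe()

frame = sample(1, 2, 3, key="v")
code = frame.f_code
nargs = code.co_argcount
if code.co_flags & inspect.CO_VARARGS:
    nargs += 1
if code.co_flags & inspect.CO_VARKEYWORDS:
    nargs += 1
print(code.co_varnames[:nargs])  # ('a', 'b', 'args', 'kwargs') -- arguments
print(code.co_varnames[nargs:])  # ('local_x',)                 -- plain locals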
pyviz/holoviews
holoviews/core/data/interface.py
https://github.com/pyviz/holoviews/blob/ae0dd2f3de448b0ca5e9065aabd6ef8d84c7e655/holoviews/core/data/interface.py#L278-L314
def select_mask(cls, dataset, selection): """ Given a Dataset object and a dictionary with dimension keys and selection keys (i.e tuple ranges, slices, sets, lists or literals) return a boolean mask over the rows in the Dataset object that have been selected. """ mask = np.ones(len(dataset), dtype=np.bool) for dim, k in selection.items(): if isinstance(k, tuple): k = slice(*k) arr = cls.values(dataset, dim) if isinstance(k, slice): with warnings.catch_warnings(): warnings.filterwarnings('ignore', r'invalid value encountered') if k.start is not None: mask &= k.start <= arr if k.stop is not None: mask &= arr < k.stop elif isinstance(k, (set, list)): iter_slcs = [] for ik in k: with warnings.catch_warnings(): warnings.filterwarnings('ignore', r'invalid value encountered') iter_slcs.append(arr == ik) mask &= np.logical_or.reduce(iter_slcs) elif callable(k): mask &= k(arr) else: index_mask = arr == k if dataset.ndims == 1 and np.sum(index_mask) == 0: data_index = np.argmin(np.abs(arr - k)) mask = np.zeros(len(dataset), dtype=np.bool) mask[data_index] = True else: mask &= index_mask return mask
[ "def", "select_mask", "(", "cls", ",", "dataset", ",", "selection", ")", ":", "mask", "=", "np", ".", "ones", "(", "len", "(", "dataset", ")", ",", "dtype", "=", "np", ".", "bool", ")", "for", "dim", ",", "k", "in", "selection", ".", "items", "(", ")", ":", "if", "isinstance", "(", "k", ",", "tuple", ")", ":", "k", "=", "slice", "(", "*", "k", ")", "arr", "=", "cls", ".", "values", "(", "dataset", ",", "dim", ")", "if", "isinstance", "(", "k", ",", "slice", ")", ":", "with", "warnings", ".", "catch_warnings", "(", ")", ":", "warnings", ".", "filterwarnings", "(", "'ignore'", ",", "r'invalid value encountered'", ")", "if", "k", ".", "start", "is", "not", "None", ":", "mask", "&=", "k", ".", "start", "<=", "arr", "if", "k", ".", "stop", "is", "not", "None", ":", "mask", "&=", "arr", "<", "k", ".", "stop", "elif", "isinstance", "(", "k", ",", "(", "set", ",", "list", ")", ")", ":", "iter_slcs", "=", "[", "]", "for", "ik", "in", "k", ":", "with", "warnings", ".", "catch_warnings", "(", ")", ":", "warnings", ".", "filterwarnings", "(", "'ignore'", ",", "r'invalid value encountered'", ")", "iter_slcs", ".", "append", "(", "arr", "==", "ik", ")", "mask", "&=", "np", ".", "logical_or", ".", "reduce", "(", "iter_slcs", ")", "elif", "callable", "(", "k", ")", ":", "mask", "&=", "k", "(", "arr", ")", "else", ":", "index_mask", "=", "arr", "==", "k", "if", "dataset", ".", "ndims", "==", "1", "and", "np", ".", "sum", "(", "index_mask", ")", "==", "0", ":", "data_index", "=", "np", ".", "argmin", "(", "np", ".", "abs", "(", "arr", "-", "k", ")", ")", "mask", "=", "np", ".", "zeros", "(", "len", "(", "dataset", ")", ",", "dtype", "=", "np", ".", "bool", ")", "mask", "[", "data_index", "]", "=", "True", "else", ":", "mask", "&=", "index_mask", "return", "mask" ]
Given a Dataset object and a dictionary with dimension keys and selection keys (i.e. tuple ranges, slices, sets, lists, or literals) return a boolean mask over the rows in the Dataset object that have been selected.
[ "Given", "a", "Dataset", "object", "and", "a", "dictionary", "with", "dimension", "keys", "and", "selection", "keys", "(", "i", ".", "e", "tuple", "ranges", "slices", "sets", "lists", "or", "literals", ")", "return", "a", "boolean", "mask", "over", "the", "rows", "in", "the", "Dataset", "object", "that", "have", "been", "selected", "." ]
python
train
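select_mask() maps Python literals onto mask operations: tuples become half-open ranges, sets and lists become membership tests, callables are applied as predicates. A NumPy-only sketch of those branches (the Dataset plumbing is stubbed out):

import numpy as np

arr = np.array([0.0, 1.5, 2.0, 3.5, 4.0])

def select(arr, k):
    if isinstance(k, tuple):
        k = slice(*k)
    if isinstance(k, slice):
        mask = np.ones(len(arr), dtype=bool)
        if k.start is not None:
            mask &= k.start <= arr
        if k.stop is not None:
            mask &= arr < k.stop        # stop is exclusive, as above
        return mask
    if isinstance(k, (set, list)):
        return np.logical_or.reduce([arr == ik for ik in k])
    if callable(k):
        return k(arr)
    return arr == k

print(select(arr, (1, 4)))            # range:      [False  True  True  True False]
print(select(arr, [0.0, 4.0]))        # membership: [ True False False False  True]
print(select(arr, lambda a: a > 2))   # predicate:  [False False False  True  True]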
gwastro/pycbc
pycbc/psd/analytical.py
https://github.com/gwastro/pycbc/blob/7a64cdd104d263f1b6ea0b01e6841837d05a4cb3/pycbc/psd/analytical.py#L124-L144
def flat_unity(length, delta_f, low_freq_cutoff): """ Returns a FrequencySeries of ones above the low_frequency_cutoff. Parameters ---------- length : int Length of output Frequencyseries. delta_f : float Frequency step for output FrequencySeries. low_freq_cutoff : int Low-frequency cutoff for output FrequencySeries. Returns ------- FrequencySeries Returns a FrequencySeries containing the unity PSD model. """ fseries = FrequencySeries(numpy.ones(length), delta_f=delta_f) kmin = int(low_freq_cutoff / fseries.delta_f) fseries.data[:kmin] = 0 return fseries
[ "def", "flat_unity", "(", "length", ",", "delta_f", ",", "low_freq_cutoff", ")", ":", "fseries", "=", "FrequencySeries", "(", "numpy", ".", "ones", "(", "length", ")", ",", "delta_f", "=", "delta_f", ")", "kmin", "=", "int", "(", "low_freq_cutoff", "/", "fseries", ".", "delta_f", ")", "fseries", ".", "data", "[", ":", "kmin", "]", "=", "0", "return", "fseries" ]
Returns a FrequencySeries of ones above the low_frequency_cutoff.

    Parameters
    ----------
    length : int
        Length of output FrequencySeries.
    delta_f : float
        Frequency step for output FrequencySeries.
    low_freq_cutoff : int
        Low-frequency cutoff for output FrequencySeries.

    Returns
    -------
    FrequencySeries
        Returns a FrequencySeries containing the unity PSD model.
[ "Returns", "a", "FrequencySeries", "of", "ones", "above", "the", "low_frequency_cutoff", "." ]
python
train
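flat_unity() is just length/delta_f bookkeeping plus a zeroed head. The same arithmetic with a plain NumPy array standing in for FrequencySeries:

import numpy as np

delta_f, length, low_freq_cutoff = 1.0, 1025, 20.0
psd = np.ones(length)                  # stand-in for a pycbc FrequencySeries
kmin = int(low_freq_cutoff / delta_f)  # first retained frequency bin
psd[:kmin] = 0                         # zero out everything below the cutoff
print(psd[18:22])                      # [0. 0. 1. 1.] -- cutoff lands at 20 Hz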
projectatomic/atomic-reactor
atomic_reactor/util.py
https://github.com/projectatomic/atomic-reactor/blob/fd31c01b964097210bf169960d051e5f04019a80/atomic_reactor/util.py#L1584-L1592
def update_from_dict(self, source): """Update records of the digests of images from a dictionary (no validation is performed) :param dict source: data """ assert isinstance(source, dict) source_copy = deepcopy(source) # no mutable side effects self._images_digests.update(source_copy)
[ "def", "update_from_dict", "(", "self", ",", "source", ")", ":", "assert", "isinstance", "(", "source", ",", "dict", ")", "source_copy", "=", "deepcopy", "(", "source", ")", "# no mutable side effects", "self", ".", "_images_digests", ".", "update", "(", "source_copy", ")" ]
Update records of the digests of images from a dictionary (no validation is performed) :param dict source: data
[ "Update", "records", "of", "the", "digests", "of", "images", "from", "a", "dictionary", "(", "no", "validation", "is", "performed", ")" ]
python
train
internetarchive/brozzler
brozzler/model.py
https://github.com/internetarchive/brozzler/blob/411b3f266a38b9bb942021c0121ebd8e5ca66447/brozzler/model.py#L74-L108
def new_job(frontier, job_conf): '''Returns new Job.''' validate_conf(job_conf) job = Job(frontier.rr, { "conf": job_conf, "status": "ACTIVE", "started": doublethink.utcnow()}) if "id" in job_conf: job.id = job_conf["id"] if "max_claimed_sites" in job_conf: job.max_claimed_sites = job_conf["max_claimed_sites"] job.save() sites = [] pages = [] for seed_conf in job_conf["seeds"]: merged_conf = merge(seed_conf, job_conf) merged_conf.pop("seeds") merged_conf["job_id"] = job.id merged_conf["seed"] = merged_conf.pop("url") site = brozzler.Site(frontier.rr, merged_conf) site.id = str(uuid.uuid4()) sites.append(site) pages.append(new_seed_page(frontier, site)) # insert in batches to avoid this error # rethinkdb.errors.ReqlDriverError: Query size (167883036) greater than maximum (134217727) in: for batch in (pages[i:i+500] for i in range(0, len(pages), 500)): logging.info('inserting batch of %s pages', len(batch)) result = frontier.rr.table('pages').insert(batch).run() for batch in (sites[i:i+100] for i in range(0, len(sites), 100)): logging.info('inserting batch of %s sites', len(batch)) result = frontier.rr.table('sites').insert(batch).run() logging.info('job %s fully started', job.id) return job
[ "def", "new_job", "(", "frontier", ",", "job_conf", ")", ":", "validate_conf", "(", "job_conf", ")", "job", "=", "Job", "(", "frontier", ".", "rr", ",", "{", "\"conf\"", ":", "job_conf", ",", "\"status\"", ":", "\"ACTIVE\"", ",", "\"started\"", ":", "doublethink", ".", "utcnow", "(", ")", "}", ")", "if", "\"id\"", "in", "job_conf", ":", "job", ".", "id", "=", "job_conf", "[", "\"id\"", "]", "if", "\"max_claimed_sites\"", "in", "job_conf", ":", "job", ".", "max_claimed_sites", "=", "job_conf", "[", "\"max_claimed_sites\"", "]", "job", ".", "save", "(", ")", "sites", "=", "[", "]", "pages", "=", "[", "]", "for", "seed_conf", "in", "job_conf", "[", "\"seeds\"", "]", ":", "merged_conf", "=", "merge", "(", "seed_conf", ",", "job_conf", ")", "merged_conf", ".", "pop", "(", "\"seeds\"", ")", "merged_conf", "[", "\"job_id\"", "]", "=", "job", ".", "id", "merged_conf", "[", "\"seed\"", "]", "=", "merged_conf", ".", "pop", "(", "\"url\"", ")", "site", "=", "brozzler", ".", "Site", "(", "frontier", ".", "rr", ",", "merged_conf", ")", "site", ".", "id", "=", "str", "(", "uuid", ".", "uuid4", "(", ")", ")", "sites", ".", "append", "(", "site", ")", "pages", ".", "append", "(", "new_seed_page", "(", "frontier", ",", "site", ")", ")", "# insert in batches to avoid this error", "# rethinkdb.errors.ReqlDriverError: Query size (167883036) greater than maximum (134217727) in:", "for", "batch", "in", "(", "pages", "[", "i", ":", "i", "+", "500", "]", "for", "i", "in", "range", "(", "0", ",", "len", "(", "pages", ")", ",", "500", ")", ")", ":", "logging", ".", "info", "(", "'inserting batch of %s pages'", ",", "len", "(", "batch", ")", ")", "result", "=", "frontier", ".", "rr", ".", "table", "(", "'pages'", ")", ".", "insert", "(", "batch", ")", ".", "run", "(", ")", "for", "batch", "in", "(", "sites", "[", "i", ":", "i", "+", "100", "]", "for", "i", "in", "range", "(", "0", ",", "len", "(", "sites", ")", ",", "100", ")", ")", ":", "logging", ".", "info", "(", "'inserting batch of %s sites'", ",", "len", "(", "batch", ")", ")", "result", "=", "frontier", ".", "rr", ".", "table", "(", "'sites'", ")", ".", "insert", "(", "batch", ")", ".", "run", "(", ")", "logging", ".", "info", "(", "'job %s fully started'", ",", "job", ".", "id", ")", "return", "job" ]
Returns new Job.
[ "Returns", "new", "Job", "." ]
python
train
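The batch slicing in new_job() is a generic guard against RethinkDB's query-size cap quoted in the comment. The slicing generator on its own:

pages = list(range(1234))   # stand-ins for page documents

batches = (pages[i:i + 500] for i in range(0, len(pages), 500))
for batch in batches:
    # in new_job() this is frontier.rr.table('pages').insert(batch).run()
    print("inserting batch of %s pages" % len(batch))
# inserting batch of 500 pages (twice), then one final batch of 234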
threeML/astromodels
astromodels/core/model.py
https://github.com/threeML/astromodels/blob/9aac365a372f77603039533df9a6b694c1e360d5/astromodels/core/model.py#L392-L403
def remove_independent_variable(self, variable_name): """ Remove an independent variable which was added with add_independent_variable :param variable_name: name of variable to remove :return: """ self._remove_child(variable_name) # Remove also from the list of independent variables self._independent_variables.pop(variable_name)
[ "def", "remove_independent_variable", "(", "self", ",", "variable_name", ")", ":", "self", ".", "_remove_child", "(", "variable_name", ")", "# Remove also from the list of independent variables", "self", ".", "_independent_variables", ".", "pop", "(", "variable_name", ")" ]
Remove an independent variable which was added with add_independent_variable :param variable_name: name of variable to remove :return:
[ "Remove", "an", "independent", "variable", "which", "was", "added", "with", "add_independent_variable" ]
python
train
senaite/senaite.core
bika/lims/content/abstractanalysis.py
https://github.com/senaite/senaite.core/blob/7602ce2ea2f9e81eb34e20ce17b98a3e70713f85/bika/lims/content/abstractanalysis.py#L1123-L1129
def getAttachmentUIDs(self): """Used to populate metadata, so that we don't need full objects of analyses when working with their attachments. """ attachments = self.getAttachment() uids = [att.UID() for att in attachments] return uids
[ "def", "getAttachmentUIDs", "(", "self", ")", ":", "attachments", "=", "self", ".", "getAttachment", "(", ")", "uids", "=", "[", "att", ".", "UID", "(", ")", "for", "att", "in", "attachments", "]", "return", "uids" ]
Used to populate metadata, so that we don't need full objects of analyses when working with their attachments.
[ "Used", "to", "populate", "metadata", "so", "that", "we", "don", "t", "need", "full", "objects", "of", "analyses", "when", "working", "with", "their", "attachments", "." ]
python
train
titusjan/argos
argos/utils/masks.py
https://github.com/titusjan/argos/blob/20d0a3cae26c36ea789a5d219c02ca7df21279dd/argos/utils/masks.py#L338-L367
def maskedEqual(array, missingValue): """ Mask an array where equal to a given (missing)value. Unfortunately ma.masked_equal does not work with structured arrays. See: https://mail.scipy.org/pipermail/numpy-discussion/2011-July/057669.html If the data is a structured array the mask is applied for every field (i.e. forming a logical-and). Otherwise ma.masked_equal is called. """ if array_is_structured(array): # Enforce the array to be masked if not isinstance(array, ma.MaskedArray): array = ma.MaskedArray(array) # Set the mask separately per field for nr, field in enumerate(array.dtype.names): if hasattr(missingValue, '__len__'): fieldMissingValue = missingValue[nr] else: fieldMissingValue = missingValue array[field] = ma.masked_equal(array[field], fieldMissingValue) check_class(array, ma.MaskedArray) # post-condition check return array else: # masked_equal works with missing is None result = ma.masked_equal(array, missingValue, copy=False) check_class(result, ma.MaskedArray) # post-condition check return result
[ "def", "maskedEqual", "(", "array", ",", "missingValue", ")", ":", "if", "array_is_structured", "(", "array", ")", ":", "# Enforce the array to be masked", "if", "not", "isinstance", "(", "array", ",", "ma", ".", "MaskedArray", ")", ":", "array", "=", "ma", ".", "MaskedArray", "(", "array", ")", "# Set the mask separately per field", "for", "nr", ",", "field", "in", "enumerate", "(", "array", ".", "dtype", ".", "names", ")", ":", "if", "hasattr", "(", "missingValue", ",", "'__len__'", ")", ":", "fieldMissingValue", "=", "missingValue", "[", "nr", "]", "else", ":", "fieldMissingValue", "=", "missingValue", "array", "[", "field", "]", "=", "ma", ".", "masked_equal", "(", "array", "[", "field", "]", ",", "fieldMissingValue", ")", "check_class", "(", "array", ",", "ma", ".", "MaskedArray", ")", "# post-condition check", "return", "array", "else", ":", "# masked_equal works with missing is None", "result", "=", "ma", ".", "masked_equal", "(", "array", ",", "missingValue", ",", "copy", "=", "False", ")", "check_class", "(", "result", ",", "ma", ".", "MaskedArray", ")", "# post-condition check", "return", "result" ]
Mask an array where equal to a given (missing) value. Unfortunately ma.masked_equal does not work with structured arrays. See: https://mail.scipy.org/pipermail/numpy-discussion/2011-July/057669.html If the data is a structured array the mask is applied for every field (i.e. forming a logical-and). Otherwise ma.masked_equal is called.
[ "Mask", "an", "array", "where", "equal", "to", "a", "given", "(", "missing", ")", "value", "." ]
python
train
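The structured-array branch of maskedEqual() works because assigning a masked array into a field of a MaskedArray merges that field's mask. A runnable check with per-field missing values:

import numpy as np
import numpy.ma as ma

a = np.array([(1, -9.0), (2, 2.5), (-1, 3.5)],
             dtype=[("id", "i4"), ("val", "f8")])
arr = ma.MaskedArray(a)

# Per-field missing values: -1 for "id", -9.0 for "val".
for field, missing in (("id", -1), ("val", -9.0)):
    arr[field] = ma.masked_equal(arr[field], missing)

print(arr.mask)
# field-wise mask: [(False,  True) (False, False) ( True, False)]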
agile-geoscience/striplog
striplog/legend.py
https://github.com/agile-geoscience/striplog/blob/8033b673a151f96c29802b43763e863519a3124c/striplog/legend.py#L637-L682
def to_csv(self):
        """
        Renders a legend as a CSV string.

        No arguments.

        Returns:
            str: The legend as a CSV.
        """
        # We can't delegate this to Decor because we need to know the superset
        # of all Decor properties. There may be lots of blanks.
        header = []
        component_header = []
        for row in self:
            for j in row.__dict__.keys():
                if j == '_colour':
                    j = 'colour'
                header.append(j)
            for k in row.component.__dict__.keys():
                component_header.append(k)
        header = set(header)
        component_header = set(component_header)
        header.remove('component')

        header_row = ''
        has_colour = False  # initialize: referenced below even when 'colour' is absent
        if 'colour' in header:
            header_row += 'colour,'
            header.remove('colour')
            has_colour = True
        for item in header:
            header_row += item + ','
        for item in component_header:
            header_row += 'component ' + item + ','

        # Now we have a header row! Phew.
        # Next we'll go back over the legend and collect everything.
        result = header_row.strip(',') + '\n'
        for row in self:
            if has_colour:
                result += row.__dict__.get('_colour', '') + ','
            for item in header:
                result += str(row.__dict__.get(item, '')) + ','
            for item in component_header:
                result += str(row.component.__dict__.get(item, '')) + ','
            result += '\n'
        return result
[ "def", "to_csv", "(", "self", ")", ":", "# We can't delegate this to Decor because we need to know the superset", "# of all Decor properties. There may be lots of blanks.", "header", "=", "[", "]", "component_header", "=", "[", "]", "for", "row", "in", "self", ":", "for", "j", "in", "row", ".", "__dict__", ".", "keys", "(", ")", ":", "if", "j", "==", "'_colour'", ":", "j", "=", "'colour'", "header", ".", "append", "(", "j", ")", "for", "k", "in", "row", ".", "component", ".", "__dict__", ".", "keys", "(", ")", ":", "component_header", ".", "append", "(", "k", ")", "header", "=", "set", "(", "header", ")", "component_header", "=", "set", "(", "component_header", ")", "header", ".", "remove", "(", "'component'", ")", "header_row", "=", "''", "if", "'colour'", "in", "header", ":", "header_row", "+=", "'colour,'", "header", ".", "remove", "(", "'colour'", ")", "has_colour", "=", "True", "for", "item", "in", "header", ":", "header_row", "+=", "item", "+", "','", "for", "item", "in", "component_header", ":", "header_row", "+=", "'component '", "+", "item", "+", "','", "# Now we have a header row! Phew.", "# Next we'll go back over the legend and collect everything.", "result", "=", "header_row", ".", "strip", "(", "','", ")", "+", "'\\n'", "for", "row", "in", "self", ":", "if", "has_colour", ":", "result", "+=", "row", ".", "__dict__", ".", "get", "(", "'_colour'", ",", "''", ")", "+", "','", "for", "item", "in", "header", ":", "result", "+=", "str", "(", "row", ".", "__dict__", ".", "get", "(", "item", ",", "''", ")", ")", "+", "','", "for", "item", "in", "component_header", ":", "result", "+=", "str", "(", "row", ".", "component", ".", "__dict__", ".", "get", "(", "item", ",", "''", ")", ")", "+", "','", "result", "+=", "'\\n'", "return", "result" ]
Renders a legend as a CSV string. No arguments. Returns: str: The legend as a CSV.
[ "Renders", "a", "legend", "as", "a", "CSV", "string", "." ]
python
test
benedictpaten/sonLib
bioio.py
https://github.com/benedictpaten/sonLib/blob/1decb75bb439b70721ec776f685ce98e25217d26/bioio.py#L199-L213
def popenCatch(command, stdinString=None): """Runs a command and return standard out. """ logger.debug("Running the command: %s" % command) if stdinString != None: process = subprocess.Popen(command, shell=True, stdin=subprocess.PIPE, stdout=subprocess.PIPE, bufsize=-1) output, nothing = process.communicate(stdinString) else: process = subprocess.Popen(command, shell=True, stdout=subprocess.PIPE, stderr=sys.stderr, bufsize=-1) output, nothing = process.communicate() #process.stdout.read().strip() sts = process.wait() if sts != 0: raise RuntimeError("Command: %s with stdin string '%s' exited with non-zero status %i" % (command, stdinString, sts)) return output
[ "def", "popenCatch", "(", "command", ",", "stdinString", "=", "None", ")", ":", "logger", ".", "debug", "(", "\"Running the command: %s\"", "%", "command", ")", "if", "stdinString", "!=", "None", ":", "process", "=", "subprocess", ".", "Popen", "(", "command", ",", "shell", "=", "True", ",", "stdin", "=", "subprocess", ".", "PIPE", ",", "stdout", "=", "subprocess", ".", "PIPE", ",", "bufsize", "=", "-", "1", ")", "output", ",", "nothing", "=", "process", ".", "communicate", "(", "stdinString", ")", "else", ":", "process", "=", "subprocess", ".", "Popen", "(", "command", ",", "shell", "=", "True", ",", "stdout", "=", "subprocess", ".", "PIPE", ",", "stderr", "=", "sys", ".", "stderr", ",", "bufsize", "=", "-", "1", ")", "output", ",", "nothing", "=", "process", ".", "communicate", "(", ")", "#process.stdout.read().strip()", "sts", "=", "process", ".", "wait", "(", ")", "if", "sts", "!=", "0", ":", "raise", "RuntimeError", "(", "\"Command: %s with stdin string '%s' exited with non-zero status %i\"", "%", "(", "command", ",", "stdinString", ",", "sts", ")", ")", "return", "output" ]
Runs a command and return standard out.
[ "Runs", "a", "command", "and", "return", "standard", "out", "." ]
python
train
jwodder/javaproperties
javaproperties/reading.py
https://github.com/jwodder/javaproperties/blob/8b48f040305217ebeb80c98c4354691bbb01429b/javaproperties/reading.py#L38-L66
def loads(s, object_pairs_hook=dict): """ Parse the contents of the string ``s`` as a simple line-oriented ``.properties`` file and return a `dict` of the key-value pairs. ``s`` may be either a text string or bytes string. If it is a bytes string, its contents are decoded as Latin-1. By default, the key-value pairs extracted from ``s`` are combined into a `dict` with later occurrences of a key overriding previous occurrences of the same key. To change this behavior, pass a callable as the ``object_pairs_hook`` argument; it will be called with one argument, a generator of ``(key, value)`` pairs representing the key-value entries in ``s`` (including duplicates) in order of occurrence. `loads` will then return the value returned by ``object_pairs_hook``. .. versionchanged:: 0.5.0 Invalid ``\\uXXXX`` escape sequences will now cause an `InvalidUEscapeError` to be raised :param string s: the string from which to read the ``.properties`` document :param callable object_pairs_hook: class or function for combining the key-value pairs :rtype: `dict` of text strings or the return value of ``object_pairs_hook`` :raises InvalidUEscapeError: if an invalid ``\\uXXXX`` escape sequence occurs in the input """ fp = BytesIO(s) if isinstance(s, binary_type) else StringIO(s) return load(fp, object_pairs_hook=object_pairs_hook)
[ "def", "loads", "(", "s", ",", "object_pairs_hook", "=", "dict", ")", ":", "fp", "=", "BytesIO", "(", "s", ")", "if", "isinstance", "(", "s", ",", "binary_type", ")", "else", "StringIO", "(", "s", ")", "return", "load", "(", "fp", ",", "object_pairs_hook", "=", "object_pairs_hook", ")" ]
Parse the contents of the string ``s`` as a simple line-oriented ``.properties`` file and return a `dict` of the key-value pairs. ``s`` may be either a text string or bytes string. If it is a bytes string, its contents are decoded as Latin-1. By default, the key-value pairs extracted from ``s`` are combined into a `dict` with later occurrences of a key overriding previous occurrences of the same key. To change this behavior, pass a callable as the ``object_pairs_hook`` argument; it will be called with one argument, a generator of ``(key, value)`` pairs representing the key-value entries in ``s`` (including duplicates) in order of occurrence. `loads` will then return the value returned by ``object_pairs_hook``. .. versionchanged:: 0.5.0 Invalid ``\\uXXXX`` escape sequences will now cause an `InvalidUEscapeError` to be raised :param string s: the string from which to read the ``.properties`` document :param callable object_pairs_hook: class or function for combining the key-value pairs :rtype: `dict` of text strings or the return value of ``object_pairs_hook`` :raises InvalidUEscapeError: if an invalid ``\\uXXXX`` escape sequence occurs in the input
[ "Parse", "the", "contents", "of", "the", "string", "s", "as", "a", "simple", "line", "-", "oriented", ".", "properties", "file", "and", "return", "a", "dict", "of", "the", "key", "-", "value", "pairs", "." ]
python
train
obulpathi/cdn-fastly-python
fastly/__init__.py
https://github.com/obulpathi/cdn-fastly-python/blob/db2564b047e8af4bce72c3b88d6c27d3d0291425/fastly/__init__.py#L965-L968
def deactivate_version(self, service_id, version_number): """Deactivate the current version.""" content = self._fetch("/service/%s/version/%d/deactivate" % (service_id, version_number), method="PUT") return FastlyVersion(self, content)
[ "def", "deactivate_version", "(", "self", ",", "service_id", ",", "version_number", ")", ":", "content", "=", "self", ".", "_fetch", "(", "\"/service/%s/version/%d/deactivate\"", "%", "(", "service_id", ",", "version_number", ")", ",", "method", "=", "\"PUT\"", ")", "return", "FastlyVersion", "(", "self", ",", "content", ")" ]
Deactivate the current version.
[ "Deactivate", "the", "current", "version", "." ]
python
train
AltSchool/dynamic-rest
dynamic_rest/viewsets.py
https://github.com/AltSchool/dynamic-rest/blob/5b0338c3dd8bc638d60c3bb92645857c5b89c920/dynamic_rest/viewsets.py#L94-L144
def initialize_request(self, request, *args, **kargs): """ Override DRF initialize_request() method to swap request.GET (which is aliased by request.query_params) with a mutable instance of QueryParams, and to convert request MergeDict to a subclass of dict for consistency (MergeDict is not a subclass of dict) """ def handle_encodings(request): """ WSGIRequest does not support Unicode values in the query string. WSGIRequest handling has a history of drifting behavior between combinations of Python versions, Django versions and DRF versions. Django changed its QUERY_STRING handling here: https://goo.gl/WThXo6. DRF 3.4.7 changed its behavior here: https://goo.gl/0ojIIO. """ try: return QueryParams(request.GET) except UnicodeEncodeError: pass s = request.environ.get('QUERY_STRING', '') try: s = s.encode('utf-8') except UnicodeDecodeError: pass return QueryParams(s) request.GET = handle_encodings(request) request = super(WithDynamicViewSetMixin, self).initialize_request( request, *args, **kargs ) try: # Django<1.9, DRF<3.2 # MergeDict doesn't have the same API as dict. # Django has deprecated MergeDict and DRF is moving away from # using it - thus, were comfortable replacing it with a QueryDict # This will allow the data property to have normal dict methods. from django.utils.datastructures import MergeDict if isinstance(request._full_data, MergeDict): data_as_dict = request.data.dicts[0] for d in request.data.dicts[1:]: data_as_dict.update(d) request._full_data = data_as_dict except: pass return request
[ "def", "initialize_request", "(", "self", ",", "request", ",", "*", "args", ",", "*", "*", "kargs", ")", ":", "def", "handle_encodings", "(", "request", ")", ":", "\"\"\"\n WSGIRequest does not support Unicode values in the query string.\n WSGIRequest handling has a history of drifting behavior between\n combinations of Python versions, Django versions and DRF versions.\n Django changed its QUERY_STRING handling here:\n https://goo.gl/WThXo6. DRF 3.4.7 changed its behavior here:\n https://goo.gl/0ojIIO.\n \"\"\"", "try", ":", "return", "QueryParams", "(", "request", ".", "GET", ")", "except", "UnicodeEncodeError", ":", "pass", "s", "=", "request", ".", "environ", ".", "get", "(", "'QUERY_STRING'", ",", "''", ")", "try", ":", "s", "=", "s", ".", "encode", "(", "'utf-8'", ")", "except", "UnicodeDecodeError", ":", "pass", "return", "QueryParams", "(", "s", ")", "request", ".", "GET", "=", "handle_encodings", "(", "request", ")", "request", "=", "super", "(", "WithDynamicViewSetMixin", ",", "self", ")", ".", "initialize_request", "(", "request", ",", "*", "args", ",", "*", "*", "kargs", ")", "try", ":", "# Django<1.9, DRF<3.2", "# MergeDict doesn't have the same API as dict.", "# Django has deprecated MergeDict and DRF is moving away from", "# using it - thus, were comfortable replacing it with a QueryDict", "# This will allow the data property to have normal dict methods.", "from", "django", ".", "utils", ".", "datastructures", "import", "MergeDict", "if", "isinstance", "(", "request", ".", "_full_data", ",", "MergeDict", ")", ":", "data_as_dict", "=", "request", ".", "data", ".", "dicts", "[", "0", "]", "for", "d", "in", "request", ".", "data", ".", "dicts", "[", "1", ":", "]", ":", "data_as_dict", ".", "update", "(", "d", ")", "request", ".", "_full_data", "=", "data_as_dict", "except", ":", "pass", "return", "request" ]
Override DRF initialize_request() method to swap request.GET (which is aliased by request.query_params) with a mutable instance of QueryParams, and to convert request MergeDict to a subclass of dict for consistency (MergeDict is not a subclass of dict)
[ "Override", "DRF", "initialize_request", "()", "method", "to", "swap", "request", ".", "GET", "(", "which", "is", "aliased", "by", "request", ".", "query_params", ")", "with", "a", "mutable", "instance", "of", "QueryParams", "and", "to", "convert", "request", "MergeDict", "to", "a", "subclass", "of", "dict", "for", "consistency", "(", "MergeDict", "is", "not", "a", "subclass", "of", "dict", ")" ]
python
train
inspirehep/harvesting-kit
harvestingkit/ftp_utils.py
https://github.com/inspirehep/harvesting-kit/blob/33a7f8aa9dade1d863110c6d8b27dfd955cb471f/harvestingkit/ftp_utils.py#L143-L162
def ls(self, folder=''): """ Lists the files and folders of a specific directory default is the current working directory. :param folder: the folder to be listed. :type folder: string :returns: a tuple with the list of files in the folder and the list of subfolders in the folder. """ current_folder = self._ftp.pwd() self.cd(folder) contents = [] self._ftp.retrlines('LIST', lambda a: contents.append(a)) files = filter(lambda a: a.split()[0].startswith('-'), contents) folders = filter(lambda a: a.split()[0].startswith('d'), contents) files = map(lambda a: ' '.join(a.split()[8:]), files) folders = map(lambda a: ' '.join(a.split()[8:]), folders) self._ftp.cwd(current_folder) return files, folders
[ "def", "ls", "(", "self", ",", "folder", "=", "''", ")", ":", "current_folder", "=", "self", ".", "_ftp", ".", "pwd", "(", ")", "self", ".", "cd", "(", "folder", ")", "contents", "=", "[", "]", "self", ".", "_ftp", ".", "retrlines", "(", "'LIST'", ",", "lambda", "a", ":", "contents", ".", "append", "(", "a", ")", ")", "files", "=", "filter", "(", "lambda", "a", ":", "a", ".", "split", "(", ")", "[", "0", "]", ".", "startswith", "(", "'-'", ")", ",", "contents", ")", "folders", "=", "filter", "(", "lambda", "a", ":", "a", ".", "split", "(", ")", "[", "0", "]", ".", "startswith", "(", "'d'", ")", ",", "contents", ")", "files", "=", "map", "(", "lambda", "a", ":", "' '", ".", "join", "(", "a", ".", "split", "(", ")", "[", "8", ":", "]", ")", ",", "files", ")", "folders", "=", "map", "(", "lambda", "a", ":", "' '", ".", "join", "(", "a", ".", "split", "(", ")", "[", "8", ":", "]", ")", ",", "folders", ")", "self", ".", "_ftp", ".", "cwd", "(", "current_folder", ")", "return", "files", ",", "folders" ]
Lists the files and folders of a specific directory default is the current working directory. :param folder: the folder to be listed. :type folder: string :returns: a tuple with the list of files in the folder and the list of subfolders in the folder.
[ "Lists", "the", "files", "and", "folders", "of", "a", "specific", "directory", "default", "is", "the", "current", "working", "directory", "." ]
python
valid
urinieto/msaf
msaf/base.py
https://github.com/urinieto/msaf/blob/9dbb57d77a1310465a65cc40f1641d083ca74385/msaf/base.py#L112-L140
def estimate_beats(self): """Estimates the beats using librosa. Returns ------- times: np.array Times of estimated beats in seconds. frames: np.array Frame indeces of estimated beats. """ # Compute harmonic-percussive source separation if needed if self._audio_percussive is None: self._audio_harmonic, self._audio_percussive = self.compute_HPSS() # Compute beats tempo, frames = librosa.beat.beat_track( y=self._audio_percussive, sr=self.sr, hop_length=self.hop_length) # To times times = librosa.frames_to_time(frames, sr=self.sr, hop_length=self.hop_length) # TODO: Is this really necessary? if len(times) > 0 and times[0] == 0: times = times[1:] frames = frames[1:] return times, frames
[ "def", "estimate_beats", "(", "self", ")", ":", "# Compute harmonic-percussive source separation if needed", "if", "self", ".", "_audio_percussive", "is", "None", ":", "self", ".", "_audio_harmonic", ",", "self", ".", "_audio_percussive", "=", "self", ".", "compute_HPSS", "(", ")", "# Compute beats", "tempo", ",", "frames", "=", "librosa", ".", "beat", ".", "beat_track", "(", "y", "=", "self", ".", "_audio_percussive", ",", "sr", "=", "self", ".", "sr", ",", "hop_length", "=", "self", ".", "hop_length", ")", "# To times", "times", "=", "librosa", ".", "frames_to_time", "(", "frames", ",", "sr", "=", "self", ".", "sr", ",", "hop_length", "=", "self", ".", "hop_length", ")", "# TODO: Is this really necessary?", "if", "len", "(", "times", ")", ">", "0", "and", "times", "[", "0", "]", "==", "0", ":", "times", "=", "times", "[", "1", ":", "]", "frames", "=", "frames", "[", "1", ":", "]", "return", "times", ",", "frames" ]
Estimates the beats using librosa. Returns ------- times: np.array Times of estimated beats in seconds. frames: np.array Frame indeces of estimated beats.
[ "Estimates", "the", "beats", "using", "librosa", "." ]
python
test
astrorafael/twisted-mqtt
examples/subscriber.py
https://github.com/astrorafael/twisted-mqtt/blob/5b322f7c2b82a502b1e1b70703ae45f1f668d07d/examples/subscriber.py#L72-L87
def connectToBroker(self, protocol): ''' Connect to MQTT broker ''' self.protocol = protocol self.protocol.onPublish = self.onPublish self.protocol.onDisconnection = self.onDisconnection self.protocol.setWindowSize(3) try: yield self.protocol.connect("TwistedMQTT-subs", keepalive=60) yield self.subscribe() except Exception as e: log.error("Connecting to {broker} raised {excp!s}", broker=BROKER, excp=e) else: log.info("Connected and subscribed to {broker}", broker=BROKER)
[ "def", "connectToBroker", "(", "self", ",", "protocol", ")", ":", "self", ".", "protocol", "=", "protocol", "self", ".", "protocol", ".", "onPublish", "=", "self", ".", "onPublish", "self", ".", "protocol", ".", "onDisconnection", "=", "self", ".", "onDisconnection", "self", ".", "protocol", ".", "setWindowSize", "(", "3", ")", "try", ":", "yield", "self", ".", "protocol", ".", "connect", "(", "\"TwistedMQTT-subs\"", ",", "keepalive", "=", "60", ")", "yield", "self", ".", "subscribe", "(", ")", "except", "Exception", "as", "e", ":", "log", ".", "error", "(", "\"Connecting to {broker} raised {excp!s}\"", ",", "broker", "=", "BROKER", ",", "excp", "=", "e", ")", "else", ":", "log", ".", "info", "(", "\"Connected and subscribed to {broker}\"", ",", "broker", "=", "BROKER", ")" ]
Connect to MQTT broker
[ "Connect", "to", "MQTT", "broker" ]
python
test
openstack/networking-arista
networking_arista/common/db_lib.py
https://github.com/openstack/networking-arista/blob/07ce6b1fc62ff74308a6eabfc4cc0ee09fb7b0fe/networking_arista/common/db_lib.py#L204-L216
def get_tenants(tenant_id=None): """Returns list of all project/tenant ids that may be relevant on CVX""" if tenant_id == '': return [] session = db.get_reader_session() project_ids = set() with session.begin(): for m in [models_v2.Network, models_v2.Port]: q = session.query(m.project_id).filter(m.project_id != '') if tenant_id: q = q.filter(m.project_id == tenant_id) project_ids.update(pid[0] for pid in q.distinct()) return [{'project_id': project_id} for project_id in project_ids]
[ "def", "get_tenants", "(", "tenant_id", "=", "None", ")", ":", "if", "tenant_id", "==", "''", ":", "return", "[", "]", "session", "=", "db", ".", "get_reader_session", "(", ")", "project_ids", "=", "set", "(", ")", "with", "session", ".", "begin", "(", ")", ":", "for", "m", "in", "[", "models_v2", ".", "Network", ",", "models_v2", ".", "Port", "]", ":", "q", "=", "session", ".", "query", "(", "m", ".", "project_id", ")", ".", "filter", "(", "m", ".", "project_id", "!=", "''", ")", "if", "tenant_id", ":", "q", "=", "q", ".", "filter", "(", "m", ".", "project_id", "==", "tenant_id", ")", "project_ids", ".", "update", "(", "pid", "[", "0", "]", "for", "pid", "in", "q", ".", "distinct", "(", ")", ")", "return", "[", "{", "'project_id'", ":", "project_id", "}", "for", "project_id", "in", "project_ids", "]" ]
Returns list of all project/tenant ids that may be relevant on CVX
[ "Returns", "list", "of", "all", "project", "/", "tenant", "ids", "that", "may", "be", "relevant", "on", "CVX" ]
python
train
Skype4Py/Skype4Py
Skype4Py/utils.py
https://github.com/Skype4Py/Skype4Py/blob/c48d83f7034109fe46315d45a066126002c6e0d4/Skype4Py/utils.py#L493-L507
def _AddEvents(cls, Class): """Adds events based on the attributes of the given ``...Events`` class. :Parameters: Class : class An `...Events` class whose methods define events that may occur in the instances of the current class. """ def make_event(event): return property(lambda self: self._GetDefaultEventHandler(event), lambda self, Value: self._SetDefaultEventHandler(event, Value)) for event in dir(Class): if not event.startswith('_'): setattr(cls, 'On%s' % event, make_event(event)) cls._EventNames.append(event)
[ "def", "_AddEvents", "(", "cls", ",", "Class", ")", ":", "def", "make_event", "(", "event", ")", ":", "return", "property", "(", "lambda", "self", ":", "self", ".", "_GetDefaultEventHandler", "(", "event", ")", ",", "lambda", "self", ",", "Value", ":", "self", ".", "_SetDefaultEventHandler", "(", "event", ",", "Value", ")", ")", "for", "event", "in", "dir", "(", "Class", ")", ":", "if", "not", "event", ".", "startswith", "(", "'_'", ")", ":", "setattr", "(", "cls", ",", "'On%s'", "%", "event", ",", "make_event", "(", "event", ")", ")", "cls", ".", "_EventNames", ".", "append", "(", "event", ")" ]
Adds events based on the attributes of the given ``...Events`` class. :Parameters: Class : class An `...Events` class whose methods define events that may occur in the instances of the current class.
[ "Adds", "events", "based", "on", "the", "attributes", "of", "the", "given", "...", "Events", "class", ".", ":", "Parameters", ":", "Class", ":", "class", "An", "...", "Events", "class", "whose", "methods", "define", "events", "that", "may", "occur", "in", "the", "instances", "of", "the", "current", "class", "." ]
python
train
jepegit/cellpy
cellpy/utils/batch_tools/engines.py
https://github.com/jepegit/cellpy/blob/9f4a84cdd11f72cfa02cda8c2d7b5174abbb7370/cellpy/utils/batch_tools/engines.py#L87-L131
def simple_db_engine(reader=None, srnos=None): """engine that gets values from the simple excel 'db'""" if reader is None: reader = dbreader.Reader() logger.debug("No reader provided. Creating one myself.") info_dict = dict() info_dict["filenames"] = [reader.get_cell_name(srno) for srno in srnos] info_dict["masses"] = [reader.get_mass(srno) for srno in srnos] info_dict["total_masses"] = [reader.get_total_mass(srno) for srno in srnos] info_dict["loadings"] = [reader.get_loading(srno) for srno in srnos] info_dict["fixed"] = [reader.inspect_hd5f_fixed(srno) for srno in srnos] info_dict["labels"] = [reader.get_label(srno) for srno in srnos] info_dict["cell_type"] = [reader.get_cell_type(srno) for srno in srnos] info_dict["raw_file_names"] = [] info_dict["cellpy_file_names"] = [] logger.debug("created info-dict") for key in list(info_dict.keys()): logger.debug("%s: %s" % (key, str(info_dict[key]))) _groups = [reader.get_group(srno) for srno in srnos] logger.debug(">\ngroups: %s" % str(_groups)) groups = helper.fix_groups(_groups) info_dict["groups"] = groups my_timer_start = time.time() filename_cache = [] info_dict = helper.find_files(info_dict, filename_cache) my_timer_end = time.time() if (my_timer_end - my_timer_start) > 5.0: logger.info( "The function _find_files was very slow. " "Save your info_df so you don't have to run it again!" ) info_df = pd.DataFrame(info_dict) info_df = info_df.sort_values(["groups", "filenames"]) info_df = helper.make_unique_groups(info_df) info_df["labels"] = info_df["filenames"].apply(helper.create_labels) info_df.set_index("filenames", inplace=True) return info_df
[ "def", "simple_db_engine", "(", "reader", "=", "None", ",", "srnos", "=", "None", ")", ":", "if", "reader", "is", "None", ":", "reader", "=", "dbreader", ".", "Reader", "(", ")", "logger", ".", "debug", "(", "\"No reader provided. Creating one myself.\"", ")", "info_dict", "=", "dict", "(", ")", "info_dict", "[", "\"filenames\"", "]", "=", "[", "reader", ".", "get_cell_name", "(", "srno", ")", "for", "srno", "in", "srnos", "]", "info_dict", "[", "\"masses\"", "]", "=", "[", "reader", ".", "get_mass", "(", "srno", ")", "for", "srno", "in", "srnos", "]", "info_dict", "[", "\"total_masses\"", "]", "=", "[", "reader", ".", "get_total_mass", "(", "srno", ")", "for", "srno", "in", "srnos", "]", "info_dict", "[", "\"loadings\"", "]", "=", "[", "reader", ".", "get_loading", "(", "srno", ")", "for", "srno", "in", "srnos", "]", "info_dict", "[", "\"fixed\"", "]", "=", "[", "reader", ".", "inspect_hd5f_fixed", "(", "srno", ")", "for", "srno", "in", "srnos", "]", "info_dict", "[", "\"labels\"", "]", "=", "[", "reader", ".", "get_label", "(", "srno", ")", "for", "srno", "in", "srnos", "]", "info_dict", "[", "\"cell_type\"", "]", "=", "[", "reader", ".", "get_cell_type", "(", "srno", ")", "for", "srno", "in", "srnos", "]", "info_dict", "[", "\"raw_file_names\"", "]", "=", "[", "]", "info_dict", "[", "\"cellpy_file_names\"", "]", "=", "[", "]", "logger", ".", "debug", "(", "\"created info-dict\"", ")", "for", "key", "in", "list", "(", "info_dict", ".", "keys", "(", ")", ")", ":", "logger", ".", "debug", "(", "\"%s: %s\"", "%", "(", "key", ",", "str", "(", "info_dict", "[", "key", "]", ")", ")", ")", "_groups", "=", "[", "reader", ".", "get_group", "(", "srno", ")", "for", "srno", "in", "srnos", "]", "logger", ".", "debug", "(", "\">\\ngroups: %s\"", "%", "str", "(", "_groups", ")", ")", "groups", "=", "helper", ".", "fix_groups", "(", "_groups", ")", "info_dict", "[", "\"groups\"", "]", "=", "groups", "my_timer_start", "=", "time", ".", "time", "(", ")", "filename_cache", "=", "[", "]", "info_dict", "=", "helper", ".", "find_files", "(", "info_dict", ",", "filename_cache", ")", "my_timer_end", "=", "time", ".", "time", "(", ")", "if", "(", "my_timer_end", "-", "my_timer_start", ")", ">", "5.0", ":", "logger", ".", "info", "(", "\"The function _find_files was very slow. \"", "\"Save your info_df so you don't have to run it again!\"", ")", "info_df", "=", "pd", ".", "DataFrame", "(", "info_dict", ")", "info_df", "=", "info_df", ".", "sort_values", "(", "[", "\"groups\"", ",", "\"filenames\"", "]", ")", "info_df", "=", "helper", ".", "make_unique_groups", "(", "info_df", ")", "info_df", "[", "\"labels\"", "]", "=", "info_df", "[", "\"filenames\"", "]", ".", "apply", "(", "helper", ".", "create_labels", ")", "info_df", ".", "set_index", "(", "\"filenames\"", ",", "inplace", "=", "True", ")", "return", "info_df" ]
engine that gets values from the simple excel 'db'
[ "engine", "that", "gets", "values", "from", "the", "simple", "excel", "db" ]
python
train
django-auth-ldap/django-auth-ldap
django_auth_ldap/backend.py
https://github.com/django-auth-ldap/django-auth-ldap/blob/9ce3c2825527f8faa1793958b041816e63d839af/django_auth_ldap/backend.py#L688-L742
def _normalize_mirror_settings(self): """ Validates the group mirroring settings and converts them as necessary. """ def malformed_mirror_groups_except(): return ImproperlyConfigured( "{} must be a collection of group names".format( self.settings._name("MIRROR_GROUPS_EXCEPT") ) ) def malformed_mirror_groups(): return ImproperlyConfigured( "{} must be True or a collection of group names".format( self.settings._name("MIRROR_GROUPS") ) ) mge = self.settings.MIRROR_GROUPS_EXCEPT mg = self.settings.MIRROR_GROUPS if mge is not None: if isinstance(mge, (set, frozenset)): pass elif isinstance(mge, (list, tuple)): mge = self.settings.MIRROR_GROUPS_EXCEPT = frozenset(mge) else: raise malformed_mirror_groups_except() if not all(isinstance(value, str) for value in mge): raise malformed_mirror_groups_except() elif mg: warnings.warn( ConfigurationWarning( "Ignoring {} in favor of {}".format( self.settings._name("MIRROR_GROUPS"), self.settings._name("MIRROR_GROUPS_EXCEPT"), ) ) ) mg = self.settings.MIRROR_GROUPS = None if mg is not None: if isinstance(mg, (bool, set, frozenset)): pass elif isinstance(mg, (list, tuple)): mg = self.settings.MIRROR_GROUPS = frozenset(mg) else: raise malformed_mirror_groups() if isinstance(mg, (set, frozenset)) and ( not all(isinstance(value, str) for value in mg) ): raise malformed_mirror_groups()
[ "def", "_normalize_mirror_settings", "(", "self", ")", ":", "def", "malformed_mirror_groups_except", "(", ")", ":", "return", "ImproperlyConfigured", "(", "\"{} must be a collection of group names\"", ".", "format", "(", "self", ".", "settings", ".", "_name", "(", "\"MIRROR_GROUPS_EXCEPT\"", ")", ")", ")", "def", "malformed_mirror_groups", "(", ")", ":", "return", "ImproperlyConfigured", "(", "\"{} must be True or a collection of group names\"", ".", "format", "(", "self", ".", "settings", ".", "_name", "(", "\"MIRROR_GROUPS\"", ")", ")", ")", "mge", "=", "self", ".", "settings", ".", "MIRROR_GROUPS_EXCEPT", "mg", "=", "self", ".", "settings", ".", "MIRROR_GROUPS", "if", "mge", "is", "not", "None", ":", "if", "isinstance", "(", "mge", ",", "(", "set", ",", "frozenset", ")", ")", ":", "pass", "elif", "isinstance", "(", "mge", ",", "(", "list", ",", "tuple", ")", ")", ":", "mge", "=", "self", ".", "settings", ".", "MIRROR_GROUPS_EXCEPT", "=", "frozenset", "(", "mge", ")", "else", ":", "raise", "malformed_mirror_groups_except", "(", ")", "if", "not", "all", "(", "isinstance", "(", "value", ",", "str", ")", "for", "value", "in", "mge", ")", ":", "raise", "malformed_mirror_groups_except", "(", ")", "elif", "mg", ":", "warnings", ".", "warn", "(", "ConfigurationWarning", "(", "\"Ignoring {} in favor of {}\"", ".", "format", "(", "self", ".", "settings", ".", "_name", "(", "\"MIRROR_GROUPS\"", ")", ",", "self", ".", "settings", ".", "_name", "(", "\"MIRROR_GROUPS_EXCEPT\"", ")", ",", ")", ")", ")", "mg", "=", "self", ".", "settings", ".", "MIRROR_GROUPS", "=", "None", "if", "mg", "is", "not", "None", ":", "if", "isinstance", "(", "mg", ",", "(", "bool", ",", "set", ",", "frozenset", ")", ")", ":", "pass", "elif", "isinstance", "(", "mg", ",", "(", "list", ",", "tuple", ")", ")", ":", "mg", "=", "self", ".", "settings", ".", "MIRROR_GROUPS", "=", "frozenset", "(", "mg", ")", "else", ":", "raise", "malformed_mirror_groups", "(", ")", "if", "isinstance", "(", "mg", ",", "(", "set", ",", "frozenset", ")", ")", "and", "(", "not", "all", "(", "isinstance", "(", "value", ",", "str", ")", "for", "value", "in", "mg", ")", ")", ":", "raise", "malformed_mirror_groups", "(", ")" ]
Validates the group mirroring settings and converts them as necessary.
[ "Validates", "the", "group", "mirroring", "settings", "and", "converts", "them", "as", "necessary", "." ]
python
train
proycon/pynlpl
pynlpl/formats/sonar.py
https://github.com/proycon/pynlpl/blob/7707f69a91caaa6cde037f0d0379f1d42500a68b/pynlpl/formats/sonar.py#L235-L244
def validate(self, formats_dir="../formats/"): """checks if the document is valid""" #TODO: download XSD from web if self.inline: xmlschema = ElementTree.XMLSchema(ElementTree.parse(StringIO("\n".join(open(formats_dir+"dcoi-dsc.xsd").readlines())))) xmlschema.assertValid(self.tree) #return xmlschema.validate(self) else: xmlschema = ElementTree.XMLSchema(ElementTree.parse(StringIO("\n".join(open(formats_dir+"dutchsemcor-standalone.xsd").readlines())))) xmlschema.assertValid(self.tree)
[ "def", "validate", "(", "self", ",", "formats_dir", "=", "\"../formats/\"", ")", ":", "#TODO: download XSD from web", "if", "self", ".", "inline", ":", "xmlschema", "=", "ElementTree", ".", "XMLSchema", "(", "ElementTree", ".", "parse", "(", "StringIO", "(", "\"\\n\"", ".", "join", "(", "open", "(", "formats_dir", "+", "\"dcoi-dsc.xsd\"", ")", ".", "readlines", "(", ")", ")", ")", ")", ")", "xmlschema", ".", "assertValid", "(", "self", ".", "tree", ")", "#return xmlschema.validate(self)", "else", ":", "xmlschema", "=", "ElementTree", ".", "XMLSchema", "(", "ElementTree", ".", "parse", "(", "StringIO", "(", "\"\\n\"", ".", "join", "(", "open", "(", "formats_dir", "+", "\"dutchsemcor-standalone.xsd\"", ")", ".", "readlines", "(", ")", ")", ")", ")", ")", "xmlschema", ".", "assertValid", "(", "self", ".", "tree", ")" ]
checks if the document is valid
[ "checks", "if", "the", "document", "is", "valid" ]
python
train
faucamp/python-gsmmodem
gsmmodem/modem.py
https://github.com/faucamp/python-gsmmodem/blob/834c68b1387ca2c91e2210faa8f75526b39723b5/gsmmodem/modem.py#L1088-L1112
def deleteMultipleStoredSms(self, delFlag=4, memory=None): """ Deletes all SMS messages that have the specified read status. The messages are read from the memory set by the "memory" parameter. The value of the "delFlag" paramater is the same as the "DelFlag" parameter of the +CMGD command: 1: Delete All READ messages 2: Delete All READ and SENT messages 3: Delete All READ, SENT and UNSENT messages 4: Delete All messages (this is the default) :param delFlag: Controls what type of messages to delete; see description above. :type delFlag: int :param memory: The memory type to delete from. If None, use the current default SMS read/delete memory :type memory: str or None :param delete: If True, delete returned messages from the device/SIM card :type delete: bool :raise ValueErrror: if "delFlag" is not in range [1,4] :raise CommandError: if unable to delete the stored messages """ if 0 < delFlag <= 4: self._setSmsMemory(readDelete=memory) self.write('AT+CMGD=1,{0}'.format(delFlag)) else: raise ValueError('"delFlag" must be in range [1,4]')
[ "def", "deleteMultipleStoredSms", "(", "self", ",", "delFlag", "=", "4", ",", "memory", "=", "None", ")", ":", "if", "0", "<", "delFlag", "<=", "4", ":", "self", ".", "_setSmsMemory", "(", "readDelete", "=", "memory", ")", "self", ".", "write", "(", "'AT+CMGD=1,{0}'", ".", "format", "(", "delFlag", ")", ")", "else", ":", "raise", "ValueError", "(", "'\"delFlag\" must be in range [1,4]'", ")" ]
Deletes all SMS messages that have the specified read status. The messages are read from the memory set by the "memory" parameter. The value of the "delFlag" paramater is the same as the "DelFlag" parameter of the +CMGD command: 1: Delete All READ messages 2: Delete All READ and SENT messages 3: Delete All READ, SENT and UNSENT messages 4: Delete All messages (this is the default) :param delFlag: Controls what type of messages to delete; see description above. :type delFlag: int :param memory: The memory type to delete from. If None, use the current default SMS read/delete memory :type memory: str or None :param delete: If True, delete returned messages from the device/SIM card :type delete: bool :raise ValueErrror: if "delFlag" is not in range [1,4] :raise CommandError: if unable to delete the stored messages
[ "Deletes", "all", "SMS", "messages", "that", "have", "the", "specified", "read", "status", ".", "The", "messages", "are", "read", "from", "the", "memory", "set", "by", "the", "memory", "parameter", ".", "The", "value", "of", "the", "delFlag", "paramater", "is", "the", "same", "as", "the", "DelFlag", "parameter", "of", "the", "+", "CMGD", "command", ":", "1", ":", "Delete", "All", "READ", "messages", "2", ":", "Delete", "All", "READ", "and", "SENT", "messages", "3", ":", "Delete", "All", "READ", "SENT", "and", "UNSENT", "messages", "4", ":", "Delete", "All", "messages", "(", "this", "is", "the", "default", ")", ":", "param", "delFlag", ":", "Controls", "what", "type", "of", "messages", "to", "delete", ";", "see", "description", "above", ".", ":", "type", "delFlag", ":", "int", ":", "param", "memory", ":", "The", "memory", "type", "to", "delete", "from", ".", "If", "None", "use", "the", "current", "default", "SMS", "read", "/", "delete", "memory", ":", "type", "memory", ":", "str", "or", "None", ":", "param", "delete", ":", "If", "True", "delete", "returned", "messages", "from", "the", "device", "/", "SIM", "card", ":", "type", "delete", ":", "bool", ":", "raise", "ValueErrror", ":", "if", "delFlag", "is", "not", "in", "range", "[", "1", "4", "]", ":", "raise", "CommandError", ":", "if", "unable", "to", "delete", "the", "stored", "messages" ]
python
train
CalebBell/thermo
thermo/viscosity.py
https://github.com/CalebBell/thermo/blob/3857ed023a3e64fd3039a32d53576c24990ef1c3/thermo/viscosity.py#L1503-L1538
def calculate(self, T, method): r'''Method to calculate low-pressure gas viscosity at tempearture `T` with a given method. This method has no exception handling; see `T_dependent_property` for that. Parameters ---------- T : float Temperature of the gas, [K] method : str Name of the method to use Returns ------- mu : float Viscosity of the gas at T and a low pressure, [Pa*S] ''' if method == GHARAGHEIZI: mu = Gharagheizi_gas_viscosity(T, self.Tc, self.Pc, self.MW) elif method == COOLPROP: mu = CoolProp_T_dependent_property(T, self.CASRN, 'V', 'g') elif method == DIPPR_PERRY_8E: mu = EQ102(T, *self.Perrys2_312_coeffs) elif method == VDI_PPDS: mu = horner(self.VDI_PPDS_coeffs, T) elif method == YOON_THODOS: mu = Yoon_Thodos(T, self.Tc, self.Pc, self.MW) elif method == STIEL_THODOS: mu = Stiel_Thodos(T, self.Tc, self.Pc, self.MW) elif method == LUCAS_GAS: mu = lucas_gas(T, self.Tc, self.Pc, self.Zc, self.MW, self.dipole, CASRN=self.CASRN) elif method in self.tabular_data: mu = self.interpolate(T, method) return mu
[ "def", "calculate", "(", "self", ",", "T", ",", "method", ")", ":", "if", "method", "==", "GHARAGHEIZI", ":", "mu", "=", "Gharagheizi_gas_viscosity", "(", "T", ",", "self", ".", "Tc", ",", "self", ".", "Pc", ",", "self", ".", "MW", ")", "elif", "method", "==", "COOLPROP", ":", "mu", "=", "CoolProp_T_dependent_property", "(", "T", ",", "self", ".", "CASRN", ",", "'V'", ",", "'g'", ")", "elif", "method", "==", "DIPPR_PERRY_8E", ":", "mu", "=", "EQ102", "(", "T", ",", "*", "self", ".", "Perrys2_312_coeffs", ")", "elif", "method", "==", "VDI_PPDS", ":", "mu", "=", "horner", "(", "self", ".", "VDI_PPDS_coeffs", ",", "T", ")", "elif", "method", "==", "YOON_THODOS", ":", "mu", "=", "Yoon_Thodos", "(", "T", ",", "self", ".", "Tc", ",", "self", ".", "Pc", ",", "self", ".", "MW", ")", "elif", "method", "==", "STIEL_THODOS", ":", "mu", "=", "Stiel_Thodos", "(", "T", ",", "self", ".", "Tc", ",", "self", ".", "Pc", ",", "self", ".", "MW", ")", "elif", "method", "==", "LUCAS_GAS", ":", "mu", "=", "lucas_gas", "(", "T", ",", "self", ".", "Tc", ",", "self", ".", "Pc", ",", "self", ".", "Zc", ",", "self", ".", "MW", ",", "self", ".", "dipole", ",", "CASRN", "=", "self", ".", "CASRN", ")", "elif", "method", "in", "self", ".", "tabular_data", ":", "mu", "=", "self", ".", "interpolate", "(", "T", ",", "method", ")", "return", "mu" ]
r'''Method to calculate low-pressure gas viscosity at tempearture `T` with a given method. This method has no exception handling; see `T_dependent_property` for that. Parameters ---------- T : float Temperature of the gas, [K] method : str Name of the method to use Returns ------- mu : float Viscosity of the gas at T and a low pressure, [Pa*S]
[ "r", "Method", "to", "calculate", "low", "-", "pressure", "gas", "viscosity", "at", "tempearture", "T", "with", "a", "given", "method", "." ]
python
valid
jasonrbriggs/stomp.py
stomp/transport.py
https://github.com/jasonrbriggs/stomp.py/blob/643843c5fbf25fd24339dd0e69a9411c3d8b94c7/stomp/transport.py#L103-L113
def start(self): """ Start the connection. This should be called after all listeners have been registered. If this method is not called, no frames will be received by the connection. """ self.running = True self.attempt_connection() receiver_thread = self.create_thread_fc(self.__receiver_loop) receiver_thread.name = "StompReceiver%s" % getattr(receiver_thread, "name", "Thread") self.notify('connecting')
[ "def", "start", "(", "self", ")", ":", "self", ".", "running", "=", "True", "self", ".", "attempt_connection", "(", ")", "receiver_thread", "=", "self", ".", "create_thread_fc", "(", "self", ".", "__receiver_loop", ")", "receiver_thread", ".", "name", "=", "\"StompReceiver%s\"", "%", "getattr", "(", "receiver_thread", ",", "\"name\"", ",", "\"Thread\"", ")", "self", ".", "notify", "(", "'connecting'", ")" ]
Start the connection. This should be called after all listeners have been registered. If this method is not called, no frames will be received by the connection.
[ "Start", "the", "connection", ".", "This", "should", "be", "called", "after", "all", "listeners", "have", "been", "registered", ".", "If", "this", "method", "is", "not", "called", "no", "frames", "will", "be", "received", "by", "the", "connection", "." ]
python
train
divio/django-filer
filer/admin/folderadmin.py
https://github.com/divio/django-filer/blob/946629087943d41eff290f07bfdf240b8853dd88/filer/admin/folderadmin.py#L504-L581
def response_action(self, request, files_queryset, folders_queryset): """ Handle an admin action. This is called if a request is POSTed to the changelist; it returns an HttpResponse if the action was handled, and None otherwise. """ # There can be multiple action forms on the page (at the top # and bottom of the change list, for example). Get the action # whose button was pushed. try: action_index = int(request.POST.get('index', 0)) except ValueError: action_index = 0 # Construct the action form. data = request.POST.copy() data.pop(helpers.ACTION_CHECKBOX_NAME, None) data.pop("index", None) # Use the action whose button was pushed try: data.update({'action': data.getlist('action')[action_index]}) except IndexError: # If we didn't get an action from the chosen form that's invalid # POST data, so by deleting action it'll fail the validation check # below. So no need to do anything here pass action_form = self.action_form(data, auto_id=None) action_form.fields['action'].choices = self.get_action_choices(request) # If the form's valid we can handle the action. if action_form.is_valid(): action = action_form.cleaned_data['action'] select_across = action_form.cleaned_data['select_across'] func, name, description = self.get_actions(request)[action] # Get the list of selected PKs. If nothing's selected, we can't # perform an action on it, so bail. Except we want to perform # the action explicitly on all objects. selected = request.POST.getlist(helpers.ACTION_CHECKBOX_NAME) if not selected and not select_across: # Reminder that something needs to be selected or nothing # will happen msg = _("Items must be selected in order to perform " "actions on them. No items have been changed.") self.message_user(request, msg) return None if not select_across: selected_files = [] selected_folders = [] for pk in selected: if pk[:5] == "file-": selected_files.append(pk[5:]) else: selected_folders.append(pk[7:]) # Perform the action only on the selected objects files_queryset = files_queryset.filter(pk__in=selected_files) folders_queryset = folders_queryset.filter( pk__in=selected_folders) response = func(self, request, files_queryset, folders_queryset) # Actions may return an HttpResponse, which will be used as the # response from the POST. If not, we'll be a good little HTTP # citizen and redirect back to the changelist page. if isinstance(response, HttpResponse): return response else: return HttpResponseRedirect(request.get_full_path()) else: msg = _("No action selected.") self.message_user(request, msg) return None
[ "def", "response_action", "(", "self", ",", "request", ",", "files_queryset", ",", "folders_queryset", ")", ":", "# There can be multiple action forms on the page (at the top", "# and bottom of the change list, for example). Get the action", "# whose button was pushed.", "try", ":", "action_index", "=", "int", "(", "request", ".", "POST", ".", "get", "(", "'index'", ",", "0", ")", ")", "except", "ValueError", ":", "action_index", "=", "0", "# Construct the action form.", "data", "=", "request", ".", "POST", ".", "copy", "(", ")", "data", ".", "pop", "(", "helpers", ".", "ACTION_CHECKBOX_NAME", ",", "None", ")", "data", ".", "pop", "(", "\"index\"", ",", "None", ")", "# Use the action whose button was pushed", "try", ":", "data", ".", "update", "(", "{", "'action'", ":", "data", ".", "getlist", "(", "'action'", ")", "[", "action_index", "]", "}", ")", "except", "IndexError", ":", "# If we didn't get an action from the chosen form that's invalid", "# POST data, so by deleting action it'll fail the validation check", "# below. So no need to do anything here", "pass", "action_form", "=", "self", ".", "action_form", "(", "data", ",", "auto_id", "=", "None", ")", "action_form", ".", "fields", "[", "'action'", "]", ".", "choices", "=", "self", ".", "get_action_choices", "(", "request", ")", "# If the form's valid we can handle the action.", "if", "action_form", ".", "is_valid", "(", ")", ":", "action", "=", "action_form", ".", "cleaned_data", "[", "'action'", "]", "select_across", "=", "action_form", ".", "cleaned_data", "[", "'select_across'", "]", "func", ",", "name", ",", "description", "=", "self", ".", "get_actions", "(", "request", ")", "[", "action", "]", "# Get the list of selected PKs. If nothing's selected, we can't", "# perform an action on it, so bail. Except we want to perform", "# the action explicitly on all objects.", "selected", "=", "request", ".", "POST", ".", "getlist", "(", "helpers", ".", "ACTION_CHECKBOX_NAME", ")", "if", "not", "selected", "and", "not", "select_across", ":", "# Reminder that something needs to be selected or nothing", "# will happen", "msg", "=", "_", "(", "\"Items must be selected in order to perform \"", "\"actions on them. No items have been changed.\"", ")", "self", ".", "message_user", "(", "request", ",", "msg", ")", "return", "None", "if", "not", "select_across", ":", "selected_files", "=", "[", "]", "selected_folders", "=", "[", "]", "for", "pk", "in", "selected", ":", "if", "pk", "[", ":", "5", "]", "==", "\"file-\"", ":", "selected_files", ".", "append", "(", "pk", "[", "5", ":", "]", ")", "else", ":", "selected_folders", ".", "append", "(", "pk", "[", "7", ":", "]", ")", "# Perform the action only on the selected objects", "files_queryset", "=", "files_queryset", ".", "filter", "(", "pk__in", "=", "selected_files", ")", "folders_queryset", "=", "folders_queryset", ".", "filter", "(", "pk__in", "=", "selected_folders", ")", "response", "=", "func", "(", "self", ",", "request", ",", "files_queryset", ",", "folders_queryset", ")", "# Actions may return an HttpResponse, which will be used as the", "# response from the POST. If not, we'll be a good little HTTP", "# citizen and redirect back to the changelist page.", "if", "isinstance", "(", "response", ",", "HttpResponse", ")", ":", "return", "response", "else", ":", "return", "HttpResponseRedirect", "(", "request", ".", "get_full_path", "(", ")", ")", "else", ":", "msg", "=", "_", "(", "\"No action selected.\"", ")", "self", ".", "message_user", "(", "request", ",", "msg", ")", "return", "None" ]
Handle an admin action. This is called if a request is POSTed to the changelist; it returns an HttpResponse if the action was handled, and None otherwise.
[ "Handle", "an", "admin", "action", ".", "This", "is", "called", "if", "a", "request", "is", "POSTed", "to", "the", "changelist", ";", "it", "returns", "an", "HttpResponse", "if", "the", "action", "was", "handled", "and", "None", "otherwise", "." ]
python
train
materialsproject/pymatgen
pymatgen/analysis/fragmenter.py
https://github.com/materialsproject/pymatgen/blob/4ca558cf72f8d5f8a1f21dfdfc0181a971c186da/pymatgen/analysis/fragmenter.py#L90-L126
def _fragment_one_level(self, mol_graphs): """ Perform one step of iterative fragmentation on a list of molecule graphs. Loop through the graphs, then loop through each graph's edges and attempt to remove that edge in order to obtain two disconnected subgraphs, aka two new fragments. If successful, check to see if the new fragments are already present in self.unique_fragments, and append them if not. If unsucessful, we know that edge belongs to a ring. If we are opening rings, do so with that bond, and then again check if the resulting fragment is present in self.unique_fragments and add it if it is not. """ unique_fragments_on_this_level = [] for mol_graph in mol_graphs: for edge in mol_graph.graph.edges: bond = [(edge[0],edge[1])] try: fragments = mol_graph.split_molecule_subgraphs(bond, allow_reverse=True) for fragment in fragments: found = False for unique_fragment in self.unique_fragments: if unique_fragment.isomorphic_to(fragment): found = True break if not found: self.unique_fragments.append(fragment) unique_fragments_on_this_level.append(fragment) except MolGraphSplitError: if self.open_rings: fragment = open_ring(mol_graph, bond, self.opt_steps) found = False for unique_fragment in self.unique_fragments: if unique_fragment.isomorphic_to(fragment): found = True break if not found: self.unique_fragments.append(fragment) self.unique_fragments_from_ring_openings.append(fragment) unique_fragments_on_this_level.append(fragment) return unique_fragments_on_this_level
[ "def", "_fragment_one_level", "(", "self", ",", "mol_graphs", ")", ":", "unique_fragments_on_this_level", "=", "[", "]", "for", "mol_graph", "in", "mol_graphs", ":", "for", "edge", "in", "mol_graph", ".", "graph", ".", "edges", ":", "bond", "=", "[", "(", "edge", "[", "0", "]", ",", "edge", "[", "1", "]", ")", "]", "try", ":", "fragments", "=", "mol_graph", ".", "split_molecule_subgraphs", "(", "bond", ",", "allow_reverse", "=", "True", ")", "for", "fragment", "in", "fragments", ":", "found", "=", "False", "for", "unique_fragment", "in", "self", ".", "unique_fragments", ":", "if", "unique_fragment", ".", "isomorphic_to", "(", "fragment", ")", ":", "found", "=", "True", "break", "if", "not", "found", ":", "self", ".", "unique_fragments", ".", "append", "(", "fragment", ")", "unique_fragments_on_this_level", ".", "append", "(", "fragment", ")", "except", "MolGraphSplitError", ":", "if", "self", ".", "open_rings", ":", "fragment", "=", "open_ring", "(", "mol_graph", ",", "bond", ",", "self", ".", "opt_steps", ")", "found", "=", "False", "for", "unique_fragment", "in", "self", ".", "unique_fragments", ":", "if", "unique_fragment", ".", "isomorphic_to", "(", "fragment", ")", ":", "found", "=", "True", "break", "if", "not", "found", ":", "self", ".", "unique_fragments", ".", "append", "(", "fragment", ")", "self", ".", "unique_fragments_from_ring_openings", ".", "append", "(", "fragment", ")", "unique_fragments_on_this_level", ".", "append", "(", "fragment", ")", "return", "unique_fragments_on_this_level" ]
Perform one step of iterative fragmentation on a list of molecule graphs. Loop through the graphs, then loop through each graph's edges and attempt to remove that edge in order to obtain two disconnected subgraphs, aka two new fragments. If successful, check to see if the new fragments are already present in self.unique_fragments, and append them if not. If unsucessful, we know that edge belongs to a ring. If we are opening rings, do so with that bond, and then again check if the resulting fragment is present in self.unique_fragments and add it if it is not.
[ "Perform", "one", "step", "of", "iterative", "fragmentation", "on", "a", "list", "of", "molecule", "graphs", ".", "Loop", "through", "the", "graphs", "then", "loop", "through", "each", "graph", "s", "edges", "and", "attempt", "to", "remove", "that", "edge", "in", "order", "to", "obtain", "two", "disconnected", "subgraphs", "aka", "two", "new", "fragments", ".", "If", "successful", "check", "to", "see", "if", "the", "new", "fragments", "are", "already", "present", "in", "self", ".", "unique_fragments", "and", "append", "them", "if", "not", ".", "If", "unsucessful", "we", "know", "that", "edge", "belongs", "to", "a", "ring", ".", "If", "we", "are", "opening", "rings", "do", "so", "with", "that", "bond", "and", "then", "again", "check", "if", "the", "resulting", "fragment", "is", "present", "in", "self", ".", "unique_fragments", "and", "add", "it", "if", "it", "is", "not", "." ]
python
train
awslabs/sockeye
sockeye/image_captioning/inference.py
https://github.com/awslabs/sockeye/blob/5d64a1ee1ef3cbba17c6d1d94bc061020c43f6ab/sockeye/image_captioning/inference.py#L102-L130
def translate(self, trans_inputs: List[TranslatorInput]) -> List[TranslatorOutput]: """ Batch-translates a list of TranslatorInputs, returns a list of TranslatorOutputs. Splits oversized sentences to sentence chunks of size less than max_input_length. :param trans_inputs: List of TranslatorInputs as returned by make_input(). :return: List of translation results. """ batch_size = self.max_batch_size # translate in batch-sized blocks over input chunks translations = [] for batch_id, batch in enumerate(utils.grouper(trans_inputs, batch_size)): logger.debug("Translating batch %d", batch_id) # underfilled batch will be filled to a full batch size with copies of the 1st input rest = batch_size - len(batch) if rest > 0: logger.debug("Extending the last batch to the full batch size (%d)", batch_size) batch = batch + [batch[0]] * rest batch_translations = self._translate_nd(*self._get_inference_input(batch)) # truncate to remove filler translations if rest > 0: batch_translations = batch_translations[:-rest] translations.extend(batch_translations) # Concatenate results results = [] # type: List[TranslatorOutput] for trans_input, translation in zip(trans_inputs, translations): results.append(self._make_result(trans_input, translation)) return results
[ "def", "translate", "(", "self", ",", "trans_inputs", ":", "List", "[", "TranslatorInput", "]", ")", "->", "List", "[", "TranslatorOutput", "]", ":", "batch_size", "=", "self", ".", "max_batch_size", "# translate in batch-sized blocks over input chunks", "translations", "=", "[", "]", "for", "batch_id", ",", "batch", "in", "enumerate", "(", "utils", ".", "grouper", "(", "trans_inputs", ",", "batch_size", ")", ")", ":", "logger", ".", "debug", "(", "\"Translating batch %d\"", ",", "batch_id", ")", "# underfilled batch will be filled to a full batch size with copies of the 1st input", "rest", "=", "batch_size", "-", "len", "(", "batch", ")", "if", "rest", ">", "0", ":", "logger", ".", "debug", "(", "\"Extending the last batch to the full batch size (%d)\"", ",", "batch_size", ")", "batch", "=", "batch", "+", "[", "batch", "[", "0", "]", "]", "*", "rest", "batch_translations", "=", "self", ".", "_translate_nd", "(", "*", "self", ".", "_get_inference_input", "(", "batch", ")", ")", "# truncate to remove filler translations", "if", "rest", ">", "0", ":", "batch_translations", "=", "batch_translations", "[", ":", "-", "rest", "]", "translations", ".", "extend", "(", "batch_translations", ")", "# Concatenate results", "results", "=", "[", "]", "# type: List[TranslatorOutput]", "for", "trans_input", ",", "translation", "in", "zip", "(", "trans_inputs", ",", "translations", ")", ":", "results", ".", "append", "(", "self", ".", "_make_result", "(", "trans_input", ",", "translation", ")", ")", "return", "results" ]
Batch-translates a list of TranslatorInputs, returns a list of TranslatorOutputs. Splits oversized sentences to sentence chunks of size less than max_input_length. :param trans_inputs: List of TranslatorInputs as returned by make_input(). :return: List of translation results.
[ "Batch", "-", "translates", "a", "list", "of", "TranslatorInputs", "returns", "a", "list", "of", "TranslatorOutputs", ".", "Splits", "oversized", "sentences", "to", "sentence", "chunks", "of", "size", "less", "than", "max_input_length", "." ]
python
train
ivanyu/idx2numpy
idx2numpy/converters.py
https://github.com/ivanyu/idx2numpy/blob/9b88698314973226212181d1747dfad6c6974e51/idx2numpy/converters.py#L49-L59
def convert_from_file(file): """ Reads the content of file in IDX format, converts it into numpy.ndarray and returns it. file is a file-like object (with read() method) or a file name. """ if isinstance(file, six_string_types): with open(file, 'rb') as f: return _internal_convert(f) else: return _internal_convert(file)
[ "def", "convert_from_file", "(", "file", ")", ":", "if", "isinstance", "(", "file", ",", "six_string_types", ")", ":", "with", "open", "(", "file", ",", "'rb'", ")", "as", "f", ":", "return", "_internal_convert", "(", "f", ")", "else", ":", "return", "_internal_convert", "(", "file", ")" ]
Reads the content of file in IDX format, converts it into numpy.ndarray and returns it. file is a file-like object (with read() method) or a file name.
[ "Reads", "the", "content", "of", "file", "in", "IDX", "format", "converts", "it", "into", "numpy", ".", "ndarray", "and", "returns", "it", ".", "file", "is", "a", "file", "-", "like", "object", "(", "with", "read", "()", "method", ")", "or", "a", "file", "name", "." ]
python
train
armstrong/armstrong.dev
armstrong/dev/tasks.py
https://github.com/armstrong/armstrong.dev/blob/6fd8b863038d9e5ebfd52dfe5ce6c85fb441c267/armstrong/dev/tasks.py#L64-L74
def replaced_by_django_migrations(func, *args, **kwargs): """Decorator to preempt South requirement""" DjangoSettings() # trigger helpful messages if Django is missing import django if django.VERSION >= (1, 7): print("Django 1.7+ has its own migrations system.") print("Use this instead: `invoke managepy makemigrations`") sys.exit(1) return func(*args, **kwargs)
[ "def", "replaced_by_django_migrations", "(", "func", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "DjangoSettings", "(", ")", "# trigger helpful messages if Django is missing", "import", "django", "if", "django", ".", "VERSION", ">=", "(", "1", ",", "7", ")", ":", "print", "(", "\"Django 1.7+ has its own migrations system.\"", ")", "print", "(", "\"Use this instead: `invoke managepy makemigrations`\"", ")", "sys", ".", "exit", "(", "1", ")", "return", "func", "(", "*", "args", ",", "*", "*", "kwargs", ")" ]
Decorator to preempt South requirement
[ "Decorator", "to", "preempt", "South", "requirement" ]
python
train
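The function above gates a legacy South task on the installed Django version. A minimal sketch of the same version-gate pattern written as a conventional decorator; south_only is an illustrative name, and django must be importable at call time.

import sys

def south_only(func):
    def wrapper(*args, **kwargs):
        import django
        if django.VERSION >= (1, 7):
            # Refuse to run: Django 1.7+ ships its own migrations system.
            print("Django 1.7+ has its own migrations system.")
            sys.exit(1)
        return func(*args, **kwargs)
    return wrapper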
mozillazg/python-pinyin
pypinyin/core.py
https://github.com/mozillazg/python-pinyin/blob/b44756c852e0d2f50f251e3098cbbfef51774979/pypinyin/core.py#L251-L283
def slug(hans, style=Style.NORMAL, heteronym=False, separator='-',
         errors='default', strict=True):
    """Generate a slug string.

    :param hans: Chinese characters
    :type hans: unicode or list
    :param style: the pinyin style to use; defaults to
                  :py:attr:`~pypinyin.Style.NORMAL`.
                  See :class:`~pypinyin.Style` for all available styles
    :param heteronym: whether to enable heteronym (multiple-reading) support
    :param separator: the separator/connector placed between the pinyin of
                      two characters
    :param errors: how to handle characters that have no pinyin;
                   see :py:func:`~pypinyin.pinyin` for details
    :param strict: whether to handle initials and finals strictly according
                   to the Scheme for the Chinese Phonetic Alphabet
                   (汉语拼音方案); see :ref:`strict`
    :return: a slug string.
    :raise AssertionError: raised when the input string is not unicode

    ::

      >>> import pypinyin
      >>> from pypinyin import Style
      >>> pypinyin.slug('中国人')
      'zhong-guo-ren'
      >>> pypinyin.slug('中国人', separator=' ')
      'zhong guo ren'
      >>> pypinyin.slug('中国人', style=Style.FIRST_LETTER)
      'z-g-r'
      >>> pypinyin.slug('中国人', style=Style.CYRILLIC)
      'чжун1-го2-жэнь2'
    """
    return separator.join(chain(*pinyin(hans, style=style, heteronym=heteronym,
                                        errors=errors, strict=strict)
                                ))
[ "def", "slug", "(", "hans", ",", "style", "=", "Style", ".", "NORMAL", ",", "heteronym", "=", "False", ",", "separator", "=", "'-'", ",", "errors", "=", "'default'", ",", "strict", "=", "True", ")", ":", "return", "separator", ".", "join", "(", "chain", "(", "*", "pinyin", "(", "hans", ",", "style", "=", "style", ",", "heteronym", "=", "heteronym", ",", "errors", "=", "errors", ",", "strict", "=", "strict", ")", ")", ")" ]
Generate a slug string.

:param hans: Chinese characters
:type hans: unicode or list
:param style: the pinyin style to use; defaults to
              :py:attr:`~pypinyin.Style.NORMAL`.
              See :class:`~pypinyin.Style` for all available styles
:param heteronym: whether to enable heteronym (multiple-reading) support
:param separator: the separator/connector placed between the pinyin of
                  two characters
:param errors: how to handle characters that have no pinyin;
               see :py:func:`~pypinyin.pinyin` for details
:param strict: whether to handle initials and finals strictly according
               to the Scheme for the Chinese Phonetic Alphabet
               (汉语拼音方案); see :ref:`strict`
:return: a slug string.
:raise AssertionError: raised when the input string is not unicode

::

  >>> import pypinyin
  >>> from pypinyin import Style
  >>> pypinyin.slug('中国人')
  'zhong-guo-ren'
  >>> pypinyin.slug('中国人', separator=' ')
  'zhong guo ren'
  >>> pypinyin.slug('中国人', style=Style.FIRST_LETTER)
  'z-g-r'
  >>> pypinyin.slug('中国人', style=Style.CYRILLIC)
  'чжун1-го2-жэнь2'
[ "生成", "slug", "字符串", "." ]
python
train
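slug() flattens pinyin()'s list-of-lists output with itertools.chain before joining on the separator. A minimal sketch of that flatten-and-join step, with a hard-coded nested list standing in for pinyin()'s return value:

from itertools import chain

nested = [['zhong'], ['guo'], ['ren']]  # one inner list per character
print('-'.join(chain(*nested)))         # prints: zhong-guo-ren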
NASA-AMMOS/AIT-Core
ait/core/server/stream.py
https://github.com/NASA-AMMOS/AIT-Core/blob/9d85bd9c738e7a6a6fbdff672bea708238b02a3a/ait/core/server/stream.py#L54-L68
def process(self, input_data, topic=None):
    """
    Invokes each handler in sequence.
    Publishes final output data.

    Params:
        input_data:  message received by stream
        topic:       name of plugin or stream message received from,
                     if applicable
    """
    for handler in self.handlers:
        output = handler.handle(input_data)
        input_data = output

    self.publish(input_data)
[ "def", "process", "(", "self", ",", "input_data", ",", "topic", "=", "None", ")", ":", "for", "handler", "in", "self", ".", "handlers", ":", "output", "=", "handler", ".", "handle", "(", "input_data", ")", "input_data", "=", "output", "self", ".", "publish", "(", "input_data", ")" ]
Invokes each handler in sequence.
Publishes final output data.

Params:
    input_data:  message received by stream
    topic:       name of plugin or stream message received from,
                 if applicable
[ "Invokes", "each", "handler", "in", "sequence", ".", "Publishes", "final", "output", "data", "." ]
python
train
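The process() method above is a classic handler pipeline: each handler's output becomes the next handler's input before the final result is published. A generic, self-contained sketch of that pattern; the handler classes are illustrative, not part of AIT.

class UppercaseHandler:
    def handle(self, data):
        return data.upper()

class SuffixHandler:
    def handle(self, data):
        return data + '!'

def run_pipeline(handlers, input_data):
    # Chain handlers: each output feeds the next handler's input.
    for handler in handlers:
        input_data = handler.handle(input_data)
    return input_data

print(run_pipeline([UppercaseHandler(), SuffixHandler()], 'hello'))  # HELLO!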
