Dataset schema (column name, type, observed value range):

  repo              string, 7 to 54 chars
  path              string, 4 to 192 chars
  url               string, 87 to 284 chars
  code              string, 78 to 104k chars
  code_tokens       sequence
  docstring         string, 1 to 46.9k chars
  docstring_tokens  sequence
  language          string, 1 distinct value (python)
  partition         string, 3 distinct values (train/valid/test)
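These columns follow the raw CodeSearchNet record layout. As a hedged sketch of iterating such a corpus with the Hugging Face datasets library — the hub identifier below is an assumption, substitute wherever this dump is actually hosted:

from datasets import load_dataset

# Hypothetical hub ID -- replace with the real location of this corpus.
ds = load_dataset("user/python-code-docstring-corpus", split="test")
for record in ds.select(range(3)):
    # Column names are taken from the schema above.
    print(record["repo"], record["path"], record["partition"])
    print(record["docstring"])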
rocky/python3-trepan
celery/ctrepan.py
https://github.com/rocky/python3-trepan/blob/14e91bc0acce090d67be145b1ac040cab92ac5f3/celery/ctrepan.py#L104-L110
def debugger():
    """Return the current debugger instance (if any), or create a new one."""
    dbg = _current[0]
    if dbg is None or not dbg.active:
        dbg = _current[0] = RemoteCeleryTrepan()
    return dbg
[ "def", "debugger", "(", ")", ":", "dbg", "=", "_current", "[", "0", "]", "if", "dbg", "is", "None", "or", "not", "dbg", ".", "active", ":", "dbg", "=", "_current", "[", "0", "]", "=", "RemoteCeleryTrepan", "(", ")", "return", "dbg" ]
Return the current debugger instance (if any), or create a new one.
[ "Return", "the", "current", "debugger", "instance", "(", "if", "any", ")", "or", "creates", "a", "new", "one", "." ]
python
test
saltstack/salt
salt/modules/temp.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/temp.py#L33-L46
def file(suffix='', prefix='tmp', parent=None):
    '''
    Create a temporary file

    CLI Example:

    .. code-block:: bash

        salt '*' temp.file
        salt '*' temp.file prefix='mytemp-' parent='/var/run/'
    '''
    fh_, tmp_ = tempfile.mkstemp(suffix, prefix, parent)
    os.close(fh_)
    return tmp_
[ "def", "file", "(", "suffix", "=", "''", ",", "prefix", "=", "'tmp'", ",", "parent", "=", "None", ")", ":", "fh_", ",", "tmp_", "=", "tempfile", ".", "mkstemp", "(", "suffix", ",", "prefix", ",", "parent", ")", "os", ".", "close", "(", "fh_", ")", "return", "tmp_" ]
Create a temporary file CLI Example: .. code-block:: bash salt '*' temp.file salt '*' temp.file prefix='mytemp-' parent='/var/run/'
[ "Create", "a", "temporary", "file" ]
python
train
zhmcclient/python-zhmcclient
zhmcclient_mock/_hmc.py
https://github.com/zhmcclient/python-zhmcclient/blob/9657563e5d9184c51d3c903442a58b9725fdf335/zhmcclient_mock/_hmc.py#L2995-L3020
def get_metric_group_definitions(self):
    """
    Get the faked metric group definitions for this context object that
    are to be returned from its create operation.

    If a 'metric-groups' property had been specified for this context,
    only those faked metric group definitions of its manager object that
    are in that list, are included in the result. Otherwise, all metric
    group definitions of its manager are included in the result.

    Returns:
      iterable of :class:`~zhmcclient.FakedMetricGroupDefinition`: The
      faked metric group definitions, in the order they had been added.
    """
    group_names = self.properties.get('metric-groups', None)
    if not group_names:
        group_names = self.manager.get_metric_group_definition_names()
    mg_defs = []
    for group_name in group_names:
        try:
            mg_def = self.manager.get_metric_group_definition(group_name)
            mg_defs.append(mg_def)
        except ValueError:
            pass  # ignore metric groups without metric group defs
    return mg_defs
[ "def", "get_metric_group_definitions", "(", "self", ")", ":", "group_names", "=", "self", ".", "properties", ".", "get", "(", "'metric-groups'", ",", "None", ")", "if", "not", "group_names", ":", "group_names", "=", "self", ".", "manager", ".", "get_metric_group_definition_names", "(", ")", "mg_defs", "=", "[", "]", "for", "group_name", "in", "group_names", ":", "try", ":", "mg_def", "=", "self", ".", "manager", ".", "get_metric_group_definition", "(", "group_name", ")", "mg_defs", ".", "append", "(", "mg_def", ")", "except", "ValueError", ":", "pass", "# ignore metric groups without metric group defs", "return", "mg_defs" ]
Get the faked metric group definitions for this context object that are to be returned from its create operation. If a 'metric-groups' property had been specified for this context, only those faked metric group definitions of its manager object that are in that list, are included in the result. Otherwise, all metric group definitions of its manager are included in the result. Returns: iterable of :class:`~zhmcclient.FakedMetricGroupDefinition`: The faked metric group definitions, in the order they had been added.
[ "Get", "the", "faked", "metric", "group", "definitions", "for", "this", "context", "object", "that", "are", "to", "be", "returned", "from", "its", "create", "operation", "." ]
python
train
viveksck/changepoint
changepoint/mean_shift_model.py
https://github.com/viveksck/changepoint/blob/001792cb148c991ec704463d3213997ebb7171af/changepoint/mean_shift_model.py#L27-L31
def generate_null_timeseries(self, ts, mu, sigma):
    """Generate a time series with a given mu and sigma.
    This serves as the NULL distribution.
    """
    l = len(ts)
    return np.random.normal(mu, sigma, l)
[ "def", "generate_null_timeseries", "(", "self", ",", "ts", ",", "mu", ",", "sigma", ")", ":", "l", "=", "len", "(", "ts", ")", "return", "np", ".", "random", ".", "normal", "(", "mu", ",", "sigma", ",", "l", ")" ]
Generate a time series with a given mu and sigma. This serves as the NULL distribution.
[ "Generate", "a", "time", "series", "with", "a", "given", "mu", "and", "sigma", ".", "This", "serves", "as", "the", "NULL", "distribution", "." ]
python
train
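For orientation, the generate_null_timeseries record above reduces to drawing a Gaussian sample of the same length as the input series; a minimal standalone sketch with illustrative values:

import numpy as np

ts = [1.2, 0.8, 1.1, 0.9]                        # illustrative observed series
mu, sigma = 0.0, 1.0                             # illustrative null parameters
null_ts = np.random.normal(mu, sigma, len(ts))   # same length as ts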
graphql-python/graphql-core
scripts/casing.py
https://github.com/graphql-python/graphql-core/blob/d8e9d3abe7c209eb2f51cf001402783bfd480596/scripts/casing.py#L19-L29
def snake(s):
    """Convert from title or camelCase to snake_case."""
    if len(s) < 2:
        return s.lower()
    out = s[0].lower()
    for c in s[1:]:
        if c.isupper():
            out += "_"
            c = c.lower()
        out += c
    return out
[ "def", "snake", "(", "s", ")", ":", "if", "len", "(", "s", ")", "<", "2", ":", "return", "s", ".", "lower", "(", ")", "out", "=", "s", "[", "0", "]", ".", "lower", "(", ")", "for", "c", "in", "s", "[", "1", ":", "]", ":", "if", "c", ".", "isupper", "(", ")", ":", "out", "+=", "\"_\"", "c", "=", "c", ".", "lower", "(", ")", "out", "+=", "c", "return", "out" ]
Convert from title or camelCase to snake_case.
[ "Convert", "from", "title", "or", "camelCase", "to", "snake_case", "." ]
python
train
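The snake record above is self-contained enough to check by hand; re-quoting the definition and tracing the loop gives:

# snake() as quoted in the graphql-core record above
def snake(s):
    """Convert from title or camelCase to snake_case."""
    if len(s) < 2:
        return s.lower()
    out = s[0].lower()
    for c in s[1:]:
        if c.isupper():
            out += "_"
            c = c.lower()
        out += c
    return out

assert snake("camelCase") == "camel_case"
assert snake("TitleCase") == "title_case"
assert snake("A") == "a"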
sony/nnabla
python/src/nnabla/parametric_functions.py
https://github.com/sony/nnabla/blob/aaf3d33b7cbb38f2a03aa754178ba8f7c8481320/python/src/nnabla/parametric_functions.py#L26-L125
def parametric_function_api(scope_name=None, param_desc=None):
    """Decorator for parametric functions.

    The decorated function is always called under a parameter scope
    ``scope_name``.

    Also, the decorator adds an additional argument ``name``
    (:obj:`str`, default is ``None``) at the end. If ``name`` is
    specified, the scope ``scope_name`` comes under a scope ``name``.
    This feature could reduce vertical space usage of the source code.
    Any parametric function should be decorated by this.

    Args:
        scope_name (str, optional): The original function will be called
            under a parameter scope named by ``scope_name``.
        param_desc (list, optional): Descriptions of parameters will be
            automatically included into docstring. This must be a list of
            tuples with 4 elements composed of (name (str), description
            (str), shape info (str), need_grad (bool)).

    Returns:
        function: A decorated parametric function.
    """
    if scope_name is None:
        scope_name = name

    def parametric_function_api_inside(func):
        from nnabla.utils.py23_compatible import getargspec
        import inspect

        name = func.__name__
        doc = func.__doc__

        if param_desc:
            indent = 8
            try:
                desc = map(lambda d: ' ' * indent +
                           '* {} (``need_grad={}``) : {}. (shape: ``{}``)'.format(
                               d[0], d[3], d[1], d[2]), param_desc)
            except:
                ValueError(
                    'param_desc argument of parametric_function_api must be '
                    'None or a list of tuple with three elements composed of '
                    '(name(str), description(str), need_grad(bool)).')
            doc += '''
    Parameters to be registered
    The following variables are registered in a parameter scope ``"{}"``;

{}

    '''.format(scope_name, '\n'.join(desc))

        doc += """
    Note:

        If the ``name`` option is passed, the parameters become wrapped inside the parameter scope
        with the specified name, yielding the same results as the following code.
        This can be used to simplify the code.

        .. code-block:: python

            with parametric_scope(name):
                output = {name}(<args>)

    """.format(name=name)

        spec = getargspec(func)
        defaults = spec.defaults
        if defaults is None:
            defaults = tuple()  # None will be appended later
        signature = inspect.formatargspec(
            spec.args + ['name'],
            spec.varargs, spec.keywords,
            defaults + (None,))
        shortsignature = inspect.formatargspec(
            spec.args, spec.varargs, spec.keywords, None)

        # Check required argument
        assert 'fix_parameters' in spec.args, \
            "A parametric function must take `fix_parameters` as an argument." \
            " `{}{}` doesn't have it.".format(name, signature)

        code = """
def {name}{signature}:
    if name is None:
        with parameter_scope(scope_name):
            return func{shortsignature}
    with parameter_scope(name):
        with parameter_scope(scope_name):
            return func{shortsignature}
    """.format(**locals())
        execdict = dict(
            func=func, parameter_scope=nn.parameter_scope,
            scope_name=scope_name)
        exec_(code, execdict)
        newfunc = execdict[name]
        newfunc.__doc__ = doc
        newfunc.__parametric_function_api_base__ = func
        newfunc.__scope_name__ = scope_name
        newfunc.__module__ = __name__
        return newfunc

    return parametric_function_api_inside
[ "def", "parametric_function_api", "(", "scope_name", "=", "None", ",", "param_desc", "=", "None", ")", ":", "if", "scope_name", "is", "None", ":", "scope_name", "=", "name", "def", "parametric_function_api_inside", "(", "func", ")", ":", "from", "nnabla", ".", "utils", ".", "py23_compatible", "import", "getargspec", "import", "inspect", "name", "=", "func", ".", "__name__", "doc", "=", "func", ".", "__doc__", "if", "param_desc", ":", "indent", "=", "8", "try", ":", "desc", "=", "map", "(", "lambda", "d", ":", "' '", "*", "indent", "+", "'* {} (``need_grad={}``) : {}. (shape: ``{}``)'", ".", "format", "(", "d", "[", "0", "]", ",", "d", "[", "3", "]", ",", "d", "[", "1", "]", ",", "d", "[", "2", "]", ")", ",", "param_desc", ")", "except", ":", "ValueError", "(", "'param_desc argument of parametric_function_api must be '", "'None or a list of tuple with three elements composed of '", "'(name(str), description(str), need_grad(bool)).'", ")", "doc", "+=", "'''\n Parameters to be registered\n The following variables are registered in a parameter scope ``\"{}\"``;\n\n{}\n\n '''", ".", "format", "(", "scope_name", ",", "'\\n'", ".", "join", "(", "desc", ")", ")", "doc", "+=", "\"\"\"\n Note:\n\n If the ``name`` option is passed, the parameters become wrapped inside the parameter scope\n with the specified name, yielding the same results as the following code.\n This can be used to simplify the code.\n\n .. code-block:: python\n\n with parametric_scope(name):\n output = {name}(<args>)\n\n \"\"\"", ".", "format", "(", "name", "=", "name", ")", "spec", "=", "getargspec", "(", "func", ")", "defaults", "=", "spec", ".", "defaults", "if", "defaults", "is", "None", ":", "defaults", "=", "tuple", "(", ")", "# None will be appended later", "signature", "=", "inspect", ".", "formatargspec", "(", "spec", ".", "args", "+", "[", "'name'", "]", ",", "spec", ".", "varargs", ",", "spec", ".", "keywords", ",", "defaults", "+", "(", "None", ",", ")", ")", "shortsignature", "=", "inspect", ".", "formatargspec", "(", "spec", ".", "args", ",", "spec", ".", "varargs", ",", "spec", ".", "keywords", ",", "None", ")", "# Check required argument", "assert", "'fix_parameters'", "in", "spec", ".", "args", ",", "\"A parametric function must take `fix_parameters` as an argument.\"", "\" `{}{}` doesn't have it.\"", ".", "format", "(", "name", ",", "signature", ")", "code", "=", "\"\"\"\ndef {name}{signature}:\n if name is None:\n with parameter_scope(scope_name):\n return func{shortsignature}\n with parameter_scope(name):\n with parameter_scope(scope_name):\n return func{shortsignature}\n \"\"\"", ".", "format", "(", "*", "*", "locals", "(", ")", ")", "execdict", "=", "dict", "(", "func", "=", "func", ",", "parameter_scope", "=", "nn", ".", "parameter_scope", ",", "scope_name", "=", "scope_name", ")", "exec_", "(", "code", ",", "execdict", ")", "newfunc", "=", "execdict", "[", "name", "]", "newfunc", ".", "__doc__", "=", "doc", "newfunc", ".", "__parametric_function_api_base__", "=", "func", "newfunc", ".", "__scope_name__", "=", "scope_name", "newfunc", ".", "__module__", "=", "__name__", "return", "newfunc", "return", "parametric_function_api_inside" ]
Decorator for parametric functions. The decorated function is always called under a parameter scope ``scope_name``. Also, the decorator adds an additional argument ``name`` (:obj:`str`, default is ``None``) at the end. If ``name`` is specified, the scope ``scope_name`` comes under a scope ``name``. This feature could reduce vertical space usage of the source code. Any parametric function should be decorated by this. Args: scope_name (str, optional): The original function will be called under a parameter scope named by ``scope_name``. param_desc (list, optional): Descriptions of parameters will be automatically included into docstring. This must be a list of tuples with 4 elements composed of (name (str), description (str), shape info (str), need_grad (bool)). Returns: function: A decorated parametric function.
[ "Decorator", "for", "parametric", "functions", "." ]
python
train
ronniedada/tabula
tabula/painter.py
https://github.com/ronniedada/tabula/blob/ba18bb2f7db75972256b950711415031dc5421c7/tabula/painter.py#L17-L23
def enter_fullscreen(self):
    """
    Invoke before printing out anything.
    This method should be replaced by or merged to blessings package
    """
    self.term.stream.write(self.term.enter_fullscreen)
    self.term.stream.write(self.term.hide_cursor)
[ "def", "enter_fullscreen", "(", "self", ")", ":", "self", ".", "term", ".", "stream", ".", "write", "(", "self", ".", "term", ".", "enter_fullscreen", ")", "self", ".", "term", ".", "stream", ".", "write", "(", "self", ".", "term", ".", "hide_cursor", ")" ]
Invoke before printing out anything. This method should be replaced by or merged to blessings package
[ "Invoke", "before", "printing", "out", "anything", ".", "This", "method", "should", "be", "replaced", "by", "or", "merged", "to", "blessings", "package" ]
python
train
JdeRobot/base
src/drivers/MAVLinkServer/MAVProxy/modules/mavproxy_nsh.py
https://github.com/JdeRobot/base/blob/303b18992785b2fe802212f2d758a60873007f1f/src/drivers/MAVLinkServer/MAVProxy/modules/mavproxy_nsh.py#L26-L36
def mavlink_packet(self, m):
    '''handle an incoming mavlink packet'''
    if m.get_type() == 'SERIAL_CONTROL':
        data = m.data[:m.count]
        if m.count > 0:
            s = ''.join(str(chr(x)) for x in data)
            if self.mpstate.system == 'Windows':
                # strip nsh ansi codes
                s = s.replace("\033[K", "")
            sys.stdout.write(s)
        self.last_packet = time.time()
[ "def", "mavlink_packet", "(", "self", ",", "m", ")", ":", "if", "m", ".", "get_type", "(", ")", "==", "'SERIAL_CONTROL'", ":", "data", "=", "m", ".", "data", "[", ":", "m", ".", "count", "]", "if", "m", ".", "count", ">", "0", ":", "s", "=", "''", ".", "join", "(", "str", "(", "chr", "(", "x", ")", ")", "for", "x", "in", "data", ")", "if", "self", ".", "mpstate", ".", "system", "==", "'Windows'", ":", "# strip nsh ansi codes", "s", "=", "s", ".", "replace", "(", "\"\\033[K\"", ",", "\"\"", ")", "sys", ".", "stdout", ".", "write", "(", "s", ")", "self", ".", "last_packet", "=", "time", ".", "time", "(", ")" ]
handle an incoming mavlink packet
[ "handle", "an", "incoming", "mavlink", "packet" ]
python
train
jsvine/markovify
markovify/chain.py
https://github.com/jsvine/markovify/blob/6968649a4c5d80f8a1b2279734417348013789e5/markovify/chain.py#L85-L97
def move(self, state):
    """
    Given a state, choose the next item at random.
    """
    if state == tuple([ BEGIN ] * self.state_size):
        choices = self.begin_choices
        cumdist = self.begin_cumdist
    else:
        choices, weights = zip(*self.model[state].items())
        cumdist = list(accumulate(weights))
    r = random.random() * cumdist[-1]
    selection = choices[bisect.bisect(cumdist, r)]
    return selection
[ "def", "move", "(", "self", ",", "state", ")", ":", "if", "state", "==", "tuple", "(", "[", "BEGIN", "]", "*", "self", ".", "state_size", ")", ":", "choices", "=", "self", ".", "begin_choices", "cumdist", "=", "self", ".", "begin_cumdist", "else", ":", "choices", ",", "weights", "=", "zip", "(", "*", "self", ".", "model", "[", "state", "]", ".", "items", "(", ")", ")", "cumdist", "=", "list", "(", "accumulate", "(", "weights", ")", ")", "r", "=", "random", ".", "random", "(", ")", "*", "cumdist", "[", "-", "1", "]", "selection", "=", "choices", "[", "bisect", ".", "bisect", "(", "cumdist", ",", "r", ")", "]", "return", "selection" ]
Given a state, choose the next item at random.
[ "Given", "a", "state", "choose", "the", "next", "item", "at", "random", "." ]
python
train
F5Networks/f5-common-python
f5/bigip/tm/vcmp/virtual_disk.py
https://github.com/F5Networks/f5-common-python/blob/7e67d5acd757a60e3d5f8c88c534bd72208f5494/f5/bigip/tm/vcmp/virtual_disk.py#L53-L66
def load(self, **kwargs):
    """Loads a given resource

    Loads a given resource provided a 'name' and an optional 'slot'
    parameter. The 'slot' parameter is not a required load parameter
    because it is provided as an optional way of constructing the
    correct 'name' of the vCMP resource.

    :param kwargs:
    :return:
    """
    kwargs['transform_name'] = True
    kwargs = self._mutate_name(kwargs)
    return self._load(**kwargs)
[ "def", "load", "(", "self", ",", "*", "*", "kwargs", ")", ":", "kwargs", "[", "'transform_name'", "]", "=", "True", "kwargs", "=", "self", ".", "_mutate_name", "(", "kwargs", ")", "return", "self", ".", "_load", "(", "*", "*", "kwargs", ")" ]
Loads a given resource Loads a given resource provided a 'name' and an optional 'slot' parameter. The 'slot' parameter is not a required load parameter because it is provided as an optional way of constructing the correct 'name' of the vCMP resource. :param kwargs: :return:
[ "Loads", "a", "given", "resource" ]
python
train
Nic30/ipCorePackager
ipCorePackager/intfIpMeta.py
https://github.com/Nic30/ipCorePackager/blob/0af4e56ebfdc3749fffa40d50d9ccbf8b5445881/ipCorePackager/intfIpMeta.py#L30-L53
def _asQuartusTcl(self, buff: List[str], version: str, intfName: str,
                  component: "Component", packager: "IpPackager",
                  thisIf: 'Interface',
                  intfMapOrName: Dict[str, Union[Dict, str]]):
    """
    Add interface to Quartus tcl by specified name map

    :param buff: line buffer for output
    :param version: Quartus version
    :param intfName: name of top interface
    :param component: component object from ipcore generator
    :param packager: instance of IpPackager which is packaging current design
    :param thisIf: interface to add into Quartus TCL
    :param intfMapOrName: Quartus name string for this interface
        or dictionary to map subinterfaces
    """
    if isinstance(intfMapOrName, str):
        self.quartus_add_interface_port(
            buff, intfName, thisIf, intfMapOrName, packager)
    else:
        for thisIf_ in thisIf._interfaces:
            v = intfMapOrName[thisIf_._name]
            self._asQuartusTcl(buff, version, intfName, component,
                               packager, thisIf_, v)
[ "def", "_asQuartusTcl", "(", "self", ",", "buff", ":", "List", "[", "str", "]", ",", "version", ":", "str", ",", "intfName", ":", "str", ",", "component", ":", "\"Component\"", ",", "packager", ":", "\"IpPackager\"", ",", "thisIf", ":", "'Interface'", ",", "intfMapOrName", ":", "Dict", "[", "str", ",", "Union", "[", "Dict", ",", "str", "]", "]", ")", ":", "if", "isinstance", "(", "intfMapOrName", ",", "str", ")", ":", "self", ".", "quartus_add_interface_port", "(", "buff", ",", "intfName", ",", "thisIf", ",", "intfMapOrName", ",", "packager", ")", "else", ":", "for", "thisIf_", "in", "thisIf", ".", "_interfaces", ":", "v", "=", "intfMapOrName", "[", "thisIf_", ".", "_name", "]", "self", ".", "_asQuartusTcl", "(", "buff", ",", "version", ",", "intfName", ",", "component", ",", "packager", ",", "thisIf_", ",", "v", ")" ]
Add interface to Quartus tcl by specified name map :param buff: line buffer for output :param version: Quartus version :param intfName: name of top interface :param component: component object from ipcore generator :param packager: instance of IpPackager which is packaging current design :param thisIf: interface to add into Quartus TCL :param intfMapOrName: Quartus name string for this interface or dictionary to map subinterfaces
[ "Add", "interface", "to", "Quartus", "tcl", "by", "specified", "name", "map" ]
python
train
ardydedase/pycouchbase
pycouchbase/fields.py
https://github.com/ardydedase/pycouchbase/blob/6f010b4d2ef41aead2366878d0cf0b1284c0db0e/pycouchbase/fields.py#L189-L199
def check_password(self, raw_password):
    """Validates the given raw password against the instance's encrypted one.

    :param raw_password: Raw password to be checked against.
    :type raw_password: unicode
    :returns: True if comparison was successful, False otherwise.
    :rtype: bool
    :raises: :exc:`ImportError` if `py-bcrypt` was not found.
    """
    bcrypt = self.get_bcrypt()
    return bcrypt.hashpw(raw_password, self.value) == self.value
[ "def", "check_password", "(", "self", ",", "raw_password", ")", ":", "bcrypt", "=", "self", ".", "get_bcrypt", "(", ")", "return", "bcrypt", ".", "hashpw", "(", "raw_password", ",", "self", ".", "value", ")", "==", "self", ".", "value" ]
Validates the given raw password against the instance's encrypted one. :param raw_password: Raw password to be checked against. :type raw_password: unicode :returns: True if comparison was successful, False otherwise. :rtype: bool :raises: :exc:`ImportError` if `py-bcrypt` was not found.
[ "Validates", "the", "given", "raw", "password", "against", "the", "intance", "s", "encrypted", "one", "." ]
python
train
jaumebonet/libconfig
libconfig/config.py
https://github.com/jaumebonet/libconfig/blob/9b34cefcbaf9a326e3f3cd517896c2933cf61a3b/libconfig/config.py#L123-L144
def get_option_default(self, key, subkey):
    """Get the default value of the option.

    :param str key: First identifier of the option.
    :param str subkey: Second identifier of the option.

    :return: Default value of the option (type varies).

    :raise:
        :NotRegisteredError: If ``key`` or ``subkey`` do not define
            any option.
    """
    key, subkey = _lower_keys(key, subkey)
    _entry_must_exist(self.gc, key, subkey)

    df = self.gc[(self.gc["k1"] == key) & (self.gc["k2"] == subkey)]
    if df["type"].values[0] == "bool":
        return bool(df["default"].values[0])
    elif df["type"].values[0] == "int":
        return int(df["default"].values[0])
    else:
        return df["default"].values[0]
[ "def", "get_option_default", "(", "self", ",", "key", ",", "subkey", ")", ":", "key", ",", "subkey", "=", "_lower_keys", "(", "key", ",", "subkey", ")", "_entry_must_exist", "(", "self", ".", "gc", ",", "key", ",", "subkey", ")", "df", "=", "self", ".", "gc", "[", "(", "self", ".", "gc", "[", "\"k1\"", "]", "==", "key", ")", "&", "(", "self", ".", "gc", "[", "\"k2\"", "]", "==", "subkey", ")", "]", "if", "df", "[", "\"type\"", "]", ".", "values", "[", "0", "]", "==", "\"bool\"", ":", "return", "bool", "(", "df", "[", "\"default\"", "]", ".", "values", "[", "0", "]", ")", "elif", "df", "[", "\"type\"", "]", ".", "values", "[", "0", "]", "==", "\"int\"", ":", "return", "int", "(", "df", "[", "\"default\"", "]", ".", "values", "[", "0", "]", ")", "else", ":", "return", "df", "[", "\"default\"", "]", ".", "values", "[", "0", "]" ]
Get the default value of the option. :param str key: First identifier of the option. :param str subkey: Second identifier of the option. :return: Default value of the option (type varies). :raise: :NotRegisteredError: If ``key`` or ``subkey`` do not define any option.
[ "Get", "the", "default", "value", "of", "the", "option", "." ]
python
train
LogicalDash/LiSE
ELiDE/ELiDE/charmenu.py
https://github.com/LogicalDash/LiSE/blob/fe6fd4f0a7c1780e065f4c9babb9bc443af6bb84/ELiDE/ELiDE/charmenu.py#L120-L138
def toggle_pawn_cfg(self):
    """Show or hide the pop-over where you can configure the dummy pawn"""
    if self.app.manager.current == 'pawncfg':
        dummything = self.app.dummything
        self.ids.thingtab.remove_widget(dummything)
        dummything.clear()
        if self.app.pawncfg.prefix:
            dummything.prefix = self.app.pawncfg.prefix
            dummything.num = dummynum(
                self.app.character, dummything.prefix
            ) + 1
        if self.app.pawncfg.imgpaths:
            dummything.paths = self.app.pawncfg.imgpaths
        else:
            dummything.paths = ['atlas://rltiles/base/unseen']
        self.ids.thingtab.add_widget(dummything)
    else:
        self.app.pawncfg.prefix = self.ids.dummything.prefix
    self.app.pawncfg.toggle()
[ "def", "toggle_pawn_cfg", "(", "self", ")", ":", "if", "self", ".", "app", ".", "manager", ".", "current", "==", "'pawncfg'", ":", "dummything", "=", "self", ".", "app", ".", "dummything", "self", ".", "ids", ".", "thingtab", ".", "remove_widget", "(", "dummything", ")", "dummything", ".", "clear", "(", ")", "if", "self", ".", "app", ".", "pawncfg", ".", "prefix", ":", "dummything", ".", "prefix", "=", "self", ".", "app", ".", "pawncfg", ".", "prefix", "dummything", ".", "num", "=", "dummynum", "(", "self", ".", "app", ".", "character", ",", "dummything", ".", "prefix", ")", "+", "1", "if", "self", ".", "app", ".", "pawncfg", ".", "imgpaths", ":", "dummything", ".", "paths", "=", "self", ".", "app", ".", "pawncfg", ".", "imgpaths", "else", ":", "dummything", ".", "paths", "=", "[", "'atlas://rltiles/base/unseen'", "]", "self", ".", "ids", ".", "thingtab", ".", "add_widget", "(", "dummything", ")", "else", ":", "self", ".", "app", ".", "pawncfg", ".", "prefix", "=", "self", ".", "ids", ".", "dummything", ".", "prefix", "self", ".", "app", ".", "pawncfg", ".", "toggle", "(", ")" ]
Show or hide the pop-over where you can configure the dummy pawn
[ "Show", "or", "hide", "the", "pop", "-", "over", "where", "you", "can", "configure", "the", "dummy", "pawn" ]
python
train
coleifer/walrus
walrus/graph.py
https://github.com/coleifer/walrus/blob/82bf15a6613487b5b5fefeb488f186d7e0106547/walrus/graph.py#L90-L98
def store_many(self, items):
    """
    Store multiple subject-predicate-object triples in the database.

    :param items: A list of (subj, pred, obj) 3-tuples.
    """
    with self.walrus.atomic():
        for item in items:
            self.store(*item)
[ "def", "store_many", "(", "self", ",", "items", ")", ":", "with", "self", ".", "walrus", ".", "atomic", "(", ")", ":", "for", "item", "in", "items", ":", "self", ".", "store", "(", "*", "item", ")" ]
Store multiple subject-predicate-object triples in the database. :param items: A list of (subj, pred, obj) 3-tuples.
[ "Store", "multiple", "subject", "-", "predicate", "-", "object", "triples", "in", "the", "database", "." ]
python
train
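A hedged usage sketch for store_many() above: it assumes a Redis server on localhost and the Graph factory (Database.graph()) from the walrus documentation; the names and triples are illustrative:

from walrus import Database

db = Database()              # assumption: Redis reachable at localhost:6379
graph = db.graph('friends')  # assumption: Database.graph() yields the Graph shown above
graph.store_many([
    ('alice', 'knows', 'bob'),
    ('bob', 'knows', 'carol'),
])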
gem/oq-engine
openquake/hazardlib/gsim/campbell_bozorgnia_2014.py
https://github.com/gem/oq-engine/blob/8294553a0b8aba33fd96437a35065d03547d0040/openquake/hazardlib/gsim/campbell_bozorgnia_2014.py#L227-L232
def _get_f2rx(self, C, r_x, r_1, r_2):
    """
    Defines the f2 scaling coefficient defined in equation 10
    """
    drx = (r_x - r_1) / (r_2 - r_1)
    return self.CONSTS["h4"] + (C["h5"] * drx) + (C["h6"] * (drx ** 2.))
[ "def", "_get_f2rx", "(", "self", ",", "C", ",", "r_x", ",", "r_1", ",", "r_2", ")", ":", "drx", "=", "(", "r_x", "-", "r_1", ")", "/", "(", "r_2", "-", "r_1", ")", "return", "self", ".", "CONSTS", "[", "\"h4\"", "]", "+", "(", "C", "[", "\"h5\"", "]", "*", "drx", ")", "+", "(", "C", "[", "\"h6\"", "]", "*", "(", "drx", "**", "2.", ")", ")" ]
Defines the f2 scaling coefficient defined in equation 10
[ "Defines", "the", "f2", "scaling", "coefficient", "defined", "in", "equation", "10" ]
python
train
myint/autoflake
autoflake.py
https://github.com/myint/autoflake/blob/68fea68646922b920d55975f9f2adaeafd84df4f/autoflake.py#L274-L304
def filter_from_import(line, unused_module):
    """Parse and filter ``from something import a, b, c``.

    Return line without unused import modules, or `pass` if all of the
    module in import is unused.
    """
    (indentation, imports) = re.split(pattern=r'\bimport\b',
                                      string=line, maxsplit=1)
    base_module = re.search(pattern=r'\bfrom\s+([^ ]+)',
                            string=indentation).group(1)

    # Create an imported module list with base module name
    # ex ``from a import b, c as d`` -> ``['a.b', 'a.c as d']``
    imports = re.split(pattern=r',', string=imports.strip())
    imports = [base_module + '.' + x.strip() for x in imports]

    # We compare full module name (``a.module`` not `module`) to
    # guarantee the exact same module as detected from pyflakes.
    filtered_imports = [x.replace(base_module + '.', '')
                        for x in imports if x not in unused_module]

    # All of the import in this statement is unused
    if not filtered_imports:
        return get_indentation(line) + 'pass' + get_line_ending(line)

    indentation += 'import '

    return (
        indentation +
        ', '.join(sorted(filtered_imports)) +
        get_line_ending(line))
[ "def", "filter_from_import", "(", "line", ",", "unused_module", ")", ":", "(", "indentation", ",", "imports", ")", "=", "re", ".", "split", "(", "pattern", "=", "r'\\bimport\\b'", ",", "string", "=", "line", ",", "maxsplit", "=", "1", ")", "base_module", "=", "re", ".", "search", "(", "pattern", "=", "r'\\bfrom\\s+([^ ]+)'", ",", "string", "=", "indentation", ")", ".", "group", "(", "1", ")", "# Create an imported module list with base module name", "# ex ``from a import b, c as d`` -> ``['a.b', 'a.c as d']``", "imports", "=", "re", ".", "split", "(", "pattern", "=", "r','", ",", "string", "=", "imports", ".", "strip", "(", ")", ")", "imports", "=", "[", "base_module", "+", "'.'", "+", "x", ".", "strip", "(", ")", "for", "x", "in", "imports", "]", "# We compare full module name (``a.module`` not `module`) to", "# guarantee the exact same module as detected from pyflakes.", "filtered_imports", "=", "[", "x", ".", "replace", "(", "base_module", "+", "'.'", ",", "''", ")", "for", "x", "in", "imports", "if", "x", "not", "in", "unused_module", "]", "# All of the import in this statement is unused", "if", "not", "filtered_imports", ":", "return", "get_indentation", "(", "line", ")", "+", "'pass'", "+", "get_line_ending", "(", "line", ")", "indentation", "+=", "'import '", "return", "(", "indentation", "+", "', '", ".", "join", "(", "sorted", "(", "filtered_imports", ")", ")", "+", "get_line_ending", "(", "line", ")", ")" ]
Parse and filter ``from something import a, b, c``. Return line without unused import modules, or `pass` if all of the module in import is unused.
[ "Parse", "and", "filter", "from", "something", "import", "a", "b", "c", "." ]
python
test
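To see filter_from_import() from the record above in action: this assumes autoflake is installed and importable as a module, and that unused names arrive pyflakes-style, fully qualified with the base module:

from autoflake import filter_from_import

line = "from os import path, sep\n"
print(filter_from_import(line, ["os.sep"]))
# -> "from os import path\n" (the unused name is dropped)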
dhermes/bezier
src/bezier/surface.py
https://github.com/dhermes/bezier/blob/4f941f82637a8e70a5b159a9203132192e23406b/src/bezier/surface.py#L303-L319
def _get_edges(self):
    """Get the edges for the current surface.

    If they haven't been computed yet, first compute and store them.

    This is provided as a means for internal calls to get the edges
    without copying (since :attr:`.edges` copies before giving to
    a user to keep the stored data immutable).

    Returns:
        Tuple[~bezier.curve.Curve, ~bezier.curve.Curve, \
        ~bezier.curve.Curve]: The edges of the surface.
    """
    if self._edges is None:
        self._edges = self._compute_edges()
    return self._edges
[ "def", "_get_edges", "(", "self", ")", ":", "if", "self", ".", "_edges", "is", "None", ":", "self", ".", "_edges", "=", "self", ".", "_compute_edges", "(", ")", "return", "self", ".", "_edges" ]
Get the edges for the current surface. If they haven't been computed yet, first compute and store them. This is provided as a means for internal calls to get the edges without copying (since :attr:`.edges` copies before giving to a user to keep the stored data immutable). Returns: Tuple[~bezier.curve.Curve, ~bezier.curve.Curve, \ ~bezier.curve.Curve]: The edges of the surface.
[ "Get", "the", "edges", "for", "the", "current", "surface", "." ]
python
train
openstack/horizon
horizon/base.py
https://github.com/openstack/horizon/blob/5601ea9477323e599d9b766fcac1f8be742935b2/horizon/base.py#L185-L200
def _register(self, cls):
    """Registers the given class.

    If the specified class is already registered then it is ignored.
    """
    if not inspect.isclass(cls):
        raise ValueError('Only classes may be registered.')
    elif not issubclass(cls, self._registerable_class):
        raise ValueError('Only %s classes or subclasses may be registered.'
                         % self._registerable_class.__name__)

    if cls not in self._registry:
        cls._registered_with = self
        self._registry[cls] = cls()

    return self._registry[cls]
[ "def", "_register", "(", "self", ",", "cls", ")", ":", "if", "not", "inspect", ".", "isclass", "(", "cls", ")", ":", "raise", "ValueError", "(", "'Only classes may be registered.'", ")", "elif", "not", "issubclass", "(", "cls", ",", "self", ".", "_registerable_class", ")", ":", "raise", "ValueError", "(", "'Only %s classes or subclasses may be registered.'", "%", "self", ".", "_registerable_class", ".", "__name__", ")", "if", "cls", "not", "in", "self", ".", "_registry", ":", "cls", ".", "_registered_with", "=", "self", "self", ".", "_registry", "[", "cls", "]", "=", "cls", "(", ")", "return", "self", ".", "_registry", "[", "cls", "]" ]
Registers the given class. If the specified class is already registered then it is ignored.
[ "Registers", "the", "given", "class", "." ]
python
train
Yubico/python-yubico
yubico/yubikey_usb_hid.py
https://github.com/Yubico/python-yubico/blob/a72e8eddb90da6ee96e29f60912ca1f2872c9aea/yubico/yubikey_usb_hid.py#L578-L587
def valid_configs(self):
    """
    Return a list of slots having a valid configuration.

    Requires firmware 2.1.
    """
    if self.ykver() < (2, 1, 0):
        raise YubiKeyUSBHIDError('Valid configs unsupported in firmware %s'
                                 % (self.version()))
    res = []
    if self.touch_level & self.CONFIG1_VALID == self.CONFIG1_VALID:
        res.append(1)
    if self.touch_level & self.CONFIG2_VALID == self.CONFIG2_VALID:
        res.append(2)
    return res
[ "def", "valid_configs", "(", "self", ")", ":", "if", "self", ".", "ykver", "(", ")", "<", "(", "2", ",", "1", ",", "0", ")", ":", "raise", "YubiKeyUSBHIDError", "(", "'Valid configs unsupported in firmware %s'", "%", "(", "self", ".", "version", "(", ")", ")", ")", "res", "=", "[", "]", "if", "self", ".", "touch_level", "&", "self", ".", "CONFIG1_VALID", "==", "self", ".", "CONFIG1_VALID", ":", "res", ".", "append", "(", "1", ")", "if", "self", ".", "touch_level", "&", "self", ".", "CONFIG2_VALID", "==", "self", ".", "CONFIG2_VALID", ":", "res", ".", "append", "(", "2", ")", "return", "res" ]
Return a list of slots having a valid configuration. Requires firmware 2.1.
[ "Return", "a", "list", "of", "slots", "having", "a", "valid", "configurtion", ".", "Requires", "firmware", "2", ".", "1", "." ]
python
train
redhat-cip/dci-control-server
dci/api/v1/jobs.py
https://github.com/redhat-cip/dci-control-server/blob/b416cf935ec93e4fdd5741f61a21cabecf8454d2/dci/api/v1/jobs.py#L521-L551
def get_all_results_from_jobs(user, j_id):
    """Get all results from job.
    """
    job = v1_utils.verify_existence_and_get(j_id, _TABLE)

    if not user.is_in_team(job['team_id']) and not user.is_read_only_user():
        raise dci_exc.Unauthorized()

    # get testscases from tests_results
    query = sql.select([models.TESTS_RESULTS]). \
        where(models.TESTS_RESULTS.c.job_id == job['id'])
    all_tests_results = flask.g.db_conn.execute(query).fetchall()

    results = []
    for test_result in all_tests_results:
        test_result = dict(test_result)
        results.append({'filename': test_result['name'],
                        'name': test_result['name'],
                        'total': test_result['total'],
                        'failures': test_result['failures'],
                        'errors': test_result['errors'],
                        'skips': test_result['skips'],
                        'time': test_result['time'],
                        'regressions': test_result['regressions'],
                        'successfixes': test_result['successfixes'],
                        'success': test_result['success'],
                        'file_id': test_result['file_id']})

    return flask.jsonify({'results': results,
                          '_meta': {'count': len(results)}})
[ "def", "get_all_results_from_jobs", "(", "user", ",", "j_id", ")", ":", "job", "=", "v1_utils", ".", "verify_existence_and_get", "(", "j_id", ",", "_TABLE", ")", "if", "not", "user", ".", "is_in_team", "(", "job", "[", "'team_id'", "]", ")", "and", "not", "user", ".", "is_read_only_user", "(", ")", ":", "raise", "dci_exc", ".", "Unauthorized", "(", ")", "# get testscases from tests_results", "query", "=", "sql", ".", "select", "(", "[", "models", ".", "TESTS_RESULTS", "]", ")", ".", "where", "(", "models", ".", "TESTS_RESULTS", ".", "c", ".", "job_id", "==", "job", "[", "'id'", "]", ")", "all_tests_results", "=", "flask", ".", "g", ".", "db_conn", ".", "execute", "(", "query", ")", ".", "fetchall", "(", ")", "results", "=", "[", "]", "for", "test_result", "in", "all_tests_results", ":", "test_result", "=", "dict", "(", "test_result", ")", "results", ".", "append", "(", "{", "'filename'", ":", "test_result", "[", "'name'", "]", ",", "'name'", ":", "test_result", "[", "'name'", "]", ",", "'total'", ":", "test_result", "[", "'total'", "]", ",", "'failures'", ":", "test_result", "[", "'failures'", "]", ",", "'errors'", ":", "test_result", "[", "'errors'", "]", ",", "'skips'", ":", "test_result", "[", "'skips'", "]", ",", "'time'", ":", "test_result", "[", "'time'", "]", ",", "'regressions'", ":", "test_result", "[", "'regressions'", "]", ",", "'successfixes'", ":", "test_result", "[", "'successfixes'", "]", ",", "'success'", ":", "test_result", "[", "'success'", "]", ",", "'file_id'", ":", "test_result", "[", "'file_id'", "]", "}", ")", "return", "flask", ".", "jsonify", "(", "{", "'results'", ":", "results", ",", "'_meta'", ":", "{", "'count'", ":", "len", "(", "results", ")", "}", "}", ")" ]
Get all results from job.
[ "Get", "all", "results", "from", "job", "." ]
python
train
pywbem/pywbem
pywbem_mock/_wbemconnection_mock.py
https://github.com/pywbem/pywbem/blob/e54ecb82c2211e289a268567443d60fdd489f1e4/pywbem_mock/_wbemconnection_mock.py#L2882-L2918
def _fake_references(self, namespace, **params):
    """
    Implements a mock WBEM server responder for
    :meth:`~pywbem.WBEMConnection.References`
    """
    rc = None if params['ResultClass'] is None else \
        params['ResultClass'].classname
    role = params['Role']
    obj_name = params['ObjectName']
    classname = obj_name.classname
    pl = params['PropertyList']
    ico = params['IncludeClassOrigin']
    iq = params['IncludeQualifiers']

    if isinstance(obj_name, CIMClassName):
        rtn_classnames = self._get_reference_classnames(
            classname, namespace, rc, role)
        # returns list of tuples of (CIMClassname, CIMClass)
        return self._return_assoc_class_tuples(rtn_classnames, namespace,
                                               iq, ico, pl)

    assert isinstance(obj_name, CIMInstanceName)
    ref_paths = self._get_reference_instnames(obj_name, namespace, rc,
                                              role)
    rtn_insts = []
    for path in ref_paths:
        rtn_insts.append(self._get_instance(
            path, namespace, None,
            params['PropertyList'],
            params['IncludeClassOrigin'],
            params['IncludeQualifiers']))

    for inst in rtn_insts:
        if inst.path.host is None:
            inst.path.host = self.host

    return self._return_assoc_tuple(rtn_insts)
[ "def", "_fake_references", "(", "self", ",", "namespace", ",", "*", "*", "params", ")", ":", "rc", "=", "None", "if", "params", "[", "'ResultClass'", "]", "is", "None", "else", "params", "[", "'ResultClass'", "]", ".", "classname", "role", "=", "params", "[", "'Role'", "]", "obj_name", "=", "params", "[", "'ObjectName'", "]", "classname", "=", "obj_name", ".", "classname", "pl", "=", "params", "[", "'PropertyList'", "]", "ico", "=", "params", "[", "'IncludeClassOrigin'", "]", "iq", "=", "params", "[", "'IncludeQualifiers'", "]", "if", "isinstance", "(", "obj_name", ",", "CIMClassName", ")", ":", "rtn_classnames", "=", "self", ".", "_get_reference_classnames", "(", "classname", ",", "namespace", ",", "rc", ",", "role", ")", "# returns list of tuples of (CIMClassname, CIMClass)", "return", "self", ".", "_return_assoc_class_tuples", "(", "rtn_classnames", ",", "namespace", ",", "iq", ",", "ico", ",", "pl", ")", "assert", "isinstance", "(", "obj_name", ",", "CIMInstanceName", ")", "ref_paths", "=", "self", ".", "_get_reference_instnames", "(", "obj_name", ",", "namespace", ",", "rc", ",", "role", ")", "rtn_insts", "=", "[", "]", "for", "path", "in", "ref_paths", ":", "rtn_insts", ".", "append", "(", "self", ".", "_get_instance", "(", "path", ",", "namespace", ",", "None", ",", "params", "[", "'PropertyList'", "]", ",", "params", "[", "'IncludeClassOrigin'", "]", ",", "params", "[", "'IncludeQualifiers'", "]", ")", ")", "for", "inst", "in", "rtn_insts", ":", "if", "inst", ".", "path", ".", "host", "is", "None", ":", "inst", ".", "path", ".", "host", "=", "self", ".", "host", "return", "self", ".", "_return_assoc_tuple", "(", "rtn_insts", ")" ]
Implements a mock WBEM server responder for :meth:`~pywbem.WBEMConnection.References`
[ "Implements", "a", "mock", "WBEM", "server", "responder", "for", ":", "meth", ":", "~pywbem", ".", "WBEMConnection", ".", "References" ]
python
train
web-push-libs/pywebpush
pywebpush/__init__.py
https://github.com/web-push-libs/pywebpush/blob/2a23f45b7819e31bd030de9fe1357a1cf7dcfdc4/pywebpush/__init__.py#L226-L254
def as_curl(self, endpoint, encoded_data, headers):
    """Return the send as a curl command.

    Useful for debugging. This will write out the encoded data to a local
    file named `encrypted.data`

    :param endpoint: Push service endpoint URL
    :type endpoint: basestring
    :param encoded_data: byte array of encoded data
    :type encoded_data: bytearray
    :param headers: Additional headers for the send
    :type headers: dict
    :returns string

    """
    header_list = [
        '-H "{}: {}" \\ \n'.format(
            key.lower(), val) for key, val in headers.items()
    ]
    data = ""
    if encoded_data:
        with open("encrypted.data", "wb") as f:
            f.write(encoded_data)
        data = "--data-binary @encrypted.data"
        if 'content-length' not in headers:
            header_list.append(
                '-H "content-length: {}" \\ \n'.format(len(encoded_data)))
    return ("""curl -vX POST {url} \\\n{headers}{data}""".format(
        url=endpoint,
        headers="".join(header_list),
        data=data))
[ "def", "as_curl", "(", "self", ",", "endpoint", ",", "encoded_data", ",", "headers", ")", ":", "header_list", "=", "[", "'-H \"{}: {}\" \\\\ \\n'", ".", "format", "(", "key", ".", "lower", "(", ")", ",", "val", ")", "for", "key", ",", "val", "in", "headers", ".", "items", "(", ")", "]", "data", "=", "\"\"", "if", "encoded_data", ":", "with", "open", "(", "\"encrypted.data\"", ",", "\"wb\"", ")", "as", "f", ":", "f", ".", "write", "(", "encoded_data", ")", "data", "=", "\"--data-binary @encrypted.data\"", "if", "'content-length'", "not", "in", "headers", ":", "header_list", ".", "append", "(", "'-H \"content-length: {}\" \\\\ \\n'", ".", "format", "(", "len", "(", "encoded_data", ")", ")", ")", "return", "(", "\"\"\"curl -vX POST {url} \\\\\\n{headers}{data}\"\"\"", ".", "format", "(", "url", "=", "endpoint", ",", "headers", "=", "\"\"", ".", "join", "(", "header_list", ")", ",", "data", "=", "data", ")", ")" ]
Return the send as a curl command. Useful for debugging. This will write out the encoded data to a local file named `encrypted.data` :param endpoint: Push service endpoint URL :type endpoint: basestring :param encoded_data: byte array of encoded data :type encoded_data: bytearray :param headers: Additional headers for the send :type headers: dict :returns string
[ "Return", "the", "send", "as", "a", "curl", "command", "." ]
python
train
mrcagney/gtfstk
gtfstk/shapes.py
https://github.com/mrcagney/gtfstk/blob/c91494e6fefc02523889655a0dc92d1c0eee8d03/gtfstk/shapes.py#L158-L210
def append_dist_to_shapes(feed: "Feed") -> "Feed":
    """
    Calculate and append the optional ``shape_dist_traveled`` field in
    ``feed.shapes`` in terms of the distance units ``feed.dist_units``.
    Return the resulting Feed.

    Notes
    -----
    - As a benchmark, using this function on `this Portland feed
      <https://transitfeeds.com/p/trimet/43/1400947517>`_ produces a
      ``shape_dist_traveled`` column that differs by at most 0.016 km in
      absolute value from the original values
    - Assume the following feed attributes are not ``None``:
        * ``feed.shapes``
    """
    if feed.shapes is None:
        raise ValueError(
            "This function requires the feed to have a shapes.txt file"
        )

    feed = feed.copy()
    f = feed.shapes
    m_to_dist = hp.get_convert_dist("m", feed.dist_units)

    def compute_dist(group):
        # Compute the distances of the stops along this trip
        group = group.sort_values("shape_pt_sequence")
        shape = group["shape_id"].iat[0]
        if not isinstance(shape, str):
            group["shape_dist_traveled"] = np.nan
            return group
        points = [
            sg.Point(utm.from_latlon(lat, lon)[:2])
            for lon, lat in group[["shape_pt_lon", "shape_pt_lat"]].values
        ]
        p_prev = points[0]
        d = 0
        distances = [0]
        for p in points[1:]:
            d += p.distance(p_prev)
            distances.append(d)
            p_prev = p
        group["shape_dist_traveled"] = distances
        return group

    g = f.groupby("shape_id", group_keys=False).apply(compute_dist)
    # Convert from meters
    g["shape_dist_traveled"] = g["shape_dist_traveled"].map(m_to_dist)

    feed.shapes = g

    return feed
[ "def", "append_dist_to_shapes", "(", "feed", ":", "\"Feed\"", ")", "->", "\"Feed\"", ":", "if", "feed", ".", "shapes", "is", "None", ":", "raise", "ValueError", "(", "\"This function requires the feed to have a shapes.txt file\"", ")", "feed", "=", "feed", ".", "copy", "(", ")", "f", "=", "feed", ".", "shapes", "m_to_dist", "=", "hp", ".", "get_convert_dist", "(", "\"m\"", ",", "feed", ".", "dist_units", ")", "def", "compute_dist", "(", "group", ")", ":", "# Compute the distances of the stops along this trip", "group", "=", "group", ".", "sort_values", "(", "\"shape_pt_sequence\"", ")", "shape", "=", "group", "[", "\"shape_id\"", "]", ".", "iat", "[", "0", "]", "if", "not", "isinstance", "(", "shape", ",", "str", ")", ":", "group", "[", "\"shape_dist_traveled\"", "]", "=", "np", ".", "nan", "return", "group", "points", "=", "[", "sg", ".", "Point", "(", "utm", ".", "from_latlon", "(", "lat", ",", "lon", ")", "[", ":", "2", "]", ")", "for", "lon", ",", "lat", "in", "group", "[", "[", "\"shape_pt_lon\"", ",", "\"shape_pt_lat\"", "]", "]", ".", "values", "]", "p_prev", "=", "points", "[", "0", "]", "d", "=", "0", "distances", "=", "[", "0", "]", "for", "p", "in", "points", "[", "1", ":", "]", ":", "d", "+=", "p", ".", "distance", "(", "p_prev", ")", "distances", ".", "append", "(", "d", ")", "p_prev", "=", "p", "group", "[", "\"shape_dist_traveled\"", "]", "=", "distances", "return", "group", "g", "=", "f", ".", "groupby", "(", "\"shape_id\"", ",", "group_keys", "=", "False", ")", ".", "apply", "(", "compute_dist", ")", "# Convert from meters", "g", "[", "\"shape_dist_traveled\"", "]", "=", "g", "[", "\"shape_dist_traveled\"", "]", ".", "map", "(", "m_to_dist", ")", "feed", ".", "shapes", "=", "g", "return", "feed" ]
Calculate and append the optional ``shape_dist_traveled`` field in ``feed.shapes`` in terms of the distance units ``feed.dist_units``. Return the resulting Feed. Notes ----- - As a benchmark, using this function on `this Portland feed <https://transitfeeds.com/p/trimet/43/1400947517>`_ produces a ``shape_dist_traveled`` column that differs by at most 0.016 km in absolute value from the original values - Assume the following feed attributes are not ``None``: * ``feed.shapes``
[ "Calculate", "and", "append", "the", "optional", "shape_dist_traveled", "field", "in", "feed", ".", "shapes", "in", "terms", "of", "the", "distance", "units", "feed", ".", "dist_units", ".", "Return", "the", "resulting", "Feed", "." ]
python
train
mcocdawc/chemcoord
version.py
https://github.com/mcocdawc/chemcoord/blob/95561ce387c142227c38fb14a1d182179aef8f5f/version.py#L208-L216
def call_git_branch():
    """return the string output of git describe"""
    try:
        with open(devnull, "w") as fnull:
            arguments = [GIT_COMMAND, 'rev-parse', '--abbrev-ref', 'HEAD']
            return check_output(arguments,
                                cwd=CURRENT_DIRECTORY,
                                stderr=fnull).decode("ascii").strip()
    except (OSError, CalledProcessError):
        return None
[ "def", "call_git_branch", "(", ")", ":", "try", ":", "with", "open", "(", "devnull", ",", "\"w\"", ")", "as", "fnull", ":", "arguments", "=", "[", "GIT_COMMAND", ",", "'rev-parse'", ",", "'--abbrev-ref'", ",", "'HEAD'", "]", "return", "check_output", "(", "arguments", ",", "cwd", "=", "CURRENT_DIRECTORY", ",", "stderr", "=", "fnull", ")", ".", "decode", "(", "\"ascii\"", ")", ".", "strip", "(", ")", "except", "(", "OSError", ",", "CalledProcessError", ")", ":", "return", "None" ]
return the string output of git describe
[ "return", "the", "string", "output", "of", "git", "desribe" ]
python
train
tensorlayer/tensorlayer
tensorlayer/activation.py
https://github.com/tensorlayer/tensorlayer/blob/aa9e52e36c7058a7e6fd81d36563ca6850b21956/tensorlayer/activation.py#L303-L333
def pixel_wise_softmax(x, name='pixel_wise_softmax'):
    """Return the softmax outputs of images, every pixels have multiple label,
    the sum of a pixel is 1.

    Usually be used for image segmentation.

    Parameters
    ----------
    x : Tensor
        input.
            - For 2d image, 4D tensor (batch_size, height, weight, channel), where channel >= 2.
            - For 3d image, 5D tensor (batch_size, depth, height, weight, channel), where channel >= 2.
    name : str
        function name (optional)

    Returns
    -------
    Tensor
        A ``Tensor`` in the same type as ``x``.

    Examples
    --------
    >>> outputs = pixel_wise_softmax(network.outputs)
    >>> dice_loss = 1 - dice_coe(outputs, y_, epsilon=1e-5)

    References
    ----------
    - `tf.reverse <https://www.tensorflow.org/versions/master/api_docs/python/array_ops.html#reverse>`__

    """
    with tf.name_scope(name):
        return tf.nn.softmax(x)
[ "def", "pixel_wise_softmax", "(", "x", ",", "name", "=", "'pixel_wise_softmax'", ")", ":", "with", "tf", ".", "name_scope", "(", "name", ")", ":", "return", "tf", ".", "nn", ".", "softmax", "(", "x", ")" ]
Return the softmax outputs of images, every pixels have multiple label, the sum of a pixel is 1. Usually be used for image segmentation. Parameters ---------- x : Tensor input. - For 2d image, 4D tensor (batch_size, height, weight, channel), where channel >= 2. - For 3d image, 5D tensor (batch_size, depth, height, weight, channel), where channel >= 2. name : str function name (optional) Returns ------- Tensor A ``Tensor`` in the same type as ``x``. Examples -------- >>> outputs = pixel_wise_softmax(network.outputs) >>> dice_loss = 1 - dice_coe(outputs, y_, epsilon=1e-5) References ---------- - `tf.reverse <https://www.tensorflow.org/versions/master/api_docs/python/array_ops.html#reverse>`__
[ "Return", "the", "softmax", "outputs", "of", "images", "every", "pixels", "have", "multiple", "label", "the", "sum", "of", "a", "pixel", "is", "1", "." ]
python
valid
data-8/datascience
datascience/tables.py
https://github.com/data-8/datascience/blob/4cee38266903ca169cea4a53b8cc39502d85c464/datascience/tables.py#L1461-L1505
def split(self, k):
    """Return a tuple of two tables where the first table contains
    ``k`` rows randomly sampled and the second contains the remaining rows.

    Args:
        ``k`` (int): The number of rows randomly sampled into the first
            table. ``k`` must be between 1 and ``num_rows - 1``.

    Raises:
        ``ValueError``: ``k`` is not between 1 and ``num_rows - 1``.

    Returns:
        A tuple containing two instances of ``Table``.

    >>> jobs = Table().with_columns(
    ...     'job', make_array('a', 'b', 'c', 'd'),
    ...     'wage', make_array(10, 20, 15, 8))
    >>> jobs
    job  | wage
    a    | 10
    b    | 20
    c    | 15
    d    | 8
    >>> sample, rest = jobs.split(3)
    >>> sample # doctest: +SKIP
    job  | wage
    c    | 15
    a    | 10
    b    | 20
    >>> rest # doctest: +SKIP
    job  | wage
    d    | 8
    """
    if not 1 <= k <= self.num_rows - 1:
        raise ValueError("Invalid value of k. k must be between 1 and the "
                         "number of rows - 1")
    rows = np.random.permutation(self.num_rows)
    first = self.take(rows[:k])
    rest = self.take(rows[k:])
    for column_label in self._formats:
        first._formats[column_label] = self._formats[column_label]
        rest._formats[column_label] = self._formats[column_label]
    return first, rest
[ "def", "split", "(", "self", ",", "k", ")", ":", "if", "not", "1", "<=", "k", "<=", "self", ".", "num_rows", "-", "1", ":", "raise", "ValueError", "(", "\"Invalid value of k. k must be between 1 and the\"", "\"number of rows - 1\"", ")", "rows", "=", "np", ".", "random", ".", "permutation", "(", "self", ".", "num_rows", ")", "first", "=", "self", ".", "take", "(", "rows", "[", ":", "k", "]", ")", "rest", "=", "self", ".", "take", "(", "rows", "[", "k", ":", "]", ")", "for", "column_label", "in", "self", ".", "_formats", ":", "first", ".", "_formats", "[", "column_label", "]", "=", "self", ".", "_formats", "[", "column_label", "]", "rest", ".", "_formats", "[", "column_label", "]", "=", "self", ".", "_formats", "[", "column_label", "]", "return", "first", ",", "rest" ]
Return a tuple of two tables where the first table contains ``k`` rows randomly sampled and the second contains the remaining rows. Args: ``k`` (int): The number of rows randomly sampled into the first table. ``k`` must be between 1 and ``num_rows - 1``. Raises: ``ValueError``: ``k`` is not between 1 and ``num_rows - 1``. Returns: A tuple containing two instances of ``Table``. >>> jobs = Table().with_columns( ... 'job', make_array('a', 'b', 'c', 'd'), ... 'wage', make_array(10, 20, 15, 8)) >>> jobs job | wage a | 10 b | 20 c | 15 d | 8 >>> sample, rest = jobs.split(3) >>> sample # doctest: +SKIP job | wage c | 15 a | 10 b | 20 >>> rest # doctest: +SKIP job | wage d | 8
[ "Return", "a", "tuple", "of", "two", "tables", "where", "the", "first", "table", "contains", "k", "rows", "randomly", "sampled", "and", "the", "second", "contains", "the", "remaining", "rows", "." ]
python
train
numenta/nupic
src/nupic/swarming/hypersearch/permutation_helpers.py
https://github.com/numenta/nupic/blob/5922fafffdccc8812e72b3324965ad2f7d4bbdad/src/nupic/swarming/hypersearch/permutation_helpers.py#L436-L446
def pushAwayFrom(self, otherPositions, rng):
    """See comments in base class."""
    # Get the count of how many in each position
    positions = [self.choices.index(x) for x in otherPositions]
    positionCounts = [0] * len(self.choices)
    for pos in positions:
        positionCounts[pos] += 1

    self._positionIdx = numpy.array(positionCounts).argmin()
    self._bestPositionIdx = self._positionIdx
[ "def", "pushAwayFrom", "(", "self", ",", "otherPositions", ",", "rng", ")", ":", "# Get the count of how many in each position", "positions", "=", "[", "self", ".", "choices", ".", "index", "(", "x", ")", "for", "x", "in", "otherPositions", "]", "positionCounts", "=", "[", "0", "]", "*", "len", "(", "self", ".", "choices", ")", "for", "pos", "in", "positions", ":", "positionCounts", "[", "pos", "]", "+=", "1", "self", ".", "_positionIdx", "=", "numpy", ".", "array", "(", "positionCounts", ")", ".", "argmin", "(", ")", "self", ".", "_bestPositionIdx", "=", "self", ".", "_positionIdx" ]
See comments in base class.
[ "See", "comments", "in", "base", "class", "." ]
python
valid
sveetch/py-css-styleguide
py_css_styleguide/serializer.py
https://github.com/sveetch/py-css-styleguide/blob/5acc693f71b2fa7d944d7fed561ae0a7699ccd0f/py_css_styleguide/serializer.py#L436-L456
def get_enabled_references(self, datas, meta_references):
    """
    Get enabled manifest references declarations.

    Enabled references are defined through meta references declaration,
    every other references are ignored.

    Arguments:
        datas (dict): Data where to search for reference declarations.
            This is commonly the fully parsed manifest.
        meta_references (list): List of enabled reference names.

    Returns:
        collections.OrderedDict: Serialized enabled references datas.
    """
    references = OrderedDict()

    for section in meta_references:
        references[section] = self.get_reference(datas, section)

    return references
[ "def", "get_enabled_references", "(", "self", ",", "datas", ",", "meta_references", ")", ":", "references", "=", "OrderedDict", "(", ")", "for", "section", "in", "meta_references", ":", "references", "[", "section", "]", "=", "self", ".", "get_reference", "(", "datas", ",", "section", ")", "return", "references" ]
Get enabled manifest references declarations. Enabled references are defined through meta references declaration, every other references are ignored. Arguments: datas (dict): Data where to search for reference declarations. This is commonly the fully parsed manifest. meta_references (list): List of enabled reference names. Returns: collections.OrderedDict: Serialized enabled references datas.
[ "Get", "enabled", "manifest", "references", "declarations", "." ]
python
train
Ex-Mente/auxi.0
auxi/modelling/business/basic.py
https://github.com/Ex-Mente/auxi.0/blob/2dcdae74154f136f8ca58289fe5b20772f215046/auxi/modelling/business/basic.py#L223-L236
def prepare_to_run(self, clock, period_count):
    """
    Prepare the activity for execution.

    :param clock: The clock containing the execution start time and
      execution period information.
    :param period_count: The total amount of periods this activity will
      be requested to be run for.
    """
    super(BasicLoanActivity, self).prepare_to_run(clock, period_count)

    self._months_executed = 0
    self._amount_left = self.amount
[ "def", "prepare_to_run", "(", "self", ",", "clock", ",", "period_count", ")", ":", "super", "(", "BasicLoanActivity", ",", "self", ")", ".", "prepare_to_run", "(", "clock", ",", "period_count", ")", "self", ".", "_months_executed", "=", "0", "self", ".", "_amount_left", "=", "self", ".", "amount" ]
Prepare the activity for execution. :param clock: The clock containing the execution start time and execution period information. :param period_count: The total amount of periods this activity will be requested to be run for.
[ "Prepare", "the", "activity", "for", "execution", "." ]
python
valid
ionelmc/python-cogen
cogen/web/wsgi.py
https://github.com/ionelmc/python-cogen/blob/83b0edb88425eba6e5bfda9f1dcd34642517e2a8/cogen/web/wsgi.py#L723-L729
def bind(self, family, type, proto=0): """Create (or recreate) the actual socket object.""" self.socket = sockets.Socket(family, type, proto) self.socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1) self.socket.setblocking(0) #~ self.socket.setsockopt(socket.SOL_SOCKET, socket.TCP_NODELAY, 1) self.socket.bind(self.bind_addr)
[ "def", "bind", "(", "self", ",", "family", ",", "type", ",", "proto", "=", "0", ")", ":", "self", ".", "socket", "=", "sockets", ".", "Socket", "(", "family", ",", "type", ",", "proto", ")", "self", ".", "socket", ".", "setsockopt", "(", "socket", ".", "SOL_SOCKET", ",", "socket", ".", "SO_REUSEADDR", ",", "1", ")", "self", ".", "socket", ".", "setblocking", "(", "0", ")", "#~ self.socket.setsockopt(socket.SOL_SOCKET, socket.TCP_NODELAY, 1)\r", "self", ".", "socket", ".", "bind", "(", "self", ".", "bind_addr", ")" ]
Create (or recreate) the actual socket object.
[ "Create", "(", "or", "recreate", ")", "the", "actual", "socket", "object", "." ]
python
train
DiamondLightSource/python-workflows
workflows/transport/common_transport.py
https://github.com/DiamondLightSource/python-workflows/blob/7ef47b457655b96f4d2ef7ee9863cf1b6d20e023/workflows/transport/common_transport.py#L231-L258
def ack(self, message, subscription_id=None, **kwargs): """Acknowledge receipt of a message. This only makes sense when the 'acknowledgement' flag was set for the relevant subscription. :param message: ID of the message to be acknowledged, OR a dictionary containing a field 'message-id'. :param subscription_id: ID of the associated subscription. Optional when a dictionary is passed as first parameter and that dictionary contains field 'subscription'. :param **kwargs: Further parameters for the transport layer. For example transaction: Transaction ID if acknowledgement should be part of a transaction """ if isinstance(message, dict): message_id = message.get("message-id") if not subscription_id: subscription_id = message.get("subscription") else: message_id = message if not message_id: raise workflows.Error("Cannot acknowledge message without " + "message ID") if not subscription_id: raise workflows.Error( "Cannot acknowledge message without " + "subscription ID" ) self.log.debug( "Acknowledging message %s on subscription %s", message_id, subscription_id ) self._ack(message_id, subscription_id, **kwargs)
[ "def", "ack", "(", "self", ",", "message", ",", "subscription_id", "=", "None", ",", "*", "*", "kwargs", ")", ":", "if", "isinstance", "(", "message", ",", "dict", ")", ":", "message_id", "=", "message", ".", "get", "(", "\"message-id\"", ")", "if", "not", "subscription_id", ":", "subscription_id", "=", "message", ".", "get", "(", "\"subscription\"", ")", "else", ":", "message_id", "=", "message", "if", "not", "message_id", ":", "raise", "workflows", ".", "Error", "(", "\"Cannot acknowledge message without \"", "+", "\"message ID\"", ")", "if", "not", "subscription_id", ":", "raise", "workflows", ".", "Error", "(", "\"Cannot acknowledge message without \"", "+", "\"subscription ID\"", ")", "self", ".", "log", ".", "debug", "(", "\"Acknowledging message %s on subscription %s\"", ",", "message_id", ",", "subscription_id", ")", "self", ".", "_ack", "(", "message_id", ",", "subscription_id", ",", "*", "*", "kwargs", ")" ]
Acknowledge receipt of a message. This only makes sense when the 'acknowledgement' flag was set for the relevant subscription. :param message: ID of the message to be acknowledged, OR a dictionary containing a field 'message-id'. :param subscription_id: ID of the associated subscription. Optional when a dictionary is passed as first parameter and that dictionary contains field 'subscription'. :param **kwargs: Further parameters for the transport layer. For example transaction: Transaction ID if acknowledgement should be part of a transaction
[ "Acknowledge", "receipt", "of", "a", "message", ".", "This", "only", "makes", "sense", "when", "the", "acknowledgement", "flag", "was", "set", "for", "the", "relevant", "subscription", ".", ":", "param", "message", ":", "ID", "of", "the", "message", "to", "be", "acknowledged", "OR", "a", "dictionary", "containing", "a", "field", "message", "-", "id", ".", ":", "param", "subscription_id", ":", "ID", "of", "the", "associated", "subscription", ".", "Optional", "when", "a", "dictionary", "is", "passed", "as", "first", "parameter", "and", "that", "dictionary", "contains", "field", "subscription", ".", ":", "param", "**", "kwargs", ":", "Further", "parameters", "for", "the", "transport", "layer", ".", "For", "example", "transaction", ":", "Transaction", "ID", "if", "acknowledgement", "should", "be", "part", "of", "a", "transaction" ]
python
train
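The dict-or-bare-ID normalization at the top of ack() is easy to isolate; here is a self-contained sketch of just that step, with ValueError standing in for workflows.Error (an assumption, since the transport layer itself is not involved here).

def normalize_ack_args(message, subscription_id=None):
    # Accept either a bare message ID or a dict carrying 'message-id'.
    if isinstance(message, dict):
        message_id = message.get("message-id")
        if not subscription_id:
            subscription_id = message.get("subscription")
    else:
        message_id = message
    if not message_id:
        raise ValueError("cannot acknowledge a message without a message ID")
    if not subscription_id:
        raise ValueError("cannot acknowledge a message without a subscription ID")
    return message_id, subscription_id

print(normalize_ack_args({"message-id": "m-1", "subscription": "sub-7"}))
print(normalize_ack_args("m-2", subscription_id="sub-7"))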
shad7/tvrenamer
tvrenamer/core/parser.py
https://github.com/shad7/tvrenamer/blob/7fb59cb02669357e73b7acb92dcb6d74fdff4654/tvrenamer/core/parser.py#L29-L54
def parse_filename(filename): """Parse media filename for metadata. :param str filename: the name of media file :returns: dict of metadata attributes found in filename or None if no matching expression. :rtype: dict """ _patterns = patterns.get_expressions() result = {} for cmatcher in _patterns: match = cmatcher.match(filename) if match: namedgroups = match.groupdict().keys() result['pattern'] = cmatcher.pattern result['series_name'] = match.group('seriesname') result['season_number'] = _get_season_no(match, namedgroups) result['episode_numbers'] = _get_episodes(match, namedgroups) break else: result = None return result
[ "def", "parse_filename", "(", "filename", ")", ":", "_patterns", "=", "patterns", ".", "get_expressions", "(", ")", "result", "=", "{", "}", "for", "cmatcher", "in", "_patterns", ":", "match", "=", "cmatcher", ".", "match", "(", "filename", ")", "if", "match", ":", "namedgroups", "=", "match", ".", "groupdict", "(", ")", ".", "keys", "(", ")", "result", "[", "'pattern'", "]", "=", "cmatcher", ".", "pattern", "result", "[", "'series_name'", "]", "=", "match", ".", "group", "(", "'seriesname'", ")", "result", "[", "'season_number'", "]", "=", "_get_season_no", "(", "match", ",", "namedgroups", ")", "result", "[", "'episode_numbers'", "]", "=", "_get_episodes", "(", "match", ",", "namedgroups", ")", "break", "else", ":", "result", "=", "None", "return", "result" ]
Parse media filename for metadata. :param str filename: the name of media file :returns: dict of metadata attributes found in filename or None if no matching expression. :rtype: dict
[ "Parse", "media", "filename", "for", "metadata", "." ]
python
train
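parse_filename delegates matching to a patterns module that is not shown; the sketch below reproduces the same named-group technique with a single hypothetical expression, so the regex itself is an assumption rather than the library's real pattern list.

import re

# Hypothetical pattern; the real module compiles a list of expressions.
EPISODE_RE = re.compile(
    r"(?P<seriesname>.+?)[. ][Ss](?P<seasonnumber>\d+)[Ee](?P<episodenumber>\d+)")

def parse(filename):
    match = EPISODE_RE.match(filename)
    if not match:
        return None
    return {
        "pattern": EPISODE_RE.pattern,
        "series_name": match.group("seriesname"),
        "season_number": int(match.group("seasonnumber")),
        "episode_numbers": [int(match.group("episodenumber"))],
    }

print(parse("some.show.S01E02.mkv"))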
rbuffat/pyepw
pyepw/epw.py
https://github.com/rbuffat/pyepw/blob/373d4d3c8386c8d35789f086ac5f6018c2711745/pyepw/epw.py#L1550-L1581
def design_stat_cooling(self, value="Cooling"): """Corresponds to IDD Field `design_stat_cooling` Args: value (str): value for IDD Field `design_stat_cooling` Accepted values are: - Cooling Default value: Cooling if `value` is None it will not be checked against the specification and is assumed to be a missing value Raises: ValueError: if `value` is not a valid value """ if value is not None: try: value = str(value) except ValueError: raise ValueError( 'value {} need to be of type str ' 'for field `design_stat_cooling`'.format(value)) if ',' in value: raise ValueError('value should not contain a comma ' 'for field `design_stat_cooling`') vals = set() vals.add("Cooling") if value not in vals: raise ValueError('value {} is not an accepted value for ' 'field `design_stat_cooling`'.format(value)) self._design_stat_cooling = value
[ "def", "design_stat_cooling", "(", "self", ",", "value", "=", "\"Cooling\"", ")", ":", "if", "value", "is", "not", "None", ":", "try", ":", "value", "=", "str", "(", "value", ")", "except", "ValueError", ":", "raise", "ValueError", "(", "'value {} need to be of type str '", "'for field `design_stat_cooling`'", ".", "format", "(", "value", ")", ")", "if", "','", "in", "value", ":", "raise", "ValueError", "(", "'value should not contain a comma '", "'for field `design_stat_cooling`'", ")", "vals", "=", "set", "(", ")", "vals", ".", "add", "(", "\"Cooling\"", ")", "if", "value", "not", "in", "vals", ":", "raise", "ValueError", "(", "'value {} is not an accepted value for '", "'field `design_stat_cooling`'", ".", "format", "(", "value", ")", ")", "self", ".", "_design_stat_cooling", "=", "value" ]
Corresponds to IDD Field `design_stat_cooling` Args: value (str): value for IDD Field `design_stat_cooling` Accepted values are: - Cooling Default value: Cooling if `value` is None it will not be checked against the specification and is assumed to be a missing value Raises: ValueError: if `value` is not a valid value
[ "Corresponds", "to", "IDD", "Field", "design_stat_cooling" ]
python
train
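The field setter above follows a generic validate-and-store pattern; a compact standalone version of just the validation step might look like this (function and field names are illustrative).

def validate_choice(value, accepted, field):
    # Coerce to str, reject embedded commas, then check the whitelist.
    value = str(value)
    if "," in value:
        raise ValueError(
            "value should not contain a comma for field `%s`" % field)
    if value not in accepted:
        raise ValueError(
            "value %s is not an accepted value for field `%s`" % (value, field))
    return value

print(validate_choice("Cooling", {"Cooling"}, "design_stat_cooling"))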
paramiko/paramiko
paramiko/transport.py
https://github.com/paramiko/paramiko/blob/cf7d49d66f3b1fbc8b0853518a54050182b3b5eb/paramiko/transport.py#L1634-L1647
def set_log_channel(self, name): """ Set the channel for this transport's logging. The default is ``"paramiko.transport"`` but it can be set to anything you want. (See the `.logging` module for more info.) SSH Channels will log to a sub-channel of the one specified. :param str name: new channel name for logging .. versionadded:: 1.1 """ self.log_name = name self.logger = util.get_logger(name) self.packetizer.set_log(self.logger)
[ "def", "set_log_channel", "(", "self", ",", "name", ")", ":", "self", ".", "log_name", "=", "name", "self", ".", "logger", "=", "util", ".", "get_logger", "(", "name", ")", "self", ".", "packetizer", ".", "set_log", "(", "self", ".", "logger", ")" ]
Set the channel for this transport's logging. The default is ``"paramiko.transport"`` but it can be set to anything you want. (See the `.logging` module for more info.) SSH Channels will log to a sub-channel of the one specified. :param str name: new channel name for logging .. versionadded:: 1.1
[ "Set", "the", "channel", "for", "this", "transport", "s", "logging", ".", "The", "default", "is", "paramiko", ".", "transport", "but", "it", "can", "be", "set", "to", "anything", "you", "want", ".", "(", "See", "the", ".", "logging", "module", "for", "more", "info", ".", ")", "SSH", "Channels", "will", "log", "to", "a", "sub", "-", "channel", "of", "the", "one", "specified", "." ]
python
train
openstax/cnx-archive
cnxarchive/views/xpath.py
https://github.com/openstax/cnx-archive/blob/d31d34aa8bbc8a9fde6cd4227a0df92726e8daf4/cnxarchive/views/xpath.py#L123-L144
def execute_xpath(xpath_string, sql_function, uuid, version): """Executes either xpath or xpath-module SQL function with given input params.""" settings = get_current_registry().settings with db_connect() as db_connection: with db_connection.cursor() as cursor: try: cursor.execute(SQL[sql_function], {'document_uuid': uuid, 'document_version': version, 'xpath_string': xpath_string}) except psycopg2.Error as e: exc = httpexceptions.HTTPBadRequest() exc.explanation = e.pgerror raise exc for res in cursor.fetchall(): yield {'name': res[0], 'uuid': res[1], 'version': res[2], 'xpath_results': res[3]}
[ "def", "execute_xpath", "(", "xpath_string", ",", "sql_function", ",", "uuid", ",", "version", ")", ":", "settings", "=", "get_current_registry", "(", ")", ".", "settings", "with", "db_connect", "(", ")", "as", "db_connection", ":", "with", "db_connection", ".", "cursor", "(", ")", "as", "cursor", ":", "try", ":", "cursor", ".", "execute", "(", "SQL", "[", "sql_function", "]", ",", "{", "'document_uuid'", ":", "uuid", ",", "'document_version'", ":", "version", ",", "'xpath_string'", ":", "xpath_string", "}", ")", "except", "psycopg2", ".", "Error", "as", "e", ":", "exc", "=", "httpexceptions", ".", "HTTPBadRequest", "(", ")", "exc", ".", "explanation", "=", "e", ".", "pgerror", "raise", "exc", "for", "res", "in", "cursor", ".", "fetchall", "(", ")", ":", "yield", "{", "'name'", ":", "res", "[", "0", "]", ",", "'uuid'", ":", "res", "[", "1", "]", ",", "'version'", ":", "res", "[", "2", "]", ",", "'xpath_results'", ":", "res", "[", "3", "]", "}" ]
Executes either xpath or xpath-module SQL function with given input params.
[ "Executes", "either", "xpath", "or", "xpath", "-", "module", "SQL", "function", "with", "given", "input", "params", "." ]
python
train
IntegralDefense/splunklib
splunklib/__init__.py
https://github.com/IntegralDefense/splunklib/blob/c3a02c83daad20cf24838f52b22cd2476f062eed/splunklib/__init__.py#L336-L348
def json(self): """Returns the search results as a list of JSON objects.""" if self.search_results is None: return None result = [] for row in self.search_results['rows']: obj = {} for index in range(0, len(self.search_results['fields'])): obj[self.search_results['fields'][index]] = row[index] result.append(obj) return result
[ "def", "json", "(", "self", ")", ":", "if", "self", ".", "search_results", "is", "None", ":", "return", "None", "result", "=", "[", "]", "for", "row", "in", "self", ".", "search_results", "[", "'rows'", "]", ":", "obj", "=", "{", "}", "for", "index", "in", "range", "(", "0", ",", "len", "(", "self", ".", "search_results", "[", "'fields'", "]", ")", ")", ":", "obj", "[", "self", ".", "search_results", "[", "'fields'", "]", "[", "index", "]", "]", "=", "row", "[", "index", "]", "result", ".", "append", "(", "obj", ")", "return", "result" ]
Returns the search results as a list of JSON objects.
[ "Returns", "the", "search", "results", "as", "a", "list", "of", "JSON", "objects", "." ]
python
train
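The index loop in json() is the classic fields-by-rows pairing; the same result falls out of zip, shown here on toy data.

fields = ["host", "count"]
rows = [["web01", 3], ["web02", 5]]

# One dict per row, keyed by the field names.
result = [dict(zip(fields, row)) for row in rows]
print(result)  # [{'host': 'web01', 'count': 3}, {'host': 'web02', 'count': 5}]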
sorgerlab/indra
indra/sources/isi/processor.py
https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/sources/isi/processor.py#L38-L43
def get_statements(self): """Process reader output to produce INDRA Statements.""" for k, v in self.reader_output.items(): for interaction in v['interactions']: self._process_interaction(k, interaction, v['text'], self.pmid, self.extra_annotations)
[ "def", "get_statements", "(", "self", ")", ":", "for", "k", ",", "v", "in", "self", ".", "reader_output", ".", "items", "(", ")", ":", "for", "interaction", "in", "v", "[", "'interactions'", "]", ":", "self", ".", "_process_interaction", "(", "k", ",", "interaction", ",", "v", "[", "'text'", "]", ",", "self", ".", "pmid", ",", "self", ".", "extra_annotations", ")" ]
Process reader output to produce INDRA Statements.
[ "Process", "reader", "output", "to", "produce", "INDRA", "Statements", "." ]
python
train
klen/makesite
makesite/modules/django/main/utils/cache.py
https://github.com/klen/makesite/blob/f6f77a43a04a256189e8fffbeac1ffd63f35a10c/makesite/modules/django/main/utils/cache.py#L15-L22
def cached_instance(model, timeout=None, **filters): """ Auto cached model instance. """ if isinstance(model, basestring): model = _str_to_model(model) cache_key = generate_cache_key(model, **filters) return get_cached(cache_key, model.objects.select_related().get, kwargs=filters)
[ "def", "cached_instance", "(", "model", ",", "timeout", "=", "None", ",", "*", "*", "filters", ")", ":", "if", "isinstance", "(", "model", ",", "basestring", ")", ":", "model", "=", "_str_to_model", "(", "model", ")", "cache_key", "=", "generate_cache_key", "(", "model", ",", "*", "*", "filters", ")", "return", "get_cached", "(", "cache_key", ",", "model", ".", "objects", ".", "select_related", "(", ")", ".", "get", ",", "kwargs", "=", "filters", ")" ]
Auto cached model instance.
[ "Auto", "cached", "model", "instance", "." ]
python
train
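cached_instance leans on helpers (generate_cache_key, get_cached) defined elsewhere; this sketch shows the get-or-compute shape they presumably implement, with a plain dict standing in for the cache backend.

_CACHE = {}

def get_cached(cache_key, loader, kwargs=None):
    # Compute once, then serve every later call from the cache.
    if cache_key not in _CACHE:
        _CACHE[cache_key] = loader(**(kwargs or {}))
    return _CACHE[cache_key]

print(get_cached("answer", lambda: 42))   # computed
print(get_cached("answer", lambda: 0))    # cache hit, still 42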
onelogin/python-saml
src/onelogin/saml2/idp_metadata_parser.py
https://github.com/onelogin/python-saml/blob/9fe7a72da5b4caa1529c1640b52d2649447ce49b/src/onelogin/saml2/idp_metadata_parser.py#L28-L63
def get_metadata(url, validate_cert=True): """ Gets the metadata XML from the provided URL :param url: Url where the XML of the Identity Provider Metadata is published. :type url: string :param validate_cert: If the url uses https schema, that flag enables or not the verification of the associated certificate. :type validate_cert: bool :returns: metadata XML :rtype: string """ valid = False if validate_cert: response = urllib2.urlopen(url) else: ctx = ssl.create_default_context() ctx.check_hostname = False ctx.verify_mode = ssl.CERT_NONE response = urllib2.urlopen(url, context=ctx) xml = response.read() if xml: try: dom = fromstring(xml, forbid_dtd=True) idp_descriptor_nodes = OneLogin_Saml2_Utils.query(dom, '//md:IDPSSODescriptor') if idp_descriptor_nodes: valid = True except Exception: pass if not valid: raise Exception('Not valid IdP XML found from URL: %s' % (url)) return xml
[ "def", "get_metadata", "(", "url", ",", "validate_cert", "=", "True", ")", ":", "valid", "=", "False", "if", "validate_cert", ":", "response", "=", "urllib2", ".", "urlopen", "(", "url", ")", "else", ":", "ctx", "=", "ssl", ".", "create_default_context", "(", ")", "ctx", ".", "check_hostname", "=", "False", "ctx", ".", "verify_mode", "=", "ssl", ".", "CERT_NONE", "response", "=", "urllib2", ".", "urlopen", "(", "url", ",", "context", "=", "ctx", ")", "xml", "=", "response", ".", "read", "(", ")", "if", "xml", ":", "try", ":", "dom", "=", "fromstring", "(", "xml", ",", "forbid_dtd", "=", "True", ")", "idp_descriptor_nodes", "=", "OneLogin_Saml2_Utils", ".", "query", "(", "dom", ",", "'//md:IDPSSODescriptor'", ")", "if", "idp_descriptor_nodes", ":", "valid", "=", "True", "except", "Exception", ":", "pass", "if", "not", "valid", ":", "raise", "Exception", "(", "'Not valid IdP XML found from URL: %s'", "%", "(", "url", ")", ")", "return", "xml" ]
Gets the metadata XML from the provided URL :param url: Url where the XML of the Identity Provider Metadata is published. :type url: string :param validate_cert: If the url uses https schema, that flag enables or not the verification of the associated certificate. :type validate_cert: bool :returns: metadata XML :rtype: string
[ "Gets", "the", "metadata", "XML", "from", "the", "provided", "URL" ]
python
train
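The validate_cert=False branch above builds an SSL context with verification switched off; the Python 3 equivalent of that context setup looks like this (the fetch is left commented out so the snippet runs without network access, and the URL is a placeholder).

import ssl
from urllib.request import urlopen

ctx = ssl.create_default_context()
ctx.check_hostname = False        # skip hostname matching
ctx.verify_mode = ssl.CERT_NONE   # skip certificate verification entirely

# xml = urlopen("https://idp.example.com/metadata", context=ctx).read()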
theislab/scanpy
scanpy/_exporting.py
https://github.com/theislab/scanpy/blob/9e4e5ee02e04cf618872d9b098e24f0542e8b227/scanpy/_exporting.py#L409-L514
def cellbrowser( adata, data_dir, data_name, embedding_keys = None, annot_keys = ["louvain", "percent_mito", "n_genes", "n_counts"], cluster_field = "louvain", nb_marker = 50, skip_matrix = False, html_dir = None, port = None, do_debug = False ): """ Export adata to a UCSC Cell Browser project directory. If `html_dir` is set, subsequently build the html files from the project directory into `html_dir`. If `port` is set, start an HTTP server in the background and serve `html_dir` on `port`. By default, export all gene expression data from `adata.raw`, the annotations `louvain`, `percent_mito`, `n_genes` and `n_counts` and the top `nb_marker` cluster markers. All existing files in data_dir are overwritten, except cellbrowser.conf. See `UCSC Cellbrowser <https://github.com/maximilianh/cellBrowser>`__ for details. Parameters ---------- adata : :class:`~anndata.AnnData` Annotated data matrix data_dir : `str` Path to directory for exported Cell Browser files. Usually these are the files `exprMatrix.tsv.gz`, `meta.tsv`, coordinate files like `tsne.coords.tsv`, and cluster marker gene lists like `markers.tsv`. A file `cellbrowser.conf` is also created with pointers to these files. As a result, each adata object should have its own project_dir. data_name : `str` Name of dataset in Cell Browser, a string without special characters. This is written to `data_dir`/cellbrowser.conf. Ideally this is a short unique name for the dataset, like "pbmc3k" or "tabulamuris". embedding_keys: `list` of `str` or `dict` of `key (str)`->`display label (str)` 2-D embeddings in `adata.obsm` to export. The prefix "`X_`" or "`X_draw_graph_`" is not necessary. Coordinates missing from `adata` are skipped. By default, these keys are tried: ["tsne", "umap", "pagaFa", "pagaFr", "pagaUmap", "phate", "fa", "fr", "kk", "drl", "rt"]. For these, default display labels are automatically used. For other values, you can specify a dictionary instead of a list, the values of the dictionary are then the display labels for the coordinates, e.g. `{'tsne' : "t-SNE by Scanpy"}` annot_keys: `list` of `str` or `dict` of `key (str)`->`display label (str)` Annotations in `adata.obsm` to export. Can be a dictionary with key -> display label. skip_matrix: `boolean` Do not export the matrix. If you had previously exported this adata into the same `data_dir`, then there is no need to export the whole matrix again. This option will make the export a lot faster, e.g. when only coordinates or meta data were changed. html_dir: `str` If this variable is set, the export will build html files from `data_dir` to `html_dir`, creating html/js/json files. Usually there is one global html output directory for all datasets. Often, `html_dir` is located under a webserver's (like Apache) htdocs directory or is copied to one. A directory `html_dir`/`project_name` will be created and an index.html will be created under `html_dir` for all subdirectories. Existing files will be overwritten. If you do not want to use html_dir, you can use the command line tool `cbBuild` to build the html directory. port: `int` If this variable and `html_dir` are set, Python's built-in web server will be spawned as a daemon in the background and serve the files under `html_dir`. To kill the process, call `cellbrowser.cellbrowser.stop()`. do_debug: `boolean` Activate debugging output Examples -------- See this `tutorial <https://github.com/theislab/scanpy_usage/tree/master/181126_Cellbrowser_exports>`__. """ try: import cellbrowser.cellbrowser as cb except ImportError: print("The package cellbrowser is not installed. Install with 'pip " "install cellbrowser' and retry.") cb.setDebug(do_debug) cb.scanpyToCellbrowser(adata, data_dir, data_name, coordFields=embedding_keys, metaFields=annot_keys, clusterField=cluster_field, nb_marker=nb_marker, skipMatrix=skip_matrix, doDebug = None ) if html_dir: cb.build(data_dir, html_dir, doDebug=None) if port: cb.serve(html_dir, port)
[ "def", "cellbrowser", "(", "adata", ",", "data_dir", ",", "data_name", ",", "embedding_keys", "=", "None", ",", "annot_keys", "=", "[", "\"louvain\"", ",", "\"percent_mito\"", ",", "\"n_genes\"", ",", "\"n_counts\"", "]", ",", "cluster_field", "=", "\"louvain\"", ",", "nb_marker", "=", "50", ",", "skip_matrix", "=", "False", ",", "html_dir", "=", "None", ",", "port", "=", "None", ",", "do_debug", "=", "False", ")", ":", "try", ":", "import", "cellbrowser", ".", "cellbrowser", "as", "cb", "except", "ImportError", ":", "print", "(", "\"The package cellbrowser is not installed. Install with 'pip \"", "\"install cellbrowser' and retry.\"", ")", "cb", ".", "setDebug", "(", "do_debug", ")", "cb", ".", "scanpyToCellbrowser", "(", "adata", ",", "data_dir", ",", "data_name", ",", "coordFields", "=", "embedding_keys", ",", "metaFields", "=", "annot_keys", ",", "clusterField", "=", "cluster_field", ",", "nb_marker", "=", "nb_marker", ",", "skipMatrix", "=", "skip_matrix", ",", "doDebug", "=", "None", ")", "if", "html_dir", ":", "cb", ".", "build", "(", "data_dir", ",", "html_dir", ",", "doDebug", "=", "None", ")", "if", "port", ":", "cb", ".", "serve", "(", "html_dir", ",", "port", ")" ]
Export adata to a UCSC Cell Browser project directory. If `html_dir` is set, subsequently build the html files from the project directory into `html_dir`. If `port` is set, start an HTTP server in the background and serve `html_dir` on `port`. By default, export all gene expression data from `adata.raw`, the annotations `louvain`, `percent_mito`, `n_genes` and `n_counts` and the top `nb_marker` cluster markers. All existing files in data_dir are overwritten, except cellbrowser.conf. See `UCSC Cellbrowser <https://github.com/maximilianh/cellBrowser>`__ for details. Parameters ---------- adata : :class:`~anndata.AnnData` Annotated data matrix data_dir : `str` Path to directory for exported Cell Browser files. Usually these are the files `exprMatrix.tsv.gz`, `meta.tsv`, coordinate files like `tsne.coords.tsv`, and cluster marker gene lists like `markers.tsv`. A file `cellbrowser.conf` is also created with pointers to these files. As a result, each adata object should have its own project_dir. data_name : `str` Name of dataset in Cell Browser, a string without special characters. This is written to `data_dir`/cellbrowser.conf. Ideally this is a short unique name for the dataset, like "pbmc3k" or "tabulamuris". embedding_keys: `list` of `str` or `dict` of `key (str)`->`display label (str)` 2-D embeddings in `adata.obsm` to export. The prefix "`X_`" or "`X_draw_graph_`" is not necessary. Coordinates missing from `adata` are skipped. By default, these keys are tried: ["tsne", "umap", "pagaFa", "pagaFr", "pagaUmap", "phate", "fa", "fr", "kk", "drl", "rt"]. For these, default display labels are automatically used. For other values, you can specify a dictionary instead of a list, the values of the dictionary are then the display labels for the coordinates, e.g. `{'tsne' : "t-SNE by Scanpy"}` annot_keys: `list` of `str` or `dict` of `key (str)`->`display label (str)` Annotations in `adata.obsm` to export. Can be a dictionary with key -> display label. skip_matrix: `boolean` Do not export the matrix. If you had previously exported this adata into the same `data_dir`, then there is no need to export the whole matrix again. This option will make the export a lot faster, e.g. when only coordinates or meta data were changed. html_dir: `str` If this variable is set, the export will build html files from `data_dir` to `html_dir`, creating html/js/json files. Usually there is one global html output directory for all datasets. Often, `html_dir` is located under a webserver's (like Apache) htdocs directory or is copied to one. A directory `html_dir`/`project_name` will be created and an index.html will be created under `html_dir` for all subdirectories. Existing files will be overwritten. If you do not want to use html_dir, you can use the command line tool `cbBuild` to build the html directory. port: `int` If this variable and `html_dir` are set, Python's built-in web server will be spawned as a daemon in the background and serve the files under `html_dir`. To kill the process, call `cellbrowser.cellbrowser.stop()`. do_debug: `boolean` Activate debugging output Examples -------- See this `tutorial <https://github.com/theislab/scanpy_usage/tree/master/181126_Cellbrowser_exports>`__.
[ "Export", "adata", "to", "a", "UCSC", "Cell", "Browser", "project", "directory", ".", "If", "html_dir", "is", "set", "subsequently", "build", "the", "html", "files", "from", "the", "project", "directory", "into", "html_dir", ".", "If", "port", "is", "set", "start", "an", "HTTP", "server", "in", "the", "background", "and", "serve", "html_dir", "on", "port", "." ]
python
train
roclark/sportsreference
sportsreference/nba/roster.py
https://github.com/roclark/sportsreference/blob/ea0bae432be76450e137671d2998eb38f962dffd/sportsreference/nba/roster.py#L389-L411
def _parse_contract_wages(self, table): """ Parse the wages on the contract. The wages are listed as the data points in the contract table. Any cells whose text does not start with a '$' sign are likely not valid and should be dropped. Parameters ---------- table : PyQuery object A PyQuery object containing the contract table. Returns ------- list Returns a list of all wages where each element is a string denoting the dollar amount, such as '$40,000,000'. """ wages = [i.text() if i.text().startswith('$') else '' for i in table('td').items()] wages.remove('') return wages
[ "def", "_parse_contract_wages", "(", "self", ",", "table", ")", ":", "wages", "=", "[", "i", ".", "text", "(", ")", "if", "i", ".", "text", "(", ")", ".", "startswith", "(", "'$'", ")", "else", "''", "for", "i", "in", "table", "(", "'td'", ")", ".", "items", "(", ")", "]", "wages", ".", "remove", "(", "''", ")", "return", "wages" ]
Parse the wages on the contract. The wages are listed as the data points in the contract table. Any cells whose text does not start with a '$' sign are likely not valid and should be dropped. Parameters ---------- table : PyQuery object A PyQuery object containing the contract table. Returns ------- list Returns a list of all wages where each element is a string denoting the dollar amount, such as '$40,000,000'.
[ "Parse", "the", "wages", "on", "the", "contract", "." ]
python
train
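A note on the filter in the record above: the comprehension keeps placeholders as '' and list.remove('') strips only the first one; filtering up front, as in this toy sketch, keeps only the dollar amounts in a single pass.

cells = ["2018-19", "$40,000,000", "2019-20", "$42,500,000"]
wages = [c for c in cells if c.startswith("$")]
print(wages)  # ['$40,000,000', '$42,500,000']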
google-research/batch-ppo
agents/algorithms/ppo/ppo.py
https://github.com/google-research/batch-ppo/blob/3d09705977bae4e7c3eb20339a3b384d2a5531e4/agents/algorithms/ppo/ppo.py#L334-L380
def _perform_update_steps( self, observ, action, old_policy_params, reward, length): """Perform multiple update steps of value function and policy. The advantage is computed once at the beginning and shared across iterations. We need to decide for the summary of one iteration, and thus choose the one after half of the iterations. Args: observ: Sequences of observations. action: Sequences of actions. old_policy_params: Parameters of the behavioral policy. reward: Sequences of rewards. length: Batch of sequence lengths. Returns: Summary tensor. """ return_ = utility.discounted_return( reward, length, self._config.discount) value = self._network(observ, length).value if self._config.gae_lambda: advantage = utility.lambda_advantage( reward, value, length, self._config.discount, self._config.gae_lambda) else: advantage = return_ - value mean, variance = tf.nn.moments(advantage, axes=[0, 1], keep_dims=True) advantage = (advantage - mean) / (tf.sqrt(variance) + 1e-8) advantage = tf.Print( advantage, [tf.reduce_mean(return_), tf.reduce_mean(value)], 'return and value: ') advantage = tf.Print( advantage, [tf.reduce_mean(advantage)], 'normalized advantage: ') episodes = (observ, action, old_policy_params, reward, advantage) value_loss, policy_loss, summary = parts.iterate_sequences( self._update_step, [0., 0., ''], episodes, length, self._config.chunk_length, self._config.batch_size, self._config.update_epochs, padding_value=1) print_losses = tf.group( tf.Print(0, [tf.reduce_mean(value_loss)], 'value loss: '), tf.Print(0, [tf.reduce_mean(policy_loss)], 'policy loss: ')) with tf.control_dependencies([value_loss, policy_loss, print_losses]): return summary[self._config.update_epochs // 2]
[ "def", "_perform_update_steps", "(", "self", ",", "observ", ",", "action", ",", "old_policy_params", ",", "reward", ",", "length", ")", ":", "return_", "=", "utility", ".", "discounted_return", "(", "reward", ",", "length", ",", "self", ".", "_config", ".", "discount", ")", "value", "=", "self", ".", "_network", "(", "observ", ",", "length", ")", ".", "value", "if", "self", ".", "_config", ".", "gae_lambda", ":", "advantage", "=", "utility", ".", "lambda_advantage", "(", "reward", ",", "value", ",", "length", ",", "self", ".", "_config", ".", "discount", ",", "self", ".", "_config", ".", "gae_lambda", ")", "else", ":", "advantage", "=", "return_", "-", "value", "mean", ",", "variance", "=", "tf", ".", "nn", ".", "moments", "(", "advantage", ",", "axes", "=", "[", "0", ",", "1", "]", ",", "keep_dims", "=", "True", ")", "advantage", "=", "(", "advantage", "-", "mean", ")", "/", "(", "tf", ".", "sqrt", "(", "variance", ")", "+", "1e-8", ")", "advantage", "=", "tf", ".", "Print", "(", "advantage", ",", "[", "tf", ".", "reduce_mean", "(", "return_", ")", ",", "tf", ".", "reduce_mean", "(", "value", ")", "]", ",", "'return and value: '", ")", "advantage", "=", "tf", ".", "Print", "(", "advantage", ",", "[", "tf", ".", "reduce_mean", "(", "advantage", ")", "]", ",", "'normalized advantage: '", ")", "episodes", "=", "(", "observ", ",", "action", ",", "old_policy_params", ",", "reward", ",", "advantage", ")", "value_loss", ",", "policy_loss", ",", "summary", "=", "parts", ".", "iterate_sequences", "(", "self", ".", "_update_step", ",", "[", "0.", ",", "0.", ",", "''", "]", ",", "episodes", ",", "length", ",", "self", ".", "_config", ".", "chunk_length", ",", "self", ".", "_config", ".", "batch_size", ",", "self", ".", "_config", ".", "update_epochs", ",", "padding_value", "=", "1", ")", "print_losses", "=", "tf", ".", "group", "(", "tf", ".", "Print", "(", "0", ",", "[", "tf", ".", "reduce_mean", "(", "value_loss", ")", "]", ",", "'value loss: '", ")", ",", "tf", ".", "Print", "(", "0", ",", "[", "tf", ".", "reduce_mean", "(", "policy_loss", ")", "]", ",", "'policy loss: '", ")", ")", "with", "tf", ".", "control_dependencies", "(", "[", "value_loss", ",", "policy_loss", ",", "print_losses", "]", ")", ":", "return", "summary", "[", "self", ".", "_config", ".", "update_epochs", "//", "2", "]" ]
Perform multiple update steps of value function and policy. The advantage is computed once at the beginning and shared across iterations. We need to decide for the summary of one iteration, and thus choose the one after half of the iterations. Args: observ: Sequences of observations. action: Sequences of actions. old_policy_params: Parameters of the behavioral policy. reward: Sequences of rewards. length: Batch of sequence lengths. Returns: Summary tensor.
[ "Perform", "multiple", "update", "steps", "of", "value", "function", "and", "policy", "." ]
python
train
openid/JWTConnect-Python-CryptoJWT
src/cryptojwt/key_bundle.py
https://github.com/openid/JWTConnect-Python-CryptoJWT/blob/8863cfbfe77ca885084870b234a66b55bd52930c/src/cryptojwt/key_bundle.py#L56-L85
def rsa_init(spec): """ Initiates a :py:class:`oidcmsg.keybundle.KeyBundle` instance containing newly minted RSA keys according to a spec. Example of specification:: {'size':2048, 'use': ['enc', 'sig'] } Using the spec above 2 RSA keys would be minted, one for encryption and one for signing. :param spec: :return: KeyBundle """ try: size = spec['size'] except KeyError: size = 2048 kb = KeyBundle(keytype="RSA") if 'use' in spec: for use in harmonize_usage(spec["use"]): _key = new_rsa_key(use=use, key_size=size) kb.append(_key) else: _key = new_rsa_key(key_size=size) kb.append(_key) return kb
[ "def", "rsa_init", "(", "spec", ")", ":", "try", ":", "size", "=", "spec", "[", "'size'", "]", "except", "KeyError", ":", "size", "=", "2048", "kb", "=", "KeyBundle", "(", "keytype", "=", "\"RSA\"", ")", "if", "'use'", "in", "spec", ":", "for", "use", "in", "harmonize_usage", "(", "spec", "[", "\"use\"", "]", ")", ":", "_key", "=", "new_rsa_key", "(", "use", "=", "use", ",", "key_size", "=", "size", ")", "kb", ".", "append", "(", "_key", ")", "else", ":", "_key", "=", "new_rsa_key", "(", "key_size", "=", "size", ")", "kb", ".", "append", "(", "_key", ")", "return", "kb" ]
Initiates a :py:class:`oidcmsg.keybundle.KeyBundle` instance containing newly minted RSA keys according to a spec. Example of specification:: {'size':2048, 'use': ['enc', 'sig'] } Using the spec above 2 RSA keys would be minted, one for encryption and one for signing. :param spec: :return: KeyBundle
[ "Initiates", "a", ":", "py", ":", "class", ":", "oidcmsg", ".", "keybundle", ".", "KeyBundle", "instance", "containing", "newly", "minted", "RSA", "keys", "according", "to", "a", "spec", ".", "Example", "of", "specification", "::", "{", "size", ":", "2048", "use", ":", "[", "enc", "sig", "]", "}", "Using", "the", "spec", "above", "2", "RSA", "keys", "would", "be", "minted", "one", "for", "encryption", "and", "one", "for", "signing", ".", ":", "param", "spec", ":", ":", "return", ":", "KeyBundle" ]
python
train
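Minting real RSA keys needs the surrounding library, so this sketch keeps only the spec-handling skeleton of rsa_init — defaulting the size and fanning out over the 'use' list — with tuples standing in for key objects.

def rsa_init_sketch(spec):
    size = spec.get("size", 2048)           # default mirrors the original
    uses = spec.get("use", [])
    if uses:
        # One key per declared usage, e.g. encryption and signing.
        return [("RSA", size, use) for use in uses]
    return [("RSA", size, None)]

print(rsa_init_sketch({"size": 2048, "use": ["enc", "sig"]}))
print(rsa_init_sketch({}))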
spyder-ide/spyder
spyder/plugins/plots/widgets/figurebrowser.py
https://github.com/spyder-ide/spyder/blob/f76836ce1b924bcc4efd3f74f2960d26a4e528e0/spyder/plugins/plots/widgets/figurebrowser.py#L284-L291
def show_fig_outline_in_viewer(self, state): """Draw a frame around the figure viewer if state is True.""" if state is True: self.figviewer.figcanvas.setStyleSheet( "FigureCanvas{border: 1px solid lightgrey;}") else: self.figviewer.figcanvas.setStyleSheet("FigureCanvas{}") self.option_changed('show_plot_outline', state)
[ "def", "show_fig_outline_in_viewer", "(", "self", ",", "state", ")", ":", "if", "state", "is", "True", ":", "self", ".", "figviewer", ".", "figcanvas", ".", "setStyleSheet", "(", "\"FigureCanvas{border: 1px solid lightgrey;}\"", ")", "else", ":", "self", ".", "figviewer", ".", "figcanvas", ".", "setStyleSheet", "(", "\"FigureCanvas{}\"", ")", "self", ".", "option_changed", "(", "'show_plot_outline'", ",", "state", ")" ]
Draw a frame around the figure viewer if state is True.
[ "Draw", "a", "frame", "around", "the", "figure", "viewer", "if", "state", "is", "True", "." ]
python
train
materialsproject/pymatgen
pymatgen/analysis/pourbaix_diagram.py
https://github.com/materialsproject/pymatgen/blob/4ca558cf72f8d5f8a1f21dfdfc0181a971c186da/pymatgen/analysis/pourbaix_diagram.py#L683-L714
def get_decomposition_energy(self, entry, pH, V): """ Finds decomposition to most stable entry Args: entry (PourbaixEntry): PourbaixEntry corresponding to compound to find the decomposition for pH (float): pH at which to find the decomposition V (float): voltage at which to find the decomposition Returns: reaction corresponding to the decomposition """ # Find representative multientry if self._multielement and not isinstance(entry, MultiEntry): possible_entries = self._generate_multielement_entries( self._filtered_entries, forced_include=[entry]) # Filter to only include materials where the entry is only solid if entry.phase_type == "solid": possible_entries = [e for e in possible_entries if e.phase_type.count("Solid") == 1] possible_energies = [e.normalized_energy_at_conditions(pH, V) for e in possible_entries] else: possible_energies = [entry.normalized_energy_at_conditions(pH, V)] min_energy = np.min(possible_energies, axis=0) # Find entry and take the difference hull = self.get_hull_energy(pH, V) return min_energy - hull
[ "def", "get_decomposition_energy", "(", "self", ",", "entry", ",", "pH", ",", "V", ")", ":", "# Find representative multientry", "if", "self", ".", "_multielement", "and", "not", "isinstance", "(", "entry", ",", "MultiEntry", ")", ":", "possible_entries", "=", "self", ".", "_generate_multielement_entries", "(", "self", ".", "_filtered_entries", ",", "forced_include", "=", "[", "entry", "]", ")", "# Filter to only include materials where the entry is only solid", "if", "entry", ".", "phase_type", "==", "\"solid\"", ":", "possible_entries", "=", "[", "e", "for", "e", "in", "possible_entries", "if", "e", ".", "phase_type", ".", "count", "(", "\"Solid\"", ")", "==", "1", "]", "possible_energies", "=", "[", "e", ".", "normalized_energy_at_conditions", "(", "pH", ",", "V", ")", "for", "e", "in", "possible_entries", "]", "else", ":", "possible_energies", "=", "[", "entry", ".", "normalized_energy_at_conditions", "(", "pH", ",", "V", ")", "]", "min_energy", "=", "np", ".", "min", "(", "possible_energies", ",", "axis", "=", "0", ")", "# Find entry and take the difference", "hull", "=", "self", ".", "get_hull_energy", "(", "pH", ",", "V", ")", "return", "min_energy", "-", "hull" ]
Finds decomposition to most stable entry Args: entry (PourbaixEntry): PourbaixEntry corresponding to compound to find the decomposition for pH (float): pH at which to find the decomposition V (float): voltage at which to find the decomposition Returns: reaction corresponding to the decomposition
[ "Finds", "decomposition", "to", "most", "stable", "entry" ]
python
train
pyblish/pyblish-qml
pyblish_qml/host.py
https://github.com/pyblish/pyblish-qml/blob/6095d18b2ec0afd0409a9b1a17e53b0658887283/pyblish_qml/host.py#L393-L428
def install(self, host): """Setup common to all Qt-based hosts""" print("Installing..") if self._state["installed"]: return if self.is_headless(): log.info("Headless host") return print("aboutToQuit..") self.app.aboutToQuit.connect(self._on_application_quit) if host == "Maya": print("Maya host..") window = { widget.objectName(): widget for widget in self.app.topLevelWidgets() }["MayaWindow"] else: window = self.find_window() # Install event filter print("event filter..") event_filter = self.EventFilter(window) window.installEventFilter(event_filter) for signal in SIGNALS_TO_REMOVE_EVENT_FILTER: pyblish.api.register_callback(signal, self.uninstall) log.info("Installed event filter") self.window = window self._state["installed"] = True self._state["eventFilter"] = event_filter
[ "def", "install", "(", "self", ",", "host", ")", ":", "print", "(", "\"Installing..\"", ")", "if", "self", ".", "_state", "[", "\"installed\"", "]", ":", "return", "if", "self", ".", "is_headless", "(", ")", ":", "log", ".", "info", "(", "\"Headless host\"", ")", "return", "print", "(", "\"aboutToQuit..\"", ")", "self", ".", "app", ".", "aboutToQuit", ".", "connect", "(", "self", ".", "_on_application_quit", ")", "if", "host", "==", "\"Maya\"", ":", "print", "(", "\"Maya host..\"", ")", "window", "=", "{", "widget", ".", "objectName", "(", ")", ":", "widget", "for", "widget", "in", "self", ".", "app", ".", "topLevelWidgets", "(", ")", "}", "[", "\"MayaWindow\"", "]", "else", ":", "window", "=", "self", ".", "find_window", "(", ")", "# Install event filter", "print", "(", "\"event filter..\"", ")", "event_filter", "=", "self", ".", "EventFilter", "(", "window", ")", "window", ".", "installEventFilter", "(", "event_filter", ")", "for", "signal", "in", "SIGNALS_TO_REMOVE_EVENT_FILTER", ":", "pyblish", ".", "api", ".", "register_callback", "(", "signal", ",", "self", ".", "uninstall", ")", "log", ".", "info", "(", "\"Installed event filter\"", ")", "self", ".", "window", "=", "window", "self", ".", "_state", "[", "\"installed\"", "]", "=", "True", "self", ".", "_state", "[", "\"eventFilter\"", "]", "=", "event_filter" ]
Setup common to all Qt-based hosts
[ "Setup", "common", "to", "all", "Qt", "-", "based", "hosts" ]
python
train
autokey/autokey
lib/autokey/iomediator/_iomediator.py
https://github.com/autokey/autokey/blob/35decb72f286ce68cd2a1f09ace8891a520b58d1/lib/autokey/iomediator/_iomediator.py#L218-L223
def send_backspace(self, count): """ Sends the given number of backspace key presses. """ for i in range(count): self.interface.send_key(Key.BACKSPACE)
[ "def", "send_backspace", "(", "self", ",", "count", ")", ":", "for", "i", "in", "range", "(", "count", ")", ":", "self", ".", "interface", ".", "send_key", "(", "Key", ".", "BACKSPACE", ")" ]
Sends the given number of backspace key presses.
[ "Sends", "the", "given", "number", "of", "backspace", "key", "presses", "." ]
python
train
rbarrois/aionotify
aionotify/base.py
https://github.com/rbarrois/aionotify/blob/6cfa35b26a2660f77f29a92d3efb7d1dde685b43/aionotify/base.py#L50-L59
def watch(self, path, flags, *, alias=None): """Add a new watching rule.""" if alias is None: alias = path if alias in self.requests: raise ValueError("A watch request is already scheduled for alias %s" % alias) self.requests[alias] = (path, flags) if self._fd is not None: # We've started, register the watch immediately. self._setup_watch(alias, path, flags)
[ "def", "watch", "(", "self", ",", "path", ",", "flags", ",", "*", ",", "alias", "=", "None", ")", ":", "if", "alias", "is", "None", ":", "alias", "=", "path", "if", "alias", "in", "self", ".", "requests", ":", "raise", "ValueError", "(", "\"A watch request is already scheduled for alias %s\"", "%", "alias", ")", "self", ".", "requests", "[", "alias", "]", "=", "(", "path", ",", "flags", ")", "if", "self", ".", "_fd", "is", "not", "None", ":", "# We've started, register the watch immediately.", "self", ".", "_setup_watch", "(", "alias", ",", "path", ",", "flags", ")" ]
Add a new watching rule.
[ "Add", "a", "new", "watching", "rule", "." ]
python
test
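The interesting part of watch() is the register-now-or-defer split on self._fd; stripped of the inotify specifics, the pattern is just this (class and method names are illustrative).

class Watcher:
    def __init__(self):
        self.requests = {}
        self._fd = None                     # None until the watcher starts

    def watch(self, path, flags, *, alias=None):
        alias = alias or path
        if alias in self.requests:
            raise ValueError("a watch is already scheduled for alias %s" % alias)
        self.requests[alias] = (path, flags)
        if self._fd is not None:            # already started: register now
            self._setup_watch(alias, path, flags)

w = Watcher()
w.watch("/tmp", flags=1)                    # deferred: recorded, not registered
print(w.requests)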
saltstack/salt
salt/states/grafana.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/states/grafana.py#L205-L219
def _rows_differ(row, _row): ''' Check if grafana dashboard row and _row differ ''' row_copy = copy.deepcopy(row) _row_copy = copy.deepcopy(_row) # Strip id from all panels in both rows, since they are always generated. for panel in row_copy['panels']: if 'id' in panel: del panel['id'] for _panel in _row_copy['panels']: if 'id' in _panel: del _panel['id'] diff = DictDiffer(row_copy, _row_copy) return diff.changed() or diff.added() or diff.removed()
[ "def", "_rows_differ", "(", "row", ",", "_row", ")", ":", "row_copy", "=", "copy", ".", "deepcopy", "(", "row", ")", "_row_copy", "=", "copy", ".", "deepcopy", "(", "_row", ")", "# Strip id from all panels in both rows, since they are always generated.", "for", "panel", "in", "row_copy", "[", "'panels'", "]", ":", "if", "'id'", "in", "panel", ":", "del", "panel", "[", "'id'", "]", "for", "_panel", "in", "_row_copy", "[", "'panels'", "]", ":", "if", "'id'", "in", "_panel", ":", "del", "_panel", "[", "'id'", "]", "diff", "=", "DictDiffer", "(", "row_copy", ",", "_row_copy", ")", "return", "diff", ".", "changed", "(", ")", "or", "diff", ".", "added", "(", ")", "or", "diff", ".", "removed", "(", ")" ]
Check if grafana dashboard row and _row differ
[ "Check", "if", "grafana", "dashboard", "row", "and", "_row", "differ" ]
python
train
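The strip-generated-ids-then-diff idea above can be shown without the DictDiffer helper: deep-copy, drop the ids, and compare, as in this toy version.

import copy

def strip_panel_ids(row):
    row = copy.deepcopy(row)                # never mutate the caller's row
    for panel in row["panels"]:
        panel.pop("id", None)               # ids are generated, so ignore them
    return row

a = {"panels": [{"id": 1, "title": "cpu"}]}
b = {"panels": [{"id": 9, "title": "cpu"}]}
print(strip_panel_ids(a) == strip_panel_ids(b))  # True once ids are stripped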
phaethon/kamene
kamene/arch/windows/__init__.py
https://github.com/phaethon/kamene/blob/11d4064844f4f68ac5d7546f5633ac7d02082914/kamene/arch/windows/__init__.py#L200-L209
def show(self, resolve_mac=True): """Print list of available network interfaces in human readable form""" print("%s %s %s %s" % ("INDEX".ljust(5), "IFACE".ljust(35), "IP".ljust(15), "MAC")) for iface_name in sorted(self.data.keys()): dev = self.data[iface_name] mac = dev.mac if resolve_mac and iface_name != LOOPBACK_NAME: mac = conf.manufdb._resolve_MAC(mac) print("%s %s %s %s" % (str(dev.win_index).ljust(5), str(dev.name).ljust(35), str(dev.ip).ljust(15), mac) )
[ "def", "show", "(", "self", ",", "resolve_mac", "=", "True", ")", ":", "print", "(", "\"%s %s %s %s\"", "%", "(", "\"INDEX\"", ".", "ljust", "(", "5", ")", ",", "\"IFACE\"", ".", "ljust", "(", "35", ")", ",", "\"IP\"", ".", "ljust", "(", "15", ")", ",", "\"MAC\"", ")", ")", "for", "iface_name", "in", "sorted", "(", "self", ".", "data", ".", "keys", "(", ")", ")", ":", "dev", "=", "self", ".", "data", "[", "iface_name", "]", "mac", "=", "dev", ".", "mac", "if", "resolve_mac", "and", "iface_name", "!=", "LOOPBACK_NAME", ":", "mac", "=", "conf", ".", "manufdb", ".", "_resolve_MAC", "(", "mac", ")", "print", "(", "\"%s %s %s %s\"", "%", "(", "str", "(", "dev", ".", "win_index", ")", ".", "ljust", "(", "5", ")", ",", "str", "(", "dev", ".", "name", ")", ".", "ljust", "(", "35", ")", ",", "str", "(", "dev", ".", "ip", ")", ".", "ljust", "(", "15", ")", ",", "mac", ")", ")" ]
Print list of available network interfaces in human readable form
[ "Print", "list", "of", "available", "network", "interfaces", "in", "human", "readable", "form" ]
python
train
blockstack/blockstack-core
blockstack/lib/nameset/db.py
https://github.com/blockstack/blockstack-core/blob/1dcfdd39b152d29ce13e736a6a1a0981401a0505/blockstack/lib/nameset/db.py#L2546-L2559
def namedb_get_all_namespace_ids( cur ): """ Get a list of all READY namespace IDs. """ query = "SELECT namespace_id FROM namespaces WHERE op = ?;" args = (NAMESPACE_READY,) namespace_rows = namedb_query_execute( cur, query, args ) ret = [] for namespace_row in namespace_rows: ret.append( namespace_row['namespace_id'] ) return ret
[ "def", "namedb_get_all_namespace_ids", "(", "cur", ")", ":", "query", "=", "\"SELECT namespace_id FROM namespaces WHERE op = ?;\"", "args", "=", "(", "NAMESPACE_READY", ",", ")", "namespace_rows", "=", "namedb_query_execute", "(", "cur", ",", "query", ",", "args", ")", "ret", "=", "[", "]", "for", "namespace_row", "in", "namespace_rows", ":", "ret", ".", "append", "(", "namespace_row", "[", "'namespace_id'", "]", ")", "return", "ret" ]
Get a list of all READY namespace IDs.
[ "Get", "a", "list", "of", "all", "READY", "namespace", "IDs", "." ]
python
train
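The query in the record above is a plain parameterized SELECT; the same shape runs against an in-memory sqlite3 database, with 3 as a made-up stand-in for the NAMESPACE_READY opcode.

import sqlite3

conn = sqlite3.connect(":memory:")
conn.execute("CREATE TABLE namespaces (namespace_id TEXT, op INTEGER)")
conn.executemany("INSERT INTO namespaces VALUES (?, ?)",
                 [("id", 3), ("onename", 3), ("pending", 1)])

NAMESPACE_READY = 3   # assumed value, for illustration only
rows = conn.execute("SELECT namespace_id FROM namespaces WHERE op = ?;",
                    (NAMESPACE_READY,))
print([row[0] for row in rows])  # ['id', 'onename']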
romanvm/django-tinymce4-lite
tinymce/widgets.py
https://github.com/romanvm/django-tinymce4-lite/blob/3b9221db5f0327e1e08c79b7b8cdbdcb1848a390/tinymce/widgets.py#L49-L60
def language_file_exists(language_code): """ Check if TinyMCE has a language file for the specified lang code :param language_code: language code :type language_code: str :return: check result :rtype: bool """ filename = '{0}.js'.format(language_code) path = os.path.join('tinymce', 'js', 'tinymce', 'langs', filename) return finders.find(path) is not None
[ "def", "language_file_exists", "(", "language_code", ")", ":", "filename", "=", "'{0}.js'", ".", "format", "(", "language_code", ")", "path", "=", "os", ".", "path", ".", "join", "(", "'tinymce'", ",", "'js'", ",", "'tinymce'", ",", "'langs'", ",", "filename", ")", "return", "finders", ".", "find", "(", "path", ")", "is", "not", "None" ]
Check if TinyMCE has a language file for the specified lang code :param language_code: language code :type language_code: str :return: check result :rtype: bool
[ "Check", "if", "TinyMCE", "has", "a", "language", "file", "for", "the", "specified", "lang", "code" ]
python
train
ionelmc/python-hunter
src/hunter/__init__.py
https://github.com/ionelmc/python-hunter/blob/b3a1310b0593d2c6b6ef430883843896e17d6a81/src/hunter/__init__.py#L120-L126
def And(*predicates, **kwargs): """ `And` predicate. Returns ``False`` at the first sub-predicate that returns ``False``. """ if kwargs: predicates += Query(**kwargs), return _flatten(_And, *predicates)
[ "def", "And", "(", "*", "predicates", ",", "*", "*", "kwargs", ")", ":", "if", "kwargs", ":", "predicates", "+=", "Query", "(", "*", "*", "kwargs", ")", ",", "return", "_flatten", "(", "_And", ",", "*", "predicates", ")" ]
`And` predicate. Returns ``False`` at the first sub-predicate that returns ``False``.
[ "And", "predicate", ".", "Returns", "False", "at", "the", "first", "sub", "-", "predicate", "that", "returns", "False", "." ]
python
train
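Ignoring hunter's Query machinery, an And combinator that fails fast on the first false sub-predicate can be written in a few lines; the event dicts below are illustrative.

def And(*predicates):
    # all() short-circuits, so evaluation stops at the first False.
    def combined(event):
        return all(predicate(event) for predicate in predicates)
    return combined

is_call = lambda event: event["kind"] == "call"
in_mypkg = lambda event: event["module"] == "mypkg"

pred = And(is_call, in_mypkg)
print(pred({"kind": "call", "module": "mypkg"}))  # True
print(pred({"kind": "line", "module": "mypkg"}))  # False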
oscarlazoarjona/fast
fast/angular_momentum.py
https://github.com/oscarlazoarjona/fast/blob/3e5400672af2a7b7cc616e7f4aa10d7672720222/fast/angular_momentum.py#L510-L538
def wigner_d(J, alpha, beta, gamma): u"""Return the Wigner D matrix for angular momentum J. We use the general formula from [Edmonds74]_, equation 4.1.12. The simplest possible example: >>> from sympy import Integer, symbols, pprint >>> half = 1/Integer(2) >>> alpha, beta, gamma = symbols("alpha, beta, gamma", real=True) >>> pprint(wigner_d(half, alpha, beta, gamma), use_unicode=True) ⎡ ⅈ⋅α ⅈ⋅γ ⅈ⋅α -ⅈ⋅γ ⎤ ⎢ ─── ─── ─── ───── ⎥ ⎢ 2 2 ⎛β⎞ 2 2 ⎛β⎞ ⎥ ⎢ ℯ ⋅ℯ ⋅cos⎜─⎟ ℯ ⋅ℯ ⋅sin⎜─⎟ ⎥ ⎢ ⎝2⎠ ⎝2⎠ ⎥ ⎢ ⎥ ⎢ -ⅈ⋅α ⅈ⋅γ -ⅈ⋅α -ⅈ⋅γ ⎥ ⎢ ───── ─── ───── ───── ⎥ ⎢ 2 2 ⎛β⎞ 2 2 ⎛β⎞⎥ ⎢-ℯ ⋅ℯ ⋅sin⎜─⎟ ℯ ⋅ℯ ⋅cos⎜─⎟⎥ ⎣ ⎝2⎠ ⎝2⎠⎦ """ d = wigner_d_small(J, beta) M = [J-i for i in range(2*J+1)] D = [[exp(I*Mi*alpha)*d[i, j]*exp(I*Mj*gamma) for j, Mj in enumerate(M)] for i, Mi in enumerate(M)] return Matrix(D)
[ "def", "wigner_d", "(", "J", ",", "alpha", ",", "beta", ",", "gamma", ")", ":", "d", "=", "wigner_d_small", "(", "J", ",", "beta", ")", "M", "=", "[", "J", "-", "i", "for", "i", "in", "range", "(", "2", "*", "J", "+", "1", ")", "]", "D", "=", "[", "[", "exp", "(", "I", "*", "Mi", "*", "alpha", ")", "*", "d", "[", "i", ",", "j", "]", "*", "exp", "(", "I", "*", "Mj", "*", "gamma", ")", "for", "j", ",", "Mj", "in", "enumerate", "(", "M", ")", "]", "for", "i", ",", "Mi", "in", "enumerate", "(", "M", ")", "]", "return", "Matrix", "(", "D", ")" ]
u"""Return the Wigner D matrix for angular momentum J. We use the general formula from [Edmonds74]_, equation 4.1.12. The simplest possible example: >>> from sympy import Integer, symbols, pprint >>> half = 1/Integer(2) >>> alpha, beta, gamma = symbols("alpha, beta, gamma", real=True) >>> pprint(wigner_d(half, alpha, beta, gamma), use_unicode=True) ⎡ ⅈ⋅α ⅈ⋅γ ⅈ⋅α -ⅈ⋅γ ⎤ ⎢ ─── ─── ─── ───── ⎥ ⎢ 2 2 ⎛β⎞ 2 2 ⎛β⎞ ⎥ ⎢ ℯ ⋅ℯ ⋅cos⎜─⎟ ℯ ⋅ℯ ⋅sin⎜─⎟ ⎥ ⎢ ⎝2⎠ ⎝2⎠ ⎥ ⎢ ⎥ ⎢ -ⅈ⋅α ⅈ⋅γ -ⅈ⋅α -ⅈ⋅γ ⎥ ⎢ ───── ─── ───── ───── ⎥ ⎢ 2 2 ⎛β⎞ 2 2 ⎛β⎞⎥ ⎢-ℯ ⋅ℯ ⋅sin⎜─⎟ ℯ ⋅ℯ ⋅cos⎜─⎟⎥ ⎣ ⎝2⎠ ⎝2⎠⎦
[ "u", "Return", "the", "Wigner", "D", "matrix", "for", "angular", "momentum", "J", "." ]
python
train
theislab/scanpy
scanpy/plotting/_utils.py
https://github.com/theislab/scanpy/blob/9e4e5ee02e04cf618872d9b098e24f0542e8b227/scanpy/plotting/_utils.py#L51-L111
def timeseries_subplot(X, time=None, color=None, var_names=(), highlightsX=(), xlabel='', ylabel='gene expression', yticks=None, xlim=None, legend=True, palette=None, color_map='viridis'): """Plot X. Parameters ---------- X : np.ndarray Call this with: X with one column, color categorical. X with one column, color continuous. X with n columns, color is of length n. """ if color is not None: use_color_map = isinstance(color[0], float) or isinstance(color[0], np.float32) palette = default_palette(palette) x_range = np.arange(X.shape[0]) if time is None else time if X.ndim == 1: X = X[:, None] if X.shape[1] > 1: colors = palette[:X.shape[1]].by_key()['color'] subsets = [(x_range, X[:, i]) for i in range(X.shape[1])] elif use_color_map: colors = [color] subsets = [(x_range, X[:, 0])] else: levels, _ = np.unique(color, return_inverse=True) colors = np.array(palette[:len(levels)].by_key()['color']) subsets = [(x_range[color == l], X[color == l, :]) for l in levels] for i, (x, y) in enumerate(subsets): pl.scatter( x, y, marker='.', edgecolor='face', s=rcParams['lines.markersize'], c=colors[i], label=var_names[i] if len(var_names) > 0 else '', cmap=color_map, rasterized=settings._vector_friendly) ylim = pl.ylim() for ih, h in enumerate(highlightsX): pl.plot([h, h], [ylim[0], ylim[1]], '--', color='black') pl.ylim(ylim) if xlim is not None: pl.xlim(xlim) pl.xlabel(xlabel) pl.ylabel(ylabel) if yticks is not None: pl.yticks(yticks) if len(var_names) > 0 and legend: pl.legend(frameon=False)
[ "def", "timeseries_subplot", "(", "X", ",", "time", "=", "None", ",", "color", "=", "None", ",", "var_names", "=", "(", ")", ",", "highlightsX", "=", "(", ")", ",", "xlabel", "=", "''", ",", "ylabel", "=", "'gene expression'", ",", "yticks", "=", "None", ",", "xlim", "=", "None", ",", "legend", "=", "True", ",", "palette", "=", "None", ",", "color_map", "=", "'viridis'", ")", ":", "if", "color", "is", "not", "None", ":", "use_color_map", "=", "isinstance", "(", "color", "[", "0", "]", ",", "float", ")", "or", "isinstance", "(", "color", "[", "0", "]", ",", "np", ".", "float32", ")", "palette", "=", "default_palette", "(", "palette", ")", "x_range", "=", "np", ".", "arange", "(", "X", ".", "shape", "[", "0", "]", ")", "if", "time", "is", "None", "else", "time", "if", "X", ".", "ndim", "==", "1", ":", "X", "=", "X", "[", ":", ",", "None", "]", "if", "X", ".", "shape", "[", "1", "]", ">", "1", ":", "colors", "=", "palette", "[", ":", "X", ".", "shape", "[", "1", "]", "]", ".", "by_key", "(", ")", "[", "'color'", "]", "subsets", "=", "[", "(", "x_range", ",", "X", "[", ":", ",", "i", "]", ")", "for", "i", "in", "range", "(", "X", ".", "shape", "[", "1", "]", ")", "]", "elif", "use_color_map", ":", "colors", "=", "[", "color", "]", "subsets", "=", "[", "(", "x_range", ",", "X", "[", ":", ",", "0", "]", ")", "]", "else", ":", "levels", ",", "_", "=", "np", ".", "unique", "(", "color", ",", "return_inverse", "=", "True", ")", "colors", "=", "np", ".", "array", "(", "palette", "[", ":", "len", "(", "levels", ")", "]", ".", "by_key", "(", ")", "[", "'color'", "]", ")", "subsets", "=", "[", "(", "x_range", "[", "color", "==", "l", "]", ",", "X", "[", "color", "==", "l", ",", ":", "]", ")", "for", "l", "in", "levels", "]", "for", "i", ",", "(", "x", ",", "y", ")", "in", "enumerate", "(", "subsets", ")", ":", "pl", ".", "scatter", "(", "x", ",", "y", ",", "marker", "=", "'.'", ",", "edgecolor", "=", "'face'", ",", "s", "=", "rcParams", "[", "'lines.markersize'", "]", ",", "c", "=", "colors", "[", "i", "]", ",", "label", "=", "var_names", "[", "i", "]", "if", "len", "(", "var_names", ")", ">", "0", "else", "''", ",", "cmap", "=", "color_map", ",", "rasterized", "=", "settings", ".", "_vector_friendly", ")", "ylim", "=", "pl", ".", "ylim", "(", ")", "for", "ih", ",", "h", "in", "enumerate", "(", "highlightsX", ")", ":", "pl", ".", "plot", "(", "[", "h", ",", "h", "]", ",", "[", "ylim", "[", "0", "]", ",", "ylim", "[", "1", "]", "]", ",", "'--'", ",", "color", "=", "'black'", ")", "pl", ".", "ylim", "(", "ylim", ")", "if", "xlim", "is", "not", "None", ":", "pl", ".", "xlim", "(", "xlim", ")", "pl", ".", "xlabel", "(", "xlabel", ")", "pl", ".", "ylabel", "(", "ylabel", ")", "if", "yticks", "is", "not", "None", ":", "pl", ".", "yticks", "(", "yticks", ")", "if", "len", "(", "var_names", ")", ">", "0", "and", "legend", ":", "pl", ".", "legend", "(", "frameon", "=", "False", ")" ]
Plot X. Parameters ---------- X : np.ndarray Call this with: X with one column, color categorical. X with one column, color continuous. X with n columns, color is of length n.
[ "Plot", "X", "." ]
python
train
bukun/TorCMS
torcms/handlers/link_handler.py
https://github.com/bukun/TorCMS/blob/6567c7fe2604a1d646d4570c017840958630ed2b/torcms/handlers/link_handler.py#L212-L229
def delete_by_id(self, del_id): ''' Delete a link by id. ''' if self.check_post_role()['DELETE']: pass else: return False if self.is_p: if MLink.delete(del_id): output = {'del_link': 1} else: output = {'del_link': 0} return json.dump(output, self) else: is_deleted = MLink.delete(del_id) if is_deleted: self.redirect('/link/list')
[ "def", "delete_by_id", "(", "self", ",", "del_id", ")", ":", "if", "self", ".", "check_post_role", "(", ")", "[", "'DELETE'", "]", ":", "pass", "else", ":", "return", "False", "if", "self", ".", "is_p", ":", "if", "MLink", ".", "delete", "(", "del_id", ")", ":", "output", "=", "{", "'del_link'", ":", "1", "}", "else", ":", "output", "=", "{", "'del_link'", ":", "0", "}", "return", "json", ".", "dump", "(", "output", ",", "self", ")", "else", ":", "is_deleted", "=", "MLink", ".", "delete", "(", "del_id", ")", "if", "is_deleted", ":", "self", ".", "redirect", "(", "'/link/list'", ")" ]
Delete a link by id.
[ "Delete", "a", "link", "by", "id", "." ]
python
train
LudovicRousseau/pyscard
smartcard/pcsc/PCSCReaderGroups.py
https://github.com/LudovicRousseau/pyscard/blob/62e675028086c75656444cc21d563d9f08ebf8e7/smartcard/pcsc/PCSCReaderGroups.py#L43-L56
def getreadergroups(self): """ Returns the list of smartcard reader groups.""" innerreadergroups.getreadergroups(self) hresult, hcontext = SCardEstablishContext(SCARD_SCOPE_USER) if hresult != 0: raise EstablishContextException(hresult) hresult, readers = SCardListReaderGroups(hcontext) if hresult != 0: raise ListReadersException(hresult) hresult = SCardReleaseContext(hcontext) if hresult != 0: raise ReleaseContextException(hresult) return readers
[ "def", "getreadergroups", "(", "self", ")", ":", "innerreadergroups", ".", "getreadergroups", "(", "self", ")", "hresult", ",", "hcontext", "=", "SCardEstablishContext", "(", "SCARD_SCOPE_USER", ")", "if", "hresult", "!=", "0", ":", "raise", "EstablishContextException", "(", "hresult", ")", "hresult", ",", "readers", "=", "SCardListReaderGroups", "(", "hcontext", ")", "if", "hresult", "!=", "0", ":", "raise", "ListReadersException", "(", "hresult", ")", "hresult", "=", "SCardReleaseContext", "(", "hcontext", ")", "if", "hresult", "!=", "0", ":", "raise", "ReleaseContextException", "(", "hresult", ")", "return", "readers" ]
Returns the list of smartcard reader groups.
[ "Returns", "the", "list", "of", "smartcard", "reader", "groups", "." ]
python
train
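A hedged usage sketch for the record above. It assumes the class is exported as PCSCReaderGroups from the module in the path field and that a PC/SC service is running; the printed group name is illustrative.

from smartcard.pcsc.PCSCReaderGroups import PCSCReaderGroups  # assumed export name

groups = PCSCReaderGroups().getreadergroups()
print(groups)  # e.g. ['SCard$DefaultReaders']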
tjcsl/ion
intranet/apps/events/models.py
https://github.com/tjcsl/ion/blob/5d722b0725d572039bb0929fd5715a4070c82c72/intranet/apps/events/models.py#L36-L45
def visible_to_user(self, user): """Get a list of visible events for a given user (usually request.user). These visible events will be those that either have no groups assigned to them (and are therefore public) or those in which the user is a member. """ return (Event.objects.filter(approved=True).filter(Q(groups__in=user.groups.all()) | Q(groups__isnull=True) | Q(user=user)))
[ "def", "visible_to_user", "(", "self", ",", "user", ")", ":", "return", "(", "Event", ".", "objects", ".", "filter", "(", "approved", "=", "True", ")", ".", "filter", "(", "Q", "(", "groups__in", "=", "user", ".", "groups", ".", "all", "(", ")", ")", "|", "Q", "(", "groups__isnull", "=", "True", ")", "|", "Q", "(", "user", "=", "user", ")", ")", ")" ]
Get a list of visible events for a given user (usually request.user). These visible events will be those that either have no groups assigned to them (and are therefore public) or those in which the user is a member.
[ "Get", "a", "list", "of", "visible", "events", "for", "a", "given", "user", "(", "usually", "request", ".", "user", ")", "." ]
python
train
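A sketch of the manager method above in use. It assumes visible_to_user is exposed on the default Event manager (the record does not show the manager wiring) and that request.user is an authenticated user.

visible = Event.objects.visible_to_user(request.user)
for event in visible:
    print(event)  # approved events that are public, group-shared, or owned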
sernst/cauldron
cauldron/environ/logger.py
https://github.com/sernst/cauldron/blob/4086aec9c038c402ea212c79fe8bd0d27104f9cf/cauldron/environ/logger.py#L14-L27
def add_output_path(path: str = None) -> str: """ Adds the specified path to the output logging paths if it is not already in the listed paths. :param path: The path to add to the logging output paths. If the path is empty or no path is given, the current working directory will be used instead. """ cleaned = paths.clean(path or os.getcwd()) if cleaned not in _logging_paths: _logging_paths.append(cleaned) return cleaned
[ "def", "add_output_path", "(", "path", ":", "str", "=", "None", ")", "->", "str", ":", "cleaned", "=", "paths", ".", "clean", "(", "path", "or", "os", ".", "getcwd", "(", ")", ")", "if", "cleaned", "not", "in", "_logging_paths", ":", "_logging_paths", ".", "append", "(", "cleaned", ")", "return", "cleaned" ]
Adds the specified path to the output logging paths if it is not already in the listed paths. :param path: The path to add to the logging output paths. If the path is empty or no path is given, the current working directory will be used instead.
[ "Adds", "the", "specified", "path", "to", "the", "output", "logging", "paths", "if", "it", "is", "not", "already", "in", "the", "listed", "paths", "." ]
python
train
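A minimal sketch, assuming the module is importable under the path shown in the record:

from cauldron.environ import logger

logger.add_output_path()              # empty path falls back to os.getcwd()
cleaned = logger.add_output_path('/tmp/cauldron-logs')
print(cleaned)                        # the cleaned, registered path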
materialsproject/pymatgen
pymatgen/io/zeopp.py
https://github.com/materialsproject/pymatgen/blob/4ca558cf72f8d5f8a1f21dfdfc0181a971c186da/pymatgen/io/zeopp.py#L262-L336
def get_voronoi_nodes(structure, rad_dict=None, probe_rad=0.1): """ Analyze the void space in the input structure using voronoi decomposition Calls Zeo++ for Voronoi decomposition. Args: structure: pymatgen.core.structure.Structure rad_dict (optional): Dictionary of radii of elements in structure. If not given, Zeo++ default values are used. Note: Zeo++ uses atomic radii of elements. For ionic structures, pass rad_dict with ionic radii probe_rad (optional): Sampling probe radius in Angstroms. Default is 0.1 A Returns: voronoi nodes as pymatgen.core.structure.Strucutre within the unit cell defined by the lattice of input structure voronoi face centers as pymatgen.core.structure.Strucutre within the unit cell defined by the lattice of input structure """ with ScratchDir('.'): name = "temp_zeo1" zeo_inp_filename = name + ".cssr" ZeoCssr(structure).write_file(zeo_inp_filename) rad_file = None rad_flag = False if rad_dict: rad_file = name + ".rad" rad_flag = True with open(rad_file, 'w+') as fp: for el in rad_dict.keys(): fp.write("{} {}\n".format(el, rad_dict[el].real)) atmnet = AtomNetwork.read_from_CSSR( zeo_inp_filename, rad_flag=rad_flag, rad_file=rad_file) vornet, vor_edge_centers, vor_face_centers = \ atmnet.perform_voronoi_decomposition() vornet.analyze_writeto_XYZ(name, probe_rad, atmnet) voro_out_filename = name + '_voro.xyz' voro_node_mol = ZeoVoronoiXYZ.from_file(voro_out_filename).molecule species = ["X"] * len(voro_node_mol.sites) coords = [] prop = [] for site in voro_node_mol.sites: coords.append(list(site.coords)) prop.append(site.properties['voronoi_radius']) lattice = Lattice.from_lengths_and_angles( structure.lattice.abc, structure.lattice.angles) vor_node_struct = Structure( lattice, species, coords, coords_are_cartesian=True, to_unit_cell=True, site_properties={"voronoi_radius": prop}) # PMG-Zeo c<->a transformation for voronoi face centers rot_face_centers = [(center[1], center[2], center[0]) for center in vor_face_centers] rot_edge_centers = [(center[1], center[2], center[0]) for center in vor_edge_centers] species = ["X"] * len(rot_face_centers) prop = [0.0] * len(rot_face_centers) # Vor radius not evaluated for fc vor_facecenter_struct = Structure( lattice, species, rot_face_centers, coords_are_cartesian=True, to_unit_cell=True, site_properties={"voronoi_radius": prop}) species = ["X"] * len(rot_edge_centers) prop = [0.0] * len(rot_edge_centers) # Vor radius not evaluated for fc vor_edgecenter_struct = Structure( lattice, species, rot_edge_centers, coords_are_cartesian=True, to_unit_cell=True, site_properties={"voronoi_radius": prop}) return vor_node_struct, vor_edgecenter_struct, vor_facecenter_struct
[ "def", "get_voronoi_nodes", "(", "structure", ",", "rad_dict", "=", "None", ",", "probe_rad", "=", "0.1", ")", ":", "with", "ScratchDir", "(", "'.'", ")", ":", "name", "=", "\"temp_zeo1\"", "zeo_inp_filename", "=", "name", "+", "\".cssr\"", "ZeoCssr", "(", "structure", ")", ".", "write_file", "(", "zeo_inp_filename", ")", "rad_file", "=", "None", "rad_flag", "=", "False", "if", "rad_dict", ":", "rad_file", "=", "name", "+", "\".rad\"", "rad_flag", "=", "True", "with", "open", "(", "rad_file", ",", "'w+'", ")", "as", "fp", ":", "for", "el", "in", "rad_dict", ".", "keys", "(", ")", ":", "fp", ".", "write", "(", "\"{} {}\\n\"", ".", "format", "(", "el", ",", "rad_dict", "[", "el", "]", ".", "real", ")", ")", "atmnet", "=", "AtomNetwork", ".", "read_from_CSSR", "(", "zeo_inp_filename", ",", "rad_flag", "=", "rad_flag", ",", "rad_file", "=", "rad_file", ")", "vornet", ",", "vor_edge_centers", ",", "vor_face_centers", "=", "atmnet", ".", "perform_voronoi_decomposition", "(", ")", "vornet", ".", "analyze_writeto_XYZ", "(", "name", ",", "probe_rad", ",", "atmnet", ")", "voro_out_filename", "=", "name", "+", "'_voro.xyz'", "voro_node_mol", "=", "ZeoVoronoiXYZ", ".", "from_file", "(", "voro_out_filename", ")", ".", "molecule", "species", "=", "[", "\"X\"", "]", "*", "len", "(", "voro_node_mol", ".", "sites", ")", "coords", "=", "[", "]", "prop", "=", "[", "]", "for", "site", "in", "voro_node_mol", ".", "sites", ":", "coords", ".", "append", "(", "list", "(", "site", ".", "coords", ")", ")", "prop", ".", "append", "(", "site", ".", "properties", "[", "'voronoi_radius'", "]", ")", "lattice", "=", "Lattice", ".", "from_lengths_and_angles", "(", "structure", ".", "lattice", ".", "abc", ",", "structure", ".", "lattice", ".", "angles", ")", "vor_node_struct", "=", "Structure", "(", "lattice", ",", "species", ",", "coords", ",", "coords_are_cartesian", "=", "True", ",", "to_unit_cell", "=", "True", ",", "site_properties", "=", "{", "\"voronoi_radius\"", ":", "prop", "}", ")", "# PMG-Zeo c<->a transformation for voronoi face centers", "rot_face_centers", "=", "[", "(", "center", "[", "1", "]", ",", "center", "[", "2", "]", ",", "center", "[", "0", "]", ")", "for", "center", "in", "vor_face_centers", "]", "rot_edge_centers", "=", "[", "(", "center", "[", "1", "]", ",", "center", "[", "2", "]", ",", "center", "[", "0", "]", ")", "for", "center", "in", "vor_edge_centers", "]", "species", "=", "[", "\"X\"", "]", "*", "len", "(", "rot_face_centers", ")", "prop", "=", "[", "0.0", "]", "*", "len", "(", "rot_face_centers", ")", "# Vor radius not evaluated for fc", "vor_facecenter_struct", "=", "Structure", "(", "lattice", ",", "species", ",", "rot_face_centers", ",", "coords_are_cartesian", "=", "True", ",", "to_unit_cell", "=", "True", ",", "site_properties", "=", "{", "\"voronoi_radius\"", ":", "prop", "}", ")", "species", "=", "[", "\"X\"", "]", "*", "len", "(", "rot_edge_centers", ")", "prop", "=", "[", "0.0", "]", "*", "len", "(", "rot_edge_centers", ")", "# Vor radius not evaluated for fc", "vor_edgecenter_struct", "=", "Structure", "(", "lattice", ",", "species", ",", "rot_edge_centers", ",", "coords_are_cartesian", "=", "True", ",", "to_unit_cell", "=", "True", ",", "site_properties", "=", "{", "\"voronoi_radius\"", ":", "prop", "}", ")", "return", "vor_node_struct", ",", "vor_edgecenter_struct", ",", "vor_facecenter_struct" ]
Analyze the void space in the input structure using voronoi decomposition. Calls Zeo++ for Voronoi decomposition.

Args:
    structure: pymatgen.core.structure.Structure
    rad_dict (optional): Dictionary of radii of elements in structure.
        If not given, Zeo++ default values are used.
        Note: Zeo++ uses atomic radii of elements.
        For ionic structures, pass rad_dict with ionic radii
    probe_rad (optional): Sampling probe radius in Angstroms.
        Default is 0.1 A

Returns:
    voronoi nodes as pymatgen.core.structure.Structure within the
    unit cell defined by the lattice of input structure
    voronoi face centers as pymatgen.core.structure.Structure within the
    unit cell defined by the lattice of input structure
[ "Analyze", "the", "void", "space", "in", "the", "input", "structure", "using", "voronoi", "decomposition", "Calls", "Zeo", "++", "for", "Voronoi", "decomposition", "." ]
python
train
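A hedged sketch of the function above on a rock-salt cell. It requires a working Zeo++ build behind pymatgen's bindings, and the pymatgen import paths may differ between versions:

from pymatgen import Lattice, Structure
from pymatgen.io.zeopp import get_voronoi_nodes

s = Structure(Lattice.cubic(5.69), ['Na', 'Cl'],
              [[0, 0, 0], [0.5, 0.5, 0.5]])
nodes, edge_centers, face_centers = get_voronoi_nodes(s, probe_rad=0.1)
print(nodes.site_properties['voronoi_radius'])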
jbasko/configmanager
configmanager/items.py
https://github.com/jbasko/configmanager/blob/1d7229ce367143c7210d8e5f0782de03945a1721/configmanager/items.py#L283-L295
def is_default(self): """ ``True`` if the item's value is its default value or if no value and no default value are set. If the item is backed by an environment variable, this will be ``True`` only if the environment variable is set and equals the default value of the item. """ envvar_value = self._get_envvar_value() if envvar_value is not not_set: return envvar_value == self.default else: return self._value is not_set or self._value == self.default
[ "def", "is_default", "(", "self", ")", ":", "envvar_value", "=", "self", ".", "_get_envvar_value", "(", ")", "if", "envvar_value", "is", "not", "not_set", ":", "return", "envvar_value", "==", "self", ".", "default", "else", ":", "return", "self", ".", "_value", "is", "not_set", "or", "self", ".", "_value", "==", "self", ".", "default" ]
``True`` if the item's value is its default value or if no value and no default value are set. If the item is backed by an environment variable, this will be ``True`` only if the environment variable is set and equals the default value of the item.
[ "True", "if", "the", "item", "s", "value", "is", "its", "default", "value", "or", "if", "no", "value", "and", "no", "default", "value", "are", "set", "." ]
python
train
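A sketch under assumptions: the Config container below follows configmanager's documented usage, and is_default is read as a property (the record shows only the method body):

from configmanager import Config

config = Config({'greeting': 'Hello'})
print(config.greeting.is_default)    # True: no custom value set yet
config.greeting.value = 'Hi'
print(config.greeting.is_default)    # False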
monarch-initiative/dipper
dipper/sources/KEGG.py
https://github.com/monarch-initiative/dipper/blob/24cc80db355bbe15776edc5c7b41e0886959ba41/dipper/sources/KEGG.py#L354-L423
def _process_ortholog_classes(self, limit=None): """ This method add the KEGG orthology classes to the graph. If there's an embedded enzyme commission number, that is added as an xref. Triples created: <orthology_class_id> is a class <orthology_class_id> has label <orthology_symbols> <orthology_class_id> has description <orthology_description> :param limit: :return: """ LOG.info("Processing ortholog classes") if self.test_mode: graph = self.testgraph else: graph = self.graph model = Model(graph) line_counter = 0 raw = '/'.join((self.rawdir, self.files['ortholog_classes']['file'])) with open(raw, 'r', encoding="iso-8859-1") as csvfile: filereader = csv.reader(csvfile, delimiter='\t', quotechar='\"') for row in filereader: line_counter += 1 (orthology_class_id, orthology_class_name) = row if self.test_mode and orthology_class_id \ not in self.test_ids['orthology_classes']: continue # The orthology class is essentially a KEGG gene ID # that is species agnostic. # Add the ID and label as a gene family class other_labels = re.split(r'[;,]', orthology_class_name) # the first one is the label we'll use orthology_label = other_labels[0] orthology_class_id = 'KEGG-'+orthology_class_id.strip() orthology_type = self.globaltt['gene_family'] model.addClassToGraph( orthology_class_id, orthology_label, orthology_type) if len(other_labels) > 1: # add the rest as synonyms # todo skip the first for s in other_labels: model.addSynonym(orthology_class_id, s.strip()) # add the last one as the description d = other_labels[len(other_labels)-1] model.addDescription(orthology_class_id, d) # add the enzyme commission number (EC:1.2.99.5)as an xref # sometimes there's two, like [EC:1.3.5.1 1.3.5.4] # can also have a dash, like EC:1.10.3.- ec_matches = re.findall(r'((?:\d+|\.|-){5,7})', d) if ec_matches is not None: for ecm in ec_matches: model.addXref(orthology_class_id, 'EC:' + ecm) if not self.test_mode and limit is not None and line_counter > limit: break LOG.info("Done with ortholog classes") return
[ "def", "_process_ortholog_classes", "(", "self", ",", "limit", "=", "None", ")", ":", "LOG", ".", "info", "(", "\"Processing ortholog classes\"", ")", "if", "self", ".", "test_mode", ":", "graph", "=", "self", ".", "testgraph", "else", ":", "graph", "=", "self", ".", "graph", "model", "=", "Model", "(", "graph", ")", "line_counter", "=", "0", "raw", "=", "'/'", ".", "join", "(", "(", "self", ".", "rawdir", ",", "self", ".", "files", "[", "'ortholog_classes'", "]", "[", "'file'", "]", ")", ")", "with", "open", "(", "raw", ",", "'r'", ",", "encoding", "=", "\"iso-8859-1\"", ")", "as", "csvfile", ":", "filereader", "=", "csv", ".", "reader", "(", "csvfile", ",", "delimiter", "=", "'\\t'", ",", "quotechar", "=", "'\\\"'", ")", "for", "row", "in", "filereader", ":", "line_counter", "+=", "1", "(", "orthology_class_id", ",", "orthology_class_name", ")", "=", "row", "if", "self", ".", "test_mode", "and", "orthology_class_id", "not", "in", "self", ".", "test_ids", "[", "'orthology_classes'", "]", ":", "continue", "# The orthology class is essentially a KEGG gene ID", "# that is species agnostic.", "# Add the ID and label as a gene family class", "other_labels", "=", "re", ".", "split", "(", "r'[;,]'", ",", "orthology_class_name", ")", "# the first one is the label we'll use", "orthology_label", "=", "other_labels", "[", "0", "]", "orthology_class_id", "=", "'KEGG-'", "+", "orthology_class_id", ".", "strip", "(", ")", "orthology_type", "=", "self", ".", "globaltt", "[", "'gene_family'", "]", "model", ".", "addClassToGraph", "(", "orthology_class_id", ",", "orthology_label", ",", "orthology_type", ")", "if", "len", "(", "other_labels", ")", ">", "1", ":", "# add the rest as synonyms", "# todo skip the first", "for", "s", "in", "other_labels", ":", "model", ".", "addSynonym", "(", "orthology_class_id", ",", "s", ".", "strip", "(", ")", ")", "# add the last one as the description", "d", "=", "other_labels", "[", "len", "(", "other_labels", ")", "-", "1", "]", "model", ".", "addDescription", "(", "orthology_class_id", ",", "d", ")", "# add the enzyme commission number (EC:1.2.99.5)as an xref", "# sometimes there's two, like [EC:1.3.5.1 1.3.5.4]", "# can also have a dash, like EC:1.10.3.-", "ec_matches", "=", "re", ".", "findall", "(", "r'((?:\\d+|\\.|-){5,7})'", ",", "d", ")", "if", "ec_matches", "is", "not", "None", ":", "for", "ecm", "in", "ec_matches", ":", "model", ".", "addXref", "(", "orthology_class_id", ",", "'EC:'", "+", "ecm", ")", "if", "not", "self", ".", "test_mode", "and", "limit", "is", "not", "None", "and", "line_counter", ">", "limit", ":", "break", "LOG", ".", "info", "(", "\"Done with ortholog classes\"", ")", "return" ]
This method adds the KEGG orthology classes to the graph.

If there's an embedded enzyme commission number, that is added as an xref.

Triples created:
<orthology_class_id> is a class
<orthology_class_id> has label <orthology_symbols>
<orthology_class_id> has description <orthology_description>

:param limit:
:return:
[ "This", "method", "adds", "the", "KEGG", "orthology", "classes", "to", "the", "graph", "." ]
python
train
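A self-contained check of the EC-number extraction used in the method above; the description string is made up, but the pattern is copied verbatim from the record:

import re

desc = 'ubiquinol oxidase [EC:1.10.3.- 1.3.5.4]'
print(re.findall(r'((?:\d+|\.|-){5,7})', desc))  # ['1.10.3.-', '1.3.5.4']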
worldcompany/djangoembed
oembed/fields.py
https://github.com/worldcompany/djangoembed/blob/f3f2be283441d91d1f89db780444dc75f7b51902/oembed/fields.py#L66-L75
def register_field(cls, field): """ Handles registering the fields with the FieldRegistry and creating a post-save signal for the model. """ FieldRegistry.add_field(cls, field) signals.post_save.connect(handle_save_embeds, sender=cls, dispatch_uid='%s.%s.%s' % \ (cls._meta.app_label, cls._meta.module_name, field.name))
[ "def", "register_field", "(", "cls", ",", "field", ")", ":", "FieldRegistry", ".", "add_field", "(", "cls", ",", "field", ")", "signals", ".", "post_save", ".", "connect", "(", "handle_save_embeds", ",", "sender", "=", "cls", ",", "dispatch_uid", "=", "'%s.%s.%s'", "%", "(", "cls", ".", "_meta", ".", "app_label", ",", "cls", ".", "_meta", ".", "module_name", ",", "field", ".", "name", ")", ")" ]
Handles registering the fields with the FieldRegistry and creating a post-save signal for the model.
[ "Handles", "registering", "the", "fields", "with", "the", "FieldRegistry", "and", "creating", "a", "post", "-", "save", "signal", "for", "the", "model", "." ]
python
valid
google/apitools
apitools/base/protorpclite/protojson.py
https://github.com/google/apitools/blob/f3745a7ea535aa0e88b0650c16479b696d6fd446/apitools/base/protorpclite/protojson.py#L195-L216
def decode_message(self, message_type, encoded_message): """Merge JSON structure to Message instance. Args: message_type: Message to decode data to. encoded_message: JSON encoded version of message. Returns: Decoded instance of message_type. Raises: ValueError: If encoded_message is not valid JSON. messages.ValidationError if merged message is not initialized. """ encoded_message = six.ensure_str(encoded_message) if not encoded_message.strip(): return message_type() dictionary = json.loads(encoded_message) message = self.__decode_dictionary(message_type, dictionary) message.check_initialized() return message
[ "def", "decode_message", "(", "self", ",", "message_type", ",", "encoded_message", ")", ":", "encoded_message", "=", "six", ".", "ensure_str", "(", "encoded_message", ")", "if", "not", "encoded_message", ".", "strip", "(", ")", ":", "return", "message_type", "(", ")", "dictionary", "=", "json", ".", "loads", "(", "encoded_message", ")", "message", "=", "self", ".", "__decode_dictionary", "(", "message_type", ",", "dictionary", ")", "message", ".", "check_initialized", "(", ")", "return", "message" ]
Merge JSON structure to Message instance. Args: message_type: Message to decode data to. encoded_message: JSON encoded version of message. Returns: Decoded instance of message_type. Raises: ValueError: If encoded_message is not valid JSON. messages.ValidationError if merged message is not initialized.
[ "Merge", "JSON", "structure", "to", "Message", "instance", "." ]
python
train
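A hedged round-trip sketch for the decoder above; the message class is illustrative, not from the record:

from apitools.base.protorpclite import messages, protojson

class Note(messages.Message):
    text = messages.StringField(1)

codec = protojson.ProtoJson()
note = codec.decode_message(Note, '{"text": "hi"}')
print(note.text)  # hi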
LordGaav/python-chaos
chaos/arguments.py
https://github.com/LordGaav/python-chaos/blob/52cd29a6fd15693ee1e53786b93bcb23fbf84ddd/chaos/arguments.py#L82-L119
def get_default_config_file(argparser, suppress=None, default_override=None): """ Turn an ArgumentParser into a ConfigObj compatible configuration file. This method will take the given argparser, and loop over all options contained. The configuration file is formatted as follows: # <option help info> <option destination variable>=<option default value> Arguments --------- argparser: ArgumentParser suppress: list of strings All options specified will be suppressed from the config file. Useful to avoid adding stuff like version or help. default_override: dict This method will use the defaults from the given ArgumentParser, unless the option is specified here. If specified, the default from this dict will be used instead. The format is { "option": <new default value>, ... } . """ if not suppress: suppress = [] if not default_override: default_override = {} lines = [] seen_arguments = [] for arg in argparser._actions: if arg.dest in suppress: continue if arg.dest in seen_arguments: continue default = arg.default if arg.dest in default_override.keys(): default = default_override[arg.dest] lines.append("# {0}\n{1}={2}\n".format(arg.help, arg.dest, default)) seen_arguments.append(arg.dest) return "".join(lines)
[ "def", "get_default_config_file", "(", "argparser", ",", "suppress", "=", "None", ",", "default_override", "=", "None", ")", ":", "if", "not", "suppress", ":", "suppress", "=", "[", "]", "if", "not", "default_override", ":", "default_override", "=", "{", "}", "lines", "=", "[", "]", "seen_arguments", "=", "[", "]", "for", "arg", "in", "argparser", ".", "_actions", ":", "if", "arg", ".", "dest", "in", "suppress", ":", "continue", "if", "arg", ".", "dest", "in", "seen_arguments", ":", "continue", "default", "=", "arg", ".", "default", "if", "arg", ".", "dest", "in", "default_override", ".", "keys", "(", ")", ":", "default", "=", "default_override", "[", "arg", ".", "dest", "]", "lines", ".", "append", "(", "\"# {0}\\n{1}={2}\\n\"", ".", "format", "(", "arg", ".", "help", ",", "arg", ".", "dest", ",", "default", ")", ")", "seen_arguments", ".", "append", "(", "arg", ".", "dest", ")", "return", "\"\"", ".", "join", "(", "lines", ")" ]
Turn an ArgumentParser into a ConfigObj compatible configuration file. This method will take the given argparser, and loop over all options contained. The configuration file is formatted as follows: # <option help info> <option destination variable>=<option default value> Arguments --------- argparser: ArgumentParser suppress: list of strings All options specified will be suppressed from the config file. Useful to avoid adding stuff like version or help. default_override: dict This method will use the defaults from the given ArgumentParser, unless the option is specified here. If specified, the default from this dict will be used instead. The format is { "option": <new default value>, ... } .
[ "Turn", "an", "ArgumentParser", "into", "a", "ConfigObj", "compatible", "configuration", "file", "." ]
python
train
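A runnable sketch, assuming the function is imported from the module in the record's path field; 'port' and its override are placeholders:

import argparse

from chaos.arguments import get_default_config_file

parser = argparse.ArgumentParser()
parser.add_argument('--port', type=int, default=8080, help='listen port')
print(get_default_config_file(parser, suppress=['help'],
                              default_override={'port': 9090}))
# "# listen port\nport=9090\n"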
ivilata/pymultihash
multihash/funcs.py
https://github.com/ivilata/pymultihash/blob/093365f20f6d8627c1fae13e0f4e0b35e9b39ad2/multihash/funcs.py#L247-L259
def hash_from_func(cls, func): """Return a hashlib-compatible object for the multihash `func`. If the `func` is registered but no hashlib-compatible constructor is available for it, `None` is returned. If the `func` is not registered, a `KeyError` is raised. >>> h = FuncReg.hash_from_func(Func.sha2_256) >>> h.name 'sha256' """ new = cls._func_hash[func].new return new() if new else None
[ "def", "hash_from_func", "(", "cls", ",", "func", ")", ":", "new", "=", "cls", ".", "_func_hash", "[", "func", "]", ".", "new", "return", "new", "(", ")", "if", "new", "else", "None" ]
Return a hashlib-compatible object for the multihash `func`. If the `func` is registered but no hashlib-compatible constructor is available for it, `None` is returned. If the `func` is not registered, a `KeyError` is raised. >>> h = FuncReg.hash_from_func(Func.sha2_256) >>> h.name 'sha256'
[ "Return", "a", "hashlib", "-", "compatible", "object", "for", "the", "multihash", "func", "." ]
python
train
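A sketch that follows the record's own doctest one step further, hashing some bytes with the resolved constructor; the Func import location is assumed:

from multihash import Func
from multihash.funcs import FuncReg

h = FuncReg.hash_from_func(Func.sha2_256)
h.update(b'multihash')
print(h.name)  # sha256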
StorjOld/pyp2p
pyp2p/unl.py
https://github.com/StorjOld/pyp2p/blob/7024208c3af20511496a652ff212f54c420e0464/pyp2p/unl.py#L239-L373
def connect_handler(self, their_unl, events, force_master, hairpin, nonce): # Figure out who should make the connection. our_unl = self.value.encode("ascii") their_unl = their_unl.encode("ascii") master = self.is_master(their_unl) """ Master defines who connects if either side can. It's used to eliminate having multiple connections with the same host. """ if force_master: master = 1 # Deconstruct binary UNLs into dicts. our_unl = self.deconstruct(our_unl) their_unl = self.deconstruct(their_unl) if our_unl is None: raise Exception("Unable to deconstruct our UNL.") if their_unl is None: raise Exception("Unable to deconstruct their UNL.") # This means the nodes are behind the same router. if our_unl["wan_ip"] == their_unl["wan_ip"]: # Connect to LAN IP. our_unl["wan_ip"] = our_unl["lan_ip"] their_unl["wan_ip"] = their_unl["lan_ip"] # Already behind NAT so no forwarding needed. if hairpin: our_unl["node_type"] = "passive" their_unl["node_type"] = "passive" # Generate con ID. if nonce != "0" * 64: # Convert nonce to bytes. if sys.version_info >= (3, 0, 0): if type(nonce) == str: nonce.encode("ascii") else: if type(nonce) == unicode: nonce = str(nonce) # Check nonce length. assert(len(nonce) == 64) # Create con ID. con_id = self.net.generate_con_id( nonce, our_unl["wan_ip"], their_unl["wan_ip"] ) else: con_id = None # Acquire mutex. self.mutex.acquire() # Wait for other UNLs to finish. end_time = time.time() end_time += len(self.pending_unls) * 60 self.debug_print("Waiting for other unls to finish") while their_unl in self.pending_unls and time.time() < end_time: # This is an undifferentiated duplicate. if events is None: self.mutex.release() return time.sleep(1) self.debug_print("Other unl finished") is_exception = 0 try: # Wait for any other hole punches to finish. if (their_unl["node_type"] == "simultaneous" and our_unl["node_type"] != "passive"): self.pending_sim_open.append(their_unl["value"]) end_time = time.time() end_time += len(self.pending_unls) * 60 self.debug_print("wait for other hole punches to finish") while len(self.pending_sim_open) and time.time() < end_time: if self.pending_sim_open[0] == their_unl["value"]: break time.sleep(1) self.debug_print("other hole punches finished") # Set pending UNL. self.pending_unls.append(their_unl) # Release mutex. self.mutex.release() # Get connection. con = self.get_connection( our_unl, their_unl, master, nonce, force_master, con_id ) except Exception as e: is_exception = 1 print(e) print("EXCEPTION IN UNL.GET_CONNECTION") log_exception("error.log", parse_exception(e)) finally: # Release mutex. if self.mutex.locked() and is_exception: self.mutex.release() # Undo pending connect state. if their_unl in self.pending_unls: self.pending_unls.remove(their_unl) # Undo pending sim open. if len(self.pending_sim_open): if self.pending_sim_open[0] == their_unl["value"]: self.pending_sim_open = self.pending_sim_open[1:] # Only execute events if this function was called manually. if events is not None: # Success. if con is not None: if "success" in events: events["success"](con) # Failure. if con is None: if "failure" in events: events["failure"](con)
[ "def", "connect_handler", "(", "self", ",", "their_unl", ",", "events", ",", "force_master", ",", "hairpin", ",", "nonce", ")", ":", "# Figure out who should make the connection.\r", "our_unl", "=", "self", ".", "value", ".", "encode", "(", "\"ascii\"", ")", "their_unl", "=", "their_unl", ".", "encode", "(", "\"ascii\"", ")", "master", "=", "self", ".", "is_master", "(", "their_unl", ")", "if", "force_master", ":", "master", "=", "1", "# Deconstruct binary UNLs into dicts.\r", "our_unl", "=", "self", ".", "deconstruct", "(", "our_unl", ")", "their_unl", "=", "self", ".", "deconstruct", "(", "their_unl", ")", "if", "our_unl", "is", "None", ":", "raise", "Exception", "(", "\"Unable to deconstruct our UNL.\"", ")", "if", "their_unl", "is", "None", ":", "raise", "Exception", "(", "\"Unable to deconstruct their UNL.\"", ")", "# This means the nodes are behind the same router.\r", "if", "our_unl", "[", "\"wan_ip\"", "]", "==", "their_unl", "[", "\"wan_ip\"", "]", ":", "# Connect to LAN IP.\r", "our_unl", "[", "\"wan_ip\"", "]", "=", "our_unl", "[", "\"lan_ip\"", "]", "their_unl", "[", "\"wan_ip\"", "]", "=", "their_unl", "[", "\"lan_ip\"", "]", "# Already behind NAT so no forwarding needed.\r", "if", "hairpin", ":", "our_unl", "[", "\"node_type\"", "]", "=", "\"passive\"", "their_unl", "[", "\"node_type\"", "]", "=", "\"passive\"", "# Generate con ID.\r", "if", "nonce", "!=", "\"0\"", "*", "64", ":", "# Convert nonce to bytes.\r", "if", "sys", ".", "version_info", ">=", "(", "3", ",", "0", ",", "0", ")", ":", "if", "type", "(", "nonce", ")", "==", "str", ":", "nonce", ".", "encode", "(", "\"ascii\"", ")", "else", ":", "if", "type", "(", "nonce", ")", "==", "unicode", ":", "nonce", "=", "str", "(", "nonce", ")", "# Check nonce length.\r", "assert", "(", "len", "(", "nonce", ")", "==", "64", ")", "# Create con ID.\r", "con_id", "=", "self", ".", "net", ".", "generate_con_id", "(", "nonce", ",", "our_unl", "[", "\"wan_ip\"", "]", ",", "their_unl", "[", "\"wan_ip\"", "]", ")", "else", ":", "con_id", "=", "None", "# Acquire mutex.\r", "self", ".", "mutex", ".", "acquire", "(", ")", "# Wait for other UNLs to finish.\r", "end_time", "=", "time", ".", "time", "(", ")", "end_time", "+=", "len", "(", "self", ".", "pending_unls", ")", "*", "60", "self", ".", "debug_print", "(", "\"Waiting for other unls to finish\"", ")", "while", "their_unl", "in", "self", ".", "pending_unls", "and", "time", ".", "time", "(", ")", "<", "end_time", ":", "# This is an undifferentiated duplicate.\r", "if", "events", "is", "None", ":", "self", ".", "mutex", ".", "release", "(", ")", "return", "time", ".", "sleep", "(", "1", ")", "self", ".", "debug_print", "(", "\"Other unl finished\"", ")", "is_exception", "=", "0", "try", ":", "# Wait for any other hole punches to finish.\r", "if", "(", "their_unl", "[", "\"node_type\"", "]", "==", "\"simultaneous\"", "and", "our_unl", "[", "\"node_type\"", "]", "!=", "\"passive\"", ")", ":", "self", ".", "pending_sim_open", ".", "append", "(", "their_unl", "[", "\"value\"", "]", ")", "end_time", "=", "time", ".", "time", "(", ")", "end_time", "+=", "len", "(", "self", ".", "pending_unls", ")", "*", "60", "self", ".", "debug_print", "(", "\"wait for other hole punches to finish\"", ")", "while", "len", "(", "self", ".", "pending_sim_open", ")", "and", "time", ".", "time", "(", ")", "<", "end_time", ":", "if", "self", ".", "pending_sim_open", "[", "0", "]", "==", "their_unl", "[", "\"value\"", "]", ":", "break", "time", ".", "sleep", "(", "1", ")", "self", ".", "debug_print", "(", "\"other 
hole punches finished\"", ")", "# Set pending UNL.\r", "self", ".", "pending_unls", ".", "append", "(", "their_unl", ")", "# Release mutex.\r", "self", ".", "mutex", ".", "release", "(", ")", "# Get connection.\r", "con", "=", "self", ".", "get_connection", "(", "our_unl", ",", "their_unl", ",", "master", ",", "nonce", ",", "force_master", ",", "con_id", ")", "except", "Exception", "as", "e", ":", "is_exception", "=", "1", "print", "(", "e", ")", "print", "(", "\"EXCEPTION IN UNL.GET_CONNECTION\"", ")", "log_exception", "(", "\"error.log\"", ",", "parse_exception", "(", "e", ")", ")", "finally", ":", "# Release mutex.\r", "if", "self", ".", "mutex", ".", "locked", "(", ")", "and", "is_exception", ":", "self", ".", "mutex", ".", "release", "(", ")", "# Undo pending connect state.\r", "if", "their_unl", "in", "self", ".", "pending_unls", ":", "self", ".", "pending_unls", ".", "remove", "(", "their_unl", ")", "# Undo pending sim open.\r", "if", "len", "(", "self", ".", "pending_sim_open", ")", ":", "if", "self", ".", "pending_sim_open", "[", "0", "]", "==", "their_unl", "[", "\"value\"", "]", ":", "self", ".", "pending_sim_open", "=", "self", ".", "pending_sim_open", "[", "1", ":", "]", "# Only execute events if this function was called manually.\r", "if", "events", "is", "not", "None", ":", "# Success.\r", "if", "con", "is", "not", "None", ":", "if", "\"success\"", "in", "events", ":", "events", "[", "\"success\"", "]", "(", "con", ")", "# Failure.\r", "if", "con", "is", "None", ":", "if", "\"failure\"", "in", "events", ":", "events", "[", "\"failure\"", "]", "(", "con", ")" ]
Master defines who connects if either side can. It's used to eliminate having multiple connections with the same host.
[ "Master", "defines", "who", "connects", "if", "either", "side", "can", ".", "It", "s", "used", "to", "eliminate", "having", "multiple", "connections", "with", "the", "same", "host", "." ]
python
train
collectiveacuity/labPack
labpack/platforms/aws/ec2.py
https://github.com/collectiveacuity/labPack/blob/52949ece35e72e3cc308f54d9ffa6bfbd96805b8/labpack/platforms/aws/ec2.py#L1239-L1298
def delete_image(self, image_id): ''' method for removing an image from AWS EC2 :param image_id: string with AWS id of instance :return: string with AWS response from snapshot delete ''' title = '%s.delete_image' % self.__class__.__name__ # validate inputs input_fields = { 'image_id': image_id } for key, value in input_fields.items(): object_title = '%s(%s=%s)' % (title, key, str(value)) self.fields.validate(value, '.%s' % key, object_title) # report query self.iam.printer('Removing image %s from AWS region %s.' % (image_id, self.iam.region_name)) # retrieve state old_state = self.check_image_state(image_id) # discover snapshot id and tags associated with instance id image_details = self.read_image(image_id) tag_list = image_details['tags'] snapshot_id = image_details['snapshot_id'] # remove tags from instance try: delete_kwargs = { 'Resources': [ image_id ], 'Tags': self.iam.prepare(tag_list) } self.connection.delete_tags(**delete_kwargs) self.iam.printer('Tags have been deleted from %s.' % image_id) except: raise AWSConnectionError(title) # deregister image try: self.connection.deregister_image( ImageId=image_id ) except: raise AWSConnectionError(title) self.iam.printer('Image %s has been deregistered.' % image_id) # delete snapshot try: response = self.connection.delete_snapshot( SnapshotId=snapshot_id ) except: raise AWSConnectionError(title) self.iam.printer('Snapshot %s associated with image %s has been deleted.' % (snapshot_id, image_id)) return response
[ "def", "delete_image", "(", "self", ",", "image_id", ")", ":", "title", "=", "'%s.delete_image'", "%", "self", ".", "__class__", ".", "__name__", "# validate inputs", "input_fields", "=", "{", "'image_id'", ":", "image_id", "}", "for", "key", ",", "value", "in", "input_fields", ".", "items", "(", ")", ":", "object_title", "=", "'%s(%s=%s)'", "%", "(", "title", ",", "key", ",", "str", "(", "value", ")", ")", "self", ".", "fields", ".", "validate", "(", "value", ",", "'.%s'", "%", "key", ",", "object_title", ")", "# report query", "self", ".", "iam", ".", "printer", "(", "'Removing image %s from AWS region %s.'", "%", "(", "image_id", ",", "self", ".", "iam", ".", "region_name", ")", ")", "# retrieve state", "old_state", "=", "self", ".", "check_image_state", "(", "image_id", ")", "# discover snapshot id and tags associated with instance id", "image_details", "=", "self", ".", "read_image", "(", "image_id", ")", "tag_list", "=", "image_details", "[", "'tags'", "]", "snapshot_id", "=", "image_details", "[", "'snapshot_id'", "]", "# remove tags from instance", "try", ":", "delete_kwargs", "=", "{", "'Resources'", ":", "[", "image_id", "]", ",", "'Tags'", ":", "self", ".", "iam", ".", "prepare", "(", "tag_list", ")", "}", "self", ".", "connection", ".", "delete_tags", "(", "*", "*", "delete_kwargs", ")", "self", ".", "iam", ".", "printer", "(", "'Tags have been deleted from %s.'", "%", "image_id", ")", "except", ":", "raise", "AWSConnectionError", "(", "title", ")", "# deregister image", "try", ":", "self", ".", "connection", ".", "deregister_image", "(", "ImageId", "=", "image_id", ")", "except", ":", "raise", "AWSConnectionError", "(", "title", ")", "self", ".", "iam", ".", "printer", "(", "'Image %s has been deregistered.'", "%", "image_id", ")", "# delete snapshot", "try", ":", "response", "=", "self", ".", "connection", ".", "delete_snapshot", "(", "SnapshotId", "=", "snapshot_id", ")", "except", ":", "raise", "AWSConnectionError", "(", "title", ")", "self", ".", "iam", ".", "printer", "(", "'Snapshot %s associated with image %s has been deleted.'", "%", "(", "snapshot_id", ",", "image_id", ")", ")", "return", "response" ]
method for removing an image from AWS EC2 :param image_id: string with AWS id of instance :return: string with AWS response from snapshot delete
[ "method", "for", "removing", "an", "image", "from", "AWS", "EC2" ]
python
train
pgmpy/pgmpy
pgmpy/models/ClusterGraph.py
https://github.com/pgmpy/pgmpy/blob/9381a66aba3c3871d3ccd00672b148d17d63239e/pgmpy/models/ClusterGraph.py#L211-L255
def get_cardinality(self, node=None): """ Returns the cardinality of the node Parameters ---------- node: any hashable python object (optional) The node whose cardinality we want. If node is not specified returns a dictionary with the given variable as keys and their respective cardinality as values. Returns ------- int or dict : If node is specified returns the cardinality of the node. If node is not specified returns a dictionary with the given variable as keys and their respective cardinality as values. Examples -------- >>> from pgmpy.models import ClusterGraph >>> from pgmpy.factors.discrete import DiscreteFactor >>> student = ClusterGraph() >>> factor = DiscreteFactor(['Alice', 'Bob'], cardinality=[2, 2], ... values=np.random.rand(4)) >>> student.add_node(('Alice', 'Bob')) >>> student.add_factors(factor) >>> student.get_cardinality() defaultdict(<class 'int'>, {'Bob': 2, 'Alice': 2}) >>> student.get_cardinality(node='Alice') 2 """ if node: for factor in self.factors: for variable, cardinality in zip(factor.scope(), factor.cardinality): if node == variable: return cardinality else: cardinalities = defaultdict(int) for factor in self.factors: for variable, cardinality in zip(factor.scope(), factor.cardinality): cardinalities[variable] = cardinality return cardinalities
[ "def", "get_cardinality", "(", "self", ",", "node", "=", "None", ")", ":", "if", "node", ":", "for", "factor", "in", "self", ".", "factors", ":", "for", "variable", ",", "cardinality", "in", "zip", "(", "factor", ".", "scope", "(", ")", ",", "factor", ".", "cardinality", ")", ":", "if", "node", "==", "variable", ":", "return", "cardinality", "else", ":", "cardinalities", "=", "defaultdict", "(", "int", ")", "for", "factor", "in", "self", ".", "factors", ":", "for", "variable", ",", "cardinality", "in", "zip", "(", "factor", ".", "scope", "(", ")", ",", "factor", ".", "cardinality", ")", ":", "cardinalities", "[", "variable", "]", "=", "cardinality", "return", "cardinalities" ]
Returns the cardinality of the node Parameters ---------- node: any hashable python object (optional) The node whose cardinality we want. If node is not specified returns a dictionary with the given variable as keys and their respective cardinality as values. Returns ------- int or dict : If node is specified returns the cardinality of the node. If node is not specified returns a dictionary with the given variable as keys and their respective cardinality as values. Examples -------- >>> from pgmpy.models import ClusterGraph >>> from pgmpy.factors.discrete import DiscreteFactor >>> student = ClusterGraph() >>> factor = DiscreteFactor(['Alice', 'Bob'], cardinality=[2, 2], ... values=np.random.rand(4)) >>> student.add_node(('Alice', 'Bob')) >>> student.add_factors(factor) >>> student.get_cardinality() defaultdict(<class 'int'>, {'Bob': 2, 'Alice': 2}) >>> student.get_cardinality(node='Alice') 2
[ "Returns", "the", "cardinality", "of", "the", "node" ]
python
train
Pelagicore/qface
qface/idl/domain.py
https://github.com/Pelagicore/qface/blob/7f60e91e3a91a7cb04cfacbc9ce80f43df444853/qface/idl/domain.py#L146-L149
def add_tag(self, tag): """ add a tag to the tag list """ if tag not in self._tags: self._tags[tag] = dict()
[ "def", "add_tag", "(", "self", ",", "tag", ")", ":", "if", "tag", "not", "in", "self", ".", "_tags", ":", "self", ".", "_tags", "[", "tag", "]", "=", "dict", "(", ")" ]
add a tag to the tag list
[ "add", "a", "tag", "to", "the", "tag", "list" ]
python
train
klen/graphite-beacon
graphite_beacon/core.py
https://github.com/klen/graphite-beacon/blob/c1f071e9f557693bc90f6acbc314994985dc3b77/graphite_beacon/core.py#L148-L162
def start(self, start_loop=True): """Start all the things. :param start_loop bool: whether to start the ioloop. should be False if the IOLoop is managed externally """ self.start_alerts() if self.options.get('pidfile'): with open(self.options.get('pidfile'), 'w') as fpid: fpid.write(str(os.getpid())) self.callback.start() LOGGER.info('Reactor starts') if start_loop: self.loop.start()
[ "def", "start", "(", "self", ",", "start_loop", "=", "True", ")", ":", "self", ".", "start_alerts", "(", ")", "if", "self", ".", "options", ".", "get", "(", "'pidfile'", ")", ":", "with", "open", "(", "self", ".", "options", ".", "get", "(", "'pidfile'", ")", ",", "'w'", ")", "as", "fpid", ":", "fpid", ".", "write", "(", "str", "(", "os", ".", "getpid", "(", ")", ")", ")", "self", ".", "callback", ".", "start", "(", ")", "LOGGER", ".", "info", "(", "'Reactor starts'", ")", "if", "start_loop", ":", "self", ".", "loop", ".", "start", "(", ")" ]
Start all the things. :param start_loop bool: whether to start the ioloop. should be False if the IOLoop is managed externally
[ "Start", "all", "the", "things", "." ]
python
train
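A sketch under assumptions: the reactor class name and option keyword below are assumed from this module's conventions, not shown in the record. start_loop=False leaves the IOLoop to external management:

from graphite_beacon.core import Reactor  # assumed export name

reactor = Reactor(pidfile='/tmp/graphite-beacon.pid')
reactor.start(start_loop=False)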
apache/incubator-mxnet
python/mxnet/ndarray/ndarray.py
https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/python/mxnet/ndarray/ndarray.py#L3705-L3759
def concatenate(arrays, axis=0, always_copy=True): """DEPRECATED, use ``concat`` instead Parameters ---------- arrays : list of `NDArray` Arrays to be concatenate. They must have identical shape except the first dimension. They also must have the same data type. axis : int The axis along which to concatenate. always_copy : bool Default `True`. When not `True`, if the arrays only contain one `NDArray`, that element will be returned directly, avoid copying. Returns ------- NDArray An `NDArray` that lives on the same context as `arrays[0].context`. """ assert isinstance(arrays, list) assert len(arrays) > 0 assert isinstance(arrays[0], NDArray) if not always_copy and len(arrays) == 1: return arrays[0] shape_axis = arrays[0].shape[axis] shape_rest1 = arrays[0].shape[0:axis] shape_rest2 = arrays[0].shape[axis+1:] dtype = arrays[0].dtype for arr in arrays[1:]: shape_axis += arr.shape[axis] assert shape_rest1 == arr.shape[0:axis] assert shape_rest2 == arr.shape[axis+1:] assert dtype == arr.dtype ret_shape = shape_rest1 + (shape_axis,) + shape_rest2 ret = empty(ret_shape, ctx=arrays[0].context, dtype=dtype) idx = 0 begin = [0 for _ in ret_shape] end = list(ret_shape) for arr in arrays: if axis == 0: ret[idx:idx+arr.shape[0]] = arr else: begin[axis] = idx end[axis] = idx+arr.shape[axis] # pylint: disable=no-member,protected-access _internal._crop_assign(ret, arr, out=ret, begin=tuple(begin), end=tuple(end)) # pylint: enable=no-member,protected-access idx += arr.shape[axis] return ret
[ "def", "concatenate", "(", "arrays", ",", "axis", "=", "0", ",", "always_copy", "=", "True", ")", ":", "assert", "isinstance", "(", "arrays", ",", "list", ")", "assert", "len", "(", "arrays", ")", ">", "0", "assert", "isinstance", "(", "arrays", "[", "0", "]", ",", "NDArray", ")", "if", "not", "always_copy", "and", "len", "(", "arrays", ")", "==", "1", ":", "return", "arrays", "[", "0", "]", "shape_axis", "=", "arrays", "[", "0", "]", ".", "shape", "[", "axis", "]", "shape_rest1", "=", "arrays", "[", "0", "]", ".", "shape", "[", "0", ":", "axis", "]", "shape_rest2", "=", "arrays", "[", "0", "]", ".", "shape", "[", "axis", "+", "1", ":", "]", "dtype", "=", "arrays", "[", "0", "]", ".", "dtype", "for", "arr", "in", "arrays", "[", "1", ":", "]", ":", "shape_axis", "+=", "arr", ".", "shape", "[", "axis", "]", "assert", "shape_rest1", "==", "arr", ".", "shape", "[", "0", ":", "axis", "]", "assert", "shape_rest2", "==", "arr", ".", "shape", "[", "axis", "+", "1", ":", "]", "assert", "dtype", "==", "arr", ".", "dtype", "ret_shape", "=", "shape_rest1", "+", "(", "shape_axis", ",", ")", "+", "shape_rest2", "ret", "=", "empty", "(", "ret_shape", ",", "ctx", "=", "arrays", "[", "0", "]", ".", "context", ",", "dtype", "=", "dtype", ")", "idx", "=", "0", "begin", "=", "[", "0", "for", "_", "in", "ret_shape", "]", "end", "=", "list", "(", "ret_shape", ")", "for", "arr", "in", "arrays", ":", "if", "axis", "==", "0", ":", "ret", "[", "idx", ":", "idx", "+", "arr", ".", "shape", "[", "0", "]", "]", "=", "arr", "else", ":", "begin", "[", "axis", "]", "=", "idx", "end", "[", "axis", "]", "=", "idx", "+", "arr", ".", "shape", "[", "axis", "]", "# pylint: disable=no-member,protected-access", "_internal", ".", "_crop_assign", "(", "ret", ",", "arr", ",", "out", "=", "ret", ",", "begin", "=", "tuple", "(", "begin", ")", ",", "end", "=", "tuple", "(", "end", ")", ")", "# pylint: enable=no-member,protected-access", "idx", "+=", "arr", ".", "shape", "[", "axis", "]", "return", "ret" ]
DEPRECATED, use ``concat`` instead

Parameters
----------
arrays : list of `NDArray`
    Arrays to be concatenated. They must have identical shape except
    the first dimension. They also must have the same data type.
axis : int
    The axis along which to concatenate.
always_copy : bool
    Default `True`. When not `True`, if the arrays only contain one
    `NDArray`, that element will be returned directly, avoiding a copy.

Returns
-------
NDArray
    An `NDArray` that lives on the same context as `arrays[0].context`.
[ "DEPRECATED", "use", "concat", "instead" ]
python
train
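A minimal sketch of the deprecated helper above; concat is the recommended replacement:

import mxnet as mx

a = mx.nd.ones((2, 3))
b = mx.nd.zeros((2, 3))
c = mx.nd.concatenate([a, b], axis=0)
print(c.shape)  # (4, 3)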
klmitch/turnstile
turnstile/control.py
https://github.com/klmitch/turnstile/blob/8fe9a359b45e505d3192ab193ecf9be177ab1a17/turnstile/control.py#L82-L98
def get_limits(self, limit_sum=None): """ Gets the current limit data if it is different from the data indicated by limit_sum. Raises a NoChangeException if the limit_sum represents no change, otherwise returns a tuple consisting of the current limit_sum and a list of Limit objects. """ with self.limit_lock: # Any changes? if limit_sum and self.limit_sum == limit_sum: raise NoChangeException() # Return a tuple of the limits and limit sum return (self.limit_sum, self.limit_data)
[ "def", "get_limits", "(", "self", ",", "limit_sum", "=", "None", ")", ":", "with", "self", ".", "limit_lock", ":", "# Any changes?", "if", "limit_sum", "and", "self", ".", "limit_sum", "==", "limit_sum", ":", "raise", "NoChangeException", "(", ")", "# Return a tuple of the limits and limit sum", "return", "(", "self", ".", "limit_sum", ",", "self", ".", "limit_data", ")" ]
Gets the current limit data if it is different from the data indicated by limit_sum. Raises a NoChangeException if the limit_sum represents no change, otherwise returns a tuple consisting of the current limit_sum and a list of Limit objects.
[ "Gets", "the", "current", "limit", "data", "if", "it", "is", "different", "from", "the", "data", "indicated", "by", "limit_sum", ".", "Raises", "a", "NoChangeException", "if", "the", "limit_sum", "represents", "no", "change", "otherwise", "returns", "a", "tuple", "consisting", "of", "the", "current", "limit_sum", "and", "a", "list", "of", "Limit", "objects", "." ]
python
train
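A sketch of the checksum protocol above from the caller's side; control stands in for whatever object holds this method (hypothetical name), last_sum for the caller's cached checksum, and NoChangeException comes from the same module:

try:
    last_sum, limits = control.get_limits(limit_sum=last_sum)
    # new data arrived; reload limits here
except NoChangeException:
    pass  # limits unchanged since the last poll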
newville/wxmplot
examples/tifffile.py
https://github.com/newville/wxmplot/blob/8e0dc037453e5cdf18c968dc5a3d29efd761edee/examples/tifffile.py#L1249-L1255
def _fromdata(self, code, dtype, count, value, name=None): """Initialize instance from arguments.""" self.code = int(code) self.name = name if name else str(code) self.dtype = TIFF_DATA_TYPES[dtype] self.count = int(count) self.value = value
[ "def", "_fromdata", "(", "self", ",", "code", ",", "dtype", ",", "count", ",", "value", ",", "name", "=", "None", ")", ":", "self", ".", "code", "=", "int", "(", "code", ")", "self", ".", "name", "=", "name", "if", "name", "else", "str", "(", "code", ")", "self", ".", "dtype", "=", "TIFF_DATA_TYPES", "[", "dtype", "]", "self", ".", "count", "=", "int", "(", "count", ")", "self", ".", "value", "=", "value" ]
Initialize instance from arguments.
[ "Initialize", "instance", "from", "arguments", "." ]
python
train
dpgaspar/Flask-AppBuilder
flask_appbuilder/api/__init__.py
https://github.com/dpgaspar/Flask-AppBuilder/blob/c293734c1b86e176a3ba57ee2deab6676d125576/flask_appbuilder/api/__init__.py#L1496-L1523
def _get_fields_info(self, cols, model_schema, filter_rel_fields, **kwargs): """ Returns a dict with fields detail from a marshmallow schema :param cols: list of columns to show info for :param model_schema: Marshmallow model schema :param filter_rel_fields: expects add_query_rel_fields or edit_query_rel_fields :param kwargs: Receives all rison arguments for pagination :return: dict with all fields details """ ret = list() for col in cols: page = page_size = None col_args = kwargs.get(col, {}) if col_args: page = col_args.get(API_PAGE_INDEX_RIS_KEY, None) page_size = col_args.get(API_PAGE_SIZE_RIS_KEY, None) ret.append( self._get_field_info( model_schema.fields[col], filter_rel_fields.get(col, []), page=page, page_size=page_size, ) ) return ret
[ "def", "_get_fields_info", "(", "self", ",", "cols", ",", "model_schema", ",", "filter_rel_fields", ",", "*", "*", "kwargs", ")", ":", "ret", "=", "list", "(", ")", "for", "col", "in", "cols", ":", "page", "=", "page_size", "=", "None", "col_args", "=", "kwargs", ".", "get", "(", "col", ",", "{", "}", ")", "if", "col_args", ":", "page", "=", "col_args", ".", "get", "(", "API_PAGE_INDEX_RIS_KEY", ",", "None", ")", "page_size", "=", "col_args", ".", "get", "(", "API_PAGE_SIZE_RIS_KEY", ",", "None", ")", "ret", ".", "append", "(", "self", ".", "_get_field_info", "(", "model_schema", ".", "fields", "[", "col", "]", ",", "filter_rel_fields", ".", "get", "(", "col", ",", "[", "]", ")", ",", "page", "=", "page", ",", "page_size", "=", "page_size", ",", ")", ")", "return", "ret" ]
Returns a dict with fields detail from a marshmallow schema :param cols: list of columns to show info for :param model_schema: Marshmallow model schema :param filter_rel_fields: expects add_query_rel_fields or edit_query_rel_fields :param kwargs: Receives all rison arguments for pagination :return: dict with all fields details
[ "Returns", "a", "dict", "with", "fields", "detail", "from", "a", "marshmallow", "schema" ]
python
train
lpantano/seqcluster
seqcluster/make_predictions.py
https://github.com/lpantano/seqcluster/blob/774e23add8cd4fdc83d626cea3bd1f458e7d060d/seqcluster/make_predictions.py#L11-L29
def predictions(args): """ Create predictions of clusters """ logger.info(args) logger.info("reading sequences") out_file = os.path.abspath(os.path.splitext(args.json)[0] + "_prediction.json") data = load_data(args.json) out_dir = os.path.abspath(safe_dirs(os.path.join(args.out, "predictions"))) logger.info("make predictions") data = is_tRNA(data, out_dir, args) if args.coral: logger.info("make CoRaL predictions") run_coral(data, out_dir, args) write_data(data[0], out_file) logger.info("Done")
[ "def", "predictions", "(", "args", ")", ":", "logger", ".", "info", "(", "args", ")", "logger", ".", "info", "(", "\"reading sequences\"", ")", "out_file", "=", "os", ".", "path", ".", "abspath", "(", "os", ".", "path", ".", "splitext", "(", "args", ".", "json", ")", "[", "0", "]", "+", "\"_prediction.json\"", ")", "data", "=", "load_data", "(", "args", ".", "json", ")", "out_dir", "=", "os", ".", "path", ".", "abspath", "(", "safe_dirs", "(", "os", ".", "path", ".", "join", "(", "args", ".", "out", ",", "\"predictions\"", ")", ")", ")", "logger", ".", "info", "(", "\"make predictions\"", ")", "data", "=", "is_tRNA", "(", "data", ",", "out_dir", ",", "args", ")", "if", "args", ".", "coral", ":", "logger", ".", "info", "(", "\"make CoRaL predictions\"", ")", "run_coral", "(", "data", ",", "out_dir", ",", "args", ")", "write_data", "(", "data", "[", "0", "]", ",", "out_file", ")", "logger", ".", "info", "(", "\"Done\"", ")" ]
Create predictions of clusters
[ "Create", "predictions", "of", "clusters" ]
python
train
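A minimal driver sketch: the function only reads json, out, and coral off its argument, so an argparse-style namespace with placeholder paths is enough:

from argparse import Namespace

args = Namespace(json='res/seqcluster.json', out='res', coral=False)
predictions(args)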
androguard/androguard
androguard/core/bytecodes/apk.py
https://github.com/androguard/androguard/blob/984c0d981be2950cf0451e484f7b0d4d53bc4911/androguard/core/bytecodes/apk.py#L785-L794
def get_dex_names(self): """ Return the names of all DEX files found in the APK. This method only accounts for "official" dex files, i.e. all files in the root directory of the APK named classes.dex or classes[0-9]+.dex :rtype: a list of str """ dexre = re.compile(r"classes(\d*).dex") return filter(lambda x: dexre.match(x), self.get_files())
[ "def", "get_dex_names", "(", "self", ")", ":", "dexre", "=", "re", ".", "compile", "(", "r\"classes(\\d*).dex\"", ")", "return", "filter", "(", "lambda", "x", ":", "dexre", ".", "match", "(", "x", ")", ",", "self", ".", "get_files", "(", ")", ")" ]
Return the names of all DEX files found in the APK. This method only accounts for "official" dex files, i.e. all files in the root directory of the APK named classes.dex or classes[0-9]+.dex :rtype: a list of str
[ "Return", "the", "names", "of", "all", "DEX", "files", "found", "in", "the", "APK", ".", "This", "method", "only", "accounts", "for", "official", "dex", "files", "i", ".", "e", ".", "all", "files", "in", "the", "root", "directory", "of", "the", "APK", "named", "classes", ".", "dex", "or", "classes", "[", "0", "-", "9", "]", "+", ".", "dex" ]
python
train
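A usage sketch; the APK path is hypothetical. Note that on Python 3 the method returns a lazy filter object, so it is wrapped in list() here:

from androguard.core.bytecodes.apk import APK

apk = APK('app.apk')
print(list(apk.get_dex_names()))  # e.g. ['classes.dex', 'classes2.dex']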
Ouranosinc/xclim
xclim/utils.py
https://github.com/Ouranosinc/xclim/blob/2080d139188bd8de2aeca097a025c2d89d6e0e09/xclim/utils.py#L116-L143
def pint2cfunits(value): """Return a CF-Convention unit string from a `pint` unit. Parameters ---------- value : pint.Unit Input unit. Returns ------- out : str Units following CF-Convention. """ # Print units using abbreviations (millimeter -> mm) s = "{:~}".format(value) # Search and replace patterns pat = r'(?P<inverse>/ )?(?P<unit>\w+)(?: \*\* (?P<pow>\d))?' def repl(m): i, u, p = m.groups() p = p or (1 if i else '') neg = '-' if i else ('^' if p else '') return "{}{}{}".format(u, neg, p) out, n = re.subn(pat, repl, s) return out
[ "def", "pint2cfunits", "(", "value", ")", ":", "# Print units using abbreviations (millimeter -> mm)", "s", "=", "\"{:~}\"", ".", "format", "(", "value", ")", "# Search and replace patterns", "pat", "=", "r'(?P<inverse>/ )?(?P<unit>\\w+)(?: \\*\\* (?P<pow>\\d))?'", "def", "repl", "(", "m", ")", ":", "i", ",", "u", ",", "p", "=", "m", ".", "groups", "(", ")", "p", "=", "p", "or", "(", "1", "if", "i", "else", "''", ")", "neg", "=", "'-'", "if", "i", "else", "(", "'^'", "if", "p", "else", "''", ")", "return", "\"{}{}{}\"", ".", "format", "(", "u", ",", "neg", ",", "p", ")", "out", ",", "n", "=", "re", ".", "subn", "(", "pat", ",", "repl", ",", "s", ")", "return", "out" ]
Return a CF-Convention unit string from a `pint` unit. Parameters ---------- value : pint.Unit Input unit. Returns ------- out : str Units following CF-Convention.
[ "Return", "a", "CF", "-", "Convention", "unit", "string", "from", "a", "pint", "unit", "." ]
python
train
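The regex substitution at the heart of `pint2cfunits` can be exercised on its own. The sketch below feeds it strings shaped like pint's `'{:~}'` output; the exact abbreviations pint produces depend on its unit registry, so the inputs here are illustrative:

```python
import re

pat = r'(?P<inverse>/ )?(?P<unit>\w+)(?: \*\* (?P<pow>\d))?'

def repl(m):
    i, u, p = m.groups()
    p = p or (1 if i else '')
    neg = '-' if i else ('^' if p else '')
    return "{}{}{}".format(u, neg, p)

print(re.subn(pat, repl, "mm / d")[0])       # mm d-1
print(re.subn(pat, repl, "kg / m ** 2")[0])  # kg m-2
```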
cuihantao/andes
andes/routines/tds.py
https://github.com/cuihantao/andes/blob/7067898d4f26ce7534e968b8486c4aa8fe3a511a/andes/routines/tds.py#L343-L371
def restore_values(self): """ Restore x, y, and f values if not converged Returns ------- None """ if self.convergence is True: return dae = self.system.dae system = self.system inc_g = self.inc[dae.n:dae.m + dae.n] max_g_err_sign = 1 if abs(max(inc_g)) > abs(min(inc_g)) else -1 if max_g_err_sign == 1: max_g_err_idx = list(inc_g).index(max(inc_g)) else: max_g_err_idx = list(inc_g).index(min(inc_g)) logger.debug( 'Maximum mismatch = {:.4g} at equation <{}>'.format( max(abs(inc_g)), system.varname.unamey[max_g_err_idx])) logger.debug( 'Reducing time step h={:.4g}s for t={:.4g}'.format(self.h, self.t)) # restore initial variable data dae.x = matrix(self.x0) dae.y = matrix(self.y0) dae.f = matrix(self.f0)
[ "def", "restore_values", "(", "self", ")", ":", "if", "self", ".", "convergence", "is", "True", ":", "return", "dae", "=", "self", ".", "system", ".", "dae", "system", "=", "self", ".", "system", "inc_g", "=", "self", ".", "inc", "[", "dae", ".", "n", ":", "dae", ".", "m", "+", "dae", ".", "n", "]", "max_g_err_sign", "=", "1", "if", "abs", "(", "max", "(", "inc_g", ")", ")", ">", "abs", "(", "min", "(", "inc_g", ")", ")", "else", "-", "1", "if", "max_g_err_sign", "==", "1", ":", "max_g_err_idx", "=", "list", "(", "inc_g", ")", ".", "index", "(", "max", "(", "inc_g", ")", ")", "else", ":", "max_g_err_idx", "=", "list", "(", "inc_g", ")", ".", "index", "(", "min", "(", "inc_g", ")", ")", "logger", ".", "debug", "(", "'Maximum mismatch = {:.4g} at equation <{}>'", ".", "format", "(", "max", "(", "abs", "(", "inc_g", ")", ")", ",", "system", ".", "varname", ".", "unamey", "[", "max_g_err_idx", "]", ")", ")", "logger", ".", "debug", "(", "'Reducing time step h={:.4g}s for t={:.4g}'", ".", "format", "(", "self", ".", "h", ",", "self", ".", "t", ")", ")", "# restore initial variable data", "dae", ".", "x", "=", "matrix", "(", "self", ".", "x0", ")", "dae", ".", "y", "=", "matrix", "(", "self", ".", "y0", ")", "dae", ".", "f", "=", "matrix", "(", "self", ".", "f0", ")" ]
Restore x, y, and f values if not converged Returns ------- None
[ "Restore", "x", "y", "and", "f", "values", "if", "not", "converged" ]
python
train
cloud9ers/gurumate
environment/lib/python2.7/site-packages/IPython/core/completer.py
https://github.com/cloud9ers/gurumate/blob/075dc74d1ee62a8c6b7a8bf2b271364f01629d1e/environment/lib/python2.7/site-packages/IPython/core/completer.py#L132-L169
def expand_user(path): """Expand '~'-style usernames in strings. This is similar to :func:`os.path.expanduser`, but it computes and returns extra information that will be useful if the input was being used in computing completions, and you wish to return the completions with the original '~' instead of its expanded value. Parameters ---------- path : str String to be expanded. If no ~ is present, the output is the same as the input. Returns ------- newpath : str Result of ~ expansion in the input path. tilde_expand : bool Whether any expansion was performed or not. tilde_val : str The value that ~ was replaced with. """ # Default values tilde_expand = False tilde_val = '' newpath = path if path.startswith('~'): tilde_expand = True rest = len(path)-1 newpath = os.path.expanduser(path) if rest: tilde_val = newpath[:-rest] else: tilde_val = newpath return newpath, tilde_expand, tilde_val
[ "def", "expand_user", "(", "path", ")", ":", "# Default values", "tilde_expand", "=", "False", "tilde_val", "=", "''", "newpath", "=", "path", "if", "path", ".", "startswith", "(", "'~'", ")", ":", "tilde_expand", "=", "True", "rest", "=", "len", "(", "path", ")", "-", "1", "newpath", "=", "os", ".", "path", ".", "expanduser", "(", "path", ")", "if", "rest", ":", "tilde_val", "=", "newpath", "[", ":", "-", "rest", "]", "else", ":", "tilde_val", "=", "newpath", "return", "newpath", ",", "tilde_expand", ",", "tilde_val" ]
Expand '~'-style usernames in strings. This is similar to :func:`os.path.expanduser`, but it computes and returns extra information that will be useful if the input was being used in computing completions, and you wish to return the completions with the original '~' instead of its expanded value. Parameters ---------- path : str String to be expanded. If no ~ is present, the output is the same as the input. Returns ------- newpath : str Result of ~ expansion in the input path. tilde_expand : bool Whether any expansion was performed or not. tilde_val : str The value that ~ was replaced with.
[ "Expand", "~", "-", "style", "usernames", "in", "strings", "." ]
python
test
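A usage sketch, assuming the function above is in scope and `HOME` is `/home/alice` (a hypothetical value):

```python
newpath, tilde_expand, tilde_val = expand_user('~/notebooks/demo.ipynb')
# newpath      -> '/home/alice/notebooks/demo.ipynb'
# tilde_expand -> True
# tilde_val    -> '/home/alice'

# A path without '~' passes through untouched
assert expand_user('/tmp/x') == ('/tmp/x', False, '')
```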
ocaballeror/LyricFetch
lyricfetch/song.py
https://github.com/ocaballeror/LyricFetch/blob/86e62fb39c4c413ad7e1acf5bf0d28c9ed7c8fcb/lyricfetch/song.py#L65-L103
def from_filename(cls, filename): """ Class constructor using the path to the corresponding mp3 file. The metadata will be read from this file to create the song object, so it must at least contain valid ID3 tags for artist and title. """ if not filename: logger.error('No filename specified') return None if not os.path.exists(filename): logger.error("Err: File '%s' does not exist", filename) return None if os.path.isdir(filename): logger.error("Err: File '%s' is a directory", filename) return None try: audiofile = eyed3.load(filename) except Exception as error: print(type(error), error) return None # Sometimes eyed3 may return a null object and not raise any exceptions if audiofile is None: return None tags = audiofile.tag album = tags.album title = tags.title lyrics = ''.join([l.text for l in tags.lyrics]) artist = tags.album_artist if not artist: artist = tags.artist song = cls(artist, title, album, lyrics) song.filename = filename return song
[ "def", "from_filename", "(", "cls", ",", "filename", ")", ":", "if", "not", "filename", ":", "logger", ".", "error", "(", "'No filename specified'", ")", "return", "None", "if", "not", "os", ".", "path", ".", "exists", "(", "filename", ")", ":", "logger", ".", "error", "(", "\"Err: File '%s' does not exist\"", ",", "filename", ")", "return", "None", "if", "os", ".", "path", ".", "isdir", "(", "filename", ")", ":", "logger", ".", "error", "(", "\"Err: File '%s' is a directory\"", ",", "filename", ")", "return", "None", "try", ":", "audiofile", "=", "eyed3", ".", "load", "(", "filename", ")", "except", "Exception", "as", "error", ":", "print", "(", "type", "(", "error", ")", ",", "error", ")", "return", "None", "# Sometimes eyed3 may return a null object and not raise any exceptions", "if", "audiofile", "is", "None", ":", "return", "None", "tags", "=", "audiofile", ".", "tag", "album", "=", "tags", ".", "album", "title", "=", "tags", ".", "title", "lyrics", "=", "''", ".", "join", "(", "[", "l", ".", "text", "for", "l", "in", "tags", ".", "lyrics", "]", ")", "artist", "=", "tags", ".", "album_artist", "if", "not", "artist", ":", "artist", "=", "tags", ".", "artist", "song", "=", "cls", "(", "artist", ",", "title", ",", "album", ",", "lyrics", ")", "song", ".", "filename", "=", "filename", "return", "song" ]
Class constructor using the path to the corresponding mp3 file. The metadata will be read from this file to create the song object, so it must at least contain valid ID3 tags for artist and title.
[ "Class", "constructor", "using", "the", "path", "to", "the", "corresponding", "mp3", "file", ".", "The", "metadata", "will", "be", "read", "from", "this", "file", "to", "create", "the", "song", "object", "so", "it", "must", "at", "least", "contain", "valid", "ID3", "tags", "for", "artist", "and", "title", "." ]
python
train
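A hedged usage sketch, assuming the `Song` class above is importable and that `/music/track.mp3` (a hypothetical path) carries valid ID3 tags. The constructor returns `None` on every failure path rather than raising, which is why the result must be checked:

```python
song = Song.from_filename('/music/track.mp3')
if song is None:
    print('missing file, bad path, or eyed3 could not parse it')
else:
    print(song.artist, '-', song.title)
```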
pywbem/pywbem
pywbem/_valuemapping.py
https://github.com/pywbem/pywbem/blob/e54ecb82c2211e289a268567443d60fdd489f1e4/pywbem/_valuemapping.py#L468-L476
def _to_int(self, val_str): """Convert val_str to an integer or raise ValueError""" val = _integerValue_to_int(val_str) if val is None: raise ValueError( _format("The value-mapped {0} has an invalid integer " "representation in a ValueMap entry: {1!A}", self._element_str(), val_str)) return val
[ "def", "_to_int", "(", "self", ",", "val_str", ")", ":", "val", "=", "_integerValue_to_int", "(", "val_str", ")", "if", "val", "is", "None", ":", "raise", "ValueError", "(", "_format", "(", "\"The value-mapped {0} has an invalid integer \"", "\"representation in a ValueMap entry: {1!A}\"", ",", "self", ".", "_element_str", "(", ")", ",", "val_str", ")", ")", "return", "val" ]
Convert val_str to an integer or raise ValueError
[ "Convert", "val_str", "to", "an", "integer", "or", "raise", "ValueError" ]
python
train
CivicSpleen/ambry
ambry/library/__init__.py
https://github.com/CivicSpleen/ambry/blob/d7f2be4bf1f7ffd086f3fadd4fcae60c32473e42/ambry/library/__init__.py#L825-L847
def _find_remote_bundle(self, ref, remote_service_type='s3'): """ Locate a bundle, by any reference, among the configured remotes. The routine will only look in the cache directory lists stored in the remotes, which must be updated to be current. :param ref: :return: (remote,vname) or (None,None) if the ref is not found """ for r in self.remotes: if remote_service_type and r.service != remote_service_type: continue if 'list' not in r.data: continue for k, v in r.data['list'].items(): if ref in v.values(): return (r, v['vname']) return None, None
[ "def", "_find_remote_bundle", "(", "self", ",", "ref", ",", "remote_service_type", "=", "'s3'", ")", ":", "for", "r", "in", "self", ".", "remotes", ":", "if", "remote_service_type", "and", "r", ".", "service", "!=", "remote_service_type", ":", "continue", "if", "'list'", "not", "in", "r", ".", "data", ":", "continue", "for", "k", ",", "v", "in", "r", ".", "data", "[", "'list'", "]", ".", "items", "(", ")", ":", "if", "ref", "in", "v", ".", "values", "(", ")", ":", "return", "(", "r", ",", "v", "[", "'vname'", "]", ")", "return", "None", ",", "None" ]
Locate a bundle, by any reference, among the configured remotes. The routine will only look in the cache directory lists stored in the remotes, which must be updated to be current. :param ref: :return: (remote,vname) or (None,None) if the ref is not found
[ "Locate", "a", "bundle", "by", "any", "reference", "among", "the", "configured", "remotes", ".", "The", "routine", "will", "only", "look", "in", "the", "cache", "directory", "lists", "stored", "in", "the", "remotes", "which", "must", "be", "updated", "to", "be", "current", "." ]
python
train
jcrist/skein
skein/core.py
https://github.com/jcrist/skein/blob/16f8b1d3b3d9f79f36e2f152e45893339a1793e8/skein/core.py#L801-L811
def move_application(self, app_id, queue): """Move an application to a different queue. Parameters ---------- app_id : str The id of the application to move. queue : str The queue to move the application to. """ self._call('moveApplication', proto.MoveRequest(id=app_id, queue=queue))
[ "def", "move_application", "(", "self", ",", "app_id", ",", "queue", ")", ":", "self", ".", "_call", "(", "'moveApplication'", ",", "proto", ".", "MoveRequest", "(", "id", "=", "app_id", ",", "queue", "=", "queue", ")", ")" ]
Move an application to a different queue. Parameters ---------- app_id : str The id of the application to move. queue : str The queue to move the application to.
[ "Move", "an", "application", "to", "a", "different", "queue", "." ]
python
train
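A usage sketch under assumed names: the application id and queue below are hypothetical, and the client is created with skein's default connection settings:

```python
import skein

with skein.Client() as client:
    # Move a running application to the (hypothetical) 'batch' queue
    client.move_application('application_1526134340424_0012', 'batch')
```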
ianepperson/pyredminews
redmine/redmine_rest.py
https://github.com/ianepperson/pyredminews/blob/b2b0581483632738a3acca3b4e093c181847b813/redmine/redmine_rest.py#L769-L783
def find_all_item_classes(self): '''Finds and stores a reference to all Redmine_Item subclasses for later use.''' # This is a circular import, but performed after the class is defined and an object is instantiated. # We do this in order to get references to any object definitions in the redmine.py file # without requiring anyone editing the file to do anything other than create a class with the proper name. import redmine as public_classes item_class = {} for key, value in public_classes.__dict__.items(): try: if issubclass(value, Redmine_Item): item_class[key.lower()] = value except: continue self.item_class = item_class
Finds and stores a reference to all Redmine_Item subclasses for later use.
[ "Finds", "and", "stores", "a", "reference", "to", "all", "Redmine_Item", "subclasses", "for", "later", "use", "." ]
python
train
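The discovery pattern — scan a module's namespace and keep everything that subclasses a marker base — can be shown without the circular import. This standalone sketch uses hypothetical classes in place of the real `redmine` module:

```python
class Redmine_Item: pass          # stand-in for the real base class
class Issue(Redmine_Item): pass
class Project(Redmine_Item): pass

item_class = {}
for key, value in list(globals().items()):
    try:
        if issubclass(value, Redmine_Item):
            item_class[key.lower()] = value
    except TypeError:             # globals() also holds non-class values
        continue
print(sorted(item_class))  # ['issue', 'project', 'redmine_item']
```

The original uses a bare `except:` for the same purpose; `TypeError` is the specific exception `issubclass` raises when handed a non-class value.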
codenerix/django-codenerix
codenerix/djng/angular_base.py
https://github.com/codenerix/django-codenerix/blob/1f5527b352141caaee902b37b2648791a06bd57d/codenerix/djng/angular_base.py#L291-L303
def convert_widgets(self): """ During form initialization, some widgets have to be replaced by a counterpart suitable to be rendered the AngularJS way. """ for field in self.base_fields.values(): try: new_widget = field.get_converted_widget() except AttributeError: pass else: if new_widget: field.widget = new_widget
[ "def", "convert_widgets", "(", "self", ")", ":", "for", "field", "in", "self", ".", "base_fields", ".", "values", "(", ")", ":", "try", ":", "new_widget", "=", "field", ".", "get_converted_widget", "(", ")", "except", "AttributeError", ":", "pass", "else", ":", "if", "new_widget", ":", "field", ".", "widget", "=", "new_widget" ]
During form initialization, some widgets have to be replaced by a counterpart suitable to be rendered the AngularJS way.
[ "During", "form", "initialization", "some", "widgets", "have", "to", "be", "replaced", "by", "a", "counterpart", "suitable", "to", "be", "rendered", "the", "AngularJS", "way", "." ]
python
train
materialsproject/pymatgen
pymatgen/io/vasp/outputs.py
https://github.com/materialsproject/pymatgen/blob/4ca558cf72f8d5f8a1f21dfdfc0181a971c186da/pymatgen/io/vasp/outputs.py#L1784-L1806
def read_electrostatic_potential(self): """ Parses the electrostatic potential for the last ionic step """ pattern = {"ngf": r"\s+dimension x,y,z NGXF=\s+([\.\-\d]+)\sNGYF=\s+([\.\-\d]+)\sNGZF=\s+([\.\-\d]+)"} self.read_pattern(pattern, postprocess=int) self.ngf = self.data.get("ngf", [[]])[0] pattern = {"radii": r"the test charge radii are((?:\s+[\.\-\d]+)+)"} self.read_pattern(pattern, reverse=True, terminate_on_match=True, postprocess=str) self.sampling_radii = [float(f) for f in self.data["radii"][0][0].split()] header_pattern = r"\(the norm of the test charge is\s+[\.\-\d]+\)" table_pattern = r"((?:\s+\d+\s*[\.\-\d]+)+)" footer_pattern = r"\s+E-fermi :" pots = self.read_table_pattern(header_pattern, table_pattern, footer_pattern) pots = "".join(itertools.chain.from_iterable(pots)) pots = re.findall(r"\s+\d+\s?([\.\-\d]+)+", pots) pots = [float(f) for f in pots] self.electrostatic_potential = pots
[ "def", "read_electrostatic_potential", "(", "self", ")", ":", "pattern", "=", "{", "\"ngf\"", ":", "r\"\\s+dimension x,y,z NGXF=\\s+([\\.\\-\\d]+)\\sNGYF=\\s+([\\.\\-\\d]+)\\sNGZF=\\s+([\\.\\-\\d]+)\"", "}", "self", ".", "read_pattern", "(", "pattern", ",", "postprocess", "=", "int", ")", "self", ".", "ngf", "=", "self", ".", "data", ".", "get", "(", "\"ngf\"", ",", "[", "[", "]", "]", ")", "[", "0", "]", "pattern", "=", "{", "\"radii\"", ":", "r\"the test charge radii are((?:\\s+[\\.\\-\\d]+)+)\"", "}", "self", ".", "read_pattern", "(", "pattern", ",", "reverse", "=", "True", ",", "terminate_on_match", "=", "True", ",", "postprocess", "=", "str", ")", "self", ".", "sampling_radii", "=", "[", "float", "(", "f", ")", "for", "f", "in", "self", ".", "data", "[", "\"radii\"", "]", "[", "0", "]", "[", "0", "]", ".", "split", "(", ")", "]", "header_pattern", "=", "r\"\\(the norm of the test charge is\\s+[\\.\\-\\d]+\\)\"", "table_pattern", "=", "r\"((?:\\s+\\d+\\s*[\\.\\-\\d]+)+)\"", "footer_pattern", "=", "r\"\\s+E-fermi :\"", "pots", "=", "self", ".", "read_table_pattern", "(", "header_pattern", ",", "table_pattern", ",", "footer_pattern", ")", "pots", "=", "\"\"", ".", "join", "(", "itertools", ".", "chain", ".", "from_iterable", "(", "pots", ")", ")", "pots", "=", "re", ".", "findall", "(", "r\"\\s+\\d+\\s?([\\.\\-\\d]+)+\"", ",", "pots", ")", "pots", "=", "[", "float", "(", "f", ")", "for", "f", "in", "pots", "]", "self", ".", "electrostatic_potential", "=", "pots" ]
Parses the electrostatic potential for the last ionic step
[ "Parses", "the", "electrostatic", "potential", "for", "the", "last", "ionic", "step" ]
python
train
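A hedged usage sketch: `Outcar` is the class this method belongs to, and `OUTCAR` is a hypothetical local file from a run that actually wrote test-charge output (otherwise the regexes find nothing to parse):

```python
from pymatgen.io.vasp.outputs import Outcar

out = Outcar("OUTCAR")
out.read_electrostatic_potential()
print(out.ngf)                         # FFT grid dims, e.g. [36, 36, 36]
print(out.sampling_radii)              # test-charge radii
print(out.electrostatic_potential[:3])  # one potential value per site
```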
datamachine/twx
twx/mtproto/rpc.py
https://github.com/datamachine/twx/blob/d9633f12f3647b1e54ba87b70b39df3b7e02b4eb/twx/mtproto/rpc.py#L265-L275
def get_bytes(self): """client_DH_inner_data#6643b654 nonce:int128 server_nonce:int128 retry_id:long g_b:string = Client_DH_Inner_Data""" ret = struct.pack("<I16s16sQ", client_DH_inner_data.constructor, self.nonce, self.server_nonce, self.retry_id) bytes_io = BytesIO() bytes_io.write(ret) serialize_string(bytes_io, self.g_b) return bytes_io.getvalue()
[ "def", "get_bytes", "(", "self", ")", ":", "ret", "=", "struct", ".", "pack", "(", "\"<I16s16sQ\"", ",", "client_DH_inner_data", ".", "constructor", ",", "self", ".", "nonce", ",", "self", ".", "server_nonce", ",", "self", ".", "retry_id", ")", "bytes_io", "=", "BytesIO", "(", ")", "bytes_io", ".", "write", "(", "ret", ")", "serialize_string", "(", "bytes_io", ",", "self", ".", "g_b", ")", "return", "bytes_io", ".", "getvalue", "(", ")" ]
client_DH_inner_data#6643b654 nonce:int128 server_nonce:int128 retry_id:long g_b:string = Client_DH_Inner_Data
[ "client_DH_inner_data#6643b654", "nonce", ":", "int128", "server_nonce", ":", "int128", "retry_id", ":", "long", "g_b", ":", "string", "=", "Client_DH_Inner_Data" ]
python
train
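The fixed-size header packed by `struct.pack` above can be inspected standalone. The constructor value comes straight from the TL schema in the docstring (`client_DH_inner_data#6643b654`); the nonces and retry id below are dummy values:

```python
import struct

header = struct.pack("<I16s16sQ", 0x6643b654, b"\x01" * 16, b"\x02" * 16, 0)
print(len(header))  # 44 = 4 (uint32 constructor) + 16 + 16 (nonces) + 8 (uint64)
```

The variable-length `g_b` string is then appended via `serialize_string`, which is why the method switches to a `BytesIO` buffer instead of returning the packed header directly.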
saltant-org/saltant-py
saltant/models/task_queue.py
https://github.com/saltant-org/saltant-py/blob/bf3bdbc4ec9c772c7f621f8bd6a76c5932af68be/saltant/models/task_queue.py#L336-L397
def put( self, id, name, description, private, runs_executable_tasks, runs_docker_container_tasks, runs_singularity_container_tasks, active, whitelists, ): """Updates a task queue on the saltant server. Args: id (int): The ID of the task queue. name (str): The name of the task queue. description (str): The description of the task queue. private (bool): A Boolean signalling whether the queue can only be used by its associated user. runs_executable_tasks (bool): A Boolean specifying whether the queue runs executable tasks. runs_docker_container_tasks (bool): A Boolean specifying whether the queue runs container tasks that run in Docker containers. runs_singularity_container_tasks (bool): A Boolean specifying whether the queue runs container tasks that run in Singularity containers. active (bool): A Boolean signalling whether the queue is active. whitelists (list): A list of task whitelist IDs. Returns: :class:`saltant.models.task_queue.TaskQueue`: A task queue model instance representing the task queue just updated. """ # Update the object request_url = self._client.base_api_url + self.detail_url.format(id=id) data_to_put = { "name": name, "description": description, "private": private, "runs_executable_tasks": runs_executable_tasks, "runs_docker_container_tasks": runs_docker_container_tasks, "runs_singularity_container_tasks": runs_singularity_container_tasks, "active": active, "whitelists": whitelists, } response = self._client.session.put(request_url, data=data_to_put) # Validate that the request was successful self.validate_request_success( response_text=response.text, request_url=request_url, status_code=response.status_code, expected_status_code=HTTP_200_OK, ) # Return a model instance representing the task instance return self.response_data_to_model_instance(response.json())
[ "def", "put", "(", "self", ",", "id", ",", "name", ",", "description", ",", "private", ",", "runs_executable_tasks", ",", "runs_docker_container_tasks", ",", "runs_singularity_container_tasks", ",", "active", ",", "whitelists", ",", ")", ":", "# Update the object", "request_url", "=", "self", ".", "_client", ".", "base_api_url", "+", "self", ".", "detail_url", ".", "format", "(", "id", "=", "id", ")", "data_to_put", "=", "{", "\"name\"", ":", "name", ",", "\"description\"", ":", "description", ",", "\"private\"", ":", "private", ",", "\"runs_executable_tasks\"", ":", "runs_executable_tasks", ",", "\"runs_docker_container_tasks\"", ":", "runs_docker_container_tasks", ",", "\"runs_singularity_container_tasks\"", ":", "runs_singularity_container_tasks", ",", "\"active\"", ":", "active", ",", "\"whitelists\"", ":", "whitelists", ",", "}", "response", "=", "self", ".", "_client", ".", "session", ".", "put", "(", "request_url", ",", "data", "=", "data_to_put", ")", "# Validate that the request was successful", "self", ".", "validate_request_success", "(", "response_text", "=", "response", ".", "text", ",", "request_url", "=", "request_url", ",", "status_code", "=", "response", ".", "status_code", ",", "expected_status_code", "=", "HTTP_200_OK", ",", ")", "# Return a model instance representing the task instance", "return", "self", ".", "response_data_to_model_instance", "(", "response", ".", "json", "(", ")", ")" ]
Updates a task queue on the saltant server. Args: id (int): The ID of the task queue. name (str): The name of the task queue. description (str): The description of the task queue. private (bool): A Boolean signalling whether the queue can only be used by its associated user. runs_executable_tasks (bool): A Boolean specifying whether the queue runs executable tasks. runs_docker_container_tasks (bool): A Boolean specifying whether the queue runs container tasks that run in Docker containers. runs_singularity_container_tasks (bool): A Boolean specifying whether the queue runs container tasks that run in Singularity containers. active (bool): A Boolean signalling whether the queue is active. whitelists (list): A list of task whitelist IDs. Returns: :class:`saltant.models.task_queue.TaskQueue`: A task queue model instance representing the task queue just updated.
[ "Updates", "a", "task", "queue", "on", "the", "saltant", "server", "." ]
python
train
theislab/scanpy
scanpy/tools/_phenograph.py
https://github.com/theislab/scanpy/blob/9e4e5ee02e04cf618872d9b098e24f0542e8b227/scanpy/tools/_phenograph.py#L7-L145
def phenograph( adata, k=30, directed=False, prune=False, min_cluster_size=10, jaccard=True, primary_metric='euclidean', n_jobs=-1, q_tol=1e-3, louvain_time_limit=2000, nn_method='kdtree'): """ PhenoGraph clustering [Levine15]_. :param adata: Numpy ndarray of data to cluster, or sparse matrix of k-nearest neighbor graph If ndarray, n-by-d array of n cells in d dimensions If sparse matrix, n-by-n adjacency matrix :param k: Number of nearest neighbors to use in first step of graph construction :param directed: Whether to use a symmetric (default) or asymmetric ("directed") graph The graph construction process produces a directed graph, which is symmetrized by one of two methods (see below) :param prune: Whether to symmetrize by taking the average (prune=False) or product (prune=True) between the graph and its transpose :param min_cluster_size: Cells that end up in a cluster smaller than min_cluster_size are considered outliers and are assigned to -1 in the cluster labels :param jaccard: If True, use Jaccard metric between k-neighborhoods to build graph If False, use a Gaussian kernel :param primary_metric: Distance metric to define nearest neighbors Options include: {'euclidean','manhattan','correlation','cosine'}. Note that performance will be slower for correlation and cosine :param n_jobs: Nearest Neighbors and Jaccard coefficients will be computed in parallel using n_jobs. If n_jobs=-1, the number of jobs is determined automatically :param q_tol: Tolerance (i.e., precision) for monitoring modularity optimization :param louvain_time_limit: Maximum number of seconds to run modularity optimization. If exceeded the best result so far is returned :param nn_method: Whether to use brute force or kdtree for nearest neighbor search. For very large high-dimensional data sets, brute force (with parallel computation) performs faster than kdtree :return communities: numpy integer array of community assignments for each row in data :return graph: numpy sparse array of the graph that was used for clustering :return Q: the modularity score for communities on graph Example ------- >>> import scanpy.external as sce >>> import scanpy.api as sc >>> import numpy as np >>> import pandas as pd Assume adata is your annotated data which has the normalized data. Then do PCA: >>> sc.tl.pca(adata, n_comps = 100) Compute phenograph clusters: >>> result = sce.tl.phenograph(adata.obsm['X_pca'], k = 30) Embed the phenograph result into adata as a *categorical* variable (this helps in plotting): >>> adata.obs['pheno'] = pd.Categorical(result[0]) Check by typing "adata" and you should see under obs a key called 'pheno'. Now to show phenograph on tSNE (for example): Compute tSNE: >>> sc.tl.tsne(adata, random_state = 7) Plot phenograph clusters on tSNE: >>> sc.pl.tsne(adata, color = ['pheno'], s = 100, palette = sc.pl.palettes.vega_20_scanpy, legend_fontsize = 10) Cluster and cluster centroids for input Numpy ndarray >>> df = np.random.rand(1000,40) >>> df.shape (1000, 40) >>> result = sce.tl.phenograph(df, k=50) Finding 50 nearest neighbors using minkowski metric and 'auto' algorithm Neighbors computed in 0.16141605377197266 seconds Jaccard graph constructed in 0.7866239547729492 seconds Wrote graph to binary file in 0.42542195320129395 seconds Running Louvain modularity optimization After 1 runs, maximum modularity is Q = 0.223536 After 2 runs, maximum modularity is Q = 0.235874 Louvain completed 22 runs in 1.5609488487243652 seconds PhenoGraph complete in 2.9466471672058105 seconds New results can be pushed into adata object: >>> dframe = pd.DataFrame(data=df, columns=range(df.shape[1]),index=range(df.shape[0]) ) >>> adata = sc.AnnData( X=dframe, obs=dframe, var=dframe) >>> adata.obs['pheno'] = pd.Categorical(result[0]) """ logg.info('PhenoGraph clustering', r=True) try: import phenograph except ImportError: raise ImportError( 'please install phenograph: ' 'pip3 install git+https://github.com/jacoblevine/phenograph.git') communities, graph, Q = phenograph.cluster( data=adata, k=k, directed=directed, prune=prune, min_cluster_size=min_cluster_size, jaccard=jaccard, primary_metric=primary_metric, n_jobs=n_jobs, q_tol=q_tol, louvain_time_limit=louvain_time_limit, nn_method=nn_method ) logg.info(' finished', time=True) return communities, graph, Q
[ "def", "phenograph", "(", "adata", ",", "k", "=", "30", ",", "directed", "=", "False", ",", "prune", "=", "False", ",", "min_cluster_size", "=", "10", ",", "jaccard", "=", "True", ",", "primary_metric", "=", "'euclidean'", ",", "n_jobs", "=", "-", "1", ",", "q_tol", "=", "1e-3", ",", "louvain_time_limit", "=", "2000", ",", "nn_method", "=", "'kdtree'", ")", ":", "logg", ".", "info", "(", "'PhenoGraph clustering'", ",", "r", "=", "True", ")", "try", ":", "import", "phenograph", "except", "ImportError", ":", "raise", "ImportError", "(", "'please install phenograph: '", "'pip3 install git+https://github.com/jacoblevine/phenograph.git'", ")", "communities", ",", "graph", ",", "Q", "=", "phenograph", ".", "cluster", "(", "data", "=", "adata", ",", "k", "=", "k", ",", "directed", "=", "directed", ",", "prune", "=", "prune", ",", "min_cluster_size", "=", "min_cluster_size", ",", "jaccard", "=", "jaccard", ",", "primary_metric", "=", "primary_metric", ",", "n_jobs", "=", "n_jobs", ",", "q_tol", "=", "q_tol", ",", "louvain_time_limit", "=", "louvain_time_limit", ",", "nn_method", "=", "nn_method", ")", "logg", ".", "info", "(", "' finished'", ",", "time", "=", "True", ")", "return", "communities", ",", "graph", ",", "Q" ]
PhenoGraph clustering [Levine15]_. :param adata: Numpy ndarray of data to cluster, or sparse matrix of k-nearest neighbor graph If ndarray, n-by-d array of n cells in d dimensions If sparse matrix, n-by-n adjacency matrix :param k: Number of nearest neighbors to use in first step of graph construction :param directed: Whether to use a symmetric (default) or asymmetric ("directed") graph The graph construction process produces a directed graph, which is symmetrized by one of two methods (see below) :param prune: Whether to symmetrize by taking the average (prune=False) or product (prune=True) between the graph and its transpose :param min_cluster_size: Cells that end up in a cluster smaller than min_cluster_size are considered outliers and are assigned to -1 in the cluster labels :param jaccard: If True, use Jaccard metric between k-neighborhoods to build graph If False, use a Gaussian kernel :param primary_metric: Distance metric to define nearest neighbors Options include: {'euclidean','manhattan','correlation','cosine'}. Note that performance will be slower for correlation and cosine :param n_jobs: Nearest Neighbors and Jaccard coefficients will be computed in parallel using n_jobs. If n_jobs=-1, the number of jobs is determined automatically :param q_tol: Tolerance (i.e., precision) for monitoring modularity optimization :param louvain_time_limit: Maximum number of seconds to run modularity optimization. If exceeded the best result so far is returned :param nn_method: Whether to use brute force or kdtree for nearest neighbor search. For very large high-dimensional data sets, brute force (with parallel computation) performs faster than kdtree :return communities: numpy integer array of community assignments for each row in data :return graph: numpy sparse array of the graph that was used for clustering :return Q: the modularity score for communities on graph Example ------- >>> import scanpy.external as sce >>> import scanpy.api as sc >>> import numpy as np >>> import pandas as pd Assume adata is your annotated data which has the normalized data. Then do PCA: >>> sc.tl.pca(adata, n_comps = 100) Compute phenograph clusters: >>> result = sce.tl.phenograph(adata.obsm['X_pca'], k = 30) Embed the phenograph result into adata as a *categorical* variable (this helps in plotting): >>> adata.obs['pheno'] = pd.Categorical(result[0]) Check by typing "adata" and you should see under obs a key called 'pheno'. Now to show phenograph on tSNE (for example): Compute tSNE: >>> sc.tl.tsne(adata, random_state = 7) Plot phenograph clusters on tSNE: >>> sc.pl.tsne(adata, color = ['pheno'], s = 100, palette = sc.pl.palettes.vega_20_scanpy, legend_fontsize = 10) Cluster and cluster centroids for input Numpy ndarray >>> df = np.random.rand(1000,40) >>> df.shape (1000, 40) >>> result = sce.tl.phenograph(df, k=50) Finding 50 nearest neighbors using minkowski metric and 'auto' algorithm Neighbors computed in 0.16141605377197266 seconds Jaccard graph constructed in 0.7866239547729492 seconds Wrote graph to binary file in 0.42542195320129395 seconds Running Louvain modularity optimization After 1 runs, maximum modularity is Q = 0.223536 After 2 runs, maximum modularity is Q = 0.235874 Louvain completed 22 runs in 1.5609488487243652 seconds PhenoGraph complete in 2.9466471672058105 seconds New results can be pushed into adata object: >>> dframe = pd.DataFrame(data=df, columns=range(df.shape[1]),index=range(df.shape[0]) ) >>> adata = sc.AnnData( X=dframe, obs=dframe, var=dframe) >>> adata.obs['pheno'] = pd.Categorical(result[0])
[ "PhenoGraph", "clustering", "[", "Levine15", "]", "_", "." ]
python
train
LuminosoInsight/python-ftfy
ftfy/bad_codecs/__init__.py
https://github.com/LuminosoInsight/python-ftfy/blob/476acc6ad270bffe07f97d4f7cf2139acdc69633/ftfy/bad_codecs/__init__.py#L47-L76
def search_function(encoding): """ Register our "bad codecs" with Python's codecs API. This involves adding a search function that takes in an encoding name, and returns a codec for that encoding if it knows one, or None if it doesn't. The encodings this will match are: - Encodings of the form 'sloppy-windows-NNNN' or 'sloppy-iso-8859-N', where the non-sloppy version is an encoding that leaves some bytes unmapped to characters. - The 'utf-8-variants' encoding, which has the several aliases seen above. """ if encoding in _CACHE: return _CACHE[encoding] norm_encoding = normalize_encoding(encoding) codec = None if norm_encoding in UTF8_VAR_NAMES: from ftfy.bad_codecs.utf8_variants import CODEC_INFO codec = CODEC_INFO elif norm_encoding.startswith('sloppy_'): from ftfy.bad_codecs.sloppy import CODECS codec = CODECS.get(norm_encoding) if codec is not None: _CACHE[encoding] = codec return codec
[ "def", "search_function", "(", "encoding", ")", ":", "if", "encoding", "in", "_CACHE", ":", "return", "_CACHE", "[", "encoding", "]", "norm_encoding", "=", "normalize_encoding", "(", "encoding", ")", "codec", "=", "None", "if", "norm_encoding", "in", "UTF8_VAR_NAMES", ":", "from", "ftfy", ".", "bad_codecs", ".", "utf8_variants", "import", "CODEC_INFO", "codec", "=", "CODEC_INFO", "elif", "norm_encoding", ".", "startswith", "(", "'sloppy_'", ")", ":", "from", "ftfy", ".", "bad_codecs", ".", "sloppy", "import", "CODECS", "codec", "=", "CODECS", ".", "get", "(", "norm_encoding", ")", "if", "codec", "is", "not", "None", ":", "_CACHE", "[", "encoding", "]", "=", "codec", "return", "codec" ]
Register our "bad codecs" with Python's codecs API. This involves adding a search function that takes in an encoding name, and returns a codec for that encoding if it knows one, or None if it doesn't. The encodings this will match are: - Encodings of the form 'sloppy-windows-NNNN' or 'sloppy-iso-8859-N', where the non-sloppy version is an encoding that leaves some bytes unmapped to characters. - The 'utf-8-variants' encoding, which has the several aliases seen above.
[ "Register", "our", "bad", "codecs", "with", "Python", "s", "codecs", "API", ".", "This", "involves", "adding", "a", "search", "function", "that", "takes", "in", "an", "encoding", "name", "and", "returns", "a", "codec", "for", "that", "encoding", "if", "it", "knows", "one", "or", "None", "if", "it", "doesn", "t", "." ]
python
train
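Once the search function is registered (importing `ftfy.bad_codecs` does this as a side effect), the extra encodings resolve like any other codec. A small sketch — the byte values chosen are ones strict `windows-1252` leaves unmapped:

```python
import ftfy.bad_codecs  # noqa: F401  (importing registers the codecs)

raw = b"\x81\x8d\x9d"  # bytes strict windows-1252 cannot decode
print(raw.decode("sloppy-windows-1252"))  # '\x81\x8d\x9d' (same code points)
try:
    raw.decode("windows-1252")
except UnicodeDecodeError as err:
    print("strict codec refuses:", err.reason)
```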
poppy-project/pypot
pypot/vrep/remoteApiBindings/vrep.py
https://github.com/poppy-project/pypot/blob/d9c6551bbc87d45d9d1f0bc15e35b616d0002afd/pypot/vrep/remoteApiBindings/vrep.py#L487-L511
def simxAuxiliaryConsoleOpen(clientID, title, maxLines, mode, position, size, textColor, backgroundColor, operationMode): ''' Please have a look at the function description/documentation in the V-REP user manual ''' consoleHandle = ct.c_int() if (sys.version_info[0] == 3) and (type(title) is str): title=title.encode('utf-8') if position != None: c_position = (ct.c_int*2)(*position) else: c_position = None if size != None: c_size = (ct.c_int*2)(*size) else: c_size = None if textColor != None: c_textColor = (ct.c_float*3)(*textColor) else: c_textColor = None if backgroundColor != None: c_backgroundColor = (ct.c_float*3)(*backgroundColor) else: c_backgroundColor = None return c_AuxiliaryConsoleOpen(clientID, title, maxLines, mode, c_position, c_size, c_textColor, c_backgroundColor, ct.byref(consoleHandle), operationMode), consoleHandle.value
[ "def", "simxAuxiliaryConsoleOpen", "(", "clientID", ",", "title", ",", "maxLines", ",", "mode", ",", "position", ",", "size", ",", "textColor", ",", "backgroundColor", ",", "operationMode", ")", ":", "consoleHandle", "=", "ct", ".", "c_int", "(", ")", "if", "(", "sys", ".", "version_info", "[", "0", "]", "==", "3", ")", "and", "(", "type", "(", "title", ")", "is", "str", ")", ":", "title", "=", "title", ".", "encode", "(", "'utf-8'", ")", "if", "position", "!=", "None", ":", "c_position", "=", "(", "ct", ".", "c_int", "*", "2", ")", "(", "*", "position", ")", "else", ":", "c_position", "=", "None", "if", "size", "!=", "None", ":", "c_size", "=", "(", "ct", ".", "c_int", "*", "2", ")", "(", "*", "size", ")", "else", ":", "c_size", "=", "None", "if", "textColor", "!=", "None", ":", "c_textColor", "=", "(", "ct", ".", "c_float", "*", "3", ")", "(", "*", "textColor", ")", "else", ":", "c_textColor", "=", "None", "if", "backgroundColor", "!=", "None", ":", "c_backgroundColor", "=", "(", "ct", ".", "c_float", "*", "3", ")", "(", "*", "backgroundColor", ")", "else", ":", "c_backgroundColor", "=", "None", "return", "c_AuxiliaryConsoleOpen", "(", "clientID", ",", "title", ",", "maxLines", ",", "mode", ",", "c_position", ",", "c_size", ",", "c_textColor", ",", "c_backgroundColor", ",", "ct", ".", "byref", "(", "consoleHandle", ")", ",", "operationMode", ")", ",", "consoleHandle", ".", "value" ]
Please have a look at the function description/documentation in the V-REP user manual
[ "Please", "have", "a", "look", "at", "the", "function", "description", "/", "documentation", "in", "the", "V", "-", "REP", "user", "manual" ]
python
train
openatx/facebook-wda
wda/__init__.py
https://github.com/openatx/facebook-wda/blob/aa644204620c6d5c7705a9c7452d8c0cc39330d5/wda/__init__.py#L882-L902
def wait_gone(self, timeout=None, raise_error=True): """ Args: timeout (float): default timeout raise_error (bool): return bool or raise error Returns: bool: works when raise_error is False Raises: WDAElementNotDisappearError """ start_time = time.time() if timeout is None or timeout <= 0: timeout = self.timeout while start_time + timeout > time.time(): if not self.exists: return True if not raise_error: return False raise WDAElementNotDisappearError("element not gone")
[ "def", "wait_gone", "(", "self", ",", "timeout", "=", "None", ",", "raise_error", "=", "True", ")", ":", "start_time", "=", "time", ".", "time", "(", ")", "if", "timeout", "is", "None", "or", "timeout", "<=", "0", ":", "timeout", "=", "self", ".", "timeout", "while", "start_time", "+", "timeout", ">", "time", ".", "time", "(", ")", ":", "if", "not", "self", ".", "exists", ":", "return", "True", "if", "not", "raise_error", ":", "return", "False", "raise", "WDAElementNotDisappearError", "(", "\"element not gone\"", ")" ]
Args: timeout (float): default timeout raise_error (bool): return bool or raise error Returns: bool: works when raise_error is False Raises: WDAElementNotDisappearError
[ "Args", ":", "timeout", "(", "float", ")", ":", "default", "timeout", "raise_error", "(", "bool", ")", ":", "return", "bool", "or", "raise", "error", "Returns", ":", "bool", ":", "works", "when", "raise_error", "is", "False" ]
python
train
klen/adrest
adrest/utils/emitter.py
https://github.com/klen/adrest/blob/8b75c67123cffabe5ed98c222bb7ab43c904d89c/adrest/utils/emitter.py#L135-L143
def serialize(self, content): """ Serialize to JSONP. :return string: serialized JSONP """ content = super(JSONPEmitter, self).serialize(content) callback = self.request.GET.get('callback', 'callback') return u'%s(%s)' % (callback, content)
[ "def", "serialize", "(", "self", ",", "content", ")", ":", "content", "=", "super", "(", "JSONPEmitter", ",", "self", ")", ".", "serialize", "(", "content", ")", "callback", "=", "self", ".", "request", ".", "GET", ".", "get", "(", "'callback'", ",", "'callback'", ")", "return", "u'%s(%s)'", "%", "(", "callback", ",", "content", ")" ]
Serialize to JSONP. :return string: serialized JSONP
[ "Serialize", "to", "JSONP", "." ]
python
train
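Stripped of the Django request machinery, the JSONP padding step is a one-liner. This sketch mimics it with a hypothetical callback name in place of `request.GET['callback']`:

```python
content = '{"ok": true}'          # output of the parent JSON emitter
callback = 'handleResponse'       # would come from ?callback=... in the URL
print(u'%s(%s)' % (callback, content))  # handleResponse({"ok": true})
```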
shmir/PyTrafficGenerator
trafficgenerator/tgn_object.py
https://github.com/shmir/PyTrafficGenerator/blob/382e5d549c83404af2a6571fe19c9e71df8bac14/trafficgenerator/tgn_object.py#L177-L187
def get_objects_or_children_by_type(self, *types): """ Get objects if children have already been read or get children. Use this method for fast access to objects in case of static configurations. :param types: requested object types. :return: all children of the specified types. """ objects = self.get_objects_by_type(*types) return objects if objects else self.get_children(*types)
[ "def", "get_objects_or_children_by_type", "(", "self", ",", "*", "types", ")", ":", "objects", "=", "self", ".", "get_objects_by_type", "(", "*", "types", ")", "return", "objects", "if", "objects", "else", "self", ".", "get_children", "(", "*", "types", ")" ]
Get objects if children have already been read or get children. Use this method for fast access to objects in case of static configurations. :param types: requested object types. :return: all children of the specified types.
[ "Get", "objects", "if", "children", "have", "already", "been", "read", "or", "get", "children", "." ]
python
train
rigetti/pyquil
pyquil/quilatom.py
https://github.com/rigetti/pyquil/blob/ec98e453084b0037d69d8c3245f6822a5422593d/pyquil/quilatom.py#L124-L146
def unpack_classical_reg(c): """ Get the address for a classical register. :param c: A list of length 2, a pair, a string (to be interpreted as name[0]), or a MemoryReference. :return: The address as a MemoryReference. """ if isinstance(c, list) or isinstance(c, tuple): if len(c) > 2 or len(c) == 0: raise ValueError("if c is a list/tuple, it should be of length <= 2") if len(c) == 1: c = (c[0], 0) if not isinstance(c[0], str): raise ValueError("if c is a list/tuple, its first member should be a string") if not isinstance(c[1], int): raise ValueError("if c is a list/tuple, its second member should be an int") return MemoryReference(c[0], c[1]) if isinstance(c, MemoryReference): return c elif isinstance(c, str): return MemoryReference(c, 0) else: raise TypeError("c should be a list of length 2, a pair, a string, or a MemoryReference")
[ "def", "unpack_classical_reg", "(", "c", ")", ":", "if", "isinstance", "(", "c", ",", "list", ")", "or", "isinstance", "(", "c", ",", "tuple", ")", ":", "if", "len", "(", "c", ")", ">", "2", "or", "len", "(", "c", ")", "==", "0", ":", "raise", "ValueError", "(", "\"if c is a list/tuple, it should be of length <= 2\"", ")", "if", "len", "(", "c", ")", "==", "1", ":", "c", "=", "(", "c", "[", "0", "]", ",", "0", ")", "if", "not", "isinstance", "(", "c", "[", "0", "]", ",", "str", ")", ":", "raise", "ValueError", "(", "\"if c is a list/tuple, its first member should be a string\"", ")", "if", "not", "isinstance", "(", "c", "[", "1", "]", ",", "int", ")", ":", "raise", "ValueError", "(", "\"if c is a list/tuple, its second member should be an int\"", ")", "return", "MemoryReference", "(", "c", "[", "0", "]", ",", "c", "[", "1", "]", ")", "if", "isinstance", "(", "c", ",", "MemoryReference", ")", ":", "return", "c", "elif", "isinstance", "(", "c", ",", "str", ")", ":", "return", "MemoryReference", "(", "c", ",", "0", ")", "else", ":", "raise", "TypeError", "(", "\"c should be a list of length 2, a pair, a string, or a MemoryReference\"", ")" ]
Get the address for a classical register. :param c: A list of length 2, a pair, a string (to be interpreted as name[0]), or a MemoryReference. :return: The address as a MemoryReference.
[ "Get", "the", "address", "for", "a", "classical", "register", "." ]
python
train
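A usage sketch of the accepted input shapes, assuming the function is imported from the same module (`ro` is a hypothetical register name):

```python
from pyquil.quilatom import MemoryReference, unpack_classical_reg

print(unpack_classical_reg('ro'))          # ro[0]
print(unpack_classical_reg(('ro', 3)))     # ro[3]
print(unpack_classical_reg(['ro']))        # ro[0]  (length-1 list defaults offset to 0)
print(unpack_classical_reg(MemoryReference('ro', 1)))  # ro[1]  (passed through)
```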
awslabs/sockeye
sockeye/decoder.py
https://github.com/awslabs/sockeye/blob/5d64a1ee1ef3cbba17c6d1d94bc061020c43f6ab/sockeye/decoder.py#L116-L133
def decode_step(self, step: int, target_embed_prev: mx.sym.Symbol, source_encoded_max_length: int, *states: mx.sym.Symbol) -> Tuple[mx.sym.Symbol, mx.sym.Symbol, List[mx.sym.Symbol]]: """ Decodes a single time step given the current step, the previous embedded target word, and previous decoder states. Returns decoder representation for the next prediction, attention probabilities, and next decoder states. Implementations can maintain an arbitrary number of states. :param step: Global step of inference procedure, starts with 1. :param target_embed_prev: Previous target word embedding. Shape: (batch_size, target_num_embed). :param source_encoded_max_length: Length of encoded source time dimension. :param states: Arbitrary list of decoder states. :return: logit inputs, attention probabilities, next decoder states. """ pass
[ "def", "decode_step", "(", "self", ",", "step", ":", "int", ",", "target_embed_prev", ":", "mx", ".", "sym", ".", "Symbol", ",", "source_encoded_max_length", ":", "int", ",", "*", "states", ":", "mx", ".", "sym", ".", "Symbol", ")", "->", "Tuple", "[", "mx", ".", "sym", ".", "Symbol", ",", "mx", ".", "sym", ".", "Symbol", ",", "List", "[", "mx", ".", "sym", ".", "Symbol", "]", "]", ":", "pass" ]
Decodes a single time step given the current step, the previous embedded target word, and previous decoder states. Returns decoder representation for the next prediction, attention probabilities, and next decoder states. Implementations can maintain an arbitrary number of states. :param step: Global step of inference procedure, starts with 1. :param target_embed_prev: Previous target word embedding. Shape: (batch_size, target_num_embed). :param source_encoded_max_length: Length of encoded source time dimension. :param states: Arbitrary list of decoder states. :return: logit inputs, attention probabilities, next decoder states.
[ "Decodes", "a", "single", "time", "step", "given", "the", "current", "step", "the", "previous", "embedded", "target", "word", "and", "previous", "decoder", "states", ".", "Returns", "decoder", "representation", "for", "the", "next", "prediction", "attention", "probabilities", "and", "next", "decoder", "states", ".", "Implementations", "can", "maintain", "an", "arbitrary", "number", "of", "states", "." ]
python
train
tensorflow/tensor2tensor
tensor2tensor/rl/player_utils.py
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/rl/player_utils.py#L267-L298
def create_simulated_env( output_dir, grayscale, resize_width_factor, resize_height_factor, frame_stack_size, generative_model, generative_model_params, random_starts=True, which_epoch_data="last", **other_hparams ): """Create SimulatedEnv with minimal subset of hparams.""" # We need these, to initialize T2TGymEnv, but these values (hopefully) have # no effect on player. a_bit_risky_defaults = { "game": "pong", # assumes that T2TGymEnv has always reward_range (-1,1) "real_batch_size": 1, "rl_env_max_episode_steps": -1, "max_num_noops": 0 } for key in a_bit_risky_defaults: if key not in other_hparams: other_hparams[key] = a_bit_risky_defaults[key] hparams = hparam.HParams( grayscale=grayscale, resize_width_factor=resize_width_factor, resize_height_factor=resize_height_factor, frame_stack_size=frame_stack_size, generative_model=generative_model, generative_model_params=generative_model_params, **other_hparams ) return load_data_and_make_simulated_env( output_dir, wm_dir=None, hparams=hparams, which_epoch_data=which_epoch_data, random_starts=random_starts)
[ "def", "create_simulated_env", "(", "output_dir", ",", "grayscale", ",", "resize_width_factor", ",", "resize_height_factor", ",", "frame_stack_size", ",", "generative_model", ",", "generative_model_params", ",", "random_starts", "=", "True", ",", "which_epoch_data", "=", "\"last\"", ",", "*", "*", "other_hparams", ")", ":", "# We need these, to initialize T2TGymEnv, but these values (hopefully) have", "# no effect on player.", "a_bit_risky_defaults", "=", "{", "\"game\"", ":", "\"pong\"", ",", "# assumes that T2TGymEnv has always reward_range (-1,1)", "\"real_batch_size\"", ":", "1", ",", "\"rl_env_max_episode_steps\"", ":", "-", "1", ",", "\"max_num_noops\"", ":", "0", "}", "for", "key", "in", "a_bit_risky_defaults", ":", "if", "key", "not", "in", "other_hparams", ":", "other_hparams", "[", "key", "]", "=", "a_bit_risky_defaults", "[", "key", "]", "hparams", "=", "hparam", ".", "HParams", "(", "grayscale", "=", "grayscale", ",", "resize_width_factor", "=", "resize_width_factor", ",", "resize_height_factor", "=", "resize_height_factor", ",", "frame_stack_size", "=", "frame_stack_size", ",", "generative_model", "=", "generative_model", ",", "generative_model_params", "=", "generative_model_params", ",", "*", "*", "other_hparams", ")", "return", "load_data_and_make_simulated_env", "(", "output_dir", ",", "wm_dir", "=", "None", ",", "hparams", "=", "hparams", ",", "which_epoch_data", "=", "which_epoch_data", ",", "random_starts", "=", "random_starts", ")" ]
Create SimulatedEnv with minimal subset of hparams.
[ "Create", "SimulatedEnv", "with", "minimal", "subset", "of", "hparams", "." ]
python
train
bkjones/pyrabbit
pyrabbit/api.py
https://github.com/bkjones/pyrabbit/blob/e8a9f74ed5c6bba958994fb9a72c396e6a99ea0f/pyrabbit/api.py#L258-L264
def get_permissions(self): """ :returns: list of dicts, or an empty list if there are no permissions. """ path = Client.urls['all_permissions'] conns = self._call(path, 'GET') return conns
[ "def", "get_permissions", "(", "self", ")", ":", "path", "=", "Client", ".", "urls", "[", "'all_permissions'", "]", "conns", "=", "self", ".", "_call", "(", "path", ",", "'GET'", ")", "return", "conns" ]
:returns: list of dicts, or an empty list if there are no permissions.
[ ":", "returns", ":", "list", "of", "dicts", "or", "an", "empty", "list", "if", "there", "are", "no", "permissions", "." ]
python
train
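A hedged usage sketch; the host, port, and credentials are hypothetical, and each dict in the result typically carries the user, vhost, and the three permission regexes:

```python
from pyrabbit.api import Client

cl = Client('localhost:15672', 'guest', 'guest')
for perm in cl.get_permissions():
    print(perm['user'], perm['vhost'], perm['configure'])
```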