Dataset columns:
repo: string (7 to 54 chars)
path: string (4 to 192 chars)
url: string (87 to 284 chars)
code: string (78 to 104k chars)
code_tokens: sequence
docstring: string (1 to 46.9k chars)
docstring_tokens: sequence
language: string (1 class)
partition: string (3 classes)
xeroc/python-graphenelib
graphenebase/account.py
https://github.com/xeroc/python-graphenelib/blob/8bb5396bc79998ee424cf3813af478304173f3a6/graphenebase/account.py#L287-L292
def point(self):
    """ Return the point for the public key """
    string = unhexlify(self.unCompressed())
    return ecdsa.VerifyingKey.from_string(
        string[1:], curve=ecdsa.SECP256k1
    ).pubkey.point
[ "def", "point", "(", "self", ")", ":", "string", "=", "unhexlify", "(", "self", ".", "unCompressed", "(", ")", ")", "return", "ecdsa", ".", "VerifyingKey", ".", "from_string", "(", "string", "[", "1", ":", "]", ",", "curve", "=", "ecdsa", ".", "SECP256k1", ")", ".", "pubkey", ".", "point" ]
Return the point for the public key
[ "Return", "the", "point", "for", "the", "public", "key" ]
python
valid
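A quick self-contained sketch of what point() returns, using the same ecdsa package the method builds on; it generates a throwaway SECP256k1 key rather than parsing a real Graphene public key string:

import ecdsa

# Generate a throwaway key pair and read the underlying curve point,
# mirroring what PublicKey.point() extracts from an uncompressed key.
sk = ecdsa.SigningKey.generate(curve=ecdsa.SECP256k1)
vk = sk.get_verifying_key()
point = vk.pubkey.point
print(point.x(), point.y())  # affine coordinates on the curve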
ray-project/ray
python/ray/utils.py
https://github.com/ray-project/ray/blob/4eade036a0505e244c976f36aaa2d64386b5129b/python/ray/utils.py#L420-L442
def check_oversized_pickle(pickled, name, obj_type, worker):
    """Send a warning message if the pickled object is too large.

    Args:
        pickled: the pickled object.
        name: name of the pickled object.
        obj_type: type of the pickled object, can be 'function',
            'remote function', 'actor', or 'object'.
        worker: the worker used to send warning message.
    """
    length = len(pickled)
    if length <= ray_constants.PICKLE_OBJECT_WARNING_SIZE:
        return
    warning_message = (
        "Warning: The {} {} has size {} when pickled. "
        "It will be stored in Redis, which could cause memory issues. "
        "This may mean that its definition uses a large array or other object."
    ).format(obj_type, name, length)
    push_error_to_driver(
        worker,
        ray_constants.PICKLING_LARGE_OBJECT_PUSH_ERROR,
        warning_message,
        driver_id=worker.task_driver_id)
[ "def", "check_oversized_pickle", "(", "pickled", ",", "name", ",", "obj_type", ",", "worker", ")", ":", "length", "=", "len", "(", "pickled", ")", "if", "length", "<=", "ray_constants", ".", "PICKLE_OBJECT_WARNING_SIZE", ":", "return", "warning_message", "=", "(", "\"Warning: The {} {} has size {} when pickled. \"", "\"It will be stored in Redis, which could cause memory issues. \"", "\"This may mean that its definition uses a large array or other object.\"", ")", ".", "format", "(", "obj_type", ",", "name", ",", "length", ")", "push_error_to_driver", "(", "worker", ",", "ray_constants", ".", "PICKLING_LARGE_OBJECT_PUSH_ERROR", ",", "warning_message", ",", "driver_id", "=", "worker", ".", "task_driver_id", ")" ]
Send a warning message if the pickled object is too large.

Args:
    pickled: the pickled object.
    name: name of the pickled object.
    obj_type: type of the pickled object, can be 'function',
        'remote function', 'actor', or 'object'.
    worker: the worker used to send warning message.
[ "Send", "a", "warning", "message", "if", "the", "pickled", "object", "is", "too", "large", "." ]
python
train
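The size check above is straightforward to reproduce outside Ray; a minimal sketch (the object and the threshold here are illustrative stand-ins, not Ray's constants):

import pickle

obj = list(range(100000))
pickled = pickle.dumps(obj)
# check_oversized_pickle compares len(pickled) against
# ray_constants.PICKLE_OBJECT_WARNING_SIZE before warning.
if len(pickled) > 10 * 1024 * 1024:  # stand-in threshold, not Ray's value
    print("object is large when pickled:", len(pickled))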
cosven/feeluown-core
fuocore/utils.py
https://github.com/cosven/feeluown-core/blob/62dc64638f62971b16be0a75c0b8c7ae2999869e/fuocore/utils.py#L43-L69
def find_previous(element, l):
    """
    find previous element in a sorted list

    >>> find_previous(0, [0])
    0
    >>> find_previous(2, [1, 1, 3])
    1
    >>> find_previous(0, [1, 2])
    >>> find_previous(1.5, [1, 2])
    1
    >>> find_previous(3, [1, 2])
    2
    """
    length = len(l)
    for index, current in enumerate(l):
        # current is the last element
        if length - 1 == index:
            return current
        # current is the first element
        if index == 0:
            if element < current:
                return None
        if current <= element < l[index + 1]:
            return current
[ "def", "find_previous", "(", "element", ",", "l", ")", ":", "length", "=", "len", "(", "l", ")", "for", "index", ",", "current", "in", "enumerate", "(", "l", ")", ":", "# current is the last element", "if", "length", "-", "1", "==", "index", ":", "return", "current", "# current is the first element", "if", "index", "==", "0", ":", "if", "element", "<", "current", ":", "return", "None", "if", "current", "<=", "element", "<", "l", "[", "index", "+", "1", "]", ":", "return", "current" ]
find previous element in a sorted list

>>> find_previous(0, [0])
0
>>> find_previous(2, [1, 1, 3])
1
>>> find_previous(0, [1, 2])
>>> find_previous(1.5, [1, 2])
1
>>> find_previous(3, [1, 2])
2
[ "find", "previous", "element", "in", "a", "sorted", "list" ]
python
train
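For comparison, the same lookup can be written with the standard library's bisect module; this is an equivalent sketch, not the project's implementation:

from bisect import bisect_right

def find_previous_bisect(element, l):
    # Index of the rightmost entry <= element; None if all entries are larger.
    i = bisect_right(l, element)
    return l[i - 1] if i else None

print(find_previous_bisect(2, [1, 1, 3]))  # 1
print(find_previous_bisect(0, [1, 2]))     # None
print(find_previous_bisect(3, [1, 2]))     # 2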
trailofbits/manticore
manticore/native/cpu/x86.py
https://github.com/trailofbits/manticore/blob/54c5a15b1119c523ae54c09972413e8b97f11629/manticore/native/cpu/x86.py#L5256-L5279
def PSRLDQ(cpu, dest, src):
    """
    Packed shift right logical double quadword.

    Shifts the destination operand (first operand) to the right by the number
    of bytes specified in the count operand (second operand). The empty
    high-order bytes are cleared (set to all 0s). If the value specified by
    the count operand is greater than 15, the destination operand is set to
    all 0s. The destination operand is an XMM register. The count operand is
    an 8-bit immediate::

        TEMP = SRC;
        if (TEMP > 15) TEMP = 16;
        DEST = DEST >> (temp * 8);

    :param cpu: current CPU.
    :param dest: destination operand.
    :param src: count operand.
    """
    # TODO(yan): Verify the correctness of truncating SRC like this ( tests
    # use '-1' as the value
    temp = Operators.EXTRACT(src.read(), 0, 8)
    temp = Operators.ITEBV(src.size, temp > 15, 16, temp)
    dest.write(dest.read() >> (temp * 8))
[ "def", "PSRLDQ", "(", "cpu", ",", "dest", ",", "src", ")", ":", "# TODO(yan): Verify the correctness of truncating SRC like this ( tests", "# use '-1' as the value", "temp", "=", "Operators", ".", "EXTRACT", "(", "src", ".", "read", "(", ")", ",", "0", ",", "8", ")", "temp", "=", "Operators", ".", "ITEBV", "(", "src", ".", "size", ",", "temp", ">", "15", ",", "16", ",", "temp", ")", "dest", ".", "write", "(", "dest", ".", "read", "(", ")", ">>", "(", "temp", "*", "8", ")", ")" ]
Packed shift right logical double quadword.

Shifts the destination operand (first operand) to the right by the number
of bytes specified in the count operand (second operand). The empty
high-order bytes are cleared (set to all 0s). If the value specified by the
count operand is greater than 15, the destination operand is set to all 0s.
The destination operand is an XMM register. The count operand is an 8-bit
immediate::

    TEMP = SRC;
    if (TEMP > 15) TEMP = 16;
    DEST = DEST >> (temp * 8);

:param cpu: current CPU.
:param dest: destination operand.
:param src: count operand.
[ "Packed", "shift", "right", "logical", "double", "quadword", "." ]
python
valid
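The clamp-then-shift semantics in the docstring are easy to check with concrete values; a plain-integer model of the 128-bit register (illustrative values, no symbolic state):

def psrldq_concrete(dest, count):
    # Clamp the byte count at 16, then shift right by count * 8 bits,
    # exactly the TEMP/DEST pseudocode above on a 128-bit integer.
    temp = count & 0xFF
    if temp > 15:
        temp = 16
    return (dest >> (temp * 8)) & ((1 << 128) - 1)

x = 0x0123456789ABCDEF0123456789ABCDEF
print(hex(psrldq_concrete(x, 8)))    # 0x123456789abcdef
print(hex(psrldq_concrete(x, 255)))  # 0x0: counts above 15 clear the register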
KelSolaar/Umbra
umbra/components/factory/components_manager_ui/nodes.py
https://github.com/KelSolaar/Umbra/blob/66f45f08d9d723787f1191989f8b0dda84b412ce/umbra/components/factory/components_manager_ui/nodes.py#L231-L253
def __initialize_node(self, attributes_flags=int(Qt.ItemIsSelectable | Qt.ItemIsEnabled)):
    """
    Initializes the node.

    :param attributes_flags: Attributes flags.
    :type attributes_flags: int
    """
    attributes = dir(self.__component)
    for attribute in attributes:
        if attribute == "name":
            continue

        if not "_Profile__{0}".format(attribute) in attributes:
            continue

        value = getattr(self.__component, attribute)
        value = ", ".join(value) if type(value) in (tuple, list) else value
        roles = {Qt.DisplayRole: value,
                 Qt.EditRole: value}
        self[attribute] = umbra.ui.nodes.GraphModelAttribute(attribute, value, roles, attributes_flags)

    self.update_tool_tip()
[ "def", "__initialize_node", "(", "self", ",", "attributes_flags", "=", "int", "(", "Qt", ".", "ItemIsSelectable", "|", "Qt", ".", "ItemIsEnabled", ")", ")", ":", "attributes", "=", "dir", "(", "self", ".", "__component", ")", "for", "attribute", "in", "attributes", ":", "if", "attribute", "==", "\"name\"", ":", "continue", "if", "not", "\"_Profile__{0}\"", ".", "format", "(", "attribute", ")", "in", "attributes", ":", "continue", "value", "=", "getattr", "(", "self", ".", "__component", ",", "attribute", ")", "value", "=", "\", \"", ".", "join", "(", "value", ")", "if", "type", "(", "value", ")", "in", "(", "tuple", ",", "list", ")", "else", "value", "roles", "=", "{", "Qt", ".", "DisplayRole", ":", "value", ",", "Qt", ".", "EditRole", ":", "value", "}", "self", "[", "attribute", "]", "=", "umbra", ".", "ui", ".", "nodes", ".", "GraphModelAttribute", "(", "attribute", ",", "value", ",", "roles", ",", "attributes_flags", ")", "self", ".", "update_tool_tip", "(", ")" ]
Initializes the node.

:param attributes_flags: Attributes flags.
:type attributes_flags: int
[ "Initializes", "the", "node", "." ]
python
train
NuGrid/NuGridPy
nugridpy/nugridse.py
https://github.com/NuGrid/NuGridPy/blob/eee8047446e398be77362d82c1d8b3310054fab0/nugridpy/nugridse.py#L423-L462
def get_elemental_abunds(self, cycle, index=None):
    """
    returns the elemental abundances for one cycle, either
    for the whole star or a specific zone depending upon
    the value of 'index'.

    Parameters
    ----------
    cycle : string or integer
        Model to get the abundances for.
    index : integer or list, optional
        zone number for which to get elemental abundances. If
        None the entire abundance profile is returned. If a 1x2
        list, the abundances are returned between indices of
        index[0] and index[1]. The default is None.
    """
    isoabunds = self.se.get(cycle, 'iso_massf')
    A = array(self.se.A)
    Z = array(self.se.Z)
    names = self.se.isos
    Zuq = list(set(Z))  # list of unique Zs
    Zuq.sort()

    if index == None:
        index = [0, len(isoabunds)]

    if type(index) == list:
        elemabunds = []
        for zone in range(index[0], index[1]):
            percent = int((zone - index[0]) * 100. / (index[1] - index[0]))
            sys.stdout.flush()
            sys.stdout.write("\rgetting elemental abundances " + "...%d%%" % percent)

            elemabunds.append([sum(isoabunds[zone][where(Z == iZ)]) for iZ in Zuq])
    else:
        elemabunds = [sum(isoabunds[index][where(Z == iZ)]) for iZ in Zuq]

    return elemabunds
[ "def", "get_elemental_abunds", "(", "self", ",", "cycle", ",", "index", "=", "None", ")", ":", "isoabunds", "=", "self", ".", "se", ".", "get", "(", "cycle", ",", "'iso_massf'", ")", "A", "=", "array", "(", "self", ".", "se", ".", "A", ")", "Z", "=", "array", "(", "self", ".", "se", ".", "Z", ")", "names", "=", "self", ".", "se", ".", "isos", "Zuq", "=", "list", "(", "set", "(", "Z", ")", ")", "# list of unique Zs", "Zuq", ".", "sort", "(", ")", "if", "index", "==", "None", ":", "index", "=", "[", "0", ",", "len", "(", "isoabunds", ")", "]", "if", "type", "(", "index", ")", "==", "list", ":", "elemabunds", "=", "[", "]", "for", "zone", "in", "range", "(", "index", "[", "0", "]", ",", "index", "[", "1", "]", ")", ":", "percent", "=", "int", "(", "(", "zone", "-", "index", "[", "0", "]", ")", "*", "100.", "/", "(", "index", "[", "1", "]", "-", "index", "[", "0", "]", ")", ")", "sys", ".", "stdout", ".", "flush", "(", ")", "sys", ".", "stdout", ".", "write", "(", "\"\\rgetting elemental abundances \"", "+", "\"...%d%%\"", "%", "percent", ")", "elemabunds", ".", "append", "(", "[", "sum", "(", "isoabunds", "[", "zone", "]", "[", "where", "(", "Z", "==", "iZ", ")", "]", ")", "for", "iZ", "in", "Zuq", "]", ")", "else", ":", "elemabunds", "=", "[", "sum", "(", "isoabunds", "[", "index", "]", "[", "where", "(", "Z", "==", "iZ", ")", "]", ")", "for", "iZ", "in", "Zuq", "]", "return", "elemabunds" ]
returns the elemental abundances for one cycle, either
for the whole star or a specific zone depending upon
the value of 'index'.

Parameters
----------
cycle : string or integer
    Model to get the abundances for.
index : integer or list, optional
    zone number for which to get elemental abundances. If
    None the entire abundance profile is returned. If a 1x2
    list, the abundances are returned between indices of
    index[0] and index[1]. The default is None.
[ "returns", "the", "elemental", "abundances", "for", "one", "cycle", "either", "for", "the", "whole", "star", "or", "a", "specific", "zone", "depending", "upon", "the", "value", "of", "index", "." ]
python
train
ajenhl/tacl
tacl/data_store.py
https://github.com/ajenhl/tacl/blob/b8a343248e77f1c07a5a4ac133a9ad6e0b4781c2/tacl/data_store.py#L162-L183
def _add_text_size_ngrams(self, text_id, size, ngrams):
    """Adds `ngrams`, that are of size `size`, to the data store.

    The added `ngrams` are associated with `text_id`.

    :param text_id: database ID of text associated with `ngrams`
    :type text_id: `int`
    :param size: size of n-grams
    :type size: `int`
    :param ngrams: n-grams to be added
    :type ngrams: `collections.Counter`
    """
    unique_ngrams = len(ngrams)
    self._logger.info('Adding {} unique {}-grams'.format(
        unique_ngrams, size))
    parameters = [[text_id, ngram, size, count]
                  for ngram, count in ngrams.items()]
    with self._conn:
        self._conn.execute(constants.INSERT_TEXT_HAS_NGRAM_SQL,
                           [text_id, size, unique_ngrams])
        self._conn.executemany(constants.INSERT_NGRAM_SQL, parameters)
[ "def", "_add_text_size_ngrams", "(", "self", ",", "text_id", ",", "size", ",", "ngrams", ")", ":", "unique_ngrams", "=", "len", "(", "ngrams", ")", "self", ".", "_logger", ".", "info", "(", "'Adding {} unique {}-grams'", ".", "format", "(", "unique_ngrams", ",", "size", ")", ")", "parameters", "=", "[", "[", "text_id", ",", "ngram", ",", "size", ",", "count", "]", "for", "ngram", ",", "count", "in", "ngrams", ".", "items", "(", ")", "]", "with", "self", ".", "_conn", ":", "self", ".", "_conn", ".", "execute", "(", "constants", ".", "INSERT_TEXT_HAS_NGRAM_SQL", ",", "[", "text_id", ",", "size", ",", "unique_ngrams", "]", ")", "self", ".", "_conn", ".", "executemany", "(", "constants", ".", "INSERT_NGRAM_SQL", ",", "parameters", ")" ]
Adds `ngrams`, that are of size `size`, to the data store.

The added `ngrams` are associated with `text_id`.

:param text_id: database ID of text associated with `ngrams`
:type text_id: `int`
:param size: size of n-grams
:type size: `int`
:param ngrams: n-grams to be added
:type ngrams: `collections.Counter`
[ "Adds", "ngrams", "that", "are", "of", "size", "size", "to", "the", "data", "store", "." ]
python
train
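The ngrams argument above is a collections.Counter; a small sketch of producing one (character n-grams here for brevity, whereas tacl itself counts token n-grams from witness texts):

from collections import Counter

def ngram_counts(text, size):
    # Map each n-gram to its occurrence count, the shape
    # _add_text_size_ngrams expects.
    return Counter(text[i:i + size] for i in range(len(text) - size + 1))

print(ngram_counts("banana", 2))  # Counter({'an': 2, 'na': 2, 'ba': 1})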
bids-standard/pybids
bids/reports/report.py
https://github.com/bids-standard/pybids/blob/30d924ce770622bda0e390d613a8da42a2a20c32/bids/reports/report.py#L94-L147
def _report_subject(self, subject, **kwargs):
    """Write a report for a single subject.

    Parameters
    ----------
    subject : :obj:`str`
        Subject ID.

    Attributes
    ----------
    layout : :obj:`bids.layout.BIDSLayout`
        Layout object for a BIDS dataset.
    config : :obj:`dict`
        Configuration info for methods generation.

    Returns
    -------
    description : :obj:`str`
        A publication-ready report of the dataset's data acquisition
        information. Each scan type is given its own paragraph.
    """
    description_list = []
    # Remove sess from kwargs if provided, else set sess as all available
    sessions = kwargs.pop('session',
                          self.layout.get_sessions(subject=subject, **kwargs))
    if not sessions:
        sessions = [None]
    elif not isinstance(sessions, list):
        sessions = [sessions]

    for ses in sessions:
        niftis = self.layout.get(subject=subject, extensions='nii.gz', **kwargs)
        if niftis:
            description_list.append('For session {0}:'.format(ses))
            description_list += parsing.parse_niftis(self.layout, niftis,
                                                     subject, self.config,
                                                     session=ses)
            metadata = self.layout.get_metadata(niftis[0].path)
        else:
            raise Exception('No niftis for subject {0}'.format(subject))

    # Assume all data were converted the same way and use the last nifti
    # file's json for conversion information.
    if 'metadata' not in vars():
        raise Exception('No valid jsons found. Cannot generate final '
                        'paragraph.')

    description = '\n\t'.join(description_list)
    description = description.replace('\tFor session', '\nFor session')
    description += '\n\n{0}'.format(parsing.final_paragraph(metadata))
    return description
[ "def", "_report_subject", "(", "self", ",", "subject", ",", "*", "*", "kwargs", ")", ":", "description_list", "=", "[", "]", "# Remove sess from kwargs if provided, else set sess as all available", "sessions", "=", "kwargs", ".", "pop", "(", "'session'", ",", "self", ".", "layout", ".", "get_sessions", "(", "subject", "=", "subject", ",", "*", "*", "kwargs", ")", ")", "if", "not", "sessions", ":", "sessions", "=", "[", "None", "]", "elif", "not", "isinstance", "(", "sessions", ",", "list", ")", ":", "sessions", "=", "[", "sessions", "]", "for", "ses", "in", "sessions", ":", "niftis", "=", "self", ".", "layout", ".", "get", "(", "subject", "=", "subject", ",", "extensions", "=", "'nii.gz'", ",", "*", "*", "kwargs", ")", "if", "niftis", ":", "description_list", ".", "append", "(", "'For session {0}:'", ".", "format", "(", "ses", ")", ")", "description_list", "+=", "parsing", ".", "parse_niftis", "(", "self", ".", "layout", ",", "niftis", ",", "subject", ",", "self", ".", "config", ",", "session", "=", "ses", ")", "metadata", "=", "self", ".", "layout", ".", "get_metadata", "(", "niftis", "[", "0", "]", ".", "path", ")", "else", ":", "raise", "Exception", "(", "'No niftis for subject {0}'", ".", "format", "(", "subject", ")", ")", "# Assume all data were converted the same way and use the last nifti", "# file's json for conversion information.", "if", "'metadata'", "not", "in", "vars", "(", ")", ":", "raise", "Exception", "(", "'No valid jsons found. Cannot generate final '", "'paragraph.'", ")", "description", "=", "'\\n\\t'", ".", "join", "(", "description_list", ")", "description", "=", "description", ".", "replace", "(", "'\\tFor session'", ",", "'\\nFor session'", ")", "description", "+=", "'\\n\\n{0}'", ".", "format", "(", "parsing", ".", "final_paragraph", "(", "metadata", ")", ")", "return", "description" ]
Write a report for a single subject.

Parameters
----------
subject : :obj:`str`
    Subject ID.

Attributes
----------
layout : :obj:`bids.layout.BIDSLayout`
    Layout object for a BIDS dataset.
config : :obj:`dict`
    Configuration info for methods generation.

Returns
-------
description : :obj:`str`
    A publication-ready report of the dataset's data acquisition
    information. Each scan type is given its own paragraph.
[ "Write", "a", "report", "for", "a", "single", "subject", "." ]
python
train
Qiskit/qiskit-terra
qiskit/validation/base.py
https://github.com/Qiskit/qiskit-terra/blob/d4f58d903bc96341b816f7c35df936d6421267d1/qiskit/validation/base.py#L227-L233
def _validate(instance):
    """Validate the internal representation of the instance."""
    try:
        _ = instance.schema.validate(instance.to_dict())
    except ValidationError as ex:
        raise ModelValidationError(
            ex.messages, ex.field_names, ex.fields, ex.data, **ex.kwargs)
[ "def", "_validate", "(", "instance", ")", ":", "try", ":", "_", "=", "instance", ".", "schema", ".", "validate", "(", "instance", ".", "to_dict", "(", ")", ")", "except", "ValidationError", "as", "ex", ":", "raise", "ModelValidationError", "(", "ex", ".", "messages", ",", "ex", ".", "field_names", ",", "ex", ".", "fields", ",", "ex", ".", "data", ",", "*", "*", "ex", ".", "kwargs", ")" ]
Validate the internal representation of the instance.
[ "Validate", "the", "internal", "representation", "of", "the", "instance", "." ]
python
test
ljcooke/see
see/output.py
https://github.com/ljcooke/see/blob/4cbc67a31c92367977ecb4bbb1f0736fa688a6ba/see/output.py#L117-L131
def column_width(tokens):
    """
    Return a suitable column width to display one or more strings.
    """
    get_len = tools.display_len if PY3 else len
    lens = sorted(map(get_len, tokens or [])) or [0]
    width = lens[-1]

    # adjust for disproportionately long strings
    if width >= 18:
        most = lens[int(len(lens) * 0.9)]
        if most < width + 6:
            return most

    return width
[ "def", "column_width", "(", "tokens", ")", ":", "get_len", "=", "tools", ".", "display_len", "if", "PY3", "else", "len", "lens", "=", "sorted", "(", "map", "(", "get_len", ",", "tokens", "or", "[", "]", ")", ")", "or", "[", "0", "]", "width", "=", "lens", "[", "-", "1", "]", "# adjust for disproportionately long strings", "if", "width", ">=", "18", ":", "most", "=", "lens", "[", "int", "(", "len", "(", "lens", ")", "*", "0.9", ")", "]", "if", "most", "<", "width", "+", "6", ":", "return", "most", "return", "width" ]
Return a suitable column width to display one or more strings.
[ "Return", "a", "suitable", "column", "width", "to", "display", "one", "or", "more", "strings", "." ]
python
train
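A standalone rerun of the same logic with plain len, to show the 90th-percentile adjustment in action (outside the see package, so tools.display_len and PY3 are not needed):

def column_width_sketch(tokens):
    lens = sorted(map(len, tokens or [])) or [0]
    width = lens[-1]
    # A single very long string should not widen every column; fall back
    # to the 90th-percentile length when the maximum is an outlier.
    if width >= 18:
        most = lens[int(len(lens) * 0.9)]
        if most < width + 6:
            return most
    return width

tokens = ["alpha"] * 10 + ["a_disproportionately_long_name"]
print(column_width_sketch(tokens))  # 5, not 30: the outlier is ignored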
google/openhtf
openhtf/util/conf.py
https://github.com/google/openhtf/blob/655e85df7134db7bdf8f8fdd6ff9a6bf932e7b09/openhtf/util/conf.py#L356-L366
def reset(self):
    """Reset the loaded state of the configuration to what it was at import.

    Note that this does *not* reset values set by commandline flags or loaded
    from --config-file (in fact, any values loaded from --config-file that
    have been overridden are reset to their value from --config-file).
    """
    # Populate loaded_values with values from --config-file, if it was given.
    self._loaded_values = {}
    if self._flags.config_file is not None:
        self.load_from_file(self._flags.config_file, _allow_undeclared=True)
[ "def", "reset", "(", "self", ")", ":", "# Populate loaded_values with values from --config-file, if it was given.", "self", ".", "_loaded_values", "=", "{", "}", "if", "self", ".", "_flags", ".", "config_file", "is", "not", "None", ":", "self", ".", "load_from_file", "(", "self", ".", "_flags", ".", "config_file", ",", "_allow_undeclared", "=", "True", ")" ]
Reset the loaded state of the configuration to what it was at import. Note that this does *not* reset values set by commandline flags or loaded from --config-file (in fact, any values loaded from --config-file that have been overridden are reset to their value from --config-file).
[ "Reset", "the", "loaded", "state", "of", "the", "configuration", "to", "what", "it", "was", "at", "import", "." ]
python
train
decryptus/sonicprobe
sonicprobe/libs/pworkerpool.py
https://github.com/decryptus/sonicprobe/blob/72f73f3a40d2982d79ad68686e36aa31d94b76f8/sonicprobe/libs/pworkerpool.py#L188-L200
def add(self, nb=1, name=None):
    """
    Create one or many workers.
    """
    for x in xrange(nb):
        self.count_lock.acquire()
        self.shared['workers'] += 1
        xid = self.shared['workers']
        self.kill_event.clear()
        self.count_lock.release()
        w = WorkerProc(xid,
                       self.queue,
                       self.shared,
                       self.tasks,
                       self.life_time,
                       self.count_lock,
                       self.kill_event)
        w.name = self.get_name(xid, name)
        w.start()
[ "def", "add", "(", "self", ",", "nb", "=", "1", ",", "name", "=", "None", ")", ":", "for", "x", "in", "xrange", "(", "nb", ")", ":", "self", ".", "count_lock", ".", "acquire", "(", ")", "self", ".", "shared", "[", "'workers'", "]", "+=", "1", "xid", "=", "self", ".", "shared", "[", "'workers'", "]", "self", ".", "kill_event", ".", "clear", "(", ")", "self", ".", "count_lock", ".", "release", "(", ")", "w", "=", "WorkerProc", "(", "xid", ",", "self", ".", "queue", ",", "self", ".", "shared", ",", "self", ".", "tasks", ",", "self", ".", "life_time", ",", "self", ".", "count_lock", ",", "self", ".", "kill_event", ")", "w", ".", "name", "=", "self", ".", "get_name", "(", "xid", ",", "name", ")", "w", ".", "start", "(", ")" ]
Create one or many workers.
[ "Create", "one", "or", "many", "workers", "." ]
python
train
noobermin/pys
pys/__init__.py
https://github.com/noobermin/pys/blob/e01b74210c65eb96d019bb42e0a3c9e6676da943/pys/__init__.py#L130-L138
def parse_ctuple(s, length=2):
    '''parse a string of acceptable colors into matplotlib,
    that is, either strings, or three tuples of rgb. Don't
    quote strings.
    '''
    if parse_utuple(s, colrx_s, length=length) is None:
        raise ValueError("{} is not a valid color tuple.".format(s))
    # quote strings
    s = quote_subs(s, colorfix=True)
    return evalt(s)
[ "def", "parse_ctuple", "(", "s", ",", "length", "=", "2", ")", ":", "if", "parse_utuple", "(", "s", ",", "colrx_s", ",", "length", "=", "length", ")", "is", "None", ":", "raise", "ValueError", "(", "\"{} is not a valid color tuple.\"", ".", "format", "(", "s", ")", ")", "#quote strings", "s", "=", "quote_subs", "(", "s", ",", "colorfix", "=", "True", ")", "return", "evalt", "(", "s", ")" ]
parse a string of acceptable colors into matplotlib, that is, either strings, or three tuples of rgb. Don't quote strings.
[ "parse", "a", "string", "of", "acceptable", "colors", "into", "matplotlib", "that", "is", "either", "strings", "or", "three", "tuples", "of", "rgb", ".", "Don", "t", "quote", "strings", "." ]
python
train
google/dotty
efilter/scope.py
https://github.com/google/dotty/blob/b145131499be0c4b755fc2e2ac19be11a50bce6a/efilter/scope.py#L104-L119
def getmembers_runtime(self):
    """Gets members (vars) from all scopes using ONLY runtime information.

    You most likely want to use ScopeStack.getmembers instead.

    Returns:
        Set of available vars.

    Raises:
        NotImplementedError if any scope fails to implement 'getmembers'.
    """
    names = set()
    for scope in self.scopes:
        names.update(structured.getmembers_runtime(scope))

    return names
[ "def", "getmembers_runtime", "(", "self", ")", ":", "names", "=", "set", "(", ")", "for", "scope", "in", "self", ".", "scopes", ":", "names", ".", "update", "(", "structured", ".", "getmembers_runtime", "(", "scope", ")", ")", "return", "names" ]
Gets members (vars) from all scopes using ONLY runtime information.

You most likely want to use ScopeStack.getmembers instead.

Returns:
    Set of available vars.

Raises:
    NotImplementedError if any scope fails to implement 'getmembers'.
[ "Gets", "members", "(", "vars", ")", "from", "all", "scopes", "using", "ONLY", "runtime", "information", "." ]
python
train
asweigart/pysimplevalidate
src/pysimplevalidate/__init__.py
https://github.com/asweigart/pysimplevalidate/blob/3ca27228abb7355d14bbf8abc225c63366379e44/src/pysimplevalidate/__init__.py#L1122-L1180
def validateYesNo(value, blank=False, strip=None, allowlistRegexes=None, blocklistRegexes=None, yesVal='yes', noVal='no', caseSensitive=False, excMsg=None):
    """Raises ValidationException if value is not a yes or no response.
    Returns the yesVal or noVal argument, not value.

    Note that value can be any case (by default) and can also just match the
    first letter of yesVal or noVal.

    * value (str): The value being validated as a yes/no response.
    * blank (bool): If True, a blank string will be accepted. Defaults to False.
    * strip (bool, str, None): If None, whitespace is stripped from value. If a str, the characters in it are stripped from value. If False, nothing is stripped.
    * allowlistRegexes (Sequence, None): A sequence of regex str that will explicitly pass validation, even if they aren't numbers.
    * blocklistRegexes (Sequence, None): A sequence of regex str or (regex_str, response_str) tuples that, if matched, will explicitly fail validation.
    * caseSensitive (bool): Determines if value must match the case of yesVal and noVal. Defaults to False.
    * excMsg (str): A custom message to use in the raised ValidationException.

    >>> import pysimplevalidate as pysv
    >>> pysv.validateYesNo('y')
    'yes'
    >>> pysv.validateYesNo('YES')
    'yes'
    >>> pysv.validateYesNo('No')
    'no'
    >>> pysv.validateYesNo('OUI', yesVal='oui', noVal='no')
    'oui'
    """
    # Validate parameters. TODO - can probably improve this to remove the duplication.
    _validateGenericParameters(blank=blank, strip=strip, allowlistRegexes=allowlistRegexes, blocklistRegexes=blocklistRegexes)

    returnNow, value = _prevalidationCheck(value, blank, strip, allowlistRegexes, blocklistRegexes, excMsg)
    if returnNow:
        return value

    yesVal = str(yesVal)
    noVal = str(noVal)
    if len(yesVal) == 0:
        raise PySimpleValidateException('yesVal argument must be a non-empty string.')
    if len(noVal) == 0:
        raise PySimpleValidateException('noVal argument must be a non-empty string.')
    if (yesVal == noVal) or (not caseSensitive and yesVal.upper() == noVal.upper()):
        raise PySimpleValidateException('yesVal and noVal arguments must be different.')
    if (yesVal[0] == noVal[0]) or (not caseSensitive and yesVal[0].upper() == noVal[0].upper()):
        raise PySimpleValidateException('first character of yesVal and noVal arguments must be different')

    returnNow, value = _prevalidationCheck(value, blank, strip, allowlistRegexes, blocklistRegexes, excMsg)
    if returnNow:
        return value

    if caseSensitive:
        if value in (yesVal, yesVal[0]):
            return yesVal
        elif value in (noVal, noVal[0]):
            return noVal
    else:
        if value.upper() in (yesVal.upper(), yesVal[0].upper()):
            return yesVal
        elif value.upper() in (noVal.upper(), noVal[0].upper()):
            return noVal

    _raiseValidationException(_('%r is not a valid %s/%s response.') % (_errstr(value), yesVal, noVal), excMsg)
[ "def", "validateYesNo", "(", "value", ",", "blank", "=", "False", ",", "strip", "=", "None", ",", "allowlistRegexes", "=", "None", ",", "blocklistRegexes", "=", "None", ",", "yesVal", "=", "'yes'", ",", "noVal", "=", "'no'", ",", "caseSensitive", "=", "False", ",", "excMsg", "=", "None", ")", ":", "# Validate parameters. TODO - can probably improve this to remove the duplication.", "_validateGenericParameters", "(", "blank", "=", "blank", ",", "strip", "=", "strip", ",", "allowlistRegexes", "=", "allowlistRegexes", ",", "blocklistRegexes", "=", "blocklistRegexes", ")", "returnNow", ",", "value", "=", "_prevalidationCheck", "(", "value", ",", "blank", ",", "strip", ",", "allowlistRegexes", ",", "blocklistRegexes", ",", "excMsg", ")", "if", "returnNow", ":", "return", "value", "yesVal", "=", "str", "(", "yesVal", ")", "noVal", "=", "str", "(", "noVal", ")", "if", "len", "(", "yesVal", ")", "==", "0", ":", "raise", "PySimpleValidateException", "(", "'yesVal argument must be a non-empty string.'", ")", "if", "len", "(", "noVal", ")", "==", "0", ":", "raise", "PySimpleValidateException", "(", "'noVal argument must be a non-empty string.'", ")", "if", "(", "yesVal", "==", "noVal", ")", "or", "(", "not", "caseSensitive", "and", "yesVal", ".", "upper", "(", ")", "==", "noVal", ".", "upper", "(", ")", ")", ":", "raise", "PySimpleValidateException", "(", "'yesVal and noVal arguments must be different.'", ")", "if", "(", "yesVal", "[", "0", "]", "==", "noVal", "[", "0", "]", ")", "or", "(", "not", "caseSensitive", "and", "yesVal", "[", "0", "]", ".", "upper", "(", ")", "==", "noVal", "[", "0", "]", ".", "upper", "(", ")", ")", ":", "raise", "PySimpleValidateException", "(", "'first character of yesVal and noVal arguments must be different'", ")", "returnNow", ",", "value", "=", "_prevalidationCheck", "(", "value", ",", "blank", ",", "strip", ",", "allowlistRegexes", ",", "blocklistRegexes", ",", "excMsg", ")", "if", "returnNow", ":", "return", "value", "if", "caseSensitive", ":", "if", "value", "in", "(", "yesVal", ",", "yesVal", "[", "0", "]", ")", ":", "return", "yesVal", "elif", "value", "in", "(", "noVal", ",", "noVal", "[", "0", "]", ")", ":", "return", "noVal", "else", ":", "if", "value", ".", "upper", "(", ")", "in", "(", "yesVal", ".", "upper", "(", ")", ",", "yesVal", "[", "0", "]", ".", "upper", "(", ")", ")", ":", "return", "yesVal", "elif", "value", ".", "upper", "(", ")", "in", "(", "noVal", ".", "upper", "(", ")", ",", "noVal", "[", "0", "]", ".", "upper", "(", ")", ")", ":", "return", "noVal", "_raiseValidationException", "(", "_", "(", "'%r is not a valid %s/%s response.'", ")", "%", "(", "_errstr", "(", "value", ")", ",", "yesVal", ",", "noVal", ")", ",", "excMsg", ")" ]
Raises ValidationException if value is not a yes or no response.
Returns the yesVal or noVal argument, not value.

Note that value can be any case (by default) and can also just match the
first letter of yesVal or noVal.

* value (str): The value being validated as a yes/no response.
* blank (bool): If True, a blank string will be accepted. Defaults to False.
* strip (bool, str, None): If None, whitespace is stripped from value. If a str, the characters in it are stripped from value. If False, nothing is stripped.
* allowlistRegexes (Sequence, None): A sequence of regex str that will explicitly pass validation, even if they aren't numbers.
* blocklistRegexes (Sequence, None): A sequence of regex str or (regex_str, response_str) tuples that, if matched, will explicitly fail validation.
* caseSensitive (bool): Determines if value must match the case of yesVal and noVal. Defaults to False.
* excMsg (str): A custom message to use in the raised ValidationException.

>>> import pysimplevalidate as pysv
>>> pysv.validateYesNo('y')
'yes'
>>> pysv.validateYesNo('YES')
'yes'
>>> pysv.validateYesNo('No')
'no'
>>> pysv.validateYesNo('OUI', yesVal='oui', noVal='no')
'oui'
[ "Raises", "ValidationException", "if", "value", "is", "not", "a", "yes", "or", "no", "response", ".", "Returns", "the", "yesVal", "or", "noVal", "argument", "not", "value", "." ]
python
train
hydpy-dev/hydpy
hydpy/auxs/anntools.py
https://github.com/hydpy-dev/hydpy/blob/1bc6a82cf30786521d86b36e27900c6717d3348d/hydpy/auxs/anntools.py#L1471-L1473
def shape(self) -> Tuple[int, ...]:
    """The shape of array |anntools.SeasonalANN.ratios|."""
    return tuple(int(sub) for sub in self.ratios.shape)
[ "def", "shape", "(", "self", ")", "->", "Tuple", "[", "int", ",", "...", "]", ":", "return", "tuple", "(", "int", "(", "sub", ")", "for", "sub", "in", "self", ".", "ratios", ".", "shape", ")" ]
The shape of array |anntools.SeasonalANN.ratios|.
[ "The", "shape", "of", "array", "|anntools", ".", "SeasonalANN", ".", "ratios|", "." ]
python
train
inveniosoftware/invenio-records-rest
invenio_records_rest/serializers/jsonld.py
https://github.com/inveniosoftware/invenio-records-rest/blob/e7b63c5f72cef03d06d3f1b4c12c0d37e3a628b9/invenio_records_rest/serializers/jsonld.py#L70-L76
def transform_search_hit(self, pid, record_hit, links_factory=None,
                         **kwargs):
    """Transform search result hit into an intermediate representation."""
    result = super(JSONLDTransformerMixin, self).transform_search_hit(
        pid, record_hit, links_factory, **kwargs
    )
    return self.transform_jsonld(result)
[ "def", "transform_search_hit", "(", "self", ",", "pid", ",", "record_hit", ",", "links_factory", "=", "None", ",", "*", "*", "kwargs", ")", ":", "result", "=", "super", "(", "JSONLDTransformerMixin", ",", "self", ")", ".", "transform_search_hit", "(", "pid", ",", "record_hit", ",", "links_factory", ",", "*", "*", "kwargs", ")", "return", "self", ".", "transform_jsonld", "(", "result", ")" ]
Transform search result hit into an intermediate representation.
[ "Transform", "search", "result", "hit", "into", "an", "intermediate", "representation", "." ]
python
train
ArchiveTeam/wpull
wpull/protocol/http/robots.py
https://github.com/ArchiveTeam/wpull/blob/ddf051aa3322479325ba20aa778cb2cb97606bf5/wpull/protocol/http/robots.py#L61-L96
def fetch_robots_txt(self, request: Request, file=None):
    '''Fetch the robots.txt file for the request.

    Coroutine.
    '''
    url_info = request.url_info
    url = URLInfo.parse('{0}://{1}/robots.txt'.format(
        url_info.scheme, url_info.hostname_with_port)).url

    if not file:
        file = wpull.body.new_temp_file(os.getcwd(), hint='robots')

    with contextlib.closing(file):
        request = Request(url)
        session = self._web_client.session(request)

        while not session.done():
            wpull.util.truncate_file(file.name)

            try:
                response = yield from session.start()
                yield from session.download(file=file)
            except ProtocolError:
                self._accept_as_blank(url_info)
                return

        status_code = response.status_code

        if 500 <= status_code <= 599:
            raise ServerError('Server returned error for robots.txt.')

        if status_code == 200:
            self._read_content(response, url_info)
        else:
            self._accept_as_blank(url_info)
[ "def", "fetch_robots_txt", "(", "self", ",", "request", ":", "Request", ",", "file", "=", "None", ")", ":", "url_info", "=", "request", ".", "url_info", "url", "=", "URLInfo", ".", "parse", "(", "'{0}://{1}/robots.txt'", ".", "format", "(", "url_info", ".", "scheme", ",", "url_info", ".", "hostname_with_port", ")", ")", ".", "url", "if", "not", "file", ":", "file", "=", "wpull", ".", "body", ".", "new_temp_file", "(", "os", ".", "getcwd", "(", ")", ",", "hint", "=", "'robots'", ")", "with", "contextlib", ".", "closing", "(", "file", ")", ":", "request", "=", "Request", "(", "url", ")", "session", "=", "self", ".", "_web_client", ".", "session", "(", "request", ")", "while", "not", "session", ".", "done", "(", ")", ":", "wpull", ".", "util", ".", "truncate_file", "(", "file", ".", "name", ")", "try", ":", "response", "=", "yield", "from", "session", ".", "start", "(", ")", "yield", "from", "session", ".", "download", "(", "file", "=", "file", ")", "except", "ProtocolError", ":", "self", ".", "_accept_as_blank", "(", "url_info", ")", "return", "status_code", "=", "response", ".", "status_code", "if", "500", "<=", "status_code", "<=", "599", ":", "raise", "ServerError", "(", "'Server returned error for robots.txt.'", ")", "if", "status_code", "==", "200", ":", "self", ".", "_read_content", "(", "response", ",", "url_info", ")", "else", ":", "self", ".", "_accept_as_blank", "(", "url_info", ")" ]
Fetch the robots.txt file for the request. Coroutine.
[ "Fetch", "the", "robots", ".", "txt", "file", "for", "the", "request", "." ]
python
train
RomelTorres/alpha_vantage
alpha_vantage/alphavantage.py
https://github.com/RomelTorres/alpha_vantage/blob/4e0b5057e520e3e3de69cf947301765817290121/alpha_vantage/alphavantage.py#L214-L241
def map_to_matype(self, matype):
    """Convert to the alpha vantage math type integer. It returns an
    integer correspondent to the type of math to apply to a function.
    It raises ValueError if an integer greater than the supported math
    types is given.

    Keyword Arguments:
        matype: The math type of the alpha vantage api. It accepts
        integers or a string representing the math type.

            * 0 = Simple Moving Average (SMA),
            * 1 = Exponential Moving Average (EMA),
            * 2 = Weighted Moving Average (WMA),
            * 3 = Double Exponential Moving Average (DEMA),
            * 4 = Triple Exponential Moving Average (TEMA),
            * 5 = Triangular Moving Average (TRIMA),
            * 6 = T3 Moving Average,
            * 7 = Kaufman Adaptive Moving Average (KAMA),
            * 8 = MESA Adaptive Moving Average (MAMA)
    """
    # Check if it is an integer or a string
    try:
        value = int(matype)
        if abs(value) > len(AlphaVantage._ALPHA_VANTAGE_MATH_MAP):
            raise ValueError("The value {} is not supported".format(value))
    except ValueError:
        value = AlphaVantage._ALPHA_VANTAGE_MATH_MAP.index(matype)
    return value
[ "def", "map_to_matype", "(", "self", ",", "matype", ")", ":", "# Check if it is an integer or a string", "try", ":", "value", "=", "int", "(", "matype", ")", "if", "abs", "(", "value", ")", ">", "len", "(", "AlphaVantage", ".", "_ALPHA_VANTAGE_MATH_MAP", ")", ":", "raise", "ValueError", "(", "\"The value {} is not supported\"", ".", "format", "(", "value", ")", ")", "except", "ValueError", ":", "value", "=", "AlphaVantage", ".", "_ALPHA_VANTAGE_MATH_MAP", ".", "index", "(", "matype", ")", "return", "value" ]
Convert to the alpha vantage math type integer. It returns an
integer correspondent to the type of math to apply to a function.
It raises ValueError if an integer greater than the supported math
types is given.

Keyword Arguments:
    matype: The math type of the alpha vantage api. It accepts
    integers or a string representing the math type.

        * 0 = Simple Moving Average (SMA),
        * 1 = Exponential Moving Average (EMA),
        * 2 = Weighted Moving Average (WMA),
        * 3 = Double Exponential Moving Average (DEMA),
        * 4 = Triple Exponential Moving Average (TEMA),
        * 5 = Triangular Moving Average (TRIMA),
        * 6 = T3 Moving Average,
        * 7 = Kaufman Adaptive Moving Average (KAMA),
        * 8 = MESA Adaptive Moving Average (MAMA)
[ "Convert", "to", "the", "alpha", "vantage", "math", "type", "integer", ".", "It", "returns", "an", "integer", "correspondent", "to", "the", "type", "of", "math", "to", "apply", "to", "a", "function", ".", "It", "raises", "ValueError", "if", "an", "integer", "greater", "than", "the", "supported", "math", "types", "is", "given", "." ]
python
train
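The int-or-name dispatch above is a small reusable pattern; a standalone sketch with a made-up lookup table (not the actual contents of AlphaVantage._ALPHA_VANTAGE_MATH_MAP):

MATH_MAP = ['SMA', 'EMA', 'WMA']  # illustrative subset

def to_matype(matype):
    # Accept an integer index directly, or fall back to a name lookup.
    try:
        value = int(matype)
        if abs(value) > len(MATH_MAP):
            raise ValueError("The value {} is not supported".format(value))
    except ValueError:
        value = MATH_MAP.index(matype)
    return value

print(to_matype(1))      # 1
print(to_matype('EMA'))  # 1

Note that the raise inside the try block is itself caught by the except clause, so an out-of-range integer falls through to the name lookup and fails there instead; the original shares this quirk.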
saltstack/salt
salt/utils/openstack/neutron.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/utils/openstack/neutron.py#L872-L878
def delete_firewall_rule(self, firewall_rule):
    '''
    Deletes the specified firewall rule
    '''
    firewall_rule_id = self._find_firewall_rule_id(firewall_rule)
    ret = self.network_conn.delete_firewall_rule(firewall_rule_id)
    return ret if ret else True
[ "def", "delete_firewall_rule", "(", "self", ",", "firewall_rule", ")", ":", "firewall_rule_id", "=", "self", ".", "_find_firewall_rule_id", "(", "firewall_rule", ")", "ret", "=", "self", ".", "network_conn", ".", "delete_firewall_rule", "(", "firewall_rule_id", ")", "return", "ret", "if", "ret", "else", "True" ]
Deletes the specified firewall rule
[ "Deletes", "the", "specified", "firewall", "rule" ]
python
train
bwohlberg/sporco
sporco/fista/fista.py
https://github.com/bwohlberg/sporco/blob/8946a04331106f4e39904fbdf2dc7351900baa04/sporco/fista/fista.py#L397-L407
def proximal_step(self, grad=None):
    """Compute proximal update (gradient descent + regularization)."""
    if grad is None:
        grad = self.eval_grad()

    V = self.Y - (1. / self.L) * grad

    self.X = self.eval_proxop(V)

    return grad
[ "def", "proximal_step", "(", "self", ",", "grad", "=", "None", ")", ":", "if", "grad", "is", "None", ":", "grad", "=", "self", ".", "eval_grad", "(", ")", "V", "=", "self", ".", "Y", "-", "(", "1.", "/", "self", ".", "L", ")", "*", "grad", "self", ".", "X", "=", "self", ".", "eval_proxop", "(", "V", ")", "return", "grad" ]
Compute proximal update (gradient descent + regularization).
[ "Compute", "proximal", "update", "(", "gradient", "descent", "+", "regularization", ")", "." ]
python
train
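A worked instance of the step above for the common L1-regularized case, where the proximal operator is soft-thresholding; the arrays stand in for the solver's Y, L and gradient (illustrative values):

import numpy as np

def soft_threshold(v, t):
    # Proximal operator of t * ||x||_1.
    return np.sign(v) * np.maximum(np.abs(v) - t, 0.0)

Y = np.array([0.9, -0.2, 1.5])
grad = np.array([0.4, -0.1, -0.3])
L, lmbda = 2.0, 0.1

V = Y - (1.0 / L) * grad          # gradient step on the smooth term
X = soft_threshold(V, lmbda / L)  # regularization via the prox
print(X)                          # [ 0.65 -0.1   1.6 ]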
globality-corp/microcosm-flask
microcosm_flask/audit.py
https://github.com/globality-corp/microcosm-flask/blob/c2eaf57f03e7d041eea343751a4a90fcc80df418/microcosm_flask/audit.py#L59-L65
def should_skip_logging(func):
    """
    Should we skip logging for this handler?
    """
    disabled = strtobool(request.headers.get("x-request-nolog", "false"))
    return disabled or getattr(func, SKIP_LOGGING, False)
[ "def", "should_skip_logging", "(", "func", ")", ":", "disabled", "=", "strtobool", "(", "request", ".", "headers", ".", "get", "(", "\"x-request-nolog\"", ",", "\"false\"", ")", ")", "return", "disabled", "or", "getattr", "(", "func", ",", "SKIP_LOGGING", ",", "False", ")" ]
Should we skip logging for this handler?
[ "Should", "we", "skip", "logging", "for", "this", "handler?" ]
python
train
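The header parse above leans on distutils' strtobool, which returns an int rather than a bool; its behavior in isolation (standard library before Python 3.12, where distutils was removed):

from distutils.util import strtobool

print(strtobool("false"))  # 0
print(strtobool("True"))   # 1
# Unrecognized values raise ValueError rather than defaulting to False.
try:
    strtobool("maybe")
except ValueError as err:
    print(err)  # invalid truth value 'maybe'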
apache/spark
python/pyspark/heapq3.py
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/heapq3.py#L477-L481
def _heapify_max(x):
    """Transform list into a maxheap, in-place, in O(len(x)) time."""
    n = len(x)
    for i in reversed(range(n // 2)):
        _siftup_max(x, i)
[ "def", "_heapify_max", "(", "x", ")", ":", "n", "=", "len", "(", "x", ")", "for", "i", "in", "reversed", "(", "range", "(", "n", "//", "2", ")", ")", ":", "_siftup_max", "(", "x", ",", "i", ")" ]
Transform list into a maxheap, in-place, in O(len(x)) time.
[ "Transform", "list", "into", "a", "maxheap", "in", "-", "place", "in", "O", "(", "len", "(", "x", "))", "time", "." ]
python
train
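_siftup_max is private to the heapq3 module; an equivalent way to get max-heap behavior with the public heapq API is to negate the keys (a sketch, not pyspark's code):

import heapq

data = [3, 1, 4, 1, 5, 9, 2, 6]
neg = [-x for x in data]
heapq.heapify(neg)          # a min-heap on negated values acts as a max-heap
print(-heapq.heappop(neg))  # 9
print(-heapq.heappop(neg))  # 6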
alpha-xone/xbbg
xbbg/io/storage.py
https://github.com/alpha-xone/xbbg/blob/70226eb19a72a08144b5d8cea9db4913200f7bc5/xbbg/io/storage.py#L39-L134
def ref_file(
        ticker: str, fld: str, has_date=False, cache=False, ext='parq', **kwargs
) -> str:
    """
    Data file location for Bloomberg reference data

    Args:
        ticker: ticker name
        fld: field
        has_date: whether add current date to data file
        cache: if has_date is True, whether to load file from latest cached
        ext: file extension
        **kwargs: other overrides passed to ref function

    Returns:
        file location

    Examples:
        >>> import shutil
        >>>
        >>> os.environ['BBG_ROOT'] = ''
        >>> ref_file('BLT LN Equity', fld='Crncy') == ''
        True
        >>> os.environ['BBG_ROOT'] = '/data/bbg'
        >>> ref_file('BLT LN Equity', fld='Crncy', cache=True)
        '/data/bbg/Equity/BLT LN Equity/Crncy/ovrd=None.parq'
        >>> ref_file('BLT LN Equity', fld='Crncy')
        ''
        >>> cur_dt = utils.cur_time(tz=utils.DEFAULT_TZ)
        >>> ref_file(
        ...     'BLT LN Equity', fld='DVD_Hist_All', has_date=True, cache=True,
        ... ).replace(cur_dt, '[cur_date]')
        '/data/bbg/Equity/BLT LN Equity/DVD_Hist_All/asof=[cur_date], ovrd=None.parq'
        >>> ref_file(
        ...     'BLT LN Equity', fld='DVD_Hist_All', has_date=True,
        ...     cache=True, DVD_Start_Dt='20180101',
        ... ).replace(cur_dt, '[cur_date]')[:-5]
        '/data/bbg/Equity/BLT LN Equity/DVD_Hist_All/asof=[cur_date], DVD_Start_Dt=20180101'
        >>> sample = 'asof=2018-11-02, DVD_Start_Dt=20180101, DVD_End_Dt=20180501.pkl'
        >>> root_path = 'xbbg/tests/data'
        >>> sub_path = f'{root_path}/Equity/AAPL US Equity/DVD_Hist_All'
        >>> os.environ['BBG_ROOT'] = root_path
        >>> for tmp_file in files.all_files(sub_path): os.remove(tmp_file)
        >>> files.create_folder(sub_path)
        >>> sample in shutil.copy(f'{root_path}/{sample}', sub_path)
        True
        >>> new_file = ref_file(
        ...     'AAPL US Equity', 'DVD_Hist_All', DVD_Start_Dt='20180101',
        ...     has_date=True, cache=True, ext='pkl'
        ... )
        >>> new_file.split('/')[-1] == f'asof={cur_dt}, DVD_Start_Dt=20180101.pkl'
        True
        >>> old_file = 'asof=2018-11-02, DVD_Start_Dt=20180101, DVD_End_Dt=20180501.pkl'
        >>> old_full = '/'.join(new_file.split('/')[:-1] + [old_file])
        >>> updated_file = old_full.replace('2018-11-02', cur_dt)
        >>> updated_file in shutil.copy(old_full, updated_file)
        True
        >>> exist_file = ref_file(
        ...     'AAPL US Equity', 'DVD_Hist_All', DVD_Start_Dt='20180101',
        ...     has_date=True, cache=True, ext='pkl'
        ... )
        >>> exist_file == updated_file
        False
        >>> exist_file = ref_file(
        ...     'AAPL US Equity', 'DVD_Hist_All', DVD_Start_Dt='20180101',
        ...     DVD_End_Dt='20180501', has_date=True, cache=True, ext='pkl'
        ... )
        >>> exist_file == updated_file
        True
    """
    data_path = os.environ.get(assist.BBG_ROOT, '').replace('\\', '/')
    if (not data_path) or (not cache):
        return ''

    proper_ticker = ticker.replace('/', '_')
    cache_days = kwargs.pop('cache_days', 10)
    root = f'{data_path}/{ticker.split()[-1]}/{proper_ticker}/{fld}'

    if len(kwargs) > 0:
        info = utils.to_str(kwargs)[1:-1].replace('|', '_')
    else:
        info = 'ovrd=None'

    # Check date info
    if has_date:
        cur_dt = utils.cur_time()
        missing = f'{root}/asof={cur_dt}, {info}.{ext}'
        to_find = re.compile(rf'{root}/asof=(.*), {info}\.pkl')
        cur_files = list(filter(to_find.match, sorted(
            files.all_files(path_name=root, keyword=info, ext=ext)
        )))
        if len(cur_files) > 0:
            upd_dt = to_find.match(cur_files[-1]).group(1)
            diff = pd.Timestamp('today') - pd.Timestamp(upd_dt)
            if diff >= pd.Timedelta(days=cache_days):
                return missing
            return sorted(cur_files)[-1]
        else:
            return missing
    else:
        return f'{root}/{info}.{ext}'
[ "def", "ref_file", "(", "ticker", ":", "str", ",", "fld", ":", "str", ",", "has_date", "=", "False", ",", "cache", "=", "False", ",", "ext", "=", "'parq'", ",", "*", "*", "kwargs", ")", "->", "str", ":", "data_path", "=", "os", ".", "environ", ".", "get", "(", "assist", ".", "BBG_ROOT", ",", "''", ")", ".", "replace", "(", "'\\\\'", ",", "'/'", ")", "if", "(", "not", "data_path", ")", "or", "(", "not", "cache", ")", ":", "return", "''", "proper_ticker", "=", "ticker", ".", "replace", "(", "'/'", ",", "'_'", ")", "cache_days", "=", "kwargs", ".", "pop", "(", "'cache_days'", ",", "10", ")", "root", "=", "f'{data_path}/{ticker.split()[-1]}/{proper_ticker}/{fld}'", "if", "len", "(", "kwargs", ")", ">", "0", ":", "info", "=", "utils", ".", "to_str", "(", "kwargs", ")", "[", "1", ":", "-", "1", "]", ".", "replace", "(", "'|'", ",", "'_'", ")", "else", ":", "info", "=", "'ovrd=None'", "# Check date info", "if", "has_date", ":", "cur_dt", "=", "utils", ".", "cur_time", "(", ")", "missing", "=", "f'{root}/asof={cur_dt}, {info}.{ext}'", "to_find", "=", "re", ".", "compile", "(", "rf'{root}/asof=(.*), {info}\\.pkl'", ")", "cur_files", "=", "list", "(", "filter", "(", "to_find", ".", "match", ",", "sorted", "(", "files", ".", "all_files", "(", "path_name", "=", "root", ",", "keyword", "=", "info", ",", "ext", "=", "ext", ")", ")", ")", ")", "if", "len", "(", "cur_files", ")", ">", "0", ":", "upd_dt", "=", "to_find", ".", "match", "(", "cur_files", "[", "-", "1", "]", ")", ".", "group", "(", "1", ")", "diff", "=", "pd", ".", "Timestamp", "(", "'today'", ")", "-", "pd", ".", "Timestamp", "(", "upd_dt", ")", "if", "diff", ">=", "pd", ".", "Timedelta", "(", "days", "=", "cache_days", ")", ":", "return", "missing", "return", "sorted", "(", "cur_files", ")", "[", "-", "1", "]", "else", ":", "return", "missing", "else", ":", "return", "f'{root}/{info}.{ext}'" ]
Data file location for Bloomberg reference data

Args:
    ticker: ticker name
    fld: field
    has_date: whether add current date to data file
    cache: if has_date is True, whether to load file from latest cached
    ext: file extension
    **kwargs: other overrides passed to ref function

Returns:
    file location

Examples:
    >>> import shutil
    >>>
    >>> os.environ['BBG_ROOT'] = ''
    >>> ref_file('BLT LN Equity', fld='Crncy') == ''
    True
    >>> os.environ['BBG_ROOT'] = '/data/bbg'
    >>> ref_file('BLT LN Equity', fld='Crncy', cache=True)
    '/data/bbg/Equity/BLT LN Equity/Crncy/ovrd=None.parq'
    >>> ref_file('BLT LN Equity', fld='Crncy')
    ''
    >>> cur_dt = utils.cur_time(tz=utils.DEFAULT_TZ)
    >>> ref_file(
    ...     'BLT LN Equity', fld='DVD_Hist_All', has_date=True, cache=True,
    ... ).replace(cur_dt, '[cur_date]')
    '/data/bbg/Equity/BLT LN Equity/DVD_Hist_All/asof=[cur_date], ovrd=None.parq'
    >>> ref_file(
    ...     'BLT LN Equity', fld='DVD_Hist_All', has_date=True,
    ...     cache=True, DVD_Start_Dt='20180101',
    ... ).replace(cur_dt, '[cur_date]')[:-5]
    '/data/bbg/Equity/BLT LN Equity/DVD_Hist_All/asof=[cur_date], DVD_Start_Dt=20180101'
    >>> sample = 'asof=2018-11-02, DVD_Start_Dt=20180101, DVD_End_Dt=20180501.pkl'
    >>> root_path = 'xbbg/tests/data'
    >>> sub_path = f'{root_path}/Equity/AAPL US Equity/DVD_Hist_All'
    >>> os.environ['BBG_ROOT'] = root_path
    >>> for tmp_file in files.all_files(sub_path): os.remove(tmp_file)
    >>> files.create_folder(sub_path)
    >>> sample in shutil.copy(f'{root_path}/{sample}', sub_path)
    True
    >>> new_file = ref_file(
    ...     'AAPL US Equity', 'DVD_Hist_All', DVD_Start_Dt='20180101',
    ...     has_date=True, cache=True, ext='pkl'
    ... )
    >>> new_file.split('/')[-1] == f'asof={cur_dt}, DVD_Start_Dt=20180101.pkl'
    True
    >>> old_file = 'asof=2018-11-02, DVD_Start_Dt=20180101, DVD_End_Dt=20180501.pkl'
    >>> old_full = '/'.join(new_file.split('/')[:-1] + [old_file])
    >>> updated_file = old_full.replace('2018-11-02', cur_dt)
    >>> updated_file in shutil.copy(old_full, updated_file)
    True
    >>> exist_file = ref_file(
    ...     'AAPL US Equity', 'DVD_Hist_All', DVD_Start_Dt='20180101',
    ...     has_date=True, cache=True, ext='pkl'
    ... )
    >>> exist_file == updated_file
    False
    >>> exist_file = ref_file(
    ...     'AAPL US Equity', 'DVD_Hist_All', DVD_Start_Dt='20180101',
    ...     DVD_End_Dt='20180501', has_date=True, cache=True, ext='pkl'
    ... )
    >>> exist_file == updated_file
    True
[ "Data", "file", "location", "for", "Bloomberg", "reference", "data" ]
python
valid
hongtaocai/googlefinance
googlefinance/__init__.py
https://github.com/hongtaocai/googlefinance/blob/9f703d8d4e00d645320d49186eee4520341ec273/googlefinance/__init__.py#L84-L105
def getQuotes(symbols):
    '''
    get real-time quotes (index, last trade price, last trade time, etc) for
    stocks, using google api: http://finance.google.com/finance/info?client=ig&q=symbols

    Unlike python package 'yahoo-finance' (15 min delay), there is no delay
    for NYSE and NASDAQ stocks in 'googlefinance' package.

    example:
    quotes = getQuotes('AAPL')
    return:
    [{u'Index': u'NASDAQ', u'LastTradeWithCurrency': u'129.09',
      u'LastTradeDateTime': u'2015-03-02T16:04:29Z', u'LastTradePrice': u'129.09',
      u'Yield': u'1.46', u'LastTradeTime': u'4:04PM EST',
      u'LastTradeDateTimeLong': u'Mar 2, 4:04PM EST', u'Dividend': u'0.47',
      u'StockSymbol': u'AAPL', u'ID': u'22144'}]

    quotes = getQuotes(['AAPL', 'GOOG'])
    return:
    [{u'Index': u'NASDAQ', u'LastTradeWithCurrency': u'129.09',
      u'LastTradeDateTime': u'2015-03-02T16:04:29Z', u'LastTradePrice': u'129.09',
      u'Yield': u'1.46', u'LastTradeTime': u'4:04PM EST',
      u'LastTradeDateTimeLong': u'Mar 2, 4:04PM EST', u'Dividend': u'0.47',
      u'StockSymbol': u'AAPL', u'ID': u'22144'},
     {u'Index': u'NASDAQ', u'LastTradeWithCurrency': u'571.34',
      u'LastTradeDateTime': u'2015-03-02T16:04:29Z', u'LastTradePrice': u'571.34',
      u'Yield': u'', u'LastTradeTime': u'4:04PM EST',
      u'LastTradeDateTimeLong': u'Mar 2, 4:04PM EST', u'Dividend': u'',
      u'StockSymbol': u'GOOG', u'ID': u'304466804484872'}]

    :param symbols: a single symbol or a list of stock symbols
    :return: real-time quotes list
    '''
    if type(symbols) == type('str'):
        symbols = [symbols]
    content = json.loads(request(symbols))
    return replaceKeys(content)
[ "def", "getQuotes", "(", "symbols", ")", ":", "if", "type", "(", "symbols", ")", "==", "type", "(", "'str'", ")", ":", "symbols", "=", "[", "symbols", "]", "content", "=", "json", ".", "loads", "(", "request", "(", "symbols", ")", ")", "return", "replaceKeys", "(", "content", ")" ]
get real-time quotes (index, last trade price, last trade time, etc) for
stocks, using google api: http://finance.google.com/finance/info?client=ig&q=symbols

Unlike python package 'yahoo-finance' (15 min delay), there is no delay for
NYSE and NASDAQ stocks in 'googlefinance' package.

example:
quotes = getQuotes('AAPL')
return:
[{u'Index': u'NASDAQ', u'LastTradeWithCurrency': u'129.09',
  u'LastTradeDateTime': u'2015-03-02T16:04:29Z', u'LastTradePrice': u'129.09',
  u'Yield': u'1.46', u'LastTradeTime': u'4:04PM EST',
  u'LastTradeDateTimeLong': u'Mar 2, 4:04PM EST', u'Dividend': u'0.47',
  u'StockSymbol': u'AAPL', u'ID': u'22144'}]

quotes = getQuotes(['AAPL', 'GOOG'])
return:
[{u'Index': u'NASDAQ', u'LastTradeWithCurrency': u'129.09',
  u'LastTradeDateTime': u'2015-03-02T16:04:29Z', u'LastTradePrice': u'129.09',
  u'Yield': u'1.46', u'LastTradeTime': u'4:04PM EST',
  u'LastTradeDateTimeLong': u'Mar 2, 4:04PM EST', u'Dividend': u'0.47',
  u'StockSymbol': u'AAPL', u'ID': u'22144'},
 {u'Index': u'NASDAQ', u'LastTradeWithCurrency': u'571.34',
  u'LastTradeDateTime': u'2015-03-02T16:04:29Z', u'LastTradePrice': u'571.34',
  u'Yield': u'', u'LastTradeTime': u'4:04PM EST',
  u'LastTradeDateTimeLong': u'Mar 2, 4:04PM EST', u'Dividend': u'',
  u'StockSymbol': u'GOOG', u'ID': u'304466804484872'}]

:param symbols: a single symbol or a list of stock symbols
:return: real-time quotes list
[ "get", "real", "-", "time", "quotes", "(", "index", "last", "trade", "price", "last", "trade", "time", "etc", ")", "for", "stocks", "using", "google", "api", ":", "http", ":", "//", "finance", ".", "google", ".", "com", "/", "finance", "/", "info?client", "=", "ig&q", "=", "symbols" ]
python
train
ToucanToco/toucan-data-sdk
toucan_data_sdk/sdk.py
https://github.com/ToucanToco/toucan-data-sdk/blob/c3ca874e1b64f4bdcc2edda750a72d45d1561d8a/toucan_data_sdk/sdk.py#L194-L213
def extract(data):
    """
    Args:
        data (str | byte):

    Returns:
        dict: Dict[str, DataFrame]
    """
    _, tmp_file_path = tempfile.mkstemp()
    try:
        with open(tmp_file_path, 'wb') as tmp_file:
            tmp_file.write(data)

        if zipfile.is_zipfile(tmp_file_path):
            return extract_zip(tmp_file_path)
        else:
            raise DataSdkError('Unsupported file type')
    finally:
        shutil.rmtree(tmp_file_path, ignore_errors=True)
[ "def", "extract", "(", "data", ")", ":", "_", ",", "tmp_file_path", "=", "tempfile", ".", "mkstemp", "(", ")", "try", ":", "with", "open", "(", "tmp_file_path", ",", "'wb'", ")", "as", "tmp_file", ":", "tmp_file", ".", "write", "(", "data", ")", "if", "zipfile", ".", "is_zipfile", "(", "tmp_file_path", ")", ":", "return", "extract_zip", "(", "tmp_file_path", ")", "else", ":", "raise", "DataSdkError", "(", "'Unsupported file type'", ")", "finally", ":", "shutil", ".", "rmtree", "(", "tmp_file_path", ",", "ignore_errors", "=", "True", ")" ]
Args:
    data (str | byte):

Returns:
    dict: Dict[str, DataFrame]
[ "Args", ":", "data", "(", "str", "|", "byte", ")", ":" ]
python
test
giancosta86/Iris
info/gianlucacosta/iris/versioning.py
https://github.com/giancosta86/Iris/blob/b3d92cca5cce3653519bd032346b211c46a57d05/info/gianlucacosta/iris/versioning.py#L117-L143
def getFriendlyString(self):
    """
    Returns the version, printed in a friendly way.

    More precisely, it trims trailing zero components.
    """
    if self._friendlyString is not None:
        return self._friendlyString

    resultComponents = [
        self.getIntMajor(),
        self.getIntMinor(),
        self.getIntBuild(),
        self.getIntRevision()
    ]

    for i in range(len(resultComponents) - 1, -1, -1):
        if resultComponents[i] == 0:
            del resultComponents[i]
        else:
            break

    result = ".".join(map(str, resultComponents))

    self._friendlyString = result

    return result
[ "def", "getFriendlyString", "(", "self", ")", ":", "if", "self", ".", "_friendlyString", "is", "not", "None", ":", "return", "self", ".", "_friendlyString", "resultComponents", "=", "[", "self", ".", "getIntMajor", "(", ")", ",", "self", ".", "getIntMinor", "(", ")", ",", "self", ".", "getIntBuild", "(", ")", ",", "self", ".", "getIntRevision", "(", ")", "]", "for", "i", "in", "range", "(", "len", "(", "resultComponents", ")", "-", "1", ",", "-", "1", ",", "-", "1", ")", ":", "if", "resultComponents", "[", "i", "]", "==", "0", ":", "del", "resultComponents", "[", "i", "]", "else", ":", "break", "result", "=", "\".\"", ".", "join", "(", "map", "(", "str", ",", "resultComponents", ")", ")", "self", ".", "_friendlyString", "=", "result", "return", "result" ]
Returns the version, printed in a friendly way. More precisely, it trims trailing zero components.
[ "Returns", "the", "version", "printed", "in", "a", "friendly", "way", "." ]
python
train
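The trailing-zero trimming above is compact to reproduce standalone (a sketch over plain tuples, not the Version class itself):

def friendly_version(components):
    # Drop zero components from the right, stopping at the first non-zero,
    # as getFriendlyString() does.
    parts = list(components)
    while parts and parts[-1] == 0:
        parts.pop()
    return ".".join(map(str, parts))

print(friendly_version((2, 1, 0, 0)))  # 2.1
print(friendly_version((1, 0, 3, 0)))  # 1.0.3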
HazyResearch/metal
metal/logging/logger.py
https://github.com/HazyResearch/metal/blob/c24e3772e25ac6d0917b8b7af4c1bcb92928f84a/metal/logging/logger.py#L180-L203
def print_to_screen(self, metrics_dict): """Print all metrics in metrics_dict to screen""" score_strings = defaultdict(list) for split_metric, value in metrics_dict.items(): split, metric = split_metric.split("/", 1) if isinstance(value, float): score_strings[split].append(f"{metric}={value:0.3f}") else: score_strings[split].append(f"{metric}={value}") header = f"{self.unit_total} {self.log_unit[:3]}" if self.log_unit != "epochs": epochs = self.example_total / self.epoch_size header += f" ({epochs:0.2f} epo)" string = f"[{header}]:" if score_strings["train"]: train_scores = f"{', '.join(score_strings['train'])}" string += f" TRAIN:[{train_scores}]" if score_strings["valid"]: valid_scores = f"{', '.join(score_strings['valid'])}" string += f" VALID:[{valid_scores}]" print(string)
[ "def", "print_to_screen", "(", "self", ",", "metrics_dict", ")", ":", "score_strings", "=", "defaultdict", "(", "list", ")", "for", "split_metric", ",", "value", "in", "metrics_dict", ".", "items", "(", ")", ":", "split", ",", "metric", "=", "split_metric", ".", "split", "(", "\"/\"", ",", "1", ")", "if", "isinstance", "(", "value", ",", "float", ")", ":", "score_strings", "[", "split", "]", ".", "append", "(", "f\"{metric}={value:0.3f}\"", ")", "else", ":", "score_strings", "[", "split", "]", ".", "append", "(", "f\"{metric}={value}\"", ")", "header", "=", "f\"{self.unit_total} {self.log_unit[:3]}\"", "if", "self", ".", "log_unit", "!=", "\"epochs\"", ":", "epochs", "=", "self", ".", "example_total", "/", "self", ".", "epoch_size", "header", "+=", "f\" ({epochs:0.2f} epo)\"", "string", "=", "f\"[{header}]:\"", "if", "score_strings", "[", "\"train\"", "]", ":", "train_scores", "=", "f\"{', '.join(score_strings['train'])}\"", "string", "+=", "f\" TRAIN:[{train_scores}]\"", "if", "score_strings", "[", "\"valid\"", "]", ":", "valid_scores", "=", "f\"{', '.join(score_strings['valid'])}\"", "string", "+=", "f\" VALID:[{valid_scores}]\"", "print", "(", "string", ")" ]
Print all metrics in metrics_dict to screen
[ "Print", "all", "metrics", "in", "metrics_dict", "to", "screen" ]
python
train
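An illustrative call for the `print_to_screen` record above, assuming a configured `Logger` instance named `logger`. Keys must be of the form `split/metric`; floats are rendered with three decimals, everything else verbatim.

```python
logger.print_to_screen({
    "train/loss": 0.4312,      # float -> "loss=0.431"
    "train/lr": 0.001,
    "valid/accuracy": 0.8719,
    "valid/batch": 42,         # non-float -> printed as-is
})
# Possible output (the header depends on the logger's unit settings):
# [500 bat (0.25 epo)]: TRAIN:[loss=0.431, lr=0.001] VALID:[accuracy=0.872, batch=42]
```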
persephone-tools/persephone
persephone/model.py
https://github.com/persephone-tools/persephone/blob/f94c63e4d5fe719fb1deba449b177bb299d225fb/persephone/model.py#L195-L236
def transcribe(self, restore_model_path: Optional[str]=None) -> None: """ Transcribes an untranscribed dataset. Similar to eval() except no reference translation is assumed, thus no LER is calculated. """ saver = tf.train.Saver() with tf.Session(config=allow_growth_config) as sess: if restore_model_path: saver.restore(sess, restore_model_path) else: if self.saved_model_path: saver.restore(sess, self.saved_model_path) else: raise PersephoneException("No model to use for transcription.") batch_gen = self.corpus_reader.untranscribed_batch_gen() hyp_batches = [] for batch_i, batch in enumerate(batch_gen): batch_x, batch_x_lens, feat_fn_batch = batch feed_dict = {self.batch_x: batch_x, self.batch_x_lens: batch_x_lens} [dense_decoded] = sess.run([self.dense_decoded], feed_dict=feed_dict) hyps = self.corpus_reader.human_readable(dense_decoded) # Prepare dir for transcription hyps_dir = os.path.join(self.exp_dir, "transcriptions") if not os.path.isdir(hyps_dir): os.mkdir(hyps_dir) hyp_batches.append((hyps,feat_fn_batch)) with open(os.path.join(hyps_dir, "hyps.txt"), "w", encoding=ENCODING) as hyps_f: for hyp_batch, fn_batch in hyp_batches: for hyp, fn in zip(hyp_batch, fn_batch): print(fn, file=hyps_f) print(" ".join(hyp), file=hyps_f) print("", file=hyps_f)
[ "def", "transcribe", "(", "self", ",", "restore_model_path", ":", "Optional", "[", "str", "]", "=", "None", ")", "->", "None", ":", "saver", "=", "tf", ".", "train", ".", "Saver", "(", ")", "with", "tf", ".", "Session", "(", "config", "=", "allow_growth_config", ")", "as", "sess", ":", "if", "restore_model_path", ":", "saver", ".", "restore", "(", "sess", ",", "restore_model_path", ")", "else", ":", "if", "self", ".", "saved_model_path", ":", "saver", ".", "restore", "(", "sess", ",", "self", ".", "saved_model_path", ")", "else", ":", "raise", "PersephoneException", "(", "\"No model to use for transcription.\"", ")", "batch_gen", "=", "self", ".", "corpus_reader", ".", "untranscribed_batch_gen", "(", ")", "hyp_batches", "=", "[", "]", "for", "batch_i", ",", "batch", "in", "enumerate", "(", "batch_gen", ")", ":", "batch_x", ",", "batch_x_lens", ",", "feat_fn_batch", "=", "batch", "feed_dict", "=", "{", "self", ".", "batch_x", ":", "batch_x", ",", "self", ".", "batch_x_lens", ":", "batch_x_lens", "}", "[", "dense_decoded", "]", "=", "sess", ".", "run", "(", "[", "self", ".", "dense_decoded", "]", ",", "feed_dict", "=", "feed_dict", ")", "hyps", "=", "self", ".", "corpus_reader", ".", "human_readable", "(", "dense_decoded", ")", "# Prepare dir for transcription", "hyps_dir", "=", "os", ".", "path", ".", "join", "(", "self", ".", "exp_dir", ",", "\"transcriptions\"", ")", "if", "not", "os", ".", "path", ".", "isdir", "(", "hyps_dir", ")", ":", "os", ".", "mkdir", "(", "hyps_dir", ")", "hyp_batches", ".", "append", "(", "(", "hyps", ",", "feat_fn_batch", ")", ")", "with", "open", "(", "os", ".", "path", ".", "join", "(", "hyps_dir", ",", "\"hyps.txt\"", ")", ",", "\"w\"", ",", "encoding", "=", "ENCODING", ")", "as", "hyps_f", ":", "for", "hyp_batch", ",", "fn_batch", "in", "hyp_batches", ":", "for", "hyp", ",", "fn", "in", "zip", "(", "hyp_batch", ",", "fn_batch", ")", ":", "print", "(", "fn", ",", "file", "=", "hyps_f", ")", "print", "(", "\" \"", ".", "join", "(", "hyp", ")", ",", "file", "=", "hyps_f", ")", "print", "(", "\"\"", ",", "file", "=", "hyps_f", ")" ]
Transcribes an untranscribed dataset. Similar to eval() except no reference translation is assumed, thus no LER is calculated.
[ "Transcribes", "an", "untranscribed", "dataset", ".", "Similar", "to", "eval", "()", "except", "no", "reference", "translation", "is", "assumed", "thus", "no", "LER", "is", "calculated", "." ]
python
train
tanghaibao/goatools
goatools/godag_plot.py
https://github.com/tanghaibao/goatools/blob/407682e573a108864a79031f8ca19ee3bf377626/goatools/godag_plot.py#L226-L242
def _get_node_text(self, goid, goobj): """Return a string to be printed in a GO term box.""" txt = [] # Header line: "GO:0036464 L04 D06" txt.append(self.pltvars.fmthdr.format( GO=goobj.id.replace("GO:", "GO"), level=goobj.level, depth=goobj.depth)) # GO name line: "cytoplamic ribonucleoprotein" name = goobj.name.replace(",", "\n") txt.append(name) # study info line: "24 genes" study_txt = self._get_study_txt(goid) if study_txt is not None: txt.append(study_txt) # return text string return "\n".join(txt)
[ "def", "_get_node_text", "(", "self", ",", "goid", ",", "goobj", ")", ":", "txt", "=", "[", "]", "# Header line: \"GO:0036464 L04 D06\"", "txt", ".", "append", "(", "self", ".", "pltvars", ".", "fmthdr", ".", "format", "(", "GO", "=", "goobj", ".", "id", ".", "replace", "(", "\"GO:\"", ",", "\"GO\"", ")", ",", "level", "=", "goobj", ".", "level", ",", "depth", "=", "goobj", ".", "depth", ")", ")", "# GO name line: \"cytoplamic ribonucleoprotein\"", "name", "=", "goobj", ".", "name", ".", "replace", "(", "\",\"", ",", "\"\\n\"", ")", "txt", ".", "append", "(", "name", ")", "# study info line: \"24 genes\"", "study_txt", "=", "self", ".", "_get_study_txt", "(", "goid", ")", "if", "study_txt", "is", "not", "None", ":", "txt", ".", "append", "(", "study_txt", ")", "# return text string", "return", "\"\\n\"", ".", "join", "(", "txt", ")" ]
Return a string to be printed in a GO term box.
[ "Return", "a", "string", "to", "be", "printed", "in", "a", "GO", "term", "box", "." ]
python
train
hydroshare/hs_restclient
hs_restclient/__init__.py
https://github.com/hydroshare/hs_restclient/blob/9cd106238b512e01ecd3e33425fe48c13b7f63d5/hs_restclient/__init__.py#L1135-L1148
def updateReferenceURL(self, pid, name, ref_url, path=""): """Update a Referenced Content File (.url) :param pid: The HydroShare ID of the resource for which the file should be updated :param name: Filename for the referenced file :param ref_url: url to be updated in the referenced file :param path: Optional, defaults to contents directory if not provided. Folder path for the file to be updated in :return: JsonResponse on success or HttpResponse with error status code on error :raises: HydroShareNotAuthorized if user is not authorized to perform action. :raises: HydroShareNotFound if the resource or resource file was not found. :raises: HydroShareHTTPException if an unexpected HTTP response code is encountered. """ return self.updateReferencedFile(pid, path, name, ref_url)
[ "def", "updateReferenceURL", "(", "self", ",", "pid", ",", "name", ",", "ref_url", ",", "path", "=", "\"\"", ")", ":", "return", "self", ".", "updateReferencedFile", "(", "pid", ",", "path", ",", "name", ",", "ref_url", ")" ]
Update a Referenced Content File (.url) :param pid: The HydroShare ID of the resource for which the file should be updated :param name: Filename for the referenced file :param ref_url: url to be updated in the referenced file :param path: Optional, defaults to contents directory if not provided. Folder path for the file to be updated in :return: JsonResponse on success or HttpResponse with error status code on error :raises: HydroShareNotAuthorized if user is not authorized to perform action. :raises: HydroShareNotFound if the resource or resource file was not found. :raises: HydroShareHTTPException if an unexpected HTTP response code is encountered.
[ "Update", "a", "Referenced", "Content", "File", "(", ".", "url", ")" ]
python
train
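A hedged call sketch for `updateReferenceURL`. The client setup uses `hs_restclient`'s basic-auth helper; the credentials, resource id, filename, and folder are all placeholders.

```python
from hs_restclient import HydroShare, HydroShareAuthBasic

hs = HydroShare(auth=HydroShareAuthBasic('myuser', 'mypassword'))

resp = hs.updateReferenceURL(
    pid='0123456789abcdef0123456789abcdef',    # hypothetical resource id
    name='stream_gauge.url',                   # the .url file to rewrite
    ref_url='https://example.org/data/gauge.csv',
    path='referenced_data',                    # optional; defaults to contents dir
)
```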
loli/medpy
medpy/metric/binary.py
https://github.com/loli/medpy/blob/95216b9e22e7ce301f0edf953ee2a2f1b6c6aee5/medpy/metric/binary.py#L118-L163
def precision(result, reference): """ Precison. Parameters ---------- result : array_like Input data containing objects. Can be any type but will be converted into binary: background where 0, object everywhere else. reference : array_like Input data containing objects. Can be any type but will be converted into binary: background where 0, object everywhere else. Returns ------- precision : float The precision between two binary datasets, here mostly binary objects in images, which is defined as the fraction of retrieved instances that are relevant. The precision is not symmetric. See also -------- :func:`recall` Notes ----- Not symmetric. The inverse of the precision is :func:`recall`. High precision means that an algorithm returned substantially more relevant results than irrelevant. References ---------- .. [1] http://en.wikipedia.org/wiki/Precision_and_recall .. [2] http://en.wikipedia.org/wiki/Confusion_matrix#Table_of_confusion """ result = numpy.atleast_1d(result.astype(numpy.bool)) reference = numpy.atleast_1d(reference.astype(numpy.bool)) tp = numpy.count_nonzero(result & reference) fp = numpy.count_nonzero(result & ~reference) try: precision = tp / float(tp + fp) except ZeroDivisionError: precision = 0.0 return precision
[ "def", "precision", "(", "result", ",", "reference", ")", ":", "result", "=", "numpy", ".", "atleast_1d", "(", "result", ".", "astype", "(", "numpy", ".", "bool", ")", ")", "reference", "=", "numpy", ".", "atleast_1d", "(", "reference", ".", "astype", "(", "numpy", ".", "bool", ")", ")", "tp", "=", "numpy", ".", "count_nonzero", "(", "result", "&", "reference", ")", "fp", "=", "numpy", ".", "count_nonzero", "(", "result", "&", "~", "reference", ")", "try", ":", "precision", "=", "tp", "/", "float", "(", "tp", "+", "fp", ")", "except", "ZeroDivisionError", ":", "precision", "=", "0.0", "return", "precision" ]
Precison. Parameters ---------- result : array_like Input data containing objects. Can be any type but will be converted into binary: background where 0, object everywhere else. reference : array_like Input data containing objects. Can be any type but will be converted into binary: background where 0, object everywhere else. Returns ------- precision : float The precision between two binary datasets, here mostly binary objects in images, which is defined as the fraction of retrieved instances that are relevant. The precision is not symmetric. See also -------- :func:`recall` Notes ----- Not symmetric. The inverse of the precision is :func:`recall`. High precision means that an algorithm returned substantially more relevant results than irrelevant. References ---------- .. [1] http://en.wikipedia.org/wiki/Precision_and_recall .. [2] http://en.wikipedia.org/wiki/Confusion_matrix#Table_of_confusion
[ "Precison", ".", "Parameters", "----------", "result", ":", "array_like", "Input", "data", "containing", "objects", ".", "Can", "be", "any", "type", "but", "will", "be", "converted", "into", "binary", ":", "background", "where", "0", "object", "everywhere", "else", ".", "reference", ":", "array_like", "Input", "data", "containing", "objects", ".", "Can", "be", "any", "type", "but", "will", "be", "converted", "into", "binary", ":", "background", "where", "0", "object", "everywhere", "else", ".", "Returns", "-------", "precision", ":", "float", "The", "precision", "between", "two", "binary", "datasets", "here", "mostly", "binary", "objects", "in", "images", "which", "is", "defined", "as", "the", "fraction", "of", "retrieved", "instances", "that", "are", "relevant", ".", "The", "precision", "is", "not", "symmetric", ".", "See", "also", "--------", ":", "func", ":", "recall", "Notes", "-----", "Not", "symmetric", ".", "The", "inverse", "of", "the", "precision", "is", ":", "func", ":", "recall", ".", "High", "precision", "means", "that", "an", "algorithm", "returned", "substantially", "more", "relevant", "results", "than", "irrelevant", ".", "References", "----------", "..", "[", "1", "]", "http", ":", "//", "en", ".", "wikipedia", ".", "org", "/", "wiki", "/", "Precision_and_recall", "..", "[", "2", "]", "http", ":", "//", "en", ".", "wikipedia", ".", "org", "/", "wiki", "/", "Confusion_matrix#Table_of_confusion" ]
python
train
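The `precision` record above reduces to two `count_nonzero` calls; a worked example with the same arithmetic:

```python
import numpy as np

result    = np.array([1, 1, 0, 1, 0], dtype=bool)  # e.g. a predicted mask
reference = np.array([1, 0, 0, 1, 1], dtype=bool)  # ground truth

tp = np.count_nonzero(result & reference)    # 2 true positives
fp = np.count_nonzero(result & ~reference)   # 1 false positive
print(tp / float(tp + fp))                   # 0.666..., same as precision(result, reference)
```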
hadrianl/huobi
huobitrade/utils.py
https://github.com/hadrianl/huobi/blob/bbfa2036703ee84a76d5d8e9f89c25fc8a55f2c7/huobitrade/utils.py#L272-L297
def api_key_post(params, request_path, _async=False): """ from 火币demo, 构造post请求并调用post方法 :param params: :param request_path: :return: """ method = 'POST' timestamp = datetime.datetime.utcnow().strftime('%Y-%m-%dT%H:%M:%S') params_to_sign = { 'AccessKeyId': ACCESS_KEY, 'SignatureMethod': 'HmacSHA256', 'SignatureVersion': '2', 'Timestamp': timestamp } host_url = TRADE_URL host_name = urllib.parse.urlparse(host_url).hostname host_name = host_name.lower() secret_sign = createSign(params_to_sign, method, host_name, request_path, SECRET_KEY) params_to_sign['Signature'] = secret_sign if PRIVATE_KEY: params_to_sign['PrivateSignature'] = createPrivateSign(secret_sign, PRIVATE_KEY) url = host_url + request_path + '?' + urllib.parse.urlencode(params_to_sign) return http_post_request(url, params, _async=_async)
[ "def", "api_key_post", "(", "params", ",", "request_path", ",", "_async", "=", "False", ")", ":", "method", "=", "'POST'", "timestamp", "=", "datetime", ".", "datetime", ".", "utcnow", "(", ")", ".", "strftime", "(", "'%Y-%m-%dT%H:%M:%S'", ")", "params_to_sign", "=", "{", "'AccessKeyId'", ":", "ACCESS_KEY", ",", "'SignatureMethod'", ":", "'HmacSHA256'", ",", "'SignatureVersion'", ":", "'2'", ",", "'Timestamp'", ":", "timestamp", "}", "host_url", "=", "TRADE_URL", "host_name", "=", "urllib", ".", "parse", ".", "urlparse", "(", "host_url", ")", ".", "hostname", "host_name", "=", "host_name", ".", "lower", "(", ")", "secret_sign", "=", "createSign", "(", "params_to_sign", ",", "method", ",", "host_name", ",", "request_path", ",", "SECRET_KEY", ")", "params_to_sign", "[", "'Signature'", "]", "=", "secret_sign", "if", "PRIVATE_KEY", ":", "params_to_sign", "[", "'PrivateSignature'", "]", "=", "createPrivateSign", "(", "secret_sign", ",", "PRIVATE_KEY", ")", "url", "=", "host_url", "+", "request_path", "+", "'?'", "+", "urllib", ".", "parse", ".", "urlencode", "(", "params_to_sign", ")", "return", "http_post_request", "(", "url", ",", "params", ",", "_async", "=", "_async", ")" ]
From the Huobi (火币) demo: constructs a POST request and calls the post method. :param params: :param request_path: :return:
[ "from", "火币demo", "构造post请求并调用post方法", ":", "param", "params", ":", ":", "param", "request_path", ":", ":", "return", ":" ]
python
train
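`createSign` itself is not included in the record, but Huobi's documented v2 scheme signs the sorted, urlencoded parameters together with the method, host, and path using HMAC-SHA256. A sketch of that step (the exact helper in `huobitrade` may differ):

```python
import base64
import hashlib
import hmac
import urllib.parse

def create_sign(params, method, host, path, secret_key):
    # Sort params, urlencode them, and join with method/host/path as Huobi expects.
    encoded = urllib.parse.urlencode(sorted(params.items()))
    payload = '\n'.join([method, host, path, encoded])
    digest = hmac.new(secret_key.encode(), payload.encode(),
                      digestmod=hashlib.sha256).digest()
    return base64.b64encode(digest).decode()
```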
ev3dev/ev3dev-lang-python
ev3dev2/motor.py
https://github.com/ev3dev/ev3dev-lang-python/blob/afc98d35004b533dc161a01f7c966e78607d7c1e/ev3dev2/motor.py#L797-L804
def stop(self, **kwargs): """ Stop any of the run commands before they are complete using the action specified by `stop_action`. """ for key in kwargs: setattr(self, key, kwargs[key]) self.command = self.COMMAND_STOP
[ "def", "stop", "(", "self", ",", "*", "*", "kwargs", ")", ":", "for", "key", "in", "kwargs", ":", "setattr", "(", "self", ",", "key", ",", "kwargs", "[", "key", "]", ")", "self", ".", "command", "=", "self", ".", "COMMAND_STOP" ]
Stop any of the run commands before they are complete using the action specified by `stop_action`.
[ "Stop", "any", "of", "the", "run", "commands", "before", "they", "are", "complete", "using", "the", "action", "specified", "by", "stop_action", "." ]
python
train
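A short usage sketch for the `stop` record above: keyword arguments become attributes before `COMMAND_STOP` is issued, so the stop behavior can be chosen per call. The port and speed are illustrative.

```python
from time import sleep
from ev3dev2.motor import LargeMotor, OUTPUT_A

motor = LargeMotor(OUTPUT_A)
motor.run_forever(speed_sp=300)   # open-ended run
sleep(2)
motor.stop(stop_action='brake')   # sets stop_action, then sends COMMAND_STOP
```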
raphaelvallat/pingouin
pingouin/pairwise.py
https://github.com/raphaelvallat/pingouin/blob/58b19fa4fffbfe09d58b456e3926a148249e4d9b/pingouin/pairwise.py#L411-L562
def pairwise_tukey(dv=None, between=None, data=None, alpha=.05, tail='two-sided', effsize='hedges'): '''Pairwise Tukey-HSD post-hoc test. Parameters ---------- dv : string Name of column containing the dependant variable. between: string Name of column containing the between factor. data : pandas DataFrame DataFrame alpha : float Significance level tail : string Indicates whether to return the 'two-sided' or 'one-sided' p-values effsize : string or None Effect size type. Available methods are :: 'none' : no effect size 'cohen' : Unbiased Cohen d 'hedges' : Hedges g 'glass': Glass delta 'eta-square' : Eta-square 'odds-ratio' : Odds ratio 'AUC' : Area Under the Curve Returns ------- stats : DataFrame Stats summary :: 'A' : Name of first measurement 'B' : Name of second measurement 'mean(A)' : Mean of first measurement 'mean(B)' : Mean of second measurement 'diff' : Mean difference 'SE' : Standard error 'tail' : indicate whether the p-values are one-sided or two-sided 'T' : T-values 'p-tukey' : Tukey-HSD corrected p-values 'efsize' : effect sizes 'eftype' : type of effect size Notes ----- Tukey HSD post-hoc is best for balanced one-way ANOVA. It has been proven to be conservative for one-way ANOVA with unequal sample sizes. However, it is not robust if the groups have unequal variances, in which case the Games-Howell test is more adequate. Tukey HSD is not valid for repeated measures ANOVA. Note that when the sample sizes are unequal, this function actually performs the Tukey-Kramer test (which allows for unequal sample sizes). The T-values are defined as: .. math:: t = \\frac{\\overline{x}_i - \\overline{x}_j} {\\sqrt{2 \\cdot MS_w / n}} where :math:`\\overline{x}_i` and :math:`\\overline{x}_j` are the means of the first and second group, respectively, :math:`MS_w` the mean squares of the error (computed using ANOVA) and :math:`n` the sample size. If the sample sizes are unequal, the Tukey-Kramer procedure is automatically used: .. math:: t = \\frac{\\overline{x}_i - \\overline{x}_j}{\\sqrt{\\frac{MS_w}{n_i} + \\frac{MS_w}{n_j}}} where :math:`n_i` and :math:`n_j` are the sample sizes of the first and second group, respectively. The p-values are then approximated using the Studentized range distribution :math:`Q(\\sqrt2*|t_i|, r, N - r)` where :math:`r` is the total number of groups and :math:`N` is the total sample size. Note that the p-values might be slightly different than those obtained using R or Matlab since the studentized range approximation is done using the Gleason (1999) algorithm, which is more efficient and accurate than the algorithms used in Matlab or R. References ---------- .. [1] Tukey, John W. "Comparing individual means in the analysis of variance." Biometrics (1949): 99-114. .. [2] Gleason, John R. "An accurate, non-iterative approximation for studentized range quantiles." Computational statistics & data analysis 31.2 (1999): 147-158. Examples -------- Pairwise Tukey post-hocs on the pain threshold dataset. >>> from pingouin import pairwise_tukey, read_dataset >>> df = read_dataset('anova') >>> pt = pairwise_tukey(dv='Pain threshold', between='Hair color', data=df) ''' from pingouin.external.qsturng import psturng # First compute the ANOVA aov = anova(dv=dv, data=data, between=between, detailed=True) df = aov.loc[1, 'DF'] ng = aov.loc[0, 'DF'] + 1 grp = data.groupby(between)[dv] n = grp.count().values gmeans = grp.mean().values gvar = aov.loc[1, 'MS'] / n # Pairwise combinations g1, g2 = np.array(list(combinations(np.arange(ng), 2))).T mn = gmeans[g1] - gmeans[g2] se = np.sqrt(gvar[g1] + gvar[g2]) tval = mn / se # Critical values and p-values # from pingouin.external.qsturng import qsturng # crit = qsturng(1 - alpha, ng, df) / np.sqrt(2) pval = psturng(np.sqrt(2) * np.abs(tval), ng, df) pval *= 0.5 if tail == 'one-sided' else 1 # Uncorrected p-values # from scipy.stats import t # punc = t.sf(np.abs(tval), n[g1].size + n[g2].size - 2) * 2 # Effect size d = tval * np.sqrt(1 / n[g1] + 1 / n[g2]) ef = convert_effsize(d, 'cohen', effsize, n[g1], n[g2]) # Create dataframe # Careful: pd.unique does NOT sort whereas numpy does stats = pd.DataFrame({ 'A': np.unique(data[between])[g1], 'B': np.unique(data[between])[g2], 'mean(A)': gmeans[g1], 'mean(B)': gmeans[g2], 'diff': mn, 'SE': np.round(se, 3), 'tail': tail, 'T': np.round(tval, 3), # 'alpha': alpha, # 'crit': np.round(crit, 3), 'p-tukey': pval, 'efsize': np.round(ef, 3), 'eftype': effsize, }) return stats
[ "def", "pairwise_tukey", "(", "dv", "=", "None", ",", "between", "=", "None", ",", "data", "=", "None", ",", "alpha", "=", ".05", ",", "tail", "=", "'two-sided'", ",", "effsize", "=", "'hedges'", ")", ":", "from", "pingouin", ".", "external", ".", "qsturng", "import", "psturng", "# First compute the ANOVA", "aov", "=", "anova", "(", "dv", "=", "dv", ",", "data", "=", "data", ",", "between", "=", "between", ",", "detailed", "=", "True", ")", "df", "=", "aov", ".", "loc", "[", "1", ",", "'DF'", "]", "ng", "=", "aov", ".", "loc", "[", "0", ",", "'DF'", "]", "+", "1", "grp", "=", "data", ".", "groupby", "(", "between", ")", "[", "dv", "]", "n", "=", "grp", ".", "count", "(", ")", ".", "values", "gmeans", "=", "grp", ".", "mean", "(", ")", ".", "values", "gvar", "=", "aov", ".", "loc", "[", "1", ",", "'MS'", "]", "/", "n", "# Pairwise combinations", "g1", ",", "g2", "=", "np", ".", "array", "(", "list", "(", "combinations", "(", "np", ".", "arange", "(", "ng", ")", ",", "2", ")", ")", ")", ".", "T", "mn", "=", "gmeans", "[", "g1", "]", "-", "gmeans", "[", "g2", "]", "se", "=", "np", ".", "sqrt", "(", "gvar", "[", "g1", "]", "+", "gvar", "[", "g2", "]", ")", "tval", "=", "mn", "/", "se", "# Critical values and p-values", "# from pingouin.external.qsturng import qsturng", "# crit = qsturng(1 - alpha, ng, df) / np.sqrt(2)", "pval", "=", "psturng", "(", "np", ".", "sqrt", "(", "2", ")", "*", "np", ".", "abs", "(", "tval", ")", ",", "ng", ",", "df", ")", "pval", "*=", "0.5", "if", "tail", "==", "'one-sided'", "else", "1", "# Uncorrected p-values", "# from scipy.stats import t", "# punc = t.sf(np.abs(tval), n[g1].size + n[g2].size - 2) * 2", "# Effect size", "d", "=", "tval", "*", "np", ".", "sqrt", "(", "1", "/", "n", "[", "g1", "]", "+", "1", "/", "n", "[", "g2", "]", ")", "ef", "=", "convert_effsize", "(", "d", ",", "'cohen'", ",", "effsize", ",", "n", "[", "g1", "]", ",", "n", "[", "g2", "]", ")", "# Create dataframe", "# Careful: pd.unique does NOT sort whereas numpy does", "stats", "=", "pd", ".", "DataFrame", "(", "{", "'A'", ":", "np", ".", "unique", "(", "data", "[", "between", "]", ")", "[", "g1", "]", ",", "'B'", ":", "np", ".", "unique", "(", "data", "[", "between", "]", ")", "[", "g2", "]", ",", "'mean(A)'", ":", "gmeans", "[", "g1", "]", ",", "'mean(B)'", ":", "gmeans", "[", "g2", "]", ",", "'diff'", ":", "mn", ",", "'SE'", ":", "np", ".", "round", "(", "se", ",", "3", ")", ",", "'tail'", ":", "tail", ",", "'T'", ":", "np", ".", "round", "(", "tval", ",", "3", ")", ",", "# 'alpha': alpha,", "# 'crit': np.round(crit, 3),", "'p-tukey'", ":", "pval", ",", "'efsize'", ":", "np", ".", "round", "(", "ef", ",", "3", ")", ",", "'eftype'", ":", "effsize", ",", "}", ")", "return", "stats" ]
Pairwise Tukey-HSD post-hoc test. Parameters ---------- dv : string Name of column containing the dependant variable. between: string Name of column containing the between factor. data : pandas DataFrame DataFrame alpha : float Significance level tail : string Indicates whether to return the 'two-sided' or 'one-sided' p-values effsize : string or None Effect size type. Available methods are :: 'none' : no effect size 'cohen' : Unbiased Cohen d 'hedges' : Hedges g 'glass': Glass delta 'eta-square' : Eta-square 'odds-ratio' : Odds ratio 'AUC' : Area Under the Curve Returns ------- stats : DataFrame Stats summary :: 'A' : Name of first measurement 'B' : Name of second measurement 'mean(A)' : Mean of first measurement 'mean(B)' : Mean of second measurement 'diff' : Mean difference 'SE' : Standard error 'tail' : indicate whether the p-values are one-sided or two-sided 'T' : T-values 'p-tukey' : Tukey-HSD corrected p-values 'efsize' : effect sizes 'eftype' : type of effect size Notes ----- Tukey HSD post-hoc is best for balanced one-way ANOVA. It has been proven to be conservative for one-way ANOVA with unequal sample sizes. However, it is not robust if the groups have unequal variances, in which case the Games-Howell test is more adequate. Tukey HSD is not valid for repeated measures ANOVA. Note that when the sample sizes are unequal, this function actually performs the Tukey-Kramer test (which allows for unequal sample sizes). The T-values are defined as: .. math:: t = \\frac{\\overline{x}_i - \\overline{x}_j} {\\sqrt{2 \\cdot MS_w / n}} where :math:`\\overline{x}_i` and :math:`\\overline{x}_j` are the means of the first and second group, respectively, :math:`MS_w` the mean squares of the error (computed using ANOVA) and :math:`n` the sample size. If the sample sizes are unequal, the Tukey-Kramer procedure is automatically used: .. math:: t = \\frac{\\overline{x}_i - \\overline{x}_j}{\\sqrt{\\frac{MS_w}{n_i} + \\frac{MS_w}{n_j}}} where :math:`n_i` and :math:`n_j` are the sample sizes of the first and second group, respectively. The p-values are then approximated using the Studentized range distribution :math:`Q(\\sqrt2*|t_i|, r, N - r)` where :math:`r` is the total number of groups and :math:`N` is the total sample size. Note that the p-values might be slightly different than those obtained using R or Matlab since the studentized range approximation is done using the Gleason (1999) algorithm, which is more efficient and accurate than the algorithms used in Matlab or R. References ---------- .. [1] Tukey, John W. "Comparing individual means in the analysis of variance." Biometrics (1949): 99-114. .. [2] Gleason, John R. "An accurate, non-iterative approximation for studentized range quantiles." Computational statistics & data analysis 31.2 (1999): 147-158. Examples -------- Pairwise Tukey post-hocs on the pain threshold dataset. >>> from pingouin import pairwise_tukey, read_dataset >>> df = read_dataset('anova') >>> pt = pairwise_tukey(dv='Pain threshold', between='Hair color', data=df)
[ "Pairwise", "Tukey", "-", "HSD", "post", "-", "hoc", "test", "." ]
python
train
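The Tukey-Kramer statistic from the record's docstring is plain arithmetic once the ANOVA table is known; a numeric sketch with made-up group summaries:

```python
import numpy as np

# t = (mean_i - mean_j) / sqrt(MS_w/n_i + MS_w/n_j), per the docstring above.
mean_i, mean_j = 42.5, 39.3   # illustrative group means
n_i, n_j = 8, 5               # unequal sample sizes -> Tukey-Kramer form
ms_w = 6.7                    # within-group mean square from the ANOVA

t = (mean_i - mean_j) / np.sqrt(ms_w / n_i + ms_w / n_j)
print(round(t, 3))  # sqrt(2)*|t| then goes to the studentized range distribution
```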
spyder-ide/spyder
spyder/plugins/ipythonconsole/plugin.py
https://github.com/spyder-ide/spyder/blob/f76836ce1b924bcc4efd3f74f2960d26a4e528e0/spyder/plugins/ipythonconsole/plugin.py#L1096-L1104
def get_client_for_file(self, filename): """Get client associated with a given file.""" client = None for idx, cl in enumerate(self.get_clients()): if self.filenames[idx] == filename: self.tabwidget.setCurrentIndex(idx) client = cl break return client
[ "def", "get_client_for_file", "(", "self", ",", "filename", ")", ":", "client", "=", "None", "for", "idx", ",", "cl", "in", "enumerate", "(", "self", ".", "get_clients", "(", ")", ")", ":", "if", "self", ".", "filenames", "[", "idx", "]", "==", "filename", ":", "self", ".", "tabwidget", ".", "setCurrentIndex", "(", "idx", ")", "client", "=", "cl", "break", "return", "client" ]
Get client associated with a given file.
[ "Get", "client", "associated", "with", "a", "given", "file", "." ]
python
train
gitpython-developers/GitPython
git/objects/util.py
https://github.com/gitpython-developers/GitPython/blob/1f66e25c25cde2423917ee18c4704fff83b837d1/git/objects/util.py#L44-L65
def get_object_type_by_name(object_type_name): """ :return: type suitable to handle the given object type name. Use the type to create new instances. :param object_type_name: Member of TYPES :raise ValueError: In case object_type_name is unknown""" if object_type_name == b"commit": from . import commit return commit.Commit elif object_type_name == b"tag": from . import tag return tag.TagObject elif object_type_name == b"blob": from . import blob return blob.Blob elif object_type_name == b"tree": from . import tree return tree.Tree else: raise ValueError("Cannot handle unknown object type: %s" % object_type_name)
[ "def", "get_object_type_by_name", "(", "object_type_name", ")", ":", "if", "object_type_name", "==", "b\"commit\"", ":", "from", ".", "import", "commit", "return", "commit", ".", "Commit", "elif", "object_type_name", "==", "b\"tag\"", ":", "from", ".", "import", "tag", "return", "tag", ".", "TagObject", "elif", "object_type_name", "==", "b\"blob\"", ":", "from", ".", "import", "blob", "return", "blob", ".", "Blob", "elif", "object_type_name", "==", "b\"tree\"", ":", "from", ".", "import", "tree", "return", "tree", ".", "Tree", "else", ":", "raise", "ValueError", "(", "\"Cannot handle unknown object type: %s\"", "%", "object_type_name", ")" ]
:return: type suitable to handle the given object type name. Use the type to create new instances. :param object_type_name: Member of TYPES :raise ValueError: In case object_type_name is unknown
[ ":", "return", ":", "type", "suitable", "to", "handle", "the", "given", "object", "type", "name", ".", "Use", "the", "type", "to", "create", "new", "instances", "." ]
python
train
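Usage for the `get_object_type_by_name` record above. Note the argument is a bytes object, matching the `b"commit"` comparisons in the source:

```python
from git.objects.util import get_object_type_by_name

print(get_object_type_by_name(b"commit"))  # <class 'git.objects.commit.Commit'>
print(get_object_type_by_name(b"tree"))    # <class 'git.objects.tree.Tree'>

try:
    get_object_type_by_name(b"branch")     # not one of commit/tag/blob/tree
except ValueError as err:
    print(err)
```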
epfl-lts2/pygsp
pygsp/optimization.py
https://github.com/epfl-lts2/pygsp/blob/8ce5bde39206129287375af24fdbcd7edddca8c5/pygsp/optimization.py#L25-L96
def prox_tv(x, gamma, G, A=None, At=None, nu=1, tol=10e-4, maxit=200, use_matrix=True): r""" Total Variation proximal operator for graphs. This function computes the TV proximal operator for graphs. The TV norm is the one norm of the gradient. The gradient is defined in the function :meth:`pygsp.graphs.Graph.grad`. This function requires the PyUNLocBoX to be executed. This function solves: :math:`sol = \min_{z} \frac{1}{2} \|x - z\|_2^2 + \gamma \|x\|_{TV}` Parameters ---------- x: int Input signal gamma: ndarray Regularization parameter G: graph object Graphs structure A: lambda function Forward operator, this parameter allows to solve the following problem: :math:`sol = \min_{z} \frac{1}{2} \|x - z\|_2^2 + \gamma \| A x\|_{TV}` (default = Id) At: lambda function Adjoint operator. (default = Id) nu: float Bound on the norm of the operator (default = 1) tol: float Stops criterion for the loop. The algorithm will stop if : :math:`\frac{n(t) - n(t - 1)} {n(t)} < tol` where :math:`n(t) = f(x) + 0.5 \|x-y\|_2^2` is the objective function at iteration :math:`t` (default = :math:`10e-4`) maxit: int Maximum iteration. (default = 200) use_matrix: bool If a matrix should be used. (default = True) Returns ------- sol: solution Examples -------- """ if A is None: def A(x): return x if At is None: def At(x): return x tight = 0 l1_nu = 2 * G.lmax * nu if use_matrix: def l1_a(x): return G.Diff * A(x) def l1_at(x): return G.Diff * At(D.T * x) else: def l1_a(x): return G.grad(A(x)) def l1_at(x): return G.div(x) functions, _ = _import_pyunlocbox() functions.norm_l1(x, gamma, A=l1_a, At=l1_at, tight=tight, maxit=maxit, verbose=verbose, tol=tol)
[ "def", "prox_tv", "(", "x", ",", "gamma", ",", "G", ",", "A", "=", "None", ",", "At", "=", "None", ",", "nu", "=", "1", ",", "tol", "=", "10e-4", ",", "maxit", "=", "200", ",", "use_matrix", "=", "True", ")", ":", "if", "A", "is", "None", ":", "def", "A", "(", "x", ")", ":", "return", "x", "if", "At", "is", "None", ":", "def", "At", "(", "x", ")", ":", "return", "x", "tight", "=", "0", "l1_nu", "=", "2", "*", "G", ".", "lmax", "*", "nu", "if", "use_matrix", ":", "def", "l1_a", "(", "x", ")", ":", "return", "G", ".", "Diff", "*", "A", "(", "x", ")", "def", "l1_at", "(", "x", ")", ":", "return", "G", ".", "Diff", "*", "At", "(", "D", ".", "T", "*", "x", ")", "else", ":", "def", "l1_a", "(", "x", ")", ":", "return", "G", ".", "grad", "(", "A", "(", "x", ")", ")", "def", "l1_at", "(", "x", ")", ":", "return", "G", ".", "div", "(", "x", ")", "functions", ",", "_", "=", "_import_pyunlocbox", "(", ")", "functions", ".", "norm_l1", "(", "x", ",", "gamma", ",", "A", "=", "l1_a", ",", "At", "=", "l1_at", ",", "tight", "=", "tight", ",", "maxit", "=", "maxit", ",", "verbose", "=", "verbose", ",", "tol", "=", "tol", ")" ]
r""" Total Variation proximal operator for graphs. This function computes the TV proximal operator for graphs. The TV norm is the one norm of the gradient. The gradient is defined in the function :meth:`pygsp.graphs.Graph.grad`. This function requires the PyUNLocBoX to be executed. This function solves: :math:`sol = \min_{z} \frac{1}{2} \|x - z\|_2^2 + \gamma \|x\|_{TV}` Parameters ---------- x: int Input signal gamma: ndarray Regularization parameter G: graph object Graphs structure A: lambda function Forward operator, this parameter allows to solve the following problem: :math:`sol = \min_{z} \frac{1}{2} \|x - z\|_2^2 + \gamma \| A x\|_{TV}` (default = Id) At: lambda function Adjoint operator. (default = Id) nu: float Bound on the norm of the operator (default = 1) tol: float Stops criterion for the loop. The algorithm will stop if : :math:`\frac{n(t) - n(t - 1)} {n(t)} < tol` where :math:`n(t) = f(x) + 0.5 \|x-y\|_2^2` is the objective function at iteration :math:`t` (default = :math:`10e-4`) maxit: int Maximum iteration. (default = 200) use_matrix: bool If a matrix should be used. (default = True) Returns ------- sol: solution Examples --------
[ "r", "Total", "Variation", "proximal", "operator", "for", "graphs", "." ]
python
train
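As stored, `prox_tv` references undefined names (`D`, `verbose`) and never returns a solution, so treat it as a sketch rather than working code. For background, the l1 proximal step it hands to pyunlocbox reduces, in the unweighted elementwise case, to soft-thresholding; a self-contained numpy illustration (not the pygsp API):

```python
import numpy as np

def prox_l1(x, gamma):
    # prox of gamma * ||x||_1: elementwise soft-thresholding.
    return np.sign(x) * np.maximum(np.abs(x) - gamma, 0.0)

x = np.array([-2.0, -0.3, 0.1, 1.5])
print(prox_l1(x, 0.5))  # [-1.5 -0.   0.   1. ]
```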
jupyter-widgets/jupyterlab-sidecar
setupbase.py
https://github.com/jupyter-widgets/jupyterlab-sidecar/blob/8889d09f1a0933e2cbee06d4874f720b075b29e8/setupbase.py#L616-L630
def _iexplode_path(path): """Iterate over all the parts of a path. Splits path recursively with os.path.split(). """ (head, tail) = os.path.split(path) if not head or (not tail and head == path): if head: yield head if tail or not head: yield tail return for p in _iexplode_path(head): yield p yield tail
[ "def", "_iexplode_path", "(", "path", ")", ":", "(", "head", ",", "tail", ")", "=", "os", ".", "path", ".", "split", "(", "path", ")", "if", "not", "head", "or", "(", "not", "tail", "and", "head", "==", "path", ")", ":", "if", "head", ":", "yield", "head", "if", "tail", "or", "not", "head", ":", "yield", "tail", "return", "for", "p", "in", "_iexplode_path", "(", "head", ")", ":", "yield", "p", "yield", "tail" ]
Iterate over all the parts of a path. Splits path recursively with os.path.split().
[ "Iterate", "over", "all", "the", "parts", "of", "a", "path", "." ]
python
test
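With the `_iexplode_path` helper from the record above in scope, the recursion yields path parts from the root outward (POSIX paths shown):

```python
print(list(_iexplode_path('src/pkg/module.py')))
# ['src', 'pkg', 'module.py']

print(list(_iexplode_path('/usr/local/bin')))
# ['/', 'usr', 'local', 'bin']   (the leading separator is its own part)
```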
marshmallow-code/apispec
src/apispec/yaml_utils.py
https://github.com/marshmallow-code/apispec/blob/e92ceffd12b2e392b8d199ed314bd2a7e6512dff/src/apispec/yaml_utils.py#L32-L47
def load_yaml_from_docstring(docstring): """Loads YAML from docstring.""" split_lines = trim_docstring(docstring).split("\n") # Cut YAML from rest of docstring for index, line in enumerate(split_lines): line = line.strip() if line.startswith("---"): cut_from = index break else: return {} yaml_string = "\n".join(split_lines[cut_from:]) yaml_string = dedent(yaml_string) return yaml.safe_load(yaml_string) or {}
[ "def", "load_yaml_from_docstring", "(", "docstring", ")", ":", "split_lines", "=", "trim_docstring", "(", "docstring", ")", ".", "split", "(", "\"\\n\"", ")", "# Cut YAML from rest of docstring", "for", "index", ",", "line", "in", "enumerate", "(", "split_lines", ")", ":", "line", "=", "line", ".", "strip", "(", ")", "if", "line", ".", "startswith", "(", "\"---\"", ")", ":", "cut_from", "=", "index", "break", "else", ":", "return", "{", "}", "yaml_string", "=", "\"\\n\"", ".", "join", "(", "split_lines", "[", "cut_from", ":", "]", ")", "yaml_string", "=", "dedent", "(", "yaml_string", ")", "return", "yaml", ".", "safe_load", "(", "yaml_string", ")", "or", "{", "}" ]
Loads YAML from docstring.
[ "Loads", "YAML", "from", "docstring", "." ]
python
train
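A worked example for `load_yaml_from_docstring`: everything before the `---` marker is discarded and the rest is parsed as YAML.

```python
from apispec.yaml_utils import load_yaml_from_docstring

def get_pet(pet_id):
    """Fetch a pet.

    Prose before the marker is ignored.
    ---
    get:
      responses:
        200:
          description: the pet
    """

print(load_yaml_from_docstring(get_pet.__doc__))
# {'get': {'responses': {200: {'description': 'the pet'}}}}
```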
Microsoft/nni
examples/trials/weight_sharing/ga_squad/attention.py
https://github.com/Microsoft/nni/blob/c7cc8db32da8d2ec77a382a55089f4e17247ce41/examples/trials/weight_sharing/ga_squad/attention.py#L106-L160
def get_prob(self, src, tgt, mask, pre_compute, return_logits=False): ''' :param s: [src_sequence_length, batch_size, src_dim] :param h: [batch_size, tgt_dim] or [tgt_sequence_length, batch_size, tgt_dim] :param mask: [src_sequence_length, batch_size]\ or [tgt_sequence_length, src_sequence_length, batch_sizse] :param pre_compute: [src_sequence_length, batch_size, hidden_dim] :return: [src_sequence_length, batch_size]\ or [tgt_sequence_length, src_sequence_length, batch_size] ''' s_shape = src.get_shape().as_list() h_shape = tgt.get_shape().as_list() src_dim = s_shape[-1] tgt_dim = h_shape[-1] assert src_dim is not None, 'src dimension must be defined' assert tgt_dim is not None, 'tgt dimension must be defined' self._define_params(src_dim, tgt_dim) if len(h_shape) == 2: tgt = tf.expand_dims(tgt, 0) if pre_compute is None: pre_compute = self.get_pre_compute(src) buf0 = pre_compute buf1 = tf.tensordot(tgt, self.var['U'], axes=[[2], [0]]) buf2 = tf.tanh(tf.expand_dims(buf0, 0) + tf.expand_dims(buf1, 1)) if not self.is_vanilla: xh1 = tgt xh2 = tgt s1 = src if self.need_padding: xh1 = tf.tensordot(xh1, self.var['V_t'], 1) xh2 = tf.tensordot(xh2, self.var['S_t'], 1) s1 = tf.tensordot(s1, self.var['V_s'], 1) if not self.is_identity_transform: xh1 = tf.tensordot(xh1, self.var['T'], 1) xh2 = tf.tensordot(xh2, self.var['T'], 1) buf3 = tf.expand_dims(s1, 0) * tf.expand_dims(xh1, 1) buf3 = tf.tanh(tf.tensordot(buf3, self.var['V'], axes=[[3], [0]])) buf = tf.reshape(tf.tanh(buf2 + buf3), shape=tf.shape(buf3)) else: buf = buf2 v = self.var['v'] e = tf.tensordot(buf, v, [[3], [0]]) e = tf.squeeze(e, axis=[3]) tmp = tf.reshape(e + (mask - 1) * 10000.0, shape=tf.shape(e)) prob = tf.nn.softmax(tmp, 1) if len(h_shape) == 2: prob = tf.squeeze(prob, axis=[0]) tmp = tf.squeeze(tmp, axis=[0]) if return_logits: return prob, tmp return prob
[ "def", "get_prob", "(", "self", ",", "src", ",", "tgt", ",", "mask", ",", "pre_compute", ",", "return_logits", "=", "False", ")", ":", "s_shape", "=", "src", ".", "get_shape", "(", ")", ".", "as_list", "(", ")", "h_shape", "=", "tgt", ".", "get_shape", "(", ")", ".", "as_list", "(", ")", "src_dim", "=", "s_shape", "[", "-", "1", "]", "tgt_dim", "=", "h_shape", "[", "-", "1", "]", "assert", "src_dim", "is", "not", "None", ",", "'src dimension must be defined'", "assert", "tgt_dim", "is", "not", "None", ",", "'tgt dimension must be defined'", "self", ".", "_define_params", "(", "src_dim", ",", "tgt_dim", ")", "if", "len", "(", "h_shape", ")", "==", "2", ":", "tgt", "=", "tf", ".", "expand_dims", "(", "tgt", ",", "0", ")", "if", "pre_compute", "is", "None", ":", "pre_compute", "=", "self", ".", "get_pre_compute", "(", "src", ")", "buf0", "=", "pre_compute", "buf1", "=", "tf", ".", "tensordot", "(", "tgt", ",", "self", ".", "var", "[", "'U'", "]", ",", "axes", "=", "[", "[", "2", "]", ",", "[", "0", "]", "]", ")", "buf2", "=", "tf", ".", "tanh", "(", "tf", ".", "expand_dims", "(", "buf0", ",", "0", ")", "+", "tf", ".", "expand_dims", "(", "buf1", ",", "1", ")", ")", "if", "not", "self", ".", "is_vanilla", ":", "xh1", "=", "tgt", "xh2", "=", "tgt", "s1", "=", "src", "if", "self", ".", "need_padding", ":", "xh1", "=", "tf", ".", "tensordot", "(", "xh1", ",", "self", ".", "var", "[", "'V_t'", "]", ",", "1", ")", "xh2", "=", "tf", ".", "tensordot", "(", "xh2", ",", "self", ".", "var", "[", "'S_t'", "]", ",", "1", ")", "s1", "=", "tf", ".", "tensordot", "(", "s1", ",", "self", ".", "var", "[", "'V_s'", "]", ",", "1", ")", "if", "not", "self", ".", "is_identity_transform", ":", "xh1", "=", "tf", ".", "tensordot", "(", "xh1", ",", "self", ".", "var", "[", "'T'", "]", ",", "1", ")", "xh2", "=", "tf", ".", "tensordot", "(", "xh2", ",", "self", ".", "var", "[", "'T'", "]", ",", "1", ")", "buf3", "=", "tf", ".", "expand_dims", "(", "s1", ",", "0", ")", "*", "tf", ".", "expand_dims", "(", "xh1", ",", "1", ")", "buf3", "=", "tf", ".", "tanh", "(", "tf", ".", "tensordot", "(", "buf3", ",", "self", ".", "var", "[", "'V'", "]", ",", "axes", "=", "[", "[", "3", "]", ",", "[", "0", "]", "]", ")", ")", "buf", "=", "tf", ".", "reshape", "(", "tf", ".", "tanh", "(", "buf2", "+", "buf3", ")", ",", "shape", "=", "tf", ".", "shape", "(", "buf3", ")", ")", "else", ":", "buf", "=", "buf2", "v", "=", "self", ".", "var", "[", "'v'", "]", "e", "=", "tf", ".", "tensordot", "(", "buf", ",", "v", ",", "[", "[", "3", "]", ",", "[", "0", "]", "]", ")", "e", "=", "tf", ".", "squeeze", "(", "e", ",", "axis", "=", "[", "3", "]", ")", "tmp", "=", "tf", ".", "reshape", "(", "e", "+", "(", "mask", "-", "1", ")", "*", "10000.0", ",", "shape", "=", "tf", ".", "shape", "(", "e", ")", ")", "prob", "=", "tf", ".", "nn", ".", "softmax", "(", "tmp", ",", "1", ")", "if", "len", "(", "h_shape", ")", "==", "2", ":", "prob", "=", "tf", ".", "squeeze", "(", "prob", ",", "axis", "=", "[", "0", "]", ")", "tmp", "=", "tf", ".", "squeeze", "(", "tmp", ",", "axis", "=", "[", "0", "]", ")", "if", "return_logits", ":", "return", "prob", ",", "tmp", "return", "prob" ]
:param s: [src_sequence_length, batch_size, src_dim] :param h: [batch_size, tgt_dim] or [tgt_sequence_length, batch_size, tgt_dim] :param mask: [src_sequence_length, batch_size]\ or [tgt_sequence_length, src_sequence_length, batch_sizse] :param pre_compute: [src_sequence_length, batch_size, hidden_dim] :return: [src_sequence_length, batch_size]\ or [tgt_sequence_length, src_sequence_length, batch_size]
[ ":", "param", "s", ":", "[", "src_sequence_length", "batch_size", "src_dim", "]", ":", "param", "h", ":", "[", "batch_size", "tgt_dim", "]", "or", "[", "tgt_sequence_length", "batch_size", "tgt_dim", "]", ":", "param", "mask", ":", "[", "src_sequence_length", "batch_size", "]", "\\", "or", "[", "tgt_sequence_length", "src_sequence_length", "batch_sizse", "]", ":", "param", "pre_compute", ":", "[", "src_sequence_length", "batch_size", "hidden_dim", "]", ":", "return", ":", "[", "src_sequence_length", "batch_size", "]", "\\", "or", "[", "tgt_sequence_length", "src_sequence_length", "batch_size", "]" ]
python
train
theolind/pymysensors
mqtt.py
https://github.com/theolind/pymysensors/blob/a139ab6e2f6b71ebaf37282f69bfd0f7fe6193b6/mqtt.py#L18-L20
def publish(self, topic, payload, qos, retain): """Publish an MQTT message.""" self._mqttc.publish(topic, payload, qos, retain)
[ "def", "publish", "(", "self", ",", "topic", ",", "payload", ",", "qos", ",", "retain", ")", ":", "self", ".", "_mqttc", ".", "publish", "(", "topic", ",", "payload", ",", "qos", ",", "retain", ")" ]
Publish an MQTT message.
[ "Publish", "an", "MQTT", "message", "." ]
python
train
HazyResearch/metal
metal/contrib/backends/wrapper.py
https://github.com/HazyResearch/metal/blob/c24e3772e25ac6d0917b8b7af4c1bcb92928f84a/metal/contrib/backends/wrapper.py#L190-L208
def _build_vocab(self, sentences, markers=[]): """ Initalize symbol table dictionary :param sentences: :param markers: :return: """ from snorkel.learning.pytorch.rnn.utils import SymbolTable vocab = Counter() for sent in sentences: for w in sent: vocab[w] += 1 word_dict = SymbolTable() list(map(word_dict.get, vocab)) list(map(word_dict.get, markers)) return word_dict
[ "def", "_build_vocab", "(", "self", ",", "sentences", ",", "markers", "=", "[", "]", ")", ":", "from", "snorkel", ".", "learning", ".", "pytorch", ".", "rnn", ".", "utils", "import", "SymbolTable", "vocab", "=", "Counter", "(", ")", "for", "sent", "in", "sentences", ":", "for", "w", "in", "sent", ":", "vocab", "[", "w", "]", "+=", "1", "word_dict", "=", "SymbolTable", "(", ")", "list", "(", "map", "(", "word_dict", ".", "get", ",", "vocab", ")", ")", "list", "(", "map", "(", "word_dict", ".", "get", ",", "markers", ")", ")", "return", "word_dict" ]
Initalize symbol table dictionary :param sentences: :param markers: :return:
[ "Initalize", "symbol", "table", "dictionary" ]
python
train
BernardFW/bernard
src/bernard/layers/definitions.py
https://github.com/BernardFW/bernard/blob/9c55703e5ffe5717c9fa39793df59dbfa5b4c5ab/src/bernard/layers/definitions.py#L75-L80
async def become(self, layer_type: Type[L], request: 'Request') -> L: """ Transform this layer into another layer type """ raise ValueError('Cannot become "{}"'.format(layer_type.__name__))
[ "async", "def", "become", "(", "self", ",", "layer_type", ":", "Type", "[", "L", "]", ",", "request", ":", "'Request'", ")", "->", "L", ":", "raise", "ValueError", "(", "'Cannot become \"{}\"'", ".", "format", "(", "layer_type", ".", "__name__", ")", ")" ]
Transform this layer into another layer type
[ "Transform", "this", "layer", "into", "another", "layer", "type" ]
python
train
pandas-dev/pandas
pandas/io/excel/_util.py
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/io/excel/_util.py#L176-L204
def _fill_mi_header(row, control_row): """Forward fill blank entries in row but only inside the same parent index. Used for creating headers in Multiindex. Parameters ---------- row : list List of items in a single row. control_row : list of bool Helps to determine if particular column is in same parent index as the previous value. Used to stop propagation of empty cells between different indexes. Returns ---------- Returns changed row and control_row """ last = row[0] for i in range(1, len(row)): if not control_row[i]: last = row[i] if row[i] == '' or row[i] is None: row[i] = last else: control_row[i] = False last = row[i] return row, control_row
[ "def", "_fill_mi_header", "(", "row", ",", "control_row", ")", ":", "last", "=", "row", "[", "0", "]", "for", "i", "in", "range", "(", "1", ",", "len", "(", "row", ")", ")", ":", "if", "not", "control_row", "[", "i", "]", ":", "last", "=", "row", "[", "i", "]", "if", "row", "[", "i", "]", "==", "''", "or", "row", "[", "i", "]", "is", "None", ":", "row", "[", "i", "]", "=", "last", "else", ":", "control_row", "[", "i", "]", "=", "False", "last", "=", "row", "[", "i", "]", "return", "row", ",", "control_row" ]
Forward fill blank entries in row but only inside the same parent index. Used for creating headers in Multiindex. Parameters ---------- row : list List of items in a single row. control_row : list of bool Helps to determine if particular column is in same parent index as the previous value. Used to stop propagation of empty cells between different indexes. Returns ---------- Returns changed row and control_row
[ "Forward", "fill", "blank", "entries", "in", "row", "but", "only", "inside", "the", "same", "parent", "index", "." ]
python
train
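A trace of `_fill_mi_header` with the helper from the record above in scope: blanks are forward-filled, a new non-empty value flips its `control_row` entry to `False`, and a pre-existing `False` blocks propagation across parent indexes.

```python
row = ['year', '', '', 'region', '']
filled, control = _fill_mi_header(row, [True] * 5)
print(filled)   # ['year', 'year', 'year', 'region', 'region']
print(control)  # [True, True, True, False, True]

# A False control entry marks a new parent index, so the blank stays blank:
filled, _ = _fill_mi_header(['year', '', 'region'], [True, False, True])
print(filled)   # ['year', '', 'region']
```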
UDST/urbansim
urbansim/models/regression.py
https://github.com/UDST/urbansim/blob/79f815a6503e109f50be270cee92d0f4a34f49ef/urbansim/models/regression.py#L178-L204
def _model_fit_to_table(fit): """ Produce a pandas DataFrame of model fit results from a statsmodels fit result object. Parameters ---------- fit : statsmodels.regression.linear_model.RegressionResults Returns ------- fit_parameters : pandas.DataFrame Will have columns 'Coefficient', 'Std. Error', and 'T-Score'. Index will be model terms. This frame will also have non-standard attributes .rsquared and .rsquared_adj with the same meaning and value as on `fit`. """ fit_parameters = pd.DataFrame( {'Coefficient': fit.params, 'Std. Error': fit.bse, 'T-Score': fit.tvalues}) fit_parameters.rsquared = fit.rsquared fit_parameters.rsquared_adj = fit.rsquared_adj return fit_parameters
[ "def", "_model_fit_to_table", "(", "fit", ")", ":", "fit_parameters", "=", "pd", ".", "DataFrame", "(", "{", "'Coefficient'", ":", "fit", ".", "params", ",", "'Std. Error'", ":", "fit", ".", "bse", ",", "'T-Score'", ":", "fit", ".", "tvalues", "}", ")", "fit_parameters", ".", "rsquared", "=", "fit", ".", "rsquared", "fit_parameters", ".", "rsquared_adj", "=", "fit", ".", "rsquared_adj", "return", "fit_parameters" ]
Produce a pandas DataFrame of model fit results from a statsmodels fit result object. Parameters ---------- fit : statsmodels.regression.linear_model.RegressionResults Returns ------- fit_parameters : pandas.DataFrame Will have columns 'Coefficient', 'Std. Error', and 'T-Score'. Index will be model terms. This frame will also have non-standard attributes .rsquared and .rsquared_adj with the same meaning and value as on `fit`.
[ "Produce", "a", "pandas", "DataFrame", "of", "model", "fit", "results", "from", "a", "statsmodels", "fit", "result", "object", "." ]
python
train
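A sketch of what `_model_fit_to_table` consumes and produces, using a toy statsmodels OLS fit (the helper is private to `urbansim.models.regression`, so this assumes it is in scope):

```python
import numpy as np
import statsmodels.api as sm

x = np.linspace(0, 1, 50)
X = sm.add_constant(x)
y = 2.0 + 3.0 * x + np.random.normal(scale=0.1, size=50)

fit = sm.OLS(y, X).fit()
table = _model_fit_to_table(fit)
print(table)               # Coefficient / Std. Error / T-Score per term
print(table.rsquared_adj)  # carried over as a non-standard DataFrame attribute
```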
robinandeer/puzzle
puzzle/utils/get_info.py
https://github.com/robinandeer/puzzle/blob/9476f05b416d3a5135d25492cb31411fdf831c58/puzzle/utils/get_info.py#L121-L139
def get_cytoband_coord(chrom, pos): """Get the cytoband coordinate for a position Args: chrom(str): A chromosome pos(int): The position Returns: cytoband """ chrom = chrom.strip('chr') pos = int(pos) result = None logger.debug("Finding Cytoband for chrom:{0} pos:{1}".format(chrom, pos)) if chrom in CYTOBANDS: for interval in CYTOBANDS[chrom][pos]: result = "{0}{1}".format(chrom, interval.data) return result
[ "def", "get_cytoband_coord", "(", "chrom", ",", "pos", ")", ":", "chrom", "=", "chrom", ".", "strip", "(", "'chr'", ")", "pos", "=", "int", "(", "pos", ")", "result", "=", "None", "logger", ".", "debug", "(", "\"Finding Cytoband for chrom:{0} pos:{1}\"", ".", "format", "(", "chrom", ",", "pos", ")", ")", "if", "chrom", "in", "CYTOBANDS", ":", "for", "interval", "in", "CYTOBANDS", "[", "chrom", "]", "[", "pos", "]", ":", "result", "=", "\"{0}{1}\"", ".", "format", "(", "chrom", ",", "interval", ".", "data", ")", "return", "result" ]
Get the cytoband coordinate for a position Args: chrom(str): A chromosome pos(int): The position Returns: cytoband
[ "Get", "the", "cytoband", "coordinate", "for", "a", "position" ]
python
train
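The `CYTOBANDS[chrom][pos]` lookup pattern in the record suggests an interval-tree structure whose intervals carry the band name in `.data`; a hedged reconstruction with the `intervaltree` package (band boundaries are made up):

```python
from intervaltree import IntervalTree

cytobands = {'1': IntervalTree()}
cytobands['1'].addi(0, 2300000, 'p36.33')         # hypothetical band limits
cytobands['1'].addi(2300000, 5400000, 'p36.32')

chrom, pos = '1', 3000000
for interval in cytobands[chrom][pos]:            # point query returns Intervals
    print('{0}{1}'.format(chrom, interval.data))  # -> 1p36.32
```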
okfn/ofs
ofs/command.py
https://github.com/okfn/ofs/blob/c110cbecd7d0ae7e877963914a1a5af030cd6d45/ofs/command.py#L87-L187
def proxy_upload(self, path, filename, content_type=None, content_encoding=None, cb=None, num_cb=None): """ This is the main function that uploads. We assume the bucket and key (== path) exists. What we do here is simple. Calculate the headers we will need, (e.g. md5, content-type, etc). Then we ask the self.get_proxy_config method to fill in the authentication information and tell us which remote host we should talk to for the upload. From there, the rest is ripped from boto.key.Key.send_file """ from boto.connection import AWSAuthConnection import mimetypes from hashlib import md5 import base64 BufferSize = 65536 ## set to something very small to make sure ## chunking is working properly fp = open(filename) headers = { 'Content-Type': content_type } if content_type is None: content_type = mimetypes.guess_type(filename)[0] or "text/plain" headers['Content-Type'] = content_type if content_encoding is not None: headers['Content-Encoding'] = content_encoding m = md5() fp.seek(0) s = fp.read(BufferSize) while s: m.update(s) s = fp.read(BufferSize) self.size = fp.tell() fp.seek(0) self.md5 = m.hexdigest() headers['Content-MD5'] = base64.encodestring(m.digest()).rstrip('\n') headers['Content-Length'] = str(self.size) headers['Expect'] = '100-Continue' host, headers = self.get_proxy_config(headers, path) ### how to do this same thing with curl instead... print("curl -i --trace-ascii foo.log -T %s -H %s https://%s%s" % ( filename, " -H ".join("'%s: %s'" % (k,v) for k,v in headers.items()), host, path )) def sender(http_conn, method, path, data, headers): http_conn.putrequest(method, path) for key in headers: http_conn.putheader(key, headers[key]) http_conn.endheaders() fp.seek(0) http_conn.set_debuglevel(0) ### XXX set to e.g. 4 to see what going on if cb: if num_cb > 2: cb_count = self.size / BufferSize / (num_cb-2) elif num_cb < 0: cb_count = -1 else: cb_count = 0 i = total_bytes = 0 cb(total_bytes, self.size) l = fp.read(BufferSize) while len(l) > 0: http_conn.send(l) if cb: total_bytes += len(l) i += 1 if i == cb_count or cb_count == -1: cb(total_bytes, self.size) i = 0 l = fp.read(BufferSize) if cb: cb(total_bytes, self.size) response = http_conn.getresponse() body = response.read() fp.seek(0) if response.status == 500 or response.status == 503 or \ response.getheader('location'): # we'll try again return response elif response.status >= 200 and response.status <= 299: self.etag = response.getheader('etag') if self.etag != '"%s"' % self.md5: raise Exception('ETag from S3 did not match computed MD5') return response else: #raise provider.storage_response_error( # response.status, response.reason, body) raise Exception(response.status, response.reason, body) awsc = AWSAuthConnection(host, aws_access_key_id="key_id", aws_secret_access_key="secret") awsc._mexe('PUT', path, None, headers, sender=sender)
[ "def", "proxy_upload", "(", "self", ",", "path", ",", "filename", ",", "content_type", "=", "None", ",", "content_encoding", "=", "None", ",", "cb", "=", "None", ",", "num_cb", "=", "None", ")", ":", "from", "boto", ".", "connection", "import", "AWSAuthConnection", "import", "mimetypes", "from", "hashlib", "import", "md5", "import", "base64", "BufferSize", "=", "65536", "## set to something very small to make sure", "## chunking is working properly", "fp", "=", "open", "(", "filename", ")", "headers", "=", "{", "'Content-Type'", ":", "content_type", "}", "if", "content_type", "is", "None", ":", "content_type", "=", "mimetypes", ".", "guess_type", "(", "filename", ")", "[", "0", "]", "or", "\"text/plain\"", "headers", "[", "'Content-Type'", "]", "=", "content_type", "if", "content_encoding", "is", "not", "None", ":", "headers", "[", "'Content-Encoding'", "]", "=", "content_encoding", "m", "=", "md5", "(", ")", "fp", ".", "seek", "(", "0", ")", "s", "=", "fp", ".", "read", "(", "BufferSize", ")", "while", "s", ":", "m", ".", "update", "(", "s", ")", "s", "=", "fp", ".", "read", "(", "BufferSize", ")", "self", ".", "size", "=", "fp", ".", "tell", "(", ")", "fp", ".", "seek", "(", "0", ")", "self", ".", "md5", "=", "m", ".", "hexdigest", "(", ")", "headers", "[", "'Content-MD5'", "]", "=", "base64", ".", "encodestring", "(", "m", ".", "digest", "(", ")", ")", ".", "rstrip", "(", "'\\n'", ")", "headers", "[", "'Content-Length'", "]", "=", "str", "(", "self", ".", "size", ")", "headers", "[", "'Expect'", "]", "=", "'100-Continue'", "host", ",", "headers", "=", "self", ".", "get_proxy_config", "(", "headers", ",", "path", ")", "### how to do this same thing with curl instead...", "print", "(", "\"curl -i --trace-ascii foo.log -T %s -H %s https://%s%s\"", "%", "(", "filename", ",", "\" -H \"", ".", "join", "(", "\"'%s: %s'\"", "%", "(", "k", ",", "v", ")", "for", "k", ",", "v", "in", "headers", ".", "items", "(", ")", ")", ",", "host", ",", "path", ")", ")", "def", "sender", "(", "http_conn", ",", "method", ",", "path", ",", "data", ",", "headers", ")", ":", "http_conn", ".", "putrequest", "(", "method", ",", "path", ")", "for", "key", "in", "headers", ":", "http_conn", ".", "putheader", "(", "key", ",", "headers", "[", "key", "]", ")", "http_conn", ".", "endheaders", "(", ")", "fp", ".", "seek", "(", "0", ")", "http_conn", ".", "set_debuglevel", "(", "0", ")", "### XXX set to e.g. 
4 to see what going on", "if", "cb", ":", "if", "num_cb", ">", "2", ":", "cb_count", "=", "self", ".", "size", "/", "BufferSize", "/", "(", "num_cb", "-", "2", ")", "elif", "num_cb", "<", "0", ":", "cb_count", "=", "-", "1", "else", ":", "cb_count", "=", "0", "i", "=", "total_bytes", "=", "0", "cb", "(", "total_bytes", ",", "self", ".", "size", ")", "l", "=", "fp", ".", "read", "(", "BufferSize", ")", "while", "len", "(", "l", ")", ">", "0", ":", "http_conn", ".", "send", "(", "l", ")", "if", "cb", ":", "total_bytes", "+=", "len", "(", "l", ")", "i", "+=", "1", "if", "i", "==", "cb_count", "or", "cb_count", "==", "-", "1", ":", "cb", "(", "total_bytes", ",", "self", ".", "size", ")", "i", "=", "0", "l", "=", "fp", ".", "read", "(", "BufferSize", ")", "if", "cb", ":", "cb", "(", "total_bytes", ",", "self", ".", "size", ")", "response", "=", "http_conn", ".", "getresponse", "(", ")", "body", "=", "response", ".", "read", "(", ")", "fp", ".", "seek", "(", "0", ")", "if", "response", ".", "status", "==", "500", "or", "response", ".", "status", "==", "503", "or", "response", ".", "getheader", "(", "'location'", ")", ":", "# we'll try again", "return", "response", "elif", "response", ".", "status", ">=", "200", "and", "response", ".", "status", "<=", "299", ":", "self", ".", "etag", "=", "response", ".", "getheader", "(", "'etag'", ")", "if", "self", ".", "etag", "!=", "'\"%s\"'", "%", "self", ".", "md5", ":", "raise", "Exception", "(", "'ETag from S3 did not match computed MD5'", ")", "return", "response", "else", ":", "#raise provider.storage_response_error(", "# response.status, response.reason, body)", "raise", "Exception", "(", "response", ".", "status", ",", "response", ".", "reason", ",", "body", ")", "awsc", "=", "AWSAuthConnection", "(", "host", ",", "aws_access_key_id", "=", "\"key_id\"", ",", "aws_secret_access_key", "=", "\"secret\"", ")", "awsc", ".", "_mexe", "(", "'PUT'", ",", "path", ",", "None", ",", "headers", ",", "sender", "=", "sender", ")" ]
This is the main function that uploads. We assume the bucket and key (== path) exists. What we do here is simple. Calculate the headers we will need, (e.g. md5, content-type, etc). Then we ask the self.get_proxy_config method to fill in the authentication information and tell us which remote host we should talk to for the upload. From there, the rest is ripped from boto.key.Key.send_file
[ "This", "is", "the", "main", "function", "that", "uploads", ".", "We", "assume", "the", "bucket", "and", "key", "(", "==", "path", ")", "exists", ".", "What", "we", "do", "here", "is", "simple", ".", "Calculate", "the", "headers", "we", "will", "need", "(", "e", ".", "g", ".", "md5", "content", "-", "type", "etc", ")", ".", "Then", "we", "ask", "the", "self", ".", "get_proxy_config", "method", "to", "fill", "in", "the", "authentication", "information", "and", "tell", "us", "which", "remote", "host", "we", "should", "talk", "to", "for", "the", "upload", ".", "From", "there", "the", "rest", "is", "ripped", "from", "boto", ".", "key", ".", "Key", ".", "send_file" ]
python
train
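The record above chunks the file through md5 and base64-encodes the binary digest for the Content-MD5 header. A minimal stand-alone sketch of that computation, using only the standard library (base64.encodestring is the legacy Python 2 spelling; encodebytes is its modern equivalent):

import base64
from hashlib import md5

def content_md5(filename, buffer_size=65536):
    # Hash in fixed-size chunks so large uploads never sit fully in memory.
    m = md5()
    with open(filename, 'rb') as fp:
        chunk = fp.read(buffer_size)
        while chunk:
            m.update(chunk)
            chunk = fp.read(buffer_size)
    # S3 expects the base64 of the *binary* digest, not the hex digest.
    return base64.encodebytes(m.digest()).rstrip(b'\n').decode('ascii')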
warner/magic-wormhole
src/wormhole/cli/cli.py
https://github.com/warner/magic-wormhole/blob/995d3f546a33eec4f64df929848d86937d2003a7/src/wormhole/cli/cli.py#L332-L341
def ssh_invite(ctx, code_length, user, **kwargs): """ Add a public-key to a ~/.ssh/authorized_keys file """ for name, value in kwargs.items(): setattr(ctx.obj, name, value) from . import cmd_ssh ctx.obj.code_length = code_length ctx.obj.ssh_user = user return go(cmd_ssh.invite, ctx.obj)
[ "def", "ssh_invite", "(", "ctx", ",", "code_length", ",", "user", ",", "*", "*", "kwargs", ")", ":", "for", "name", ",", "value", "in", "kwargs", ".", "items", "(", ")", ":", "setattr", "(", "ctx", ".", "obj", ",", "name", ",", "value", ")", "from", ".", "import", "cmd_ssh", "ctx", ".", "obj", ".", "code_length", "=", "code_length", "ctx", ".", "obj", ".", "ssh_user", "=", "user", "return", "go", "(", "cmd_ssh", ".", "invite", ",", "ctx", ".", "obj", ")" ]
Add a public-key to a ~/.ssh/authorized_keys file
[ "Add", "a", "public", "-", "key", "to", "a", "~", "/", ".", "ssh", "/", "authorized_keys", "file" ]
python
train
fprimex/zdesk
zdesk/zdesk_api.py
https://github.com/fprimex/zdesk/blob/851611c13b4d530e9df31390b3ec709baf0a0188/zdesk/zdesk_api.py#L2919-L2922
def resource_collection_create(self, data, **kwargs): "https://developer.zendesk.com/rest_api/docs/core/resource_collections#create-a-resource-collection" api_path = "/api/v2/resource_collections.json" return self.call(api_path, method="POST", data=data, **kwargs)
[ "def", "resource_collection_create", "(", "self", ",", "data", ",", "*", "*", "kwargs", ")", ":", "api_path", "=", "\"/api/v2/resource_collections.json\"", "return", "self", ".", "call", "(", "api_path", ",", "method", "=", "\"POST\"", ",", "data", "=", "data", ",", "*", "*", "kwargs", ")" ]
https://developer.zendesk.com/rest_api/docs/core/resource_collections#create-a-resource-collection
[ "https", ":", "//", "developer", ".", "zendesk", ".", "com", "/", "rest_api", "/", "docs", "/", "core", "/", "resource_collections#create", "-", "a", "-", "resource", "-", "collection" ]
python
train
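A hedged usage sketch for the wrapper above; the subdomain, credentials, and payload contents are placeholders, not values from the source:

from zdesk import Zendesk

zd = Zendesk('https://example.zendesk.com', zdesk_email='agent@example.com',
             zdesk_password='my-api-token', zdesk_token=True)
# `data` is passed straight through as the POST body of
# /api/v2/resource_collections.json.
response = zd.resource_collection_create(data={'payload': {}})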
django-fluent/django-fluent-contents
fluent_contents/extensions/pluginpool.py
https://github.com/django-fluent/django-fluent-contents/blob/896f14add58471b98d7aa295b2c9e6abedec9003/fluent_contents/extensions/pluginpool.py#L113-L126
def get_allowed_plugins(self, placeholder_slot): """ Return the plugins which are supported in the given placeholder name. """ # See if there is a limit imposed. slot_config = appsettings.FLUENT_CONTENTS_PLACEHOLDER_CONFIG.get(placeholder_slot) or {} plugins = slot_config.get('plugins') if not plugins: return self.get_plugins() else: try: return self.get_plugins_by_name(*plugins) except PluginNotFound as e: raise PluginNotFound(str(e) + " Update the plugin list of the FLUENT_CONTENTS_PLACEHOLDER_CONFIG['{0}'] setting.".format(placeholder_slot))
[ "def", "get_allowed_plugins", "(", "self", ",", "placeholder_slot", ")", ":", "# See if there is a limit imposed.", "slot_config", "=", "appsettings", ".", "FLUENT_CONTENTS_PLACEHOLDER_CONFIG", ".", "get", "(", "placeholder_slot", ")", "or", "{", "}", "plugins", "=", "slot_config", ".", "get", "(", "'plugins'", ")", "if", "not", "plugins", ":", "return", "self", ".", "get_plugins", "(", ")", "else", ":", "try", ":", "return", "self", ".", "get_plugins_by_name", "(", "*", "plugins", ")", "except", "PluginNotFound", "as", "e", ":", "raise", "PluginNotFound", "(", "str", "(", "e", ")", "+", "\" Update the plugin list of the FLUENT_CONTENTS_PLACEHOLDER_CONFIG['{0}'] setting.\"", ".", "format", "(", "placeholder_slot", ")", ")" ]
Return the plugins which are supported in the given placeholder name.
[ "Return", "the", "plugins", "which", "are", "supported", "in", "the", "given", "placeholder", "name", "." ]
python
train
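For context, a sketch of the setting this method consults; the slot and plugin names are illustrative:

# settings.py (hypothetical)
FLUENT_CONTENTS_PLACEHOLDER_CONFIG = {
    'sidebar': {
        # Only these plugins may be placed in the 'sidebar' slot; a typo here
        # surfaces as the PluginNotFound re-raised above.
        'plugins': ('TextPlugin', 'RawHtmlPlugin'),
    },
    # Slots without an entry, or without a 'plugins' key, fall back to
    # get_plugins(), i.e. everything is allowed.
}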
mattimck/python-exist
exist/auth.py
https://github.com/mattimck/python-exist/blob/2c4be9d176d8e8007c4e020ee7cd6263a2096abb/exist/auth.py#L138-L157
def index(self, state, code=None, error=None): """ Receive an Exist response containing a verification code. Use the code to fetch the access_token. """ error = None if code: try: auth_token = self.fetch_token(code, state) except MissingTokenError: error = self._fmt_failure( 'Missing access token parameter.</br>Please check that ' 'you are using the correct client_secret') except MismatchingStateError: error = self._fmt_failure('CSRF Warning! Mismatching state') else: error = self._fmt_failure('Unknown error while authenticating') # Use a thread to shutdown cherrypy so we can return HTML first self._shutdown_cherrypy() return error if error else self.success_html % (auth_token)
[ "def", "index", "(", "self", ",", "state", ",", "code", "=", "None", ",", "error", "=", "None", ")", ":", "error", "=", "None", "if", "code", ":", "try", ":", "auth_token", "=", "self", ".", "fetch_token", "(", "code", ",", "state", ")", "except", "MissingTokenError", ":", "error", "=", "self", ".", "_fmt_failure", "(", "'Missing access token parameter.</br>Please check that '", "'you are using the correct client_secret'", ")", "except", "MismatchingStateError", ":", "error", "=", "self", ".", "_fmt_failure", "(", "'CSRF Warning! Mismatching state'", ")", "else", ":", "error", "=", "self", ".", "_fmt_failure", "(", "'Unknown error while authenticating'", ")", "# Use a thread to shutdown cherrypy so we can return HTML first", "self", ".", "_shutdown_cherrypy", "(", ")", "return", "error", "if", "error", "else", "self", ".", "success_html", "%", "(", "auth_token", ")" ]
Receive an Exist response containing a verification code. Use the code to fetch the access_token.
[ "Receive", "a", "Exist", "response", "containing", "a", "verification", "code", ".", "Use", "the", "code", "to", "fetch", "the", "access_token", "." ]
python
train
cmheisel/basecampreporting
src/basecampreporting/basecamp.py
https://github.com/cmheisel/basecampreporting/blob/88ecfc6e835608650ff6be23cbf2421d224c122b/src/basecampreporting/basecamp.py#L289-L298
def create_comment(self, post_id, body): """ Create a new comment, associating it with a specific message. """ path = '/msg/create_comment' req = ET.Element('request') comment = ET.SubElement(req, 'comment') ET.SubElement(comment, 'post-id').text = str(int(post_id)) ET.SubElement(comment, 'body').text = str(body) return self._request(path, req)
[ "def", "create_comment", "(", "self", ",", "post_id", ",", "body", ")", ":", "path", "=", "'/msg/create_comment'", "req", "=", "ET", ".", "Element", "(", "'request'", ")", "comment", "=", "ET", ".", "SubElement", "(", "req", ",", "'comment'", ")", "ET", ".", "SubElement", "(", "comment", ",", "'post-id'", ")", ".", "text", "=", "str", "(", "int", "(", "post_id", ")", ")", "ET", ".", "SubElement", "(", "comment", ",", "'body'", ")", ".", "text", "=", "str", "(", "body", ")", "return", "self", ".", "_request", "(", "path", ",", "req", ")" ]
Create a new comment, associating it with a specific message.
[ "Create", "a", "new", "comment", "associating", "it", "with", "a", "specific", "message", "." ]
python
train
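A quick re-creation of the XML body the method assembles, so the wire format is visible; the post id and comment text are placeholders:

import xml.etree.ElementTree as ET

req = ET.Element('request')
comment = ET.SubElement(req, 'comment')
ET.SubElement(comment, 'post-id').text = str(int('12345'))
ET.SubElement(comment, 'body').text = 'Looks good to me.'
print(ET.tostring(req).decode())
# <request><comment><post-id>12345</post-id><body>Looks good to me.</body></comment></request>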
gwastro/pycbc
pycbc/workflow/core.py
https://github.com/gwastro/pycbc/blob/7a64cdd104d263f1b6ea0b01e6841837d05a4cb3/pycbc/workflow/core.py#L281-L302
def add_ini_profile(self, cp, sec): """Add profile from configuration file. Parameters ----------- cp : ConfigParser object The ConfigParser object holding the workflow configuration settings sec : string The section containing options for this job. """ for opt in cp.options(sec): namespace = opt.split('|')[0] if namespace == 'pycbc' or namespace == 'container': continue value = string.strip(cp.get(sec, opt)) key = opt.split('|')[1] self.add_profile(namespace, key, value, force=True) # Remove if Pegasus can apply this hint in the TC if namespace == 'hints' and key == 'execution.site': self.execution_site = value
[ "def", "add_ini_profile", "(", "self", ",", "cp", ",", "sec", ")", ":", "for", "opt", "in", "cp", ".", "options", "(", "sec", ")", ":", "namespace", "=", "opt", ".", "split", "(", "'|'", ")", "[", "0", "]", "if", "namespace", "==", "'pycbc'", "or", "namespace", "==", "'container'", ":", "continue", "value", "=", "string", ".", "strip", "(", "cp", ".", "get", "(", "sec", ",", "opt", ")", ")", "key", "=", "opt", ".", "split", "(", "'|'", ")", "[", "1", "]", "self", ".", "add_profile", "(", "namespace", ",", "key", ",", "value", ",", "force", "=", "True", ")", "# Remove if Pegasus can apply this hint in the TC", "if", "namespace", "==", "'hints'", "and", "key", "==", "'execution.site'", ":", "self", ".", "execution_site", "=", "value" ]
Add profile from configuration file. Parameters ----------- cp : ConfigParser object The ConfigParser object holding the workflow configuration settings sec : string The section containing options for this job.
[ "Add", "profile", "from", "configuration", "file", "." ]
python
train
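To make the 'namespace|key' convention concrete, a Python 3 re-creation of the parsing loop over a hypothetical ini section (the original uses the Python 2 string.strip; the section, option names, and values here are invented):

from configparser import ConfigParser

cp = ConfigParser()
cp.read_string("""
[executable]
condor|request_memory = 2048
hints|execution.site = local
pycbc|installed = False
""")
for opt in cp.options('executable'):
    namespace, key = opt.split('|')
    if namespace in ('pycbc', 'container'):
        continue  # skipped, exactly as in add_ini_profile
    print(namespace, key, cp.get('executable', opt))
# condor request_memory 2048
# hints execution.site local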
JukeboxPipeline/jukebox-core
src/jukeboxcore/addons/guerilla/guerillamgmt.py
https://github.com/JukeboxPipeline/jukebox-core/blob/bac2280ca49940355270e4b69400ce9976ab2e6f/src/jukeboxcore/addons/guerilla/guerillamgmt.py#L966-L980
def setup_task_signals(self, ): """Setup the signals for the task page :returns: None :rtype: None :raises: None """ log.debug("Setting up task page signals.") self.task_user_view_pb.clicked.connect(self.task_view_user) self.task_user_add_pb.clicked.connect(self.task_add_user) self.task_user_remove_pb.clicked.connect(self.task_remove_user) self.task_dep_view_pb.clicked.connect(self.task_view_dep) self.task_link_view_pb.clicked.connect(self.task_view_link) self.task_deadline_de.dateChanged.connect(self.task_save) self.task_status_cb.currentIndexChanged.connect(self.task_save)
[ "def", "setup_task_signals", "(", "self", ",", ")", ":", "log", ".", "debug", "(", "\"Setting up task page signals.\"", ")", "self", ".", "task_user_view_pb", ".", "clicked", ".", "connect", "(", "self", ".", "task_view_user", ")", "self", ".", "task_user_add_pb", ".", "clicked", ".", "connect", "(", "self", ".", "task_add_user", ")", "self", ".", "task_user_remove_pb", ".", "clicked", ".", "connect", "(", "self", ".", "task_remove_user", ")", "self", ".", "task_dep_view_pb", ".", "clicked", ".", "connect", "(", "self", ".", "task_view_dep", ")", "self", ".", "task_link_view_pb", ".", "clicked", ".", "connect", "(", "self", ".", "task_view_link", ")", "self", ".", "task_deadline_de", ".", "dateChanged", ".", "connect", "(", "self", ".", "task_save", ")", "self", ".", "task_status_cb", ".", "currentIndexChanged", ".", "connect", "(", "self", ".", "task_save", ")" ]
Setup the signals for the task page :returns: None :rtype: None :raises: None
[ "Setup", "the", "signals", "for", "the", "task", "page" ]
python
train
jermnelson/flask-fedora-commons
flask_fedora_commons/__init__.py
https://github.com/jermnelson/flask-fedora-commons/blob/81cee0d8c9e79fa2bdd1a101facb9e8c0f307af4/flask_fedora_commons/__init__.py#L464-L503
def replace(self, entity_id, property_name, old_value, value): """Method replaces a triple for the given entity/subject. Property name is from the schema.org vocabulary. Args: entity_id(string): Unique ID of Fedora object property_name(string): Prefix and property name i.e. schema:name old_value(string): Literal or URI of old value value(string): Literal or new value """ if not entity_id.startswith("http"): entity_uri = '/'.join([self.base_url, self.transaction, entity_id]) else: entity_uri = entity_id sparql_template = Template("""$prefix DELETE { <$entity> $prop_name $old_value } INSERT { <$entity> $prop_name $new_value } WHERE { }""") sparql = sparql_template.substitute( prefix=build_prefixes(self.namespaces), entity=entity_uri, prop_name=property_name, old_value=self.__value_format__(old_value), new_value=self.__value_format__(value)) update_request = urllib.request.Request( entity_uri, data=sparql.encode(), method='PATCH', headers={'Content-Type': 'application/sparql-update'}) response = urllib.request.urlopen(update_request) if response.code < 400: return True return False
[ "def", "replace", "(", "self", ",", "entity_id", ",", "property_name", ",", "old_value", ",", "value", ")", ":", "if", "not", "entity_id", ".", "startswith", "(", "\"http\"", ")", ":", "entity_uri", "=", "'/'", ".", "join", "(", "[", "self", ".", "base_url", ",", "self", ".", "transaction", ",", "entity_id", "]", ")", "else", ":", "entity_uri", "=", "entity_id", "sparql_template", "=", "Template", "(", "\"\"\"$prefix\n DELETE {\n <$entity> $prop_name $old_value\n } INSERT {\n <$entity> $prop_name $new_value\n } WHERE {\n }\"\"\"", ")", "sparql", "=", "sparql_template", ".", "substitute", "(", "prefix", "=", "build_prefixes", "(", "self", ".", "namespaces", ")", ",", "entity", "=", "entity_uri", ",", "prop_name", "=", "property_name", ",", "old_value", "=", "self", ".", "__value_format__", "(", "old_value", ")", ",", "new_value", "=", "self", ".", "__value_format__", "(", "value", ")", ")", "update_request", "=", "urllib", ".", "request", ".", "Request", "(", "entity_uri", ",", "data", "=", "sparql", ".", "encode", "(", ")", ",", "method", "=", "'PATCH'", ",", "headers", "=", "{", "'Content-Type'", ":", "'application/sparql-update'", "}", ")", "response", "=", "urllib", ".", "request", ".", "urlopen", "(", "update_request", ")", "if", "response", ".", "code", "<", "400", ":", "return", "True", "return", "False" ]
Method replaces a triple for the given entity/subject. Property name is from the schema.org vocabulary. Args: entity_id(string): Unique ID of Fedora object property_name(string): Prefix and property name i.e. schema:name old_value(string): Literal or URI of old value value(string): Literal or new value
[ "Method", "replaces", "a", "triple", "for", "the", "given", "entity", "/", "subject", ".", "Property", "name", "is", "from", "the", "schema", ".", "org", "vocabulary", "." ]
python
train
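To see what the Template actually emits, a small sketch re-creating it with placeholder values (the entity URI, property, and literals are invented, and the build_prefixes output is stubbed with a single prefix):

from string import Template

sparql_template = Template("""$prefix
DELETE {
    <$entity> $prop_name $old_value
} INSERT {
    <$entity> $prop_name $new_value
} WHERE {
}""")
print(sparql_template.substitute(
    prefix='PREFIX schema: <http://schema.org/>',
    entity='http://localhost:8080/rest/doc1',
    prop_name='schema:name',
    old_value='"Old title"',
    new_value='"New title"'))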
ecederstrand/exchangelib
exchangelib/properties.py
https://github.com/ecederstrand/exchangelib/blob/736347b337c239fcd6d592db5b29e819f753c1ba/exchangelib/properties.py#L560-L603
def to_server_timezone(self, timezones, for_year): """Returns the Microsoft timezone ID corresponding to this timezone. There may not be a match at all, and there may be multiple matches. If so, we return a random timezone ID. :param timezones: A list of server timezones, as returned by list(account.protocol.get_timezones(return_full_timezone_data=True)) :param for_year: :return: A Microsoft timezone ID, as a string """ candidates = set() for tz_id, tz_name, tz_periods, tz_transitions, tz_transitions_groups in timezones: candidate = self.from_server_timezone(tz_periods, tz_transitions, tz_transitions_groups, for_year) if candidate == self: log.debug('Found exact candidate: %s (%s)', tz_id, tz_name) # We prefer this timezone over anything else. Return immediately. return tz_id # Reduce list based on base bias and standard / daylight bias values if candidate.bias != self.bias: continue if candidate.standard_time is None: if self.standard_time is not None: continue else: if self.standard_time is None: continue if candidate.standard_time.bias != self.standard_time.bias: continue if candidate.daylight_time is None: if self.daylight_time is not None: continue else: if self.daylight_time is None: continue if candidate.daylight_time.bias != self.daylight_time.bias: continue log.debug('Found candidate with matching biases: %s (%s)', tz_id, tz_name) candidates.add(tz_id) if not candidates: raise ValueError('No server timezones match this timezone definition') if len(candidates) == 1: log.info('Could not find an exact timezone match for %s. Selecting the best candidate', self) else: log.warning('Could not find an exact timezone match for %s. Selecting a random candidate', self) return candidates.pop()
[ "def", "to_server_timezone", "(", "self", ",", "timezones", ",", "for_year", ")", ":", "candidates", "=", "set", "(", ")", "for", "tz_id", ",", "tz_name", ",", "tz_periods", ",", "tz_transitions", ",", "tz_transitions_groups", "in", "timezones", ":", "candidate", "=", "self", ".", "from_server_timezone", "(", "tz_periods", ",", "tz_transitions", ",", "tz_transitions_groups", ",", "for_year", ")", "if", "candidate", "==", "self", ":", "log", ".", "debug", "(", "'Found exact candidate: %s (%s)'", ",", "tz_id", ",", "tz_name", ")", "# We prefer this timezone over anything else. Return immediately.", "return", "tz_id", "# Reduce list based on base bias and standard / daylight bias values", "if", "candidate", ".", "bias", "!=", "self", ".", "bias", ":", "continue", "if", "candidate", ".", "standard_time", "is", "None", ":", "if", "self", ".", "standard_time", "is", "not", "None", ":", "continue", "else", ":", "if", "self", ".", "standard_time", "is", "None", ":", "continue", "if", "candidate", ".", "standard_time", ".", "bias", "!=", "self", ".", "standard_time", ".", "bias", ":", "continue", "if", "candidate", ".", "daylight_time", "is", "None", ":", "if", "self", ".", "daylight_time", "is", "not", "None", ":", "continue", "else", ":", "if", "self", ".", "daylight_time", "is", "None", ":", "continue", "if", "candidate", ".", "daylight_time", ".", "bias", "!=", "self", ".", "daylight_time", ".", "bias", ":", "continue", "log", ".", "debug", "(", "'Found candidate with matching biases: %s (%s)'", ",", "tz_id", ",", "tz_name", ")", "candidates", ".", "add", "(", "tz_id", ")", "if", "not", "candidates", ":", "raise", "ValueError", "(", "'No server timezones match this timezone definition'", ")", "if", "len", "(", "candidates", ")", "==", "1", ":", "log", ".", "info", "(", "'Could not find an exact timezone match for %s. Selecting the best candidate'", ",", "self", ")", "else", ":", "log", ".", "warning", "(", "'Could not find an exact timezone match for %s. Selecting a random candidate'", ",", "self", ")", "return", "candidates", ".", "pop", "(", ")" ]
Returns the Microsoft timezone ID corresponding to this timezone. There may not be a match at all, and there may be multiple matches. If so, we return a random timezone ID. :param timezones: A list of server timezones, as returned by list(account.protocol.get_timezones(return_full_timezone_data=True)) :param for_year: :return: A Microsoft timezone ID, as a string
[ "Returns", "the", "Microsoft", "timezone", "ID", "corresponding", "to", "this", "timezone", ".", "There", "may", "not", "be", "a", "match", "at", "all", "and", "there", "may", "be", "multiple", "matches", ".", "If", "so", "we", "return", "a", "random", "timezone", "ID", "." ]
python
train
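A hedged usage sketch, lifted almost verbatim from the docstring; it assumes a configured exchangelib Account named account and that tz is an instance of the timezone class this method lives on:

timezones = list(account.protocol.get_timezones(return_full_timezone_data=True))
ms_tz_id = tz.to_server_timezone(timezones, for_year=2019)  # e.g. 'W. Europe Standard Time'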
hhatto/autopep8
autopep8.py
https://github.com/hhatto/autopep8/blob/fda3bb39181437b6b8a0aa0185f21ae5f14385dd/autopep8.py#L2695-L2734
def _reflow_lines(parsed_tokens, indentation, max_line_length, start_on_prefix_line): """Reflow the lines so that it looks nice.""" if unicode(parsed_tokens[0]) == 'def': # A function definition gets indented a bit more. continued_indent = indentation + ' ' * 2 * DEFAULT_INDENT_SIZE else: continued_indent = indentation + ' ' * DEFAULT_INDENT_SIZE break_after_open_bracket = not start_on_prefix_line lines = ReformattedLines(max_line_length) lines.add_indent(len(indentation.lstrip('\r\n'))) if not start_on_prefix_line: # If splitting after the opening bracket will cause the first element # to be aligned weirdly, don't try it. first_token = get_item(parsed_tokens, 0) second_token = get_item(parsed_tokens, 1) if ( first_token and second_token and unicode(second_token)[0] == '(' and len(indentation) + len(first_token) + 1 == len(continued_indent) ): return None for item in parsed_tokens: lines.add_space_if_needed(unicode(item), equal=True) save_continued_indent = continued_indent if start_on_prefix_line and isinstance(item, Container): start_on_prefix_line = False continued_indent = ' ' * (lines.current_size() + 1) item.reflow(lines, continued_indent, break_after_open_bracket) continued_indent = save_continued_indent return lines.emit()
[ "def", "_reflow_lines", "(", "parsed_tokens", ",", "indentation", ",", "max_line_length", ",", "start_on_prefix_line", ")", ":", "if", "unicode", "(", "parsed_tokens", "[", "0", "]", ")", "==", "'def'", ":", "# A function definition gets indented a bit more.", "continued_indent", "=", "indentation", "+", "' '", "*", "2", "*", "DEFAULT_INDENT_SIZE", "else", ":", "continued_indent", "=", "indentation", "+", "' '", "*", "DEFAULT_INDENT_SIZE", "break_after_open_bracket", "=", "not", "start_on_prefix_line", "lines", "=", "ReformattedLines", "(", "max_line_length", ")", "lines", ".", "add_indent", "(", "len", "(", "indentation", ".", "lstrip", "(", "'\\r\\n'", ")", ")", ")", "if", "not", "start_on_prefix_line", ":", "# If splitting after the opening bracket will cause the first element", "# to be aligned weirdly, don't try it.", "first_token", "=", "get_item", "(", "parsed_tokens", ",", "0", ")", "second_token", "=", "get_item", "(", "parsed_tokens", ",", "1", ")", "if", "(", "first_token", "and", "second_token", "and", "unicode", "(", "second_token", ")", "[", "0", "]", "==", "'('", "and", "len", "(", "indentation", ")", "+", "len", "(", "first_token", ")", "+", "1", "==", "len", "(", "continued_indent", ")", ")", ":", "return", "None", "for", "item", "in", "parsed_tokens", ":", "lines", ".", "add_space_if_needed", "(", "unicode", "(", "item", ")", ",", "equal", "=", "True", ")", "save_continued_indent", "=", "continued_indent", "if", "start_on_prefix_line", "and", "isinstance", "(", "item", ",", "Container", ")", ":", "start_on_prefix_line", "=", "False", "continued_indent", "=", "' '", "*", "(", "lines", ".", "current_size", "(", ")", "+", "1", ")", "item", ".", "reflow", "(", "lines", ",", "continued_indent", ",", "break_after_open_bracket", ")", "continued_indent", "=", "save_continued_indent", "return", "lines", ".", "emit", "(", ")" ]
Reflow the lines so that it looks nice.
[ "Reflow", "the", "lines", "so", "that", "it", "looks", "nice", "." ]
python
train
rosenbrockc/fortpy
fortpy/code.py
https://github.com/rosenbrockc/fortpy/blob/1ed0757c52d549e41d9d44bdea68cb89529293a5/fortpy/code.py#L431-L478
def tree_find(self, symbol, origin, attribute): """Finds the code element corresponding to specified symbol by searching all modules in the parser. :arg symbol: the name of the code element to find. :arg origin: an instance of a Module element that owns the text that generated the find search. :arg attribute: one of ['dependencies', 'publics', 'members', 'types', 'executables', 'interfaces'] that specifies which collection in the module should house the symbol's element. """ #The symbol must be accessible to the origin module, otherwise #it wouldn't compile. Start there, first looking at the origin #itself and then the other modules that it depends on. #Since we will be referring to this multiple times, might as #well get a pointer to it. oattr = origin.collection(attribute) base = None lorigin = None if symbol in oattr: base = oattr[symbol] lorigin = origin else: for module in origin.dependencies: usespec = module.split(".") if len(usespec) > 1: if usespec[1] == symbol: #The dependency is to a specific element in the module, #and it matches. lorigin = self.get(usespec[0]) else: lorigin = None else: #The dependency is to the entire module! lorigin = self.get(usespec[0]) #If we have code for the origin, we can search for the #actual base object that we are interested in if lorigin is not None: lattr = lorigin.collection(attribute) if symbol in lattr: base = lattr[symbol] break #By now, we either have the item we were after or we don't have #code for the module it needs return (base, lorigin)
[ "def", "tree_find", "(", "self", ",", "symbol", ",", "origin", ",", "attribute", ")", ":", "#The symbol must be accessible to the origin module, otherwise", "#it wouldn't compile. Start there, first looking at the origin", "#itself and then the other modules that it depends on.", "#Since we will be referring to this multiple times, might as ", "#well get a pointer to it.", "oattr", "=", "origin", ".", "collection", "(", "attribute", ")", "base", "=", "None", "lorigin", "=", "None", "if", "symbol", "in", "oattr", ":", "base", "=", "oattr", "[", "symbol", "]", "lorigin", "=", "origin", "else", ":", "for", "module", "in", "origin", ".", "dependencies", ":", "usespec", "=", "module", ".", "split", "(", "\".\"", ")", "if", "len", "(", "usespec", ")", ">", "1", ":", "if", "usespec", "[", "1", "]", "==", "symbol", ":", "#The dependency is to a specific element in the module,", "#and it matches.", "lorigin", "=", "self", ".", "get", "(", "usespec", "[", "0", "]", ")", "else", ":", "lorigin", "=", "None", "else", ":", "#The dependency is to the entire module!", "lorigin", "=", "self", ".", "get", "(", "usespec", "[", "0", "]", ")", "#If we have code for the origin, we can search for the", "#actual base object that we are interested in", "if", "lorigin", "is", "not", "None", ":", "lattr", "=", "lorigin", ".", "collection", "(", "attribute", ")", "if", "symbol", "in", "lattr", ":", "base", "=", "lattr", "[", "symbol", "]", "break", "#By now, we either have the item we were after or we don't have", "#code for the module it needs", "return", "(", "base", ",", "lorigin", ")" ]
Finds the code element corresponding to specified symbol by searching all modules in the parser. :arg symbol: the name of the code element to find. :arg origin: an instance of a Module element that owns the text that generated the find search. :arg attribute: one of ['dependencies', 'publics', 'members', 'types', 'executables', 'interfaces'] that specifies which collection in the module should house the symbol's element.
[ "Finds", "the", "code", "element", "corresponding", "to", "specified", "symbol", "by", "searching", "all", "modules", "in", "the", "parser", "." ]
python
train
sethmlarson/virtualbox-python
virtualbox/library.py
https://github.com/sethmlarson/virtualbox-python/blob/706c8e3f6e3aee17eb06458e73cbb4bc2d37878b/virtualbox/library.py#L16853-L16869
def create_host_only_network_interface(self): """Creates a new adapter for Host Only Networking. out host_interface of type :class:`IHostNetworkInterface` Created host interface object. return progress of type :class:`IProgress` Progress object to track the operation completion. raises :class:`OleErrorInvalidarg` Host network interface @a name already exists. """ (progress, host_interface) = self._call("createHostOnlyNetworkInterface") progress = IProgress(progress) host_interface = IHostNetworkInterface(host_interface) return (progress, host_interface)
[ "def", "create_host_only_network_interface", "(", "self", ")", ":", "(", "progress", ",", "host_interface", ")", "=", "self", ".", "_call", "(", "\"createHostOnlyNetworkInterface\"", ")", "progress", "=", "IProgress", "(", "progress", ")", "host_interface", "=", "IHostNetworkInterface", "(", "host_interface", ")", "return", "(", "progress", ",", "host_interface", ")" ]
Creates a new adapter for Host Only Networking. out host_interface of type :class:`IHostNetworkInterface` Created host interface object. return progress of type :class:`IProgress` Progress object to track the operation completion. raises :class:`OleErrorInvalidarg` Host network interface @a name already exists.
[ "Creates", "a", "new", "adapter", "for", "Host", "Only", "Networking", "." ]
python
train
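A hedged usage sketch with the virtualbox-python bindings; it assumes the method hangs off IHost and that blocking on the returned IProgress is acceptable:

import virtualbox

vbox = virtualbox.VirtualBox()
progress, host_if = vbox.host.create_host_only_network_interface()
progress.wait_for_completion(-1)  # -1 blocks until the operation finishes
print(host_if.name)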
apple/turicreate
deps/src/libxml2-2.9.1/python/libxml2.py
https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/deps/src/libxml2-2.9.1/python/libxml2.py#L7153-L7160
def validateElement(self, doc, elem): """Try to validate the subtree under an element """ if doc is None: doc__o = None else: doc__o = doc._o if elem is None: elem__o = None else: elem__o = elem._o ret = libxml2mod.xmlValidateElement(self._o, doc__o, elem__o) return ret
[ "def", "validateElement", "(", "self", ",", "doc", ",", "elem", ")", ":", "if", "doc", "is", "None", ":", "doc__o", "=", "None", "else", ":", "doc__o", "=", "doc", ".", "_o", "if", "elem", "is", "None", ":", "elem__o", "=", "None", "else", ":", "elem__o", "=", "elem", ".", "_o", "ret", "=", "libxml2mod", ".", "xmlValidateElement", "(", "self", ".", "_o", ",", "doc__o", ",", "elem__o", ")", "return", "ret" ]
Try to validate the subtree under an element
[ "Try", "to", "validate", "the", "subtree", "under", "an", "element" ]
python
train
intuition-io/intuition
intuition/api/algorithm.py
https://github.com/intuition-io/intuition/blob/cd517e6b3b315a743eb4d0d0dc294e264ab913ce/intuition/api/algorithm.py#L114-L128
def process_orders(self, orderbook): ''' Default and constant orders processor. Overwrite it for more sophisticated strategies ''' for stock, alloc in orderbook.iteritems(): self.logger.info('{}: Ordered {} {} stocks'.format( self.datetime, stock, alloc)) if isinstance(alloc, int): self.order(stock, alloc) elif isinstance(alloc, float) and \ alloc >= -1 and alloc <= 1: self.order_percent(stock, alloc) else: self.logger.warning( '{}: invalid order for {}: {})' .format(self.datetime, stock, alloc))
[ "def", "process_orders", "(", "self", ",", "orderbook", ")", ":", "for", "stock", ",", "alloc", "in", "orderbook", ".", "iteritems", "(", ")", ":", "self", ".", "logger", ".", "info", "(", "'{}: Ordered {} {} stocks'", ".", "format", "(", "self", ".", "datetime", ",", "stock", ",", "alloc", ")", ")", "if", "isinstance", "(", "alloc", ",", "int", ")", ":", "self", ".", "order", "(", "stock", ",", "alloc", ")", "elif", "isinstance", "(", "alloc", ",", "float", ")", "and", "alloc", ">=", "-", "1", "and", "alloc", "<=", "1", ":", "self", ".", "order_percent", "(", "stock", ",", "alloc", ")", "else", ":", "self", ".", "logger", ".", "warning", "(", "'{}: invalid order for {}: {})'", ".", "format", "(", "self", ".", "datetime", ",", "stock", ",", "alloc", ")", ")" ]
Default and constant orders processor. Overwrite it for more sophisticated strategies
[ "Default", "and", "costant", "orders", "processor", ".", "Overwrite", "it", "for", "more", "sophisticated", "strategies" ]
python
train
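An illustrative orderbook for the dispatch above: ints are absolute share counts routed to order(), floats in [-1, 1] are portfolio fractions routed to order_percent(), and anything else is logged as invalid (tickers and values here are made up):

orderbook = {
    'AAPL': 100,    # int -> self.order('AAPL', 100)
    'GOOG': -0.25,  # float in [-1, 1] -> self.order_percent('GOOG', -0.25)
    'MSFT': 1.5,    # out of range -> warning: invalid order
}
# called from within the running algorithm:
# self.process_orders(orderbook)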
datadotworld/data.world-py
datadotworld/client/_swagger/apis/datasets_api.py
https://github.com/datadotworld/data.world-py/blob/ffaeb115f358731ab0b805b0c43b7ff2e3cf0a77/datadotworld/client/_swagger/apis/datasets_api.py#L282-L307
def delete_dataset(self, owner, id, **kwargs): """ Delete a dataset Permanently deletes a dataset and all data associated with it. This operation cannot be undone, although a new dataset may be created with the same id. This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please define a `callback` function to be invoked when receiving the response. >>> def callback_function(response): >>> pprint(response) >>> >>> thread = api.delete_dataset(owner, id, callback=callback_function) :param callback function: The callback function for asynchronous request. (optional) :param str owner: User name and unique identifier of the creator of a dataset or project. For example, in the URL: [https://data.world/jonloyens/an-intro-to-dataworld-dataset](https://data.world/jonloyens/an-intro-to-dataworld-dataset), jonloyens is the unique identifier of the owner. (required) :param str id: Dataset unique identifier. For example, in the URL:[https://data.world/jonloyens/an-intro-to-dataworld-dataset](https://data.world/jonloyens/an-intro-to-dataworld-dataset), an-intro-to-dataworld-dataset is the unique identifier of the dataset. (required) :return: SuccessMessage If the method is called asynchronously, returns the request thread. """ kwargs['_return_http_data_only'] = True if kwargs.get('callback'): return self.delete_dataset_with_http_info(owner, id, **kwargs) else: (data) = self.delete_dataset_with_http_info(owner, id, **kwargs) return data
[ "def", "delete_dataset", "(", "self", ",", "owner", ",", "id", ",", "*", "*", "kwargs", ")", ":", "kwargs", "[", "'_return_http_data_only'", "]", "=", "True", "if", "kwargs", ".", "get", "(", "'callback'", ")", ":", "return", "self", ".", "delete_dataset_with_http_info", "(", "owner", ",", "id", ",", "*", "*", "kwargs", ")", "else", ":", "(", "data", ")", "=", "self", ".", "delete_dataset_with_http_info", "(", "owner", ",", "id", ",", "*", "*", "kwargs", ")", "return", "data" ]
Delete a dataset Permanently deletes a dataset and all data associated with it. This operation cannot be undone, although a new dataset may be created with the same id. This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please define a `callback` function to be invoked when receiving the response. >>> def callback_function(response): >>> pprint(response) >>> >>> thread = api.delete_dataset(owner, id, callback=callback_function) :param callback function: The callback function for asynchronous request. (optional) :param str owner: User name and unique identifier of the creator of a dataset or project. For example, in the URL: [https://data.world/jonloyens/an-intro-to-dataworld-dataset](https://data.world/jonloyens/an-intro-to-dataworld-dataset), jonloyens is the unique identifier of the owner. (required) :param str id: Dataset unique identifier. For example, in the URL:[https://data.world/jonloyens/an-intro-to-dataworld-dataset](https://data.world/jonloyens/an-intro-to-dataworld-dataset), an-intro-to-dataworld-dataset is the unique identifier of the dataset. (required) :return: SuccessMessage If the method is called asynchronously, returns the request thread.
[ "Delete", "a", "dataset", "Permanently", "deletes", "a", "dataset", "and", "all", "data", "associated", "with", "it", ".", "This", "operation", "cannot", "be", "undone", "although", "a", "new", "dataset", "may", "be", "created", "with", "the", "same", "id", ".", "This", "method", "makes", "a", "synchronous", "HTTP", "request", "by", "default", ".", "To", "make", "an", "asynchronous", "HTTP", "request", "please", "define", "a", "callback", "function", "to", "be", "invoked", "when", "receiving", "the", "response", ".", ">>>", "def", "callback_function", "(", "response", ")", ":", ">>>", "pprint", "(", "response", ")", ">>>", ">>>", "thread", "=", "api", ".", "delete_dataset", "(", "owner", "id", "callback", "=", "callback_function", ")" ]
python
train
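A hedged usage sketch against the generated client; the owner/id values are the ones the docstring itself uses as examples, and api is assumed to be a ready DatasetsApi instance:

message = api.delete_dataset('jonloyens', 'an-intro-to-dataworld-dataset')

# or asynchronously, as the docstring shows:
thread = api.delete_dataset('jonloyens', 'an-intro-to-dataworld-dataset',
                            callback=lambda response: print(response))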
brocade/pynos
pynos/versions/base/yang/ietf_netconf.py
https://github.com/brocade/pynos/blob/bd8a34e98f322de3fc06750827d8bbc3a0c00380/pynos/versions/base/yang/ietf_netconf.py#L484-L494
def commit_input_confirmed(self, **kwargs): """Auto Generated Code """ config = ET.Element("config") commit = ET.Element("commit") config = commit input = ET.SubElement(commit, "input") confirmed = ET.SubElement(input, "confirmed") callback = kwargs.pop('callback', self._callback) return callback(config)
[ "def", "commit_input_confirmed", "(", "self", ",", "*", "*", "kwargs", ")", ":", "config", "=", "ET", ".", "Element", "(", "\"config\"", ")", "commit", "=", "ET", ".", "Element", "(", "\"commit\"", ")", "config", "=", "commit", "input", "=", "ET", ".", "SubElement", "(", "commit", ",", "\"input\"", ")", "confirmed", "=", "ET", ".", "SubElement", "(", "input", ",", "\"confirmed\"", ")", "callback", "=", "kwargs", ".", "pop", "(", "'callback'", ",", "self", ".", "_callback", ")", "return", "callback", "(", "config", ")" ]
Auto Generated Code
[ "Auto", "Generated", "Code" ]
python
train
chaimleib/intervaltree
intervaltree/intervaltree.py
https://github.com/chaimleib/intervaltree/blob/ffb2b1667f8b832e89324a75a175be8440504c9d/intervaltree/intervaltree.py#L1084-L1091
def containsi(self, begin, end, data=None): """ Shortcut for (Interval(begin, end, data) in tree). Completes in O(1) time. :rtype: bool """ return Interval(begin, end, data) in self
[ "def", "containsi", "(", "self", ",", "begin", ",", "end", ",", "data", "=", "None", ")", ":", "return", "Interval", "(", "begin", ",", "end", ",", "data", ")", "in", "self" ]
Shortcut for (Interval(begin, end, data) in tree). Completes in O(1) time. :rtype: bool
[ "Shortcut", "for", "(", "Interval", "(", "begin", "end", "data", ")", "in", "tree", ")", "." ]
python
train
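A quick demonstration with the public intervaltree API; note that data participates in the equality test:

from intervaltree import IntervalTree

tree = IntervalTree()
tree.addi(1, 5, 'a')
assert tree.containsi(1, 5, 'a')   # exact (begin, end, data) triple is present
assert not tree.containsi(1, 5)    # data defaults to None; (1, 5, None) was never added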
angr/angr
angr/storage/memory.py
https://github.com/angr/angr/blob/4e2f97d56af5419ee73bdb30482c8dd8ff5f3e40/angr/storage/memory.py#L341-L365
def set_state(self, state): """ Call the set_state method in SimStatePlugin class, and then perform the delayed initialization. :param state: The SimState instance """ SimStatePlugin.set_state(self, state) # Delayed initialization stack_region_map, generic_region_map = self._temp_stack_region_map, self._temp_generic_region_map if stack_region_map or generic_region_map: # Inherited from its parent self._stack_region_map = stack_region_map.copy() self._generic_region_map = generic_region_map.copy() else: if not self._abstract_backer and o.REGION_MAPPING in self.state.options: # Only the top-level SimMemory instance can have region maps. self._stack_region_map = RegionMap(True) self._generic_region_map = RegionMap(False) else: self._stack_region_map = None self._generic_region_map = None
[ "def", "set_state", "(", "self", ",", "state", ")", ":", "SimStatePlugin", ".", "set_state", "(", "self", ",", "state", ")", "# Delayed initialization", "stack_region_map", ",", "generic_region_map", "=", "self", ".", "_temp_stack_region_map", ",", "self", ".", "_temp_generic_region_map", "if", "stack_region_map", "or", "generic_region_map", ":", "# Inherited from its parent", "self", ".", "_stack_region_map", "=", "stack_region_map", ".", "copy", "(", ")", "self", ".", "_generic_region_map", "=", "generic_region_map", ".", "copy", "(", ")", "else", ":", "if", "not", "self", ".", "_abstract_backer", "and", "o", ".", "REGION_MAPPING", "in", "self", ".", "state", ".", "options", ":", "# Only the top-level SimMemory instance can have region maps.", "self", ".", "_stack_region_map", "=", "RegionMap", "(", "True", ")", "self", ".", "_generic_region_map", "=", "RegionMap", "(", "False", ")", "else", ":", "self", ".", "_stack_region_map", "=", "None", "self", ".", "_generic_region_map", "=", "None" ]
Call the set_state method in SimStatePlugin class, and then perform the delayed initialization. :param state: The SimState instance
[ "Call", "the", "set_state", "method", "in", "SimStatePlugin", "class", "and", "then", "perform", "the", "delayed", "initialization", "." ]
python
train
nvdv/vprof
vprof/stats_server.py
https://github.com/nvdv/vprof/blob/4c3ff78f8920ab10cb9c00b14143452aa09ff6bb/vprof/stats_server.py#L57-L66
def do_GET(self): """Handles HTTP GET requests.""" handler = self.uri_map.get(self.path) or self._handle_other content, content_type = handler() compressed_content = gzip.compress(content) self._send_response( 200, headers=(('Content-type', '%s; charset=utf-8' % content_type), ('Content-Encoding', 'gzip'), ('Content-Length', len(compressed_content)))) self.wfile.write(compressed_content)
[ "def", "do_GET", "(", "self", ")", ":", "handler", "=", "self", ".", "uri_map", ".", "get", "(", "self", ".", "path", ")", "or", "self", ".", "_handle_other", "content", ",", "content_type", "=", "handler", "(", ")", "compressed_content", "=", "gzip", ".", "compress", "(", "content", ")", "self", ".", "_send_response", "(", "200", ",", "headers", "=", "(", "(", "'Content-type'", ",", "'%s; charset=utf-8'", "%", "content_type", ")", ",", "(", "'Content-Encoding'", ",", "'gzip'", ")", ",", "(", "'Content-Length'", ",", "len", "(", "compressed_content", ")", ")", ")", ")", "self", ".", "wfile", ".", "write", "(", "compressed_content", ")" ]
Handles HTTP GET requests.
[ "Handles", "HTTP", "GET", "requests", "." ]
python
test
jtwhite79/pyemu
pyemu/en.py
https://github.com/jtwhite79/pyemu/blob/c504d8e7a4097cec07655a6318d275739bd8148a/pyemu/en.py#L600-L611
def copy(self): """ overload of Ensemble.copy() Returns ------- ParameterEnsemble : ParameterEnsemble """ df = super(Ensemble,self).copy() pe = ParameterEnsemble.from_dataframe(df=df,pst=self.pst.get()) pe.__istransformed = self.istransformed return pe
[ "def", "copy", "(", "self", ")", ":", "df", "=", "super", "(", "Ensemble", ",", "self", ")", ".", "copy", "(", ")", "pe", "=", "ParameterEnsemble", ".", "from_dataframe", "(", "df", "=", "df", ",", "pst", "=", "self", ".", "pst", ".", "get", "(", ")", ")", "pe", ".", "__istransformed", "=", "self", ".", "istransformed", "return", "pe" ]
overload of Ensemble.copy() Returns ------- ParameterEnsemble : ParameterEnsemble
[ "overload", "of", "Ensemble", ".", "copy", "()" ]
python
train
Clinical-Genomics/scout
scout/adapter/mongo/variant_loader.py
https://github.com/Clinical-Genomics/scout/blob/90a551e2e1653a319e654c2405c2866f93d0ebb9/scout/adapter/mongo/variant_loader.py#L491-L609
def load_variants(self, case_obj, variant_type='clinical', category='snv', rank_threshold=None, chrom=None, start=None, end=None, gene_obj=None, build='37'): """Load variants for a case into scout. Load the variants for a specific analysis type and category into scout. If no region is specified, load all variants above rank score threshold If region or gene is specified, load all variants from that region disregarding variant rank(if not specified) Args: case_obj(dict): A case from the scout database variant_type(str): 'clinical' or 'research'. Default: 'clinical' category(str): 'snv', 'str' or 'sv'. Default: 'snv' rank_threshold(float): Only load variants above this score. Default: 0 chrom(str): Load variants from a certain chromosome start(int): Specify the start position end(int): Specify the end position gene_obj(dict): A gene object from the database Returns: nr_inserted(int) """ # We need the institute object institute_id = self.institute(institute_id=case_obj['owner'])['_id'] nr_inserted = 0 variant_file = None if variant_type == 'clinical': if category == 'snv': variant_file = case_obj['vcf_files'].get('vcf_snv') elif category == 'sv': variant_file = case_obj['vcf_files'].get('vcf_sv') elif category == 'str': LOG.debug('Attempt to load STR VCF.') variant_file = case_obj['vcf_files'].get('vcf_str') elif category == 'cancer': # Currently this implies a paired tumor normal variant_file = case_obj['vcf_files'].get('vcf_cancer') elif variant_type == 'research': if category == 'snv': variant_file = case_obj['vcf_files'].get('vcf_snv_research') elif category == 'sv': variant_file = case_obj['vcf_files'].get('vcf_sv_research') elif category == 'cancer': variant_file = case_obj['vcf_files'].get('vcf_cancer_research') if not variant_file: raise SyntaxError("Vcf file does not seem to exist") # Check if there are any variants in file try: vcf_obj = VCF(variant_file) var = next(vcf_obj) except StopIteration as err: LOG.warning("Variant file %s does not include any variants", variant_file) return nr_inserted # We need to reload the file vcf_obj = VCF(variant_file) # Parse the necessary headers from vcf file rank_results_header = parse_rank_results_header(vcf_obj) vep_header = parse_vep_header(vcf_obj) # This is a dictionary to tell where ind are in vcf individual_positions = {} for i, ind in enumerate(vcf_obj.samples): individual_positions[ind] = i # Dictionary for cancer analysis sample_info = {} if category == 'cancer': for ind in case_obj['individuals']: if ind['phenotype'] == 2: sample_info[ind['individual_id']] = 'case' else: sample_info[ind['individual_id']] = 'control' # Check if a region should be uploaded region = "" if gene_obj: chrom = gene_obj['chromosome'] # Add same padding as VEP start = max(gene_obj['start'] - 5000, 0) end = gene_obj['end'] + 5000 if chrom: # We want to load all variants in the region regardless of rank score rank_threshold = rank_threshold or -1000 if not (start and end): raise SyntaxError("Specify chrom start and end") region = "{0}:{1}-{2}".format(chrom, start, end) else: rank_threshold = rank_threshold or 0 variants = vcf_obj(region) try: nr_inserted = self._load_variants( variants=variants, variant_type=variant_type, case_obj=case_obj, individual_positions=individual_positions, rank_threshold=rank_threshold, institute_id=institute_id, build=build, rank_results_header=rank_results_header, vep_header=vep_header, category=category, sample_info = sample_info ) except Exception as error: LOG.exception('unexpected error') LOG.warning("Deleting inserted variants") self.delete_variants(case_obj['_id'], variant_type) raise error self.update_variant_rank(case_obj, variant_type, category=category) return nr_inserted
[ "def", "load_variants", "(", "self", ",", "case_obj", ",", "variant_type", "=", "'clinical'", ",", "category", "=", "'snv'", ",", "rank_threshold", "=", "None", ",", "chrom", "=", "None", ",", "start", "=", "None", ",", "end", "=", "None", ",", "gene_obj", "=", "None", ",", "build", "=", "'37'", ")", ":", "# We need the institute object", "institute_id", "=", "self", ".", "institute", "(", "institute_id", "=", "case_obj", "[", "'owner'", "]", ")", "[", "'_id'", "]", "nr_inserted", "=", "0", "variant_file", "=", "None", "if", "variant_type", "==", "'clinical'", ":", "if", "category", "==", "'snv'", ":", "variant_file", "=", "case_obj", "[", "'vcf_files'", "]", ".", "get", "(", "'vcf_snv'", ")", "elif", "category", "==", "'sv'", ":", "variant_file", "=", "case_obj", "[", "'vcf_files'", "]", ".", "get", "(", "'vcf_sv'", ")", "elif", "category", "==", "'str'", ":", "LOG", ".", "debug", "(", "'Attempt to load STR VCF.'", ")", "variant_file", "=", "case_obj", "[", "'vcf_files'", "]", ".", "get", "(", "'vcf_str'", ")", "elif", "category", "==", "'cancer'", ":", "# Currently this implies a paired tumor normal", "variant_file", "=", "case_obj", "[", "'vcf_files'", "]", ".", "get", "(", "'vcf_cancer'", ")", "elif", "variant_type", "==", "'research'", ":", "if", "category", "==", "'snv'", ":", "variant_file", "=", "case_obj", "[", "'vcf_files'", "]", ".", "get", "(", "'vcf_snv_research'", ")", "elif", "category", "==", "'sv'", ":", "variant_file", "=", "case_obj", "[", "'vcf_files'", "]", ".", "get", "(", "'vcf_sv_research'", ")", "elif", "category", "==", "'cancer'", ":", "variant_file", "=", "case_obj", "[", "'vcf_files'", "]", ".", "get", "(", "'vcf_cancer_research'", ")", "if", "not", "variant_file", ":", "raise", "SyntaxError", "(", "\"Vcf file does not seem to exist\"", ")", "# Check if there are any variants in file", "try", ":", "vcf_obj", "=", "VCF", "(", "variant_file", ")", "var", "=", "next", "(", "vcf_obj", ")", "except", "StopIteration", "as", "err", ":", "LOG", ".", "warning", "(", "\"Variant file %s does not include any variants\"", ",", "variant_file", ")", "return", "nr_inserted", "# We need to reload the file", "vcf_obj", "=", "VCF", "(", "variant_file", ")", "# Parse the neccessary headers from vcf file", "rank_results_header", "=", "parse_rank_results_header", "(", "vcf_obj", ")", "vep_header", "=", "parse_vep_header", "(", "vcf_obj", ")", "# This is a dictionary to tell where ind are in vcf", "individual_positions", "=", "{", "}", "for", "i", ",", "ind", "in", "enumerate", "(", "vcf_obj", ".", "samples", ")", ":", "individual_positions", "[", "ind", "]", "=", "i", "# Dictionary for cancer analysis", "sample_info", "=", "{", "}", "if", "category", "==", "'cancer'", ":", "for", "ind", "in", "case_obj", "[", "'individuals'", "]", ":", "if", "ind", "[", "'phenotype'", "]", "==", "2", ":", "sample_info", "[", "ind", "[", "'individual_id'", "]", "]", "=", "'case'", "else", ":", "sample_info", "[", "ind", "[", "'individual_id'", "]", "]", "=", "'control'", "# Check if a region scould be uploaded", "region", "=", "\"\"", "if", "gene_obj", ":", "chrom", "=", "gene_obj", "[", "'chromosome'", "]", "# Add same padding as VEP", "start", "=", "max", "(", "gene_obj", "[", "'start'", "]", "-", "5000", ",", "0", ")", "end", "=", "gene_obj", "[", "'end'", "]", "+", "5000", "if", "chrom", ":", "# We want to load all variants in the region regardless of rank score", "rank_threshold", "=", "rank_threshold", "or", "-", "1000", "if", "not", "(", "start", "and", "end", ")", ":", "raise", 
"SyntaxError", "(", "\"Specify chrom start and end\"", ")", "region", "=", "\"{0}:{1}-{2}\"", ".", "format", "(", "chrom", ",", "start", ",", "end", ")", "else", ":", "rank_threshold", "=", "rank_threshold", "or", "0", "variants", "=", "vcf_obj", "(", "region", ")", "try", ":", "nr_inserted", "=", "self", ".", "_load_variants", "(", "variants", "=", "variants", ",", "variant_type", "=", "variant_type", ",", "case_obj", "=", "case_obj", ",", "individual_positions", "=", "individual_positions", ",", "rank_threshold", "=", "rank_threshold", ",", "institute_id", "=", "institute_id", ",", "build", "=", "build", ",", "rank_results_header", "=", "rank_results_header", ",", "vep_header", "=", "vep_header", ",", "category", "=", "category", ",", "sample_info", "=", "sample_info", ")", "except", "Exception", "as", "error", ":", "LOG", ".", "exception", "(", "'unexpected error'", ")", "LOG", ".", "warning", "(", "\"Deleting inserted variants\"", ")", "self", ".", "delete_variants", "(", "case_obj", "[", "'_id'", "]", ",", "variant_type", ")", "raise", "error", "self", ".", "update_variant_rank", "(", "case_obj", ",", "variant_type", ",", "category", "=", "category", ")", "return", "nr_inserted" ]
Load variants for a case into scout. Load the variants for a specific analysis type and category into scout. If no region is specified, load all variants above rank score threshold If region or gene is specified, load all variants from that region disregarding variant rank(if not specified) Args: case_obj(dict): A case from the scout database variant_type(str): 'clinical' or 'research'. Default: 'clinical' category(str): 'snv', 'str' or 'sv'. Default: 'snv' rank_threshold(float): Only load variants above this score. Default: 0 chrom(str): Load variants from a certain chromosome start(int): Specify the start position end(int): Specify the end position gene_obj(dict): A gene object from the database Returns: nr_inserted(int)
[ "Load", "variants", "for", "a", "case", "into", "scout", "." ]
python
test
viewflow/django-fsm
django_fsm/__init__.py
https://github.com/viewflow/django-fsm/blob/c86cd3eb949467626ffc68249ad001746333c38e/django_fsm/__init__.py#L181-L192
def conditions_met(self, instance, state): """ Check if all conditions have been met """ transition = self.get_transition(state) if transition is None: return False elif transition.conditions is None: return True else: return all(map(lambda condition: condition(instance), transition.conditions))
[ "def", "conditions_met", "(", "self", ",", "instance", ",", "state", ")", ":", "transition", "=", "self", ".", "get_transition", "(", "state", ")", "if", "transition", "is", "None", ":", "return", "False", "elif", "transition", ".", "conditions", "is", "None", ":", "return", "True", "else", ":", "return", "all", "(", "map", "(", "lambda", "condition", ":", "condition", "(", "instance", ")", ",", "transition", ".", "conditions", ")", ")" ]
Check if all conditions have been met
[ "Check", "if", "all", "conditions", "have", "been", "met" ]
python
train
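A minimal django-fsm sketch showing where conditions enter the transition; the model and condition function are invented for illustration:

from django.db import models
from django_fsm import FSMField, transition

def has_body(instance):
    # Each condition is called with the model instance and must return a bool.
    return bool(instance.body)

class Post(models.Model):
    body = models.TextField(blank=True)
    state = FSMField(default='draft')

    @transition(field=state, source='draft', target='published',
                conditions=[has_body])
    def publish(self):
        pass

# django_fsm.can_proceed(post.publish) ends up in conditions_met() above.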
ergoithz/browsepy
browsepy/manager.py
https://github.com/ergoithz/browsepy/blob/1612a930ef220fae507e1b152c531707e555bd92/browsepy/manager.py#L99-L127
def import_plugin(self, plugin): ''' Import plugin by given name, looking at :attr:`namespaces`. :param plugin: plugin module name :type plugin: str :raises PluginNotFoundError: if not found on any namespace ''' names = [ '%s%s%s' % (namespace, '' if namespace[-1] == '_' else '.', plugin) if namespace else plugin for namespace in self.namespaces ] for name in names: if name in sys.modules: return sys.modules[name] for name in names: try: __import__(name) return sys.modules[name] except (ImportError, KeyError): pass raise PluginNotFoundError( 'No plugin module %r found, tried %r' % (plugin, names), plugin, names)
[ "def", "import_plugin", "(", "self", ",", "plugin", ")", ":", "names", "=", "[", "'%s%s%s'", "%", "(", "namespace", ",", "''", "if", "namespace", "[", "-", "1", "]", "==", "'_'", "else", "'.'", ",", "plugin", ")", "if", "namespace", "else", "plugin", "for", "namespace", "in", "self", ".", "namespaces", "]", "for", "name", "in", "names", ":", "if", "name", "in", "sys", ".", "modules", ":", "return", "sys", ".", "modules", "[", "name", "]", "for", "name", "in", "names", ":", "try", ":", "__import__", "(", "name", ")", "return", "sys", ".", "modules", "[", "name", "]", "except", "(", "ImportError", ",", "KeyError", ")", ":", "pass", "raise", "PluginNotFoundError", "(", "'No plugin module %r found, tried %r'", "%", "(", "plugin", ",", "names", ")", ",", "plugin", ",", "names", ")" ]
Import plugin by given name, looking at :attr:`namespaces`. :param plugin: plugin module name :type plugin: str :raises PluginNotFoundError: if not found on any namespace
[ "Import", "plugin", "by", "given", "name", "looking", "at", ":", "attr", ":", "namespaces", "." ]
python
train
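An illustrative walk-through of the candidate names, under the assumption that the manager's namespaces resemble browsepy's defaults:

# With self.namespaces = ('browsepy.plugin', 'browsepy_', '') and plugin = 'player':
#   'browsepy.plugin.player'  (dotted namespace, '.' inserted)
#   'browsepy_player'         (namespace ending in '_', nothing inserted)
#   'player'                  (empty namespace, bare name)
# The first name already present in sys.modules wins; otherwise the first that
# imports cleanly; otherwise PluginNotFoundError lists every name attempted.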
Robpol86/libnl
example_scan_access_points.py
https://github.com/Robpol86/libnl/blob/274e9fdaa39822d06ef70b799ed4a95937a4d923/example_scan_access_points.py#L102-L118
def callback_trigger(msg, arg): """Called when the kernel is done scanning. Only signals if it was successful or if it failed. No other data. Positional arguments: msg -- nl_msg class instance containing the data sent by the kernel. arg -- mutable integer (ctypes.c_int()) to update with results. Returns: An integer, value of NL_SKIP. It tells libnl to stop calling other callbacks for this message and proceed with processing the next kernel message. """ gnlh = genlmsghdr(nlmsg_data(nlmsg_hdr(msg))) if gnlh.cmd == nl80211.NL80211_CMD_SCAN_ABORTED: arg.value = 1 # The scan was aborted for some reason. elif gnlh.cmd == nl80211.NL80211_CMD_NEW_SCAN_RESULTS: arg.value = 0 # The scan completed successfully. `callback_dump` will collect the results later. return libnl.handlers.NL_SKIP
[ "def", "callback_trigger", "(", "msg", ",", "arg", ")", ":", "gnlh", "=", "genlmsghdr", "(", "nlmsg_data", "(", "nlmsg_hdr", "(", "msg", ")", ")", ")", "if", "gnlh", ".", "cmd", "==", "nl80211", ".", "NL80211_CMD_SCAN_ABORTED", ":", "arg", ".", "value", "=", "1", "# The scan was aborted for some reason.", "elif", "gnlh", ".", "cmd", "==", "nl80211", ".", "NL80211_CMD_NEW_SCAN_RESULTS", ":", "arg", ".", "value", "=", "0", "# The scan completed successfully. `callback_dump` will collect the results later.", "return", "libnl", ".", "handlers", ".", "NL_SKIP" ]
Called when the kernel is done scanning. Only signals if it was successful or if it failed. No other data. Positional arguments: msg -- nl_msg class instance containing the data sent by the kernel. arg -- mutable integer (ctypes.c_int()) to update with results. Returns: An integer, value of NL_SKIP. It tells libnl to stop calling other callbacks for this message and proceed with processing the next kernel message.
[ "Called", "when", "the", "kernel", "is", "done", "scanning", ".", "Only", "signals", "if", "it", "was", "successful", "or", "if", "it", "failed", ".", "No", "other", "data", "." ]
python
train
UCL-INGI/INGInious
base-containers/base/inginious/rst.py
https://github.com/UCL-INGI/INGInious/blob/cbda9a9c7f2b8e8eb1e6d7d51f0d18092086300c/base-containers/base/inginious/rst.py#L18-L25
def get_imageblock(filename, format=''): """ Generates rst raw block for given image filename and format""" _, extension = os.path.splitext(filename) with open(filename, "rb") as image_file: encoded_string = base64.b64encode(image_file.read()).decode('utf-8') return '\n\n.. raw:: html\n\n\t<img src="data:image/' + (format if format else extension[1:]) + ';base64,' + encoded_string +'">\n'
[ "def", "get_imageblock", "(", "filename", ",", "format", "=", "''", ")", ":", "_", ",", "extension", "=", "os", ".", "path", ".", "splitext", "(", "filename", ")", "with", "open", "(", "filename", ",", "\"rb\"", ")", "as", "image_file", ":", "encoded_string", "=", "base64", ".", "b64encode", "(", "image_file", ".", "read", "(", ")", ")", ".", "decode", "(", "'utf-8'", ")", "return", "'\\n\\n.. raw:: html\\n\\n\\t<img src=\"data:image/'", "+", "(", "format", "if", "format", "else", "extension", "[", "1", ":", "]", ")", "+", "';base64,'", "+", "encoded_string", "+", "'\">\\n'" ]
Generates rst raw block for given image filename and format
[ "Generates", "rst", "raw", "block", "for", "given", "image", "filename", "and", "format" ]
python
train
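Usage is a one-liner; the filenames below are hypothetical:

block = get_imageblock("figure1.png")                   # format taken from extension
block_jpg = get_imageblock("photo.dat", format="jpeg")  # format given explicitly
print(block.splitlines()[2])                            # -> .. raw:: html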
SheffieldML/GPy
GPy/kern/src/multidimensional_integral_limits.py
https://github.com/SheffieldML/GPy/blob/54c32d79d289d622fb18b898aee65a2a431d90cf/GPy/kern/src/multidimensional_integral_limits.py#L111-L120
def Kdiag(self, X): """I've used the fact that we call this method for K_ff when finding the covariance as a hack so I know if I should return K_ff or K_xx. In this case we're returning K_ff!! $K_{ff}^{post} = K_{ff} - K_{fx} K_{xx}^{-1} K_{xf}$""" K_ff = np.ones(X.shape[0]) for i,x in enumerate(X): for il,l in enumerate(self.lengthscale): idx = il*2 K_ff[i] *= self.k_ff(x[idx],x[idx],l) return K_ff * self.variances[0]
[ "def", "Kdiag", "(", "self", ",", "X", ")", ":", "K_ff", "=", "np", ".", "ones", "(", "X", ".", "shape", "[", "0", "]", ")", "for", "i", ",", "x", "in", "enumerate", "(", "X", ")", ":", "for", "il", ",", "l", "in", "enumerate", "(", "self", ".", "lengthscale", ")", ":", "idx", "=", "il", "*", "2", "K_ff", "[", "i", "]", "*=", "self", ".", "k_ff", "(", "x", "[", "idx", "]", ",", "x", "[", "idx", "]", ",", "l", ")", "return", "K_ff", "*", "self", ".", "variances", "[", "0", "]" ]
I've used the fact that we call this method for K_ff when finding the covariance as a hack so I know if I should return K_ff or K_xx. In this case we're returning K_ff!! $K_{ff}^{post} = K_{ff} - K_{fx} K_{xx}^{-1} K_{xf}$
[ "I", "ve", "used", "the", "fact", "that", "we", "call", "this", "method", "for", "K_ff", "when", "finding", "the", "covariance", "as", "a", "hack", "so", "I", "know", "if", "I", "should", "return", "K_ff", "or", "K_xx", ".", "In", "this", "case", "we", "re", "returning", "K_ff!!", "$K_", "{", "ff", "}", "^", "{", "post", "}", "=", "K_", "{", "ff", "}", "-", "K_", "{", "fx", "}", "K_", "{", "xx", "}", "^", "{", "-", "1", "}", "K_", "{", "xf", "}", "$" ]
python
train
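A toy numpy illustration of the posterior formula the docstring cites (stand-in matrices, not GPy objects), showing why the "diag" requested here has to be the diagonal of K_ff:

import numpy as np

K_ff = np.eye(3)                # prior covariance of f (stand-in values)
K_fx = 0.5 * np.ones((3, 3))    # cross covariance (stand-in values)
K_xx = np.eye(3) + 0.5          # covariance of x (stand-in values)
K_post = K_ff - K_fx @ np.linalg.inv(K_xx) @ K_fx.T
print(np.diag(K_post))          # posterior variances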
hydraplatform/hydra-base
hydra_base/util/dataset_util.py
https://github.com/hydraplatform/hydra-base/blob/9251ff7946505f7a272c87837390acd1c435bc6e/hydra_base/util/dataset_util.py#L95-L165
def _get_val(val, full=False): """ Get the value(s) of a dataset as a single value or as 1-d list of values. In the special case of timeseries, when a check is for time-based criteria, you can return the entire timeseries. """ try: val = val.strip() except: pass logging.debug("%s, type=%s", val, type(val)) if isinstance(val, float): return val if isinstance(val, int): return val if isinstance(val, np.ndarray): return list(val) try: val = float(val) return val except: pass try: val = int(val) return val except: pass if type(val) == pd.DataFrame: if full: return val newval = [] values = val.values for v in values: newv = _get_val(v) if type(newv) == list: newval.extend(newv) else: newval.append(newv) val = newval elif type(val) == dict: if full: return val newval = [] for v in val.values(): newv = _get_val(v) if type(newv) == list: newval.extend(newv) else: newval.append(newv) val = newval elif type(val) == list or type(val) == np.ndarray: newval = [] for arr_val in val: v = _get_val(arr_val) newval.append(v) val = newval return val
[ "def", "_get_val", "(", "val", ",", "full", "=", "False", ")", ":", "try", ":", "val", "=", "val", ".", "strip", "(", ")", "except", ":", "pass", "logging", ".", "debug", "(", "\"%s, type=%s\"", ",", "val", ",", "type", "(", "val", ")", ")", "if", "isinstance", "(", "val", ",", "float", ")", ":", "return", "val", "if", "isinstance", "(", "val", ",", "int", ")", ":", "return", "val", "if", "isinstance", "(", "val", ",", "np", ".", "ndarray", ")", ":", "return", "list", "(", "val", ")", "try", ":", "val", "=", "float", "(", "val", ")", "return", "val", "except", ":", "pass", "try", ":", "val", "=", "int", "(", "val", ")", "return", "val", "except", ":", "pass", "if", "type", "(", "val", ")", "==", "pd", ".", "DataFrame", ":", "if", "full", ":", "return", "val", "newval", "=", "[", "]", "values", "=", "val", ".", "values", "for", "v", "in", "values", ":", "newv", "=", "_get_val", "(", "v", ")", "if", "type", "(", "newv", ")", "==", "list", ":", "newval", ".", "extend", "(", "newv", ")", "else", ":", "newval", ".", "append", "(", "newv", ")", "val", "=", "newval", "elif", "type", "(", "val", ")", "==", "dict", ":", "if", "full", ":", "return", "val", "newval", "=", "[", "]", "for", "v", "in", "val", ".", "values", "(", ")", ":", "newv", "=", "_get_val", "(", "v", ")", "if", "type", "(", "newv", ")", "==", "list", ":", "newval", ".", "extend", "(", "newv", ")", "else", ":", "newval", ".", "append", "(", "newv", ")", "val", "=", "newval", "elif", "type", "(", "val", ")", "==", "list", "or", "type", "(", "val", ")", "==", "np", ".", "ndarray", ":", "newval", "=", "[", "]", "for", "arr_val", "in", "val", ":", "v", "=", "_get_val", "(", "arr_val", ")", "newval", ".", "append", "(", "v", ")", "val", "=", "newval", "return", "val" ]
Get the value(s) of a dataset as a single value or as 1-d list of values. In the special case of timeseries, when a check is for time-based criteria, you can return the entire timeseries.
[ "Get", "the", "value", "(", "s", ")", "of", "a", "dataset", "as", "a", "single", "value", "or", "as", "1", "-", "d", "list", "of", "values", ".", "In", "the", "special", "case", "of", "timeseries", "when", "a", "check", "is", "for", "time", "-", "based", "criteria", "you", "can", "return", "the", "entire", "timeseries", "." ]
python
train
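Illustrative calls (all input values invented for the example):

import numpy as np
import pandas as pd

_get_val(" 3.5 ")                       # -> 3.5 (stripped, then coerced to float)
_get_val(np.array([1, 2, 3]))           # -> [1, 2, 3]
_get_val({"a": [1, 2], "b": 3})         # -> [1, 2, 3] (flattened to a 1-d list)
_get_val(pd.DataFrame({"x": [1.0, 2.0]}), full=True)  # -> the DataFrame itself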
iotile/coretools
iotileemulate/iotile/emulate/reference/controller_features/tile_manager.py
https://github.com/iotile/coretools/blob/2d794f5f1346b841b0dcd16c9d284e9bf2f3c6ec/iotileemulate/iotile/emulate/reference/controller_features/tile_manager.py#L86-L92
def clear_to_reset(self, config_vars): """Clear to the state immediately after a reset.""" super(TileManagerState, self).clear_to_reset(config_vars) self.registered_tiles = self.registered_tiles[:1] self.safe_mode = False self.debug_mode = False
[ "def", "clear_to_reset", "(", "self", ",", "config_vars", ")", ":", "super", "(", "TileManagerState", ",", "self", ")", ".", "clear_to_reset", "(", "config_vars", ")", "self", ".", "registered_tiles", "=", "self", ".", "registered_tiles", "[", ":", "1", "]", "self", ".", "safe_mode", "=", "False", "self", ".", "debug_mode", "=", "False" ]
Clear to the state immediately after a reset.
[ "Clear", "to", "the", "state", "immediately", "after", "a", "reset", "." ]
python
train
thiagopbueno/rddl2tf
rddl2tf/compiler.py
https://github.com/thiagopbueno/rddl2tf/blob/f7c03d3a74d2663807c1e23e04eeed2e85166b71/rddl2tf/compiler.py#L736-L784
def _compile_arithmetic_expression(self,
                                   expr: Expression,
                                   scope: Dict[str, TensorFluent],
                                   batch_size: Optional[int] = None,
                                   noise: Optional[List[tf.Tensor]] = None) -> TensorFluent:
    '''Compile an arithmetic expression `expr` into a TensorFluent
    in the given `scope` with optional batch size.

    Args:
        expr (:obj:`rddl2tf.expr.Expression`): A RDDL arithmetic expression.
        scope (Dict[str, :obj:`rddl2tf.fluent.TensorFluent`]): A fluent scope.
        batch_size (Optional[size]): The batch size.

    Returns:
        :obj:`rddl2tf.fluent.TensorFluent`: The compiled expression as a TensorFluent.
    '''
    etype = expr.etype
    args = expr.args

    if len(args) == 1:
        etype2op = {
            '+': lambda x: x,
            '-': lambda x: -x
        }

        if etype[1] not in etype2op:
            raise ValueError('Invalid unary arithmetic expression:\n{}'.format(expr))

        op = etype2op[etype[1]]
        x = self._compile_expression(args[0], scope, batch_size, noise)
        fluent = op(x)

    else:
        etype2op = {
            '+': lambda x, y: x + y,
            '-': lambda x, y: x - y,
            '*': lambda x, y: x * y,
            '/': lambda x, y: x / y,
        }

        if etype[1] not in etype2op:
            raise ValueError('Invalid binary arithmetic expression:\n{}'.format(expr))

        op = etype2op[etype[1]]
        x = self._compile_expression(args[0], scope, batch_size, noise)
        y = self._compile_expression(args[1], scope, batch_size, noise)
        fluent = op(x, y)

    return fluent
[ "def", "_compile_arithmetic_expression", "(", "self", ",", "expr", ":", "Expression", ",", "scope", ":", "Dict", "[", "str", ",", "TensorFluent", "]", ",", "batch_size", ":", "Optional", "[", "int", "]", "=", "None", ",", "noise", ":", "Optional", "[", "List", "[", "tf", ".", "Tensor", "]", "]", "=", "None", ")", "->", "TensorFluent", ":", "etype", "=", "expr", ".", "etype", "args", "=", "expr", ".", "args", "if", "len", "(", "args", ")", "==", "1", ":", "etype2op", "=", "{", "'+'", ":", "lambda", "x", ":", "x", ",", "'-'", ":", "lambda", "x", ":", "-", "x", "}", "if", "etype", "[", "1", "]", "not", "in", "etype2op", ":", "raise", "ValueError", "(", "'Invalid binary arithmetic expression:\\n{}'", ".", "format", "(", "expr", ")", ")", "op", "=", "etype2op", "[", "etype", "[", "1", "]", "]", "x", "=", "self", ".", "_compile_expression", "(", "args", "[", "0", "]", ",", "scope", ",", "batch_size", ",", "noise", ")", "fluent", "=", "op", "(", "x", ")", "else", ":", "etype2op", "=", "{", "'+'", ":", "lambda", "x", ",", "y", ":", "x", "+", "y", ",", "'-'", ":", "lambda", "x", ",", "y", ":", "x", "-", "y", ",", "'*'", ":", "lambda", "x", ",", "y", ":", "x", "*", "y", ",", "'/'", ":", "lambda", "x", ",", "y", ":", "x", "/", "y", ",", "}", "if", "etype", "[", "1", "]", "not", "in", "etype2op", ":", "raise", "ValueError", "(", "'Invalid binary arithmetic expression:\\n{}'", ".", "format", "(", "expr", ")", ")", "op", "=", "etype2op", "[", "etype", "[", "1", "]", "]", "x", "=", "self", ".", "_compile_expression", "(", "args", "[", "0", "]", ",", "scope", ",", "batch_size", ",", "noise", ")", "y", "=", "self", ".", "_compile_expression", "(", "args", "[", "1", "]", ",", "scope", ",", "batch_size", ",", "noise", ")", "fluent", "=", "op", "(", "x", ",", "y", ")", "return", "fluent" ]
Compile an arithmetic expression `expr` into a TensorFluent in the given `scope` with optional batch size. Args: expr (:obj:`rddl2tf.expr.Expression`): A RDDL arithmetic expression. scope (Dict[str, :obj:`rddl2tf.fluent.TensorFluent`]): A fluent scope. batch_size (Optional[size]): The batch size. Returns: :obj:`rddl2tf.fluent.TensorFluent`: The compiled expression as a TensorFluent.
[ "Compile", "an", "arithmetic", "expression", "expr", "into", "a", "TensorFluent", "in", "the", "given", "scope", "with", "optional", "batch", "size", "." ]
python
train
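The unary/binary table-dispatch idiom used above can be shown standalone, without rddl2tf or TensorFlow (the helper name and operator set are invented for this sketch):

UNARY = {'+': lambda x: x, '-': lambda x: -x}
BINARY = {'+': lambda x, y: x + y, '-': lambda x, y: x - y,
          '*': lambda x, y: x * y, '/': lambda x, y: x / y}

def eval_arith(op, *args):
    # Pick the lookup table from the arity, then the lambda from the symbol.
    table = UNARY if len(args) == 1 else BINARY
    if op not in table:
        raise ValueError('Invalid arithmetic operator: {}'.format(op))
    return table[op](*args)

assert eval_arith('-', 5) == -5
assert eval_arith('*', 3, 4) == 12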
saltstack/salt
salt/cloud/clouds/vmware.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/cloud/clouds/vmware.py#L1664-L1688
def list_datastore_full(kwargs=None, call=None, datastore=None): ''' Returns a dictionary with basic information for the given datastore CLI Example: .. code-block:: bash salt-cloud -f list_datastore_full my-vmware-config datastore=datastore-name ''' if call != 'function': raise SaltCloudSystemExit( 'The list_datastore_full function must be called with ' '-f or --function.' ) if kwargs: datastore = kwargs.get('datastore', None) if not datastore: raise SaltCloudSystemExit( 'The list_datastore_full function requires a datastore' ) return {datastore: salt.utils.vmware.list_datastore_full(_get_si(), datastore)}
[ "def", "list_datastore_full", "(", "kwargs", "=", "None", ",", "call", "=", "None", ",", "datastore", "=", "None", ")", ":", "if", "call", "!=", "'function'", ":", "raise", "SaltCloudSystemExit", "(", "'The list_datastore_full function must be called with '", "'-f or --function.'", ")", "if", "kwargs", ":", "datastore", "=", "kwargs", ".", "get", "(", "'datastore'", ",", "None", ")", "if", "not", "datastore", ":", "raise", "SaltCloudSystemExit", "(", "'The list_datastore_full function requires a datastore'", ")", "return", "{", "datastore", ":", "salt", ".", "utils", ".", "vmware", ".", "list_datastore_full", "(", "_get_si", "(", ")", ",", "datastore", ")", "}" ]
Returns a dictionary with basic information for the given datastore CLI Example: .. code-block:: bash salt-cloud -f list_datastore_full my-vmware-config datastore=datastore-name
[ "Returns", "a", "dictionary", "with", "basic", "information", "for", "the", "given", "datastore" ]
python
train
mbedmicro/pyOCD
pyocd/probe/stlink/detect/windows.py
https://github.com/mbedmicro/pyOCD/blob/41a174718a9739f3cbe785c2ba21cb7fd1310c6f/pyocd/probe/stlink/detect/windows.py#L189-L209
def _vid_pid_path_to_usb_info(vid_pid_path):
    """! Provide the vendor ID and product ID of a device based on its entry in the registry
    @return Returns {'vendor_id': '<vendor ID>', 'product_id': '<product ID>'}
    @details If the vendor ID or product ID can't be determined, they will be
    returned as None.
    """
    result = {"vendor_id": None, "product_id": None}

    for component in vid_pid_path.split("&"):
        component_part = component.lower().split("_")

        if len(component_part) != 2:
            logger.debug("Unexpected VID/PID string structure %s", component)
            break

        if component_part[0] == "vid":
            result["vendor_id"] = component_part[1]
        elif component_part[0] == "pid":
            result["product_id"] = component_part[1]

    return result
[ "def", "_vid_pid_path_to_usb_info", "(", "vid_pid_path", ")", ":", "result", "=", "{", "\"vendor_id\"", ":", "None", ",", "\"product_id\"", ":", "None", "}", "for", "component", "in", "vid_pid_path", ".", "split", "(", "\"&\"", ")", ":", "component_part", "=", "component", ".", "lower", "(", ")", ".", "split", "(", "\"_\"", ")", "if", "len", "(", "component_part", ")", "!=", "2", ":", "logger", ".", "debug", "(", "\"Unexpected VID/PID string structure %s\"", ",", "component", ")", "break", "if", "component_part", "[", "0", "]", "==", "\"vid\"", ":", "result", "[", "\"vendor_id\"", "]", "=", "component_part", "[", "1", "]", "elif", "component_part", "[", "0", "]", "==", "\"pid\"", ":", "result", "[", "\"product_id\"", "]", "=", "component_part", "[", "1", "]", "return", "result" ]
! Provide the vendor ID and product ID of a device based on its entry in the registry @return Returns {'vendor_id': '<vendor ID>', 'product_id': '<product ID>'} @details If the vendor ID or product ID can't be determined, they will be returned as None.
[ "!", "Provide", "the", "vendor", "ID", "and", "product", "ID", "of", "a", "device", "based", "on", "its", "entry", "in", "the", "registry" ]
python
train
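A quick check with a made-up registry fragment:

info = _vid_pid_path_to_usb_info("VID_0483&PID_374B")
assert info == {"vendor_id": "0483", "product_id": "374b"}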
christophertbrown/bioscripts
ctbBio/rRNA_copies.py
https://github.com/christophertbrown/bioscripts/blob/83b2566b3a5745437ec651cd6cafddd056846240/ctbBio/rRNA_copies.py#L18-L29
def rna_bases(rna_cov, scaffold, bases, line): """ determine if read overlaps with rna, if so count bases """ start = int(line[3]) stop = start + bases - 1 if scaffold not in rna_cov: return rna_cov for pos in rna_cov[scaffold][2]: ol = get_overlap([start, stop], pos) rna_cov[scaffold][0] += ol return rna_cov
[ "def", "rna_bases", "(", "rna_cov", ",", "scaffold", ",", "bases", ",", "line", ")", ":", "start", "=", "int", "(", "line", "[", "3", "]", ")", "stop", "=", "start", "+", "bases", "-", "1", "if", "scaffold", "not", "in", "rna_cov", ":", "return", "rna_cov", "for", "pos", "in", "rna_cov", "[", "scaffold", "]", "[", "2", "]", ":", "ol", "=", "get_overlap", "(", "[", "start", ",", "stop", "]", ",", "pos", ")", "rna_cov", "[", "scaffold", "]", "[", "0", "]", "+=", "ol", "return", "rna_cov" ]
determine if read overlaps with rna, if so count bases
[ "determine", "if", "read", "overlaps", "with", "rna", "if", "so", "count", "bases" ]
python
train
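A self-contained sketch; get_overlap is stubbed here with an inclusive-interval version (the real helper lives elsewhere in ctbBio) and all coordinates are invented:

def get_overlap(a, b):
    # bases shared by two inclusive intervals [start, stop]
    return max(0, min(a[1], b[1]) - max(a[0], b[0]) + 1)

rna_cov = {"scaffold_1": [0, 0, [(100, 200)]]}    # [overlap bases, ..., rRNA regions]
sam_fields = ["read1", "0", "scaffold_1", "150"]  # POS is the 4th SAM field
rna_bases(rna_cov, "scaffold_1", 100, sam_fields)
print(rna_cov["scaffold_1"][0])                   # 51 overlapping bases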
StackStorm/pybind
pybind/nos/v6_0_2f/brocade_entity_rpc/__init__.py
https://github.com/StackStorm/pybind/blob/44c467e71b2b425be63867aba6e6fa28b2cfe7fb/pybind/nos/v6_0_2f/brocade_entity_rpc/__init__.py#L100-L126
def _set_get_contained_in_ID(self, v, load=False): """ Setter method for get_contained_in_ID, mapped from YANG variable /brocade_entity_rpc/get_contained_in_ID (rpc) If this variable is read-only (config: false) in the source YANG file, then _set_get_contained_in_ID is considered as a private method. Backends looking to populate this variable should do so via calling thisObj._set_get_contained_in_ID() directly. YANG Description: This is a function that returns the slot/container name/ID, where this managed device is 'contained in'. The managed device here, is typically 1 Rack Unit (RU) device. """ if hasattr(v, "_utype"): v = v._utype(v) try: t = YANGDynClass(v,base=get_contained_in_ID.get_contained_in_ID, is_leaf=True, yang_name="get-contained-in-ID", rest_name="get-contained-in-ID", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, extensions={u'tailf-common': {u'hidden': u'rpccmd', u'actionpoint': u'get_container_id_action_point'}}, namespace='urn:brocade.com:mgmt:brocade-entity', defining_module='brocade-entity', yang_type='rpc', is_config=True) except (TypeError, ValueError): raise ValueError({ 'error-string': """get_contained_in_ID must be of a type compatible with rpc""", 'defined-type': "rpc", 'generated-type': """YANGDynClass(base=get_contained_in_ID.get_contained_in_ID, is_leaf=True, yang_name="get-contained-in-ID", rest_name="get-contained-in-ID", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, extensions={u'tailf-common': {u'hidden': u'rpccmd', u'actionpoint': u'get_container_id_action_point'}}, namespace='urn:brocade.com:mgmt:brocade-entity', defining_module='brocade-entity', yang_type='rpc', is_config=True)""", }) self.__get_contained_in_ID = t if hasattr(self, '_set'): self._set()
[ "def", "_set_get_contained_in_ID", "(", "self", ",", "v", ",", "load", "=", "False", ")", ":", "if", "hasattr", "(", "v", ",", "\"_utype\"", ")", ":", "v", "=", "v", ".", "_utype", "(", "v", ")", "try", ":", "t", "=", "YANGDynClass", "(", "v", ",", "base", "=", "get_contained_in_ID", ".", "get_contained_in_ID", ",", "is_leaf", "=", "True", ",", "yang_name", "=", "\"get-contained-in-ID\"", ",", "rest_name", "=", "\"get-contained-in-ID\"", ",", "parent", "=", "self", ",", "path_helper", "=", "self", ".", "_path_helper", ",", "extmethods", "=", "self", ".", "_extmethods", ",", "register_paths", "=", "False", ",", "extensions", "=", "{", "u'tailf-common'", ":", "{", "u'hidden'", ":", "u'rpccmd'", ",", "u'actionpoint'", ":", "u'get_container_id_action_point'", "}", "}", ",", "namespace", "=", "'urn:brocade.com:mgmt:brocade-entity'", ",", "defining_module", "=", "'brocade-entity'", ",", "yang_type", "=", "'rpc'", ",", "is_config", "=", "True", ")", "except", "(", "TypeError", ",", "ValueError", ")", ":", "raise", "ValueError", "(", "{", "'error-string'", ":", "\"\"\"get_contained_in_ID must be of a type compatible with rpc\"\"\"", ",", "'defined-type'", ":", "\"rpc\"", ",", "'generated-type'", ":", "\"\"\"YANGDynClass(base=get_contained_in_ID.get_contained_in_ID, is_leaf=True, yang_name=\"get-contained-in-ID\", rest_name=\"get-contained-in-ID\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, extensions={u'tailf-common': {u'hidden': u'rpccmd', u'actionpoint': u'get_container_id_action_point'}}, namespace='urn:brocade.com:mgmt:brocade-entity', defining_module='brocade-entity', yang_type='rpc', is_config=True)\"\"\"", ",", "}", ")", "self", ".", "__get_contained_in_ID", "=", "t", "if", "hasattr", "(", "self", ",", "'_set'", ")", ":", "self", ".", "_set", "(", ")" ]
Setter method for get_contained_in_ID, mapped from YANG variable /brocade_entity_rpc/get_contained_in_ID (rpc) If this variable is read-only (config: false) in the source YANG file, then _set_get_contained_in_ID is considered as a private method. Backends looking to populate this variable should do so via calling thisObj._set_get_contained_in_ID() directly. YANG Description: This is a function that returns the slot/container name/ID, where this managed device is 'contained in'. The managed device here, is typically 1 Rack Unit (RU) device.
[ "Setter", "method", "for", "get_contained_in_ID", "mapped", "from", "YANG", "variable", "/", "brocade_entity_rpc", "/", "get_contained_in_ID", "(", "rpc", ")", "If", "this", "variable", "is", "read", "-", "only", "(", "config", ":", "false", ")", "in", "the", "source", "YANG", "file", "then", "_set_get_contained_in_ID", "is", "considered", "as", "a", "private", "method", ".", "Backends", "looking", "to", "populate", "this", "variable", "should", "do", "so", "via", "calling", "thisObj", ".", "_set_get_contained_in_ID", "()", "directly", "." ]
python
train
caesar0301/relogger
relogger/config_parser.py
https://github.com/caesar0301/relogger/blob/40b722ad2115ac6a179e2cc4eb0c88333f5114de/relogger/config_parser.py#L149-L154
def _detect_loop(self):
    """ Detect loops in the flow table; raise an error if one is present """
    for source, dests in self.flowtable.items():
        if source in dests:
            raise conferr('Loops detected: %s --> %s' % (source, source))
[ "def", "_detect_loop", "(", "self", ")", ":", "for", "source", ",", "dests", "in", "self", ".", "flowtable", ".", "items", "(", ")", ":", "if", "source", "in", "dests", ":", "raise", "conferr", "(", "'Loops detected: %s --> %s'", "%", "(", "source", ",", "source", ")", ")" ]
Detect loops in the flow table; raise an error if one is present
[ "detect", "loops", "in", "flow", "table", "raise", "error", "if", "being", "present" ]
python
train
fabaff/python-mystrom
pymystrom/cli.py
https://github.com/fabaff/python-mystrom/blob/86410f8952104651ef76ad37c84c29740c50551e/pymystrom/cli.py#L157-L160
def color(ip, mac, hue, saturation, value): """Switch the bulb on with the given color.""" bulb = MyStromBulb(ip, mac) bulb.set_color_hsv(hue, saturation, value)
[ "def", "color", "(", "ip", ",", "mac", ",", "hue", ",", "saturation", ",", "value", ")", ":", "bulb", "=", "MyStromBulb", "(", "ip", ",", "mac", ")", "bulb", ".", "set_color_hsv", "(", "hue", ",", "saturation", ",", "value", ")" ]
Switch the bulb on with the given color.
[ "Switch", "the", "bulb", "on", "with", "the", "given", "color", "." ]
python
train
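Called directly it is just a function (the full module wraps it in a click command); the address, MAC, and HSV values below are placeholders:

# Pure red at full saturation and brightness on a hypothetical bulb.
color("192.168.0.40", "5CCF7FA0B540", hue=0, saturation=100, value=100)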
pyrogram/pyrogram
pyrogram/client/methods/bots/answer_callback_query.py
https://github.com/pyrogram/pyrogram/blob/e7258a341ba905cfa86264c22040654db732ec1c/pyrogram/client/methods/bots/answer_callback_query.py#L24-L70
def answer_callback_query( self, callback_query_id: str, text: str = None, show_alert: bool = None, url: str = None, cache_time: int = 0 ): """Use this method to send answers to callback queries sent from inline keyboards. The answer will be displayed to the user as a notification at the top of the chat screen or as an alert. Args: callback_query_id (``str``): Unique identifier for the query to be answered. text (``str``): Text of the notification. If not specified, nothing will be shown to the user, 0-200 characters. show_alert (``bool``): If true, an alert will be shown by the client instead of a notification at the top of the chat screen. Defaults to False. url (``str``): URL that will be opened by the user's client. If you have created a Game and accepted the conditions via @Botfather, specify the URL that opens your game – note that this will only work if the query comes from a callback_game button. Otherwise, you may use links like t.me/your_bot?start=XXXX that open your bot with a parameter. cache_time (``int``): The maximum amount of time in seconds that the result of the callback query may be cached client-side. Telegram apps will support caching starting in version 3.14. Defaults to 0. Returns: True, on success. Raises: :class:`RPCError <pyrogram.RPCError>` in case of a Telegram RPC error. """ return self.send( functions.messages.SetBotCallbackAnswer( query_id=int(callback_query_id), cache_time=cache_time, alert=show_alert or None, message=text, url=url ) )
[ "def", "answer_callback_query", "(", "self", ",", "callback_query_id", ":", "str", ",", "text", ":", "str", "=", "None", ",", "show_alert", ":", "bool", "=", "None", ",", "url", ":", "str", "=", "None", ",", "cache_time", ":", "int", "=", "0", ")", ":", "return", "self", ".", "send", "(", "functions", ".", "messages", ".", "SetBotCallbackAnswer", "(", "query_id", "=", "int", "(", "callback_query_id", ")", ",", "cache_time", "=", "cache_time", ",", "alert", "=", "show_alert", "or", "None", ",", "message", "=", "text", ",", "url", "=", "url", ")", ")" ]
Use this method to send answers to callback queries sent from inline keyboards. The answer will be displayed to the user as a notification at the top of the chat screen or as an alert. Args: callback_query_id (``str``): Unique identifier for the query to be answered. text (``str``): Text of the notification. If not specified, nothing will be shown to the user, 0-200 characters. show_alert (``bool``): If true, an alert will be shown by the client instead of a notification at the top of the chat screen. Defaults to False. url (``str``): URL that will be opened by the user's client. If you have created a Game and accepted the conditions via @Botfather, specify the URL that opens your game – note that this will only work if the query comes from a callback_game button. Otherwise, you may use links like t.me/your_bot?start=XXXX that open your bot with a parameter. cache_time (``int``): The maximum amount of time in seconds that the result of the callback query may be cached client-side. Telegram apps will support caching starting in version 3.14. Defaults to 0. Returns: True, on success. Raises: :class:`RPCError <pyrogram.RPCError>` in case of a Telegram RPC error.
[ "Use", "this", "method", "to", "send", "answers", "to", "callback", "queries", "sent", "from", "inline", "keyboards", ".", "The", "answer", "will", "be", "displayed", "to", "the", "user", "as", "a", "notification", "at", "the", "top", "of", "the", "chat", "screen", "or", "as", "an", "alert", "." ]
python
train
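Typical use inside a callback query handler (`app` is a pyrogram.Client; the reply text is arbitrary):

@app.on_callback_query()
def handle(client, callback_query):
    client.answer_callback_query(callback_query.id, text="Done!", show_alert=False)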
chezou/tabula-py
tabula/file_util.py
https://github.com/chezou/tabula-py/blob/e61d46ee3c93bb40396e48dac5a9493e898f561a/tabula/file_util.py#L24-L63
def localize_file(path_or_buffer):
    '''Ensure the target file is available locally.

    If the target file is remote, this function fetches it into local storage.

    Args:
        path_or_buffer (str):
            File path, file-like object, or URL of the target file.

    Returns:
        filename (str): file name in local storage
        temporary_file_flag (bool): temporary file flag
    '''

    path_or_buffer = _stringify_path(path_or_buffer)

    if _is_url(path_or_buffer):
        req = urlopen(path_or_buffer)
        filename = os.path.basename(req.geturl())
        # Compare by value, not identity: `is not` against a string literal is a bug.
        if os.path.splitext(filename)[-1] != ".pdf":
            pid = os.getpid()
            filename = "{0}.pdf".format(pid)

        with open(filename, 'wb') as f:
            shutil.copyfileobj(req, f)

        return filename, True

    elif is_file_like(path_or_buffer):
        pid = os.getpid()
        filename = "{0}.pdf".format(pid)

        with open(filename, 'wb') as f:
            shutil.copyfileobj(path_or_buffer, f)

        return filename, True

    # File path case
    else:
        return os.path.expanduser(path_or_buffer), False
[ "def", "localize_file", "(", "path_or_buffer", ")", ":", "path_or_buffer", "=", "_stringify_path", "(", "path_or_buffer", ")", "if", "_is_url", "(", "path_or_buffer", ")", ":", "req", "=", "urlopen", "(", "path_or_buffer", ")", "filename", "=", "os", ".", "path", ".", "basename", "(", "req", ".", "geturl", "(", ")", ")", "if", "os", ".", "path", ".", "splitext", "(", "filename", ")", "[", "-", "1", "]", "is", "not", "\".pdf\"", ":", "pid", "=", "os", ".", "getpid", "(", ")", "filename", "=", "\"{0}.pdf\"", ".", "format", "(", "pid", ")", "with", "open", "(", "filename", ",", "'wb'", ")", "as", "f", ":", "shutil", ".", "copyfileobj", "(", "req", ",", "f", ")", "return", "filename", ",", "True", "elif", "is_file_like", "(", "path_or_buffer", ")", ":", "pid", "=", "os", ".", "getpid", "(", ")", "filename", "=", "\"{0}.pdf\"", ".", "format", "(", "pid", ")", "with", "open", "(", "filename", ",", "'wb'", ")", "as", "f", ":", "shutil", ".", "copyfileobj", "(", "path_or_buffer", ",", "f", ")", "return", "filename", ",", "True", "# File path case", "else", ":", "return", "os", ".", "path", ".", "expanduser", "(", "path_or_buffer", ")", ",", "False" ]
Ensure the target file is available locally. If the target file is remote, this function fetches it into local storage. Args: path_or_buffer (str): File path, file-like object, or URL of the target file. Returns: filename (str): file name in local storage temporary_file_flag (bool): temporary file flag
[ "Ensure", "localize", "target", "file", "." ]
python
train
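The three accepted input kinds, with hypothetical values; the returned flag tells the caller whether the local copy is temporary and should be deleted afterwards:

name, is_tmp = localize_file("~/reports/table.pdf")          # expanded path, False
name, is_tmp = localize_file("https://example.com/doc.pdf")  # fetched copy, True
with open("local.pdf", "rb") as f:
    name, is_tmp = localize_file(f)                          # buffered copy, True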
saltstack/salt
salt/cloud/clouds/vmware.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/cloud/clouds/vmware.py#L3737-L3811
def create_folder(kwargs=None, call=None):
    '''
    Create the specified folder path in this VMware environment

    .. note::

        To create a Host and Cluster Folder under a Datacenter, specify
        ``path="/yourDatacenterName/host/yourFolderName"``

        To create a Network Folder under a Datacenter, specify
        ``path="/yourDatacenterName/network/yourFolderName"``

        To create a Storage Folder under a Datacenter, specify
        ``path="/yourDatacenterName/datastore/yourFolderName"``

        To create a VM and Template Folder under a Datacenter, specify
        ``path="/yourDatacenterName/vm/yourFolderName"``

    CLI Example:

    .. code-block:: bash

        salt-cloud -f create_folder my-vmware-config path="/Local/a/b/c"
        salt-cloud -f create_folder my-vmware-config path="/MyDatacenter/vm/MyVMFolder"
        salt-cloud -f create_folder my-vmware-config path="/MyDatacenter/host/MyHostFolder"
        salt-cloud -f create_folder my-vmware-config path="/MyDatacenter/network/MyNetworkFolder"
        salt-cloud -f create_folder my-vmware-config path="/MyDatacenter/storage/MyStorageFolder"
    '''
    if call != 'function':
        raise SaltCloudSystemExit(
            'The create_folder function must be called with '
            '-f or --function.'
        )

    # Get the service instance object
    si = _get_si()

    folder_path = kwargs.get('path') if kwargs and 'path' in kwargs else None

    if not folder_path:
        raise SaltCloudSystemExit(
            'You must specify a non-empty folder path.'
        )

    folder_refs = []
    inventory_path = '/'
    path_exists = True

    # Split the path in a list and loop over it to check for its existence
    for index, folder_name in enumerate(os.path.normpath(folder_path.strip('/')).split('/')):
        inventory_path = os.path.join(inventory_path, folder_name)
        folder_ref = si.content.searchIndex.FindByInventoryPath(inventoryPath=inventory_path)

        if isinstance(folder_ref, vim.Folder):
            # This is a folder that exists so just append and skip it
            log.debug("Path %s/ exists in the inventory", inventory_path)
            folder_refs.append(folder_ref)
        elif isinstance(folder_ref, vim.Datacenter):
            # This is a datacenter that exists so just append and skip it
            log.debug("Path %s/ exists in the inventory", inventory_path)
            folder_refs.append(folder_ref)
        else:
            path_exists = False
            if not folder_refs:
                # If this is the first folder, create it under the rootFolder
                log.debug("Creating folder %s under rootFolder in the inventory", folder_name)
                folder_refs.append(si.content.rootFolder.CreateFolder(folder_name))
            else:
                # Create the folder under the parent folder
                log.debug("Creating path %s/ in the inventory", inventory_path)
                folder_refs.append(folder_refs[index-1].CreateFolder(folder_name))

    if path_exists:
        return {inventory_path: 'specified path already exists'}
    return {inventory_path: 'created the specified path'}
[ "def", "create_folder", "(", "kwargs", "=", "None", ",", "call", "=", "None", ")", ":", "if", "call", "!=", "'function'", ":", "raise", "SaltCloudSystemExit", "(", "'The create_folder function must be called with '", "'-f or --function.'", ")", "# Get the service instance object", "si", "=", "_get_si", "(", ")", "folder_path", "=", "kwargs", ".", "get", "(", "'path'", ")", "if", "kwargs", "and", "'path'", "in", "kwargs", "else", "None", "if", "not", "folder_path", ":", "raise", "SaltCloudSystemExit", "(", "'You must specify a non empty folder path.'", ")", "folder_refs", "=", "[", "]", "inventory_path", "=", "'/'", "path_exists", "=", "True", "# Split the path in a list and loop over it to check for its existence", "for", "index", ",", "folder_name", "in", "enumerate", "(", "os", ".", "path", ".", "normpath", "(", "folder_path", ".", "strip", "(", "'/'", ")", ")", ".", "split", "(", "'/'", ")", ")", ":", "inventory_path", "=", "os", ".", "path", ".", "join", "(", "inventory_path", ",", "folder_name", ")", "folder_ref", "=", "si", ".", "content", ".", "searchIndex", ".", "FindByInventoryPath", "(", "inventoryPath", "=", "inventory_path", ")", "if", "isinstance", "(", "folder_ref", ",", "vim", ".", "Folder", ")", ":", "# This is a folder that exists so just append and skip it", "log", ".", "debug", "(", "\"Path %s/ exists in the inventory\"", ",", "inventory_path", ")", "folder_refs", ".", "append", "(", "folder_ref", ")", "elif", "isinstance", "(", "folder_ref", ",", "vim", ".", "Datacenter", ")", ":", "# This is a datacenter that exists so just append and skip it", "log", ".", "debug", "(", "\"Path %s/ exists in the inventory\"", ",", "inventory_path", ")", "folder_refs", ".", "append", "(", "folder_ref", ")", "else", ":", "path_exists", "=", "False", "if", "not", "folder_refs", ":", "# If this is the first folder, create it under the rootFolder", "log", ".", "debug", "(", "\"Creating folder %s under rootFolder in the inventory\"", ",", "folder_name", ")", "folder_refs", ".", "append", "(", "si", ".", "content", ".", "rootFolder", ".", "CreateFolder", "(", "folder_name", ")", ")", "else", ":", "# Create the folder under the parent folder", "log", ".", "debug", "(", "\"Creating path %s/ in the inventory\"", ",", "inventory_path", ")", "folder_refs", ".", "append", "(", "folder_refs", "[", "index", "-", "1", "]", ".", "CreateFolder", "(", "folder_name", ")", ")", "if", "path_exists", ":", "return", "{", "inventory_path", ":", "'specfied path already exists'", "}", "return", "{", "inventory_path", ":", "'created the specified path'", "}" ]
Create the specified folder path in this VMware environment .. note:: To create a Host and Cluster Folder under a Datacenter, specify ``path="/yourDatacenterName/host/yourFolderName"`` To create a Network Folder under a Datacenter, specify ``path="/yourDatacenterName/network/yourFolderName"`` To create a Storage Folder under a Datacenter, specify ``path="/yourDatacenterName/datastore/yourFolderName"`` To create a VM and Template Folder under a Datacenter, specify ``path="/yourDatacenterName/vm/yourFolderName"`` CLI Example: .. code-block:: bash salt-cloud -f create_folder my-vmware-config path="/Local/a/b/c" salt-cloud -f create_folder my-vmware-config path="/MyDatacenter/vm/MyVMFolder" salt-cloud -f create_folder my-vmware-config path="/MyDatacenter/host/MyHostFolder" salt-cloud -f create_folder my-vmware-config path="/MyDatacenter/network/MyNetworkFolder" salt-cloud -f create_folder my-vmware-config path="/MyDatacenter/storage/MyStorageFolder"
[ "Create", "the", "specified", "folder", "path", "in", "this", "VMware", "environment" ]
python
train
github/octodns
octodns/provider/dyn.py
https://github.com/github/octodns/blob/65ee60491e22e6bb0a2aa08f7069c6ecf6c3fee6/octodns/provider/dyn.py#L156-L162
def flush_zone(cls, zone_name): '''Flushes the zone cache, if there is one''' cls.log.debug('flush_zone: zone_name=%s', zone_name) try: del cls._cache[zone_name] except KeyError: pass
[ "def", "flush_zone", "(", "cls", ",", "zone_name", ")", ":", "cls", ".", "log", ".", "debug", "(", "'flush_zone: zone_name=%s'", ",", "zone_name", ")", "try", ":", "del", "cls", ".", "_cache", "[", "zone_name", "]", "except", "KeyError", ":", "pass" ]
Flushes the zone cache, if there is one
[ "Flushes", "the", "zone", "cache", "if", "there", "is", "one" ]
python
train
DataMedSci/mcpartools
setup.py
https://github.com/DataMedSci/mcpartools/blob/84f869094d05bf70f09e8aaeca671ddaa1c56ec4/setup.py#L84-L97
def get_version(): """ Get project version (using versioneer) :return: string containing version """ setup_versioneer() clean_cache() import versioneer version = versioneer.get_version() parsed_version = parse_version(version) if '*@' in str(parsed_version): import time version += str(int(time.time())) return version
[ "def", "get_version", "(", ")", ":", "setup_versioneer", "(", ")", "clean_cache", "(", ")", "import", "versioneer", "version", "=", "versioneer", ".", "get_version", "(", ")", "parsed_version", "=", "parse_version", "(", "version", ")", "if", "'*@'", "in", "str", "(", "parsed_version", ")", ":", "import", "time", "version", "+=", "str", "(", "int", "(", "time", ".", "time", "(", ")", ")", ")", "return", "version" ]
Get project version (using versioneer) :return: string containing version
[ "Get", "project", "version", "(", "using", "versioneer", ")", ":", "return", ":", "string", "containing", "version" ]
python
train
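Inside setup.py it feeds the package metadata (the package name below is a placeholder):

from setuptools import setup

setup(name="example-pkg", version=get_version())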
brentp/cruzdb
cruzdb/models.py
https://github.com/brentp/cruzdb/blob/9068d46e25952f4a929dde0242beb31fa4c7e89a/cruzdb/models.py#L527-L542
def sequence(self, per_exon=False): """ Return the sequence for this feature. if per-exon is True, return an array of exon sequences This sequence is never reverse complemented """ db = self.db if not per_exon: start = self.txStart + 1 return _sequence(db, self.chrom, start, self.txEnd) else: # TODO: use same strategy as cds_sequence to reduce # of requests. seqs = [] for start, end in self.exons: seqs.append(_sequence(db, self.chrom, start + 1, end)) return seqs
[ "def", "sequence", "(", "self", ",", "per_exon", "=", "False", ")", ":", "db", "=", "self", ".", "db", "if", "not", "per_exon", ":", "start", "=", "self", ".", "txStart", "+", "1", "return", "_sequence", "(", "db", ",", "self", ".", "chrom", ",", "start", ",", "self", ".", "txEnd", ")", "else", ":", "# TODO: use same strategy as cds_sequence to reduce # of requests.", "seqs", "=", "[", "]", "for", "start", ",", "end", "in", "self", ".", "exons", ":", "seqs", ".", "append", "(", "_sequence", "(", "db", ",", "self", ".", "chrom", ",", "start", "+", "1", ",", "end", ")", ")", "return", "seqs" ]
Return the sequence for this feature. if per-exon is True, return an array of exon sequences This sequence is never reverse complemented
[ "Return", "the", "sequence", "for", "this", "feature", ".", "if", "per", "-", "exon", "is", "True", "return", "an", "array", "of", "exon", "sequences", "This", "sequence", "is", "never", "reverse", "complemented" ]
python
train
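A hedged cruzdb example (table and gene name are illustrative, and the call hits UCSC over the network):

from cruzdb import Genome

g = Genome(db="hg19")
gene = g.refGene.filter_by(name2="TP53").first()
full_seq = gene.sequence()                  # one string for txStart..txEnd
exon_seqs = gene.sequence(per_exon=True)    # list with one string per exon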
buildbot/buildbot
master/buildbot/steps/package/deb/lintian.py
https://github.com/buildbot/buildbot/blob/5df3cfae6d760557d99156633c32b1822a1e130c/master/buildbot/steps/package/deb/lintian.py#L84-L98
def createSummary(self, log): """ Create nice summary logs. @param log: log to create summary off of. """ warnings = self.obs.warnings errors = self.obs.errors if warnings: self.addCompleteLog('%d Warnings' % len(warnings), "\n".join(warnings)) self.warnCount = len(warnings) if errors: self.addCompleteLog('%d Errors' % len(errors), "\n".join(errors)) self.errCount = len(errors)
[ "def", "createSummary", "(", "self", ",", "log", ")", ":", "warnings", "=", "self", ".", "obs", ".", "warnings", "errors", "=", "self", ".", "obs", ".", "errors", "if", "warnings", ":", "self", ".", "addCompleteLog", "(", "'%d Warnings'", "%", "len", "(", "warnings", ")", ",", "\"\\n\"", ".", "join", "(", "warnings", ")", ")", "self", ".", "warnCount", "=", "len", "(", "warnings", ")", "if", "errors", ":", "self", ".", "addCompleteLog", "(", "'%d Errors'", "%", "len", "(", "errors", ")", ",", "\"\\n\"", ".", "join", "(", "errors", ")", ")", "self", ".", "errCount", "=", "len", "(", "errors", ")" ]
Create nice summary logs. @param log: log to create summary off of.
[ "Create", "nice", "summary", "logs", "." ]
python
train
stevearc/dql
dql/output.py
https://github.com/stevearc/dql/blob/e9d3aa22873076dae5ebd02e35318aa996b1e56a/dql/output.py#L249-L253
def wait(self): """ Block for user input """ self._write_footer() super(ColumnFormat, self).wait() self._write_header()
[ "def", "wait", "(", "self", ")", ":", "self", ".", "_write_footer", "(", ")", "super", "(", "ColumnFormat", ",", "self", ")", ".", "wait", "(", ")", "self", ".", "_write_header", "(", ")" ]
Block for user input
[ "Block", "for", "user", "input" ]
python
train
mar10/wsgidav
wsgidav/util.py
https://github.com/mar10/wsgidav/blob/cec0d84222fc24bea01be1cea91729001963f172/wsgidav/util.py#L246-L257
def get_module_logger(moduleName, defaultToVerbose=False):
    """Create a module logger that can be en/disabled by configuration.

    @see: util.init_logging
    """
    # moduleName = moduleName.split(".")[-1]
    if not moduleName.startswith(BASE_LOGGER_NAME + "."):
        moduleName = BASE_LOGGER_NAME + "." + moduleName
    logger = logging.getLogger(moduleName)
    # if logger.level == logging.NOTSET and not defaultToVerbose:
    #     logger.setLevel(logging.INFO)  # Disable debug messages by default
    return logger
[ "def", "get_module_logger", "(", "moduleName", ",", "defaultToVerbose", "=", "False", ")", ":", "# moduleName = moduleName.split(\".\")[-1]", "if", "not", "moduleName", ".", "startswith", "(", "BASE_LOGGER_NAME", "+", "\".\"", ")", ":", "moduleName", "=", "BASE_LOGGER_NAME", "+", "\".\"", "+", "moduleName", "logger", "=", "logging", ".", "getLogger", "(", "moduleName", ")", "# if logger.level == logging.NOTSET and not defaultToVerbose:", "# logger.setLevel(logging.INFO) # Disable debug messages by default", "return", "logger" ]
Create a module logger that can be en/disabled by configuration. @see: util.init_logging
[ "Create", "a", "module", "logger", "that", "can", "be", "en", "/", "disabled", "by", "configuration", "." ]
python
valid
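Typical module-level use:

_logger = get_module_logger(__name__)    # name gains the "wsgidav." prefix if missing
_logger.debug("lock manager initialized")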
PaulHancock/Aegean
AegeanTools/fitting.py
https://github.com/PaulHancock/Aegean/blob/185d2b4a51b48441a1df747efc9a5271c79399fd/AegeanTools/fitting.py#L760-L792
def bias_correct(params, data, acf=None): """ Calculate and apply a bias correction to the given fit parameters Parameters ---------- params : lmfit.Parameters The model parameters. These will be modified. data : 2d-array The data which was used in the fitting acf : 2d-array ACF of the data. Default = None. Returns ------- None See Also -------- :func:`AegeanTools.fitting.RB_bias` """ bias = RB_bias(data, params, acf=acf) i = 0 for p in params: if 'theta' in p: continue if params[p].vary: params[p].value -= bias[i] i += 1 return
[ "def", "bias_correct", "(", "params", ",", "data", ",", "acf", "=", "None", ")", ":", "bias", "=", "RB_bias", "(", "data", ",", "params", ",", "acf", "=", "acf", ")", "i", "=", "0", "for", "p", "in", "params", ":", "if", "'theta'", "in", "p", ":", "continue", "if", "params", "[", "p", "]", ".", "vary", ":", "params", "[", "p", "]", ".", "value", "-=", "bias", "[", "i", "]", "i", "+=", "1", "return" ]
Calculate and apply a bias correction to the given fit parameters Parameters ---------- params : lmfit.Parameters The model parameters. These will be modified. data : 2d-array The data which was used in the fitting acf : 2d-array ACF of the data. Default = None. Returns ------- None See Also -------- :func:`AegeanTools.fitting.RB_bias`
[ "Calculate", "and", "apply", "a", "bias", "correction", "to", "the", "given", "fit", "parameters" ]
python
train
PmagPy/PmagPy
dialogs/pmag_er_magic_dialogs.py
https://github.com/PmagPy/PmagPy/blob/c7984f8809bf40fe112e53dcc311a33293b62d0b/dialogs/pmag_er_magic_dialogs.py#L1131-L1142
def onLeftClickLabel(self, event): """ When user clicks on a grid label, determine if it is a row label or a col label. Pass along the event to the appropriate function. (It will either highlight a column for editing all values, or highlight a row for deletion). """ if event.Col == -1 and event.Row == -1: pass elif event.Col < 0: self.onSelectRow(event) elif event.Row < 0: self.drop_down_menu.on_label_click(event)
[ "def", "onLeftClickLabel", "(", "self", ",", "event", ")", ":", "if", "event", ".", "Col", "==", "-", "1", "and", "event", ".", "Row", "==", "-", "1", ":", "pass", "elif", "event", ".", "Col", "<", "0", ":", "self", ".", "onSelectRow", "(", "event", ")", "elif", "event", ".", "Row", "<", "0", ":", "self", ".", "drop_down_menu", ".", "on_label_click", "(", "event", ")" ]
When user clicks on a grid label, determine if it is a row label or a col label. Pass along the event to the appropriate function. (It will either highlight a column for editing all values, or highlight a row for deletion).
[ "When", "user", "clicks", "on", "a", "grid", "label", "determine", "if", "it", "is", "a", "row", "label", "or", "a", "col", "label", ".", "Pass", "along", "the", "event", "to", "the", "appropriate", "function", ".", "(", "It", "will", "either", "highlight", "a", "column", "for", "editing", "all", "values", "or", "highlight", "a", "row", "for", "deletion", ")", "." ]
python
train
senaite/senaite.core
bika/lims/browser/header_table.py
https://github.com/senaite/senaite.core/blob/7602ce2ea2f9e81eb34e20ce17b98a3e70713f85/bika/lims/browser/header_table.py#L164-L203
def get_field_visibility_mode(self, field): """Returns "view" or "edit" modes, together with the place within where this field has to be rendered, based on the permissions the current user has for the context and the field passed in """ fallback_mode = ("hidden", "hidden") widget = field.widget # TODO This needs to be done differently # Check where the field has to be located layout = widget.isVisible(self.context, "header_table") if layout in ["invisible", "hidden"]: return fallback_mode # Check permissions. We want to display field (either in view or edit # modes) only if the current user has enough privileges. if field.checkPermission("edit", self.context): mode = "edit" sm = getSecurityManager() if not sm.checkPermission(ModifyPortalContent, self.context): logger.warn("Permission '{}' granted for the edition of '{}', " "but 'Modify portal content' not granted" .format(field.write_permission, field.getName())) elif field.checkPermission("view", self.context): mode = "view" else: return fallback_mode # Check if the field needs to be displayed or not, even if the user has # the permissions for edit or view. This may depend on criteria other # than permissions (e.g. visibility depending on a setup setting, etc.) if widget.isVisible(self.context, mode, field=field) != "visible": if mode == "view": return fallback_mode # The field cannot be rendered in edit mode, but maybe can be # rendered in view mode. mode = "view" if widget.isVisible(self.context, mode, field=field) != "visible": return fallback_mode return (mode, layout)
[ "def", "get_field_visibility_mode", "(", "self", ",", "field", ")", ":", "fallback_mode", "=", "(", "\"hidden\"", ",", "\"hidden\"", ")", "widget", "=", "field", ".", "widget", "# TODO This needs to be done differently", "# Check where the field has to be located", "layout", "=", "widget", ".", "isVisible", "(", "self", ".", "context", ",", "\"header_table\"", ")", "if", "layout", "in", "[", "\"invisible\"", ",", "\"hidden\"", "]", ":", "return", "fallback_mode", "# Check permissions. We want to display field (either in view or edit", "# modes) only if the current user has enough privileges.", "if", "field", ".", "checkPermission", "(", "\"edit\"", ",", "self", ".", "context", ")", ":", "mode", "=", "\"edit\"", "sm", "=", "getSecurityManager", "(", ")", "if", "not", "sm", ".", "checkPermission", "(", "ModifyPortalContent", ",", "self", ".", "context", ")", ":", "logger", ".", "warn", "(", "\"Permission '{}' granted for the edition of '{}', \"", "\"but 'Modify portal content' not granted\"", ".", "format", "(", "field", ".", "write_permission", ",", "field", ".", "getName", "(", ")", ")", ")", "elif", "field", ".", "checkPermission", "(", "\"view\"", ",", "self", ".", "context", ")", ":", "mode", "=", "\"view\"", "else", ":", "return", "fallback_mode", "# Check if the field needs to be displayed or not, even if the user has", "# the permissions for edit or view. This may depend on criteria other", "# than permissions (e.g. visibility depending on a setup setting, etc.)", "if", "widget", ".", "isVisible", "(", "self", ".", "context", ",", "mode", ",", "field", "=", "field", ")", "!=", "\"visible\"", ":", "if", "mode", "==", "\"view\"", ":", "return", "fallback_mode", "# The field cannot be rendered in edit mode, but maybe can be", "# rendered in view mode.", "mode", "=", "\"view\"", "if", "widget", ".", "isVisible", "(", "self", ".", "context", ",", "mode", ",", "field", "=", "field", ")", "!=", "\"visible\"", ":", "return", "fallback_mode", "return", "(", "mode", ",", "layout", ")" ]
Returns "view" or "edit" modes, together with the place within where this field has to be rendered, based on the permissions the current user has for the context and the field passed in
[ "Returns", "view", "or", "edit", "modes", "together", "with", "the", "place", "within", "where", "this", "field", "has", "to", "be", "rendered", "based", "on", "the", "permissions", "the", "current", "user", "has", "for", "the", "context", "and", "the", "field", "passed", "in" ]
python
train
twitterdev/tweet_parser
tweet_parser/tweet_checking.py
https://github.com/twitterdev/tweet_parser/blob/3435de8367d36b483a6cfd8d46cc28694ee8a42e/tweet_parser/tweet_checking.py#L129-L148
def check_tweet(tweet, validation_checking=False): """ Ensures a tweet is valid and determines the type of format for the tweet. Args: tweet (dict/Tweet): the tweet payload validation_checking (bool): check for valid key structure in a tweet. """ if "id" not in tweet: raise NotATweetError("This text has no 'id' key") original_format = is_original_format(tweet) if original_format: _check_original_format_tweet(tweet, validation_checking=validation_checking) else: _check_activity_streams_tweet(tweet, validation_checking=validation_checking) return original_format
[ "def", "check_tweet", "(", "tweet", ",", "validation_checking", "=", "False", ")", ":", "if", "\"id\"", "not", "in", "tweet", ":", "raise", "NotATweetError", "(", "\"This text has no 'id' key\"", ")", "original_format", "=", "is_original_format", "(", "tweet", ")", "if", "original_format", ":", "_check_original_format_tweet", "(", "tweet", ",", "validation_checking", "=", "validation_checking", ")", "else", ":", "_check_activity_streams_tweet", "(", "tweet", ",", "validation_checking", "=", "validation_checking", ")", "return", "original_format" ]
Ensures a tweet is valid and determines the type of format for the tweet. Args: tweet (dict/Tweet): the tweet payload validation_checking (bool): check for valid key structure in a tweet.
[ "Ensures", "a", "tweet", "is", "valid", "and", "determines", "the", "type", "of", "format", "for", "the", "tweet", "." ]
python
train
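The cheapest failure mode is easy to demonstrate; the import path for the error class is an assumption about the package layout:

from tweet_parser.tweet_parser_errors import NotATweetError  # assumed location

try:
    check_tweet({"text": "no id field here"})
except NotATweetError as exc:
    print(exc)  # This text has no 'id' key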
kstaniek/condoor
condoor/drivers/XE.py
https://github.com/kstaniek/condoor/blob/77c054b29d4e286c1d7aca2c74dff86b805e1fae/condoor/drivers/XE.py#L34-L73
def reload(self, reload_timeout=300, save_config=True): """Reload the device. CSM_DUT#reload System configuration has been modified. Save? [yes/no]: yes Building configuration... [OK] Proceed with reload? [confirm] """ SAVE_CONFIG = re.compile(re.escape("System configuration has been modified. Save? [yes/no]: ")) PROCEED = re.compile(re.escape("Proceed with reload? [confirm]")) IMAGE = re.compile("Passing control to the main image") BOOTSTRAP = re.compile("System Bootstrap") LOCATED = re.compile("Located .*") RETURN = re.compile(re.escape("Press RETURN to get started!")) response = "yes" if save_config else "no" # 0 1 2 3 4 events = [SAVE_CONFIG, PROCEED, LOCATED, RETURN, self.username_re, self.password_re, BOOTSTRAP, IMAGE, TIMEOUT, EOF] # 5 6 7 8 9 transitions = [ (SAVE_CONFIG, [0], 1, partial(a_send_line, response), 60), (PROCEED, [0, 1], 2, partial(a_send, "\r"), reload_timeout), (LOCATED, [2], 2, a_message_callback, reload_timeout), # if timeout try to send the reload command again (TIMEOUT, [0], 0, partial(a_send_line, self.reload_cmd), 10), (BOOTSTRAP, [2], -1, a_disconnect, reload_timeout), (IMAGE, [2], 3, a_message_callback, reload_timeout), (self.username_re, [3], -1, a_return_and_reconnect, 0), (self.password_re, [3], -1, a_return_and_reconnect, 0), (RETURN, [3], -1, a_return_and_reconnect, 0), (TIMEOUT, [2], -1, a_disconnect, 0), (EOF, [0, 1, 2, 3], -1, a_disconnect, 0) ] fsm = FSM("IOS-RELOAD", self.device, events, transitions, timeout=10) return fsm.run()
[ "def", "reload", "(", "self", ",", "reload_timeout", "=", "300", ",", "save_config", "=", "True", ")", ":", "SAVE_CONFIG", "=", "re", ".", "compile", "(", "re", ".", "escape", "(", "\"System configuration has been modified. Save? [yes/no]: \"", ")", ")", "PROCEED", "=", "re", ".", "compile", "(", "re", ".", "escape", "(", "\"Proceed with reload? [confirm]\"", ")", ")", "IMAGE", "=", "re", ".", "compile", "(", "\"Passing control to the main image\"", ")", "BOOTSTRAP", "=", "re", ".", "compile", "(", "\"System Bootstrap\"", ")", "LOCATED", "=", "re", ".", "compile", "(", "\"Located .*\"", ")", "RETURN", "=", "re", ".", "compile", "(", "re", ".", "escape", "(", "\"Press RETURN to get started!\"", ")", ")", "response", "=", "\"yes\"", "if", "save_config", "else", "\"no\"", "# 0 1 2 3 4", "events", "=", "[", "SAVE_CONFIG", ",", "PROCEED", ",", "LOCATED", ",", "RETURN", ",", "self", ".", "username_re", ",", "self", ".", "password_re", ",", "BOOTSTRAP", ",", "IMAGE", ",", "TIMEOUT", ",", "EOF", "]", "# 5 6 7 8 9", "transitions", "=", "[", "(", "SAVE_CONFIG", ",", "[", "0", "]", ",", "1", ",", "partial", "(", "a_send_line", ",", "response", ")", ",", "60", ")", ",", "(", "PROCEED", ",", "[", "0", ",", "1", "]", ",", "2", ",", "partial", "(", "a_send", ",", "\"\\r\"", ")", ",", "reload_timeout", ")", ",", "(", "LOCATED", ",", "[", "2", "]", ",", "2", ",", "a_message_callback", ",", "reload_timeout", ")", ",", "# if timeout try to send the reload command again", "(", "TIMEOUT", ",", "[", "0", "]", ",", "0", ",", "partial", "(", "a_send_line", ",", "self", ".", "reload_cmd", ")", ",", "10", ")", ",", "(", "BOOTSTRAP", ",", "[", "2", "]", ",", "-", "1", ",", "a_disconnect", ",", "reload_timeout", ")", ",", "(", "IMAGE", ",", "[", "2", "]", ",", "3", ",", "a_message_callback", ",", "reload_timeout", ")", ",", "(", "self", ".", "username_re", ",", "[", "3", "]", ",", "-", "1", ",", "a_return_and_reconnect", ",", "0", ")", ",", "(", "self", ".", "password_re", ",", "[", "3", "]", ",", "-", "1", ",", "a_return_and_reconnect", ",", "0", ")", ",", "(", "RETURN", ",", "[", "3", "]", ",", "-", "1", ",", "a_return_and_reconnect", ",", "0", ")", ",", "(", "TIMEOUT", ",", "[", "2", "]", ",", "-", "1", ",", "a_disconnect", ",", "0", ")", ",", "(", "EOF", ",", "[", "0", ",", "1", ",", "2", ",", "3", "]", ",", "-", "1", ",", "a_disconnect", ",", "0", ")", "]", "fsm", "=", "FSM", "(", "\"IOS-RELOAD\"", ",", "self", ".", "device", ",", "events", ",", "transitions", ",", "timeout", "=", "10", ")", "return", "fsm", ".", "run", "(", ")" ]
Reload the device. CSM_DUT#reload System configuration has been modified. Save? [yes/no]: yes Building configuration... [OK] Proceed with reload? [confirm]
[ "Reload", "the", "device", "." ]
python
train
ymoch/apyori
apyori.py
https://github.com/ymoch/apyori/blob/8cc20a19d01b18b83e18e54aabb416c8dedabfde/apyori.py#L206-L222
def gen_ordered_statistics(transaction_manager, record): """ Returns a generator of ordered statistics as OrderedStatistic instances. Arguments: transaction_manager -- Transactions as a TransactionManager instance. record -- A support record as a SupportRecord instance. """ items = record.items for combination_set in combinations(sorted(items), len(items) - 1): items_base = frozenset(combination_set) items_add = frozenset(items.difference(items_base)) confidence = ( record.support / transaction_manager.calc_support(items_base)) lift = confidence / transaction_manager.calc_support(items_add) yield OrderedStatistic( frozenset(items_base), frozenset(items_add), confidence, lift)
[ "def", "gen_ordered_statistics", "(", "transaction_manager", ",", "record", ")", ":", "items", "=", "record", ".", "items", "for", "combination_set", "in", "combinations", "(", "sorted", "(", "items", ")", ",", "len", "(", "items", ")", "-", "1", ")", ":", "items_base", "=", "frozenset", "(", "combination_set", ")", "items_add", "=", "frozenset", "(", "items", ".", "difference", "(", "items_base", ")", ")", "confidence", "=", "(", "record", ".", "support", "/", "transaction_manager", ".", "calc_support", "(", "items_base", ")", ")", "lift", "=", "confidence", "/", "transaction_manager", ".", "calc_support", "(", "items_add", ")", "yield", "OrderedStatistic", "(", "frozenset", "(", "items_base", ")", ",", "frozenset", "(", "items_add", ")", ",", "confidence", ",", "lift", ")" ]
Returns a generator of ordered statistics as OrderedStatistic instances. Arguments: transaction_manager -- Transactions as a TransactionManager instance. record -- A support record as a SupportRecord instance.
[ "Returns", "a", "generator", "of", "ordered", "statistics", "as", "OrderedStatistic", "instances", "." ]
python
train
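This generator is internal; it is exercised through apyori's public apriori() entry point (toy transactions):

from apyori import apriori

transactions = [["beer", "nuts"], ["beer", "cheese"], ["beer", "nuts"]]
for record in apriori(transactions, min_support=0.5):
    # Each RelationRecord carries the OrderedStatistic tuples built above.
    print(record.items, record.ordered_statistics)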
ossobv/dutree
dutree/dutree.py
https://github.com/ossobv/dutree/blob/adceeeb17f9fd70a7ed9c674850d7015d820eb2a/dutree/dutree.py#L120-L124
def app_size(self): "Return the total apparent size, including children." if self._nodes is None: return self._app_size return sum(i.app_size() for i in self._nodes)
[ "def", "app_size", "(", "self", ")", ":", "if", "self", ".", "_nodes", "is", "None", ":", "return", "self", ".", "_app_size", "return", "sum", "(", "i", ".", "app_size", "(", ")", "for", "i", "in", "self", ".", "_nodes", ")" ]
Return the total apparent size, including children.
[ "Return", "the", "total", "apparent", "size", "including", "children", "." ]
python
train
skorch-dev/skorch
skorch/net.py
https://github.com/skorch-dev/skorch/blob/5b9b8b7b7712cb6e5aaa759d9608ea6269d5bcd3/skorch/net.py#L1222-L1248
def _get_params_for_optimizer(self, prefix, named_parameters):
    """Parse kwargs configuration for the optimizer identified by
    the given prefix. Supports param group assignment using wildcards:

        optimizer__lr=0.05,
        optimizer__param_groups=[
            ('rnn*.period', {'lr': 0.3, 'momentum': 0}),
            ('rnn0', {'lr': 0.1}),
        ]

    The first positional argument is the param groups.
    """
    kwargs = self._get_params_for(prefix)
    params = list(named_parameters)
    pgroups = []

    for pattern, group in kwargs.pop('param_groups', []):
        matches = [i for i, (name, _) in enumerate(params) if
                   fnmatch.fnmatch(name, pattern)]
        if matches:
            p = [params.pop(i)[1] for i in reversed(matches)]
            pgroups.append({'params': p, **group})

    if params:
        pgroups.append({'params': [p for _, p in params]})

    return [pgroups], kwargs
[ "def", "_get_params_for_optimizer", "(", "self", ",", "prefix", ",", "named_parameters", ")", ":", "kwargs", "=", "self", ".", "_get_params_for", "(", "prefix", ")", "params", "=", "list", "(", "named_parameters", ")", "pgroups", "=", "[", "]", "for", "pattern", ",", "group", "in", "kwargs", ".", "pop", "(", "'param_groups'", ",", "[", "]", ")", ":", "matches", "=", "[", "i", "for", "i", ",", "(", "name", ",", "_", ")", "in", "enumerate", "(", "params", ")", "if", "fnmatch", ".", "fnmatch", "(", "name", ",", "pattern", ")", "]", "if", "matches", ":", "p", "=", "[", "params", ".", "pop", "(", "i", ")", "[", "1", "]", "for", "i", "in", "reversed", "(", "matches", ")", "]", "pgroups", ".", "append", "(", "{", "'params'", ":", "p", ",", "*", "*", "group", "}", ")", "if", "params", ":", "pgroups", ".", "append", "(", "{", "'params'", ":", "[", "p", "for", "_", ",", "p", "in", "params", "]", "}", ")", "return", "[", "pgroups", "]", ",", "kwargs" ]
Parse kwargs configuration for the optimizer identified by the given prefix. Supports param group assignment using wildcards: optimizer__lr=0.05, optimizer__param_groups=[ ('rnn*.period', {'lr': 0.3, 'momentum': 0}), ('rnn0', {'lr': 0.1}), ] The first positional argument is the param groups.
[ "Parse", "kwargs", "configuration", "for", "the", "optimizer", "identified", "by", "the", "given", "prefix", ".", "Supports", "param", "group", "assignment", "using", "wildcards", ":" ]
python
train
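End to end, the wildcard syntax from the docstring looks like this on a NeuralNet (the module and learning rates are illustrative):

import torch
from skorch import NeuralNet

class RNNRegressor(torch.nn.Module):
    def __init__(self):
        super().__init__()
        self.rnn0 = torch.nn.RNN(10, 10, batch_first=True)
        self.dense = torch.nn.Linear(10, 1)

    def forward(self, x):
        out, _ = self.rnn0(x)
        return self.dense(out[:, -1])

net = NeuralNet(
    RNNRegressor,
    criterion=torch.nn.MSELoss,
    optimizer__lr=0.05,                 # default group
    optimizer__param_groups=[
        ('rnn0*', {'lr': 0.1}),         # every parameter under rnn0
    ],
)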