Dataset schema (column name, feature type, observed value-length range):

- repo: string, lengths 7–54
- path: string, lengths 4–192
- url: string, lengths 87–284
- code: string, lengths 78–104k
- code_tokens: sequence
- docstring: string, lengths 1–46.9k
- docstring_tokens: sequence
- language: string, 1 class
- partition: string, 3 classes
JukeboxPipeline/jukebox-core
src/jukeboxcore/gui/widgets/tooltip.py
https://github.com/JukeboxPipeline/jukebox-core/blob/bac2280ca49940355270e4b69400ce9976ab2e6f/src/jukeboxcore/gui/widgets/tooltip.py#L95-L112
def setup_layout(self, orientation=None):
    """Setup the layout for the tooltip in the given orientation

    :param orientation: the orientation of the layout
    :type orientation: QtCore.Qt.Orientation | None
    :returns: None
    :rtype: None
    :raises: TypeError
    """
    if orientation == QtCore.Qt.Horizontal or orientation is None:
        layout = QtGui.QHBoxLayout()
    elif orientation == QtCore.Qt.Vertical:
        layout = QtGui.QVBoxLayout()
    else:
        raise TypeError('Orientation is of wrong type! Allowed is QtCore.Qt.Horizontal and QtCore.Qt.Vertical. Given: %s' % orientation)
    layout.setContentsMargins(0, 0, 0, 0)
    layout.setSpacing(0)
    self.setLayout(layout)
[ "def", "setup_layout", "(", "self", ",", "orientation", "=", "None", ")", ":", "if", "orientation", "==", "QtCore", ".", "Qt", ".", "Horizontal", "or", "orientation", "is", "None", ":", "layout", "=", "QtGui", ".", "QHBoxLayout", "(", ")", "elif", "orientation", "==", "QtCore", ".", "Qt", ".", "Vertical", ":", "layout", "=", "QtGui", ".", "QVBoxLayout", "(", ")", "else", ":", "raise", "TypeError", "(", "'Orientation is of wrong type! Allowed is QtCore.Qt.Horizontal and QtCore.Qt.Vertical. Given: %s'", "%", "orientation", ")", "layout", ".", "setContentsMargins", "(", "0", ",", "0", ",", "0", ",", "0", ")", "layout", ".", "setSpacing", "(", "0", ")", "self", ".", "setLayout", "(", "layout", ")" ]
Setup the layout for the tooltip in the given orientation :param layout: the orentation of the layout :type layout: QtCore.Qt.Orientation | None :returns: None :rtype: None :raises: None
[ "Setup", "the", "layout", "for", "the", "tooltip", "in", "the", "given", "orientation" ]
python
train
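jukebox-core resolves QtCore/QtGui through its own binding wrapper; purely as an illustration, here is the same orientation dispatch written against PyQt5 (the function and module names are assumptions, not jukebox-core code, and actually using the layout requires a running QApplication):

from PyQt5 import QtCore, QtWidgets

def make_layout(orientation=None):
    """Illustrative stand-in for setup_layout; returns the layout instead of applying it."""
    if orientation in (QtCore.Qt.Horizontal, None):
        layout = QtWidgets.QHBoxLayout()
    elif orientation == QtCore.Qt.Vertical:
        layout = QtWidgets.QVBoxLayout()
    else:
        raise TypeError('Expected Qt.Horizontal or Qt.Vertical, got: %s' % orientation)
    layout.setContentsMargins(0, 0, 0, 0)
    layout.setSpacing(0)
    return layout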
RudolfCardinal/pythonlib
cardinal_pythonlib/tools/list_all_file_extensions.py
https://github.com/RudolfCardinal/pythonlib/blob/0b84cb35f38bd7d8723958dae51b480a829b7227/cardinal_pythonlib/tools/list_all_file_extensions.py#L43-L65
def list_file_extensions(path: str, reportevery: int = 1) -> List[str]:
    """
    Returns a sorted list of every file extension found in a directory
    and its subdirectories.

    Args:
        path: path to scan
        reportevery: report directory progress after every *n* steps

    Returns:
        sorted list of every file extension found
    """
    extensions = set()
    count = 0
    for root, dirs, files in os.walk(path):
        count += 1
        if count % reportevery == 0:
            log.debug("Walking directory {}: {!r}", count, root)
        for file in files:
            filename, ext = os.path.splitext(file)
            extensions.add(ext)
    return sorted(list(extensions))
[ "def", "list_file_extensions", "(", "path", ":", "str", ",", "reportevery", ":", "int", "=", "1", ")", "->", "List", "[", "str", "]", ":", "extensions", "=", "set", "(", ")", "count", "=", "0", "for", "root", ",", "dirs", ",", "files", "in", "os", ".", "walk", "(", "path", ")", ":", "count", "+=", "1", "if", "count", "%", "reportevery", "==", "0", ":", "log", ".", "debug", "(", "\"Walking directory {}: {!r}\"", ",", "count", ",", "root", ")", "for", "file", "in", "files", ":", "filename", ",", "ext", "=", "os", ".", "path", ".", "splitext", "(", "file", ")", "extensions", ".", "add", "(", "ext", ")", "return", "sorted", "(", "list", "(", "extensions", ")", ")" ]
Returns a sorted list of every file extension found in a directory and its subdirectories. Args: path: path to scan reportevery: report directory progress after every *n* steps Returns: sorted list of every file extension found
[ "Returns", "a", "sorted", "list", "of", "every", "file", "extension", "found", "in", "a", "directory", "and", "its", "subdirectories", "." ]
python
train
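A quick usage sketch, assuming the function above is in scope together with its module-level `log` (a plain stdlib logger stands in here; at the default WARNING level the debug calls are never emitted):

import logging
import os
import tempfile

log = logging.getLogger(__name__)  # stand-in for cardinal_pythonlib's own logger

with tempfile.TemporaryDirectory() as root:
    for name in ("a.txt", "b.py", "README"):
        open(os.path.join(root, name), "w").close()
    print(list_file_extensions(root))  # ['', '.py', '.txt']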
openpermissions/perch
perch/model.py
https://github.com/openpermissions/perch/blob/36d78994133918f3c52c187f19e50132960a0156/perch/model.py#L568-L583
def get(cls, resource_id, include_deactivated=False):
    """
    Get a resource

    :param resource_id: the resource ID
    :param include_deactivated: Include deactivated resources in response
    :returns: a SubResource instance
    :raises: SocketError, CouchException
    """
    if include_deactivated:
        resource = yield cls.view.first(key=resource_id, include_docs=True)
    else:
        resource = yield cls.active_view.first(key=resource_id, include_docs=True)

    parent = cls.parent_resource(**resource['doc'])
    raise Return(cls(parent=parent, **resource['value']))
[ "def", "get", "(", "cls", ",", "resource_id", ",", "include_deactivated", "=", "False", ")", ":", "if", "include_deactivated", ":", "resource", "=", "yield", "cls", ".", "view", ".", "first", "(", "key", "=", "resource_id", ",", "include_docs", "=", "True", ")", "else", ":", "resource", "=", "yield", "cls", ".", "active_view", ".", "first", "(", "key", "=", "resource_id", ",", "include_docs", "=", "True", ")", "parent", "=", "cls", ".", "parent_resource", "(", "*", "*", "resource", "[", "'doc'", "]", ")", "raise", "Return", "(", "cls", "(", "parent", "=", "parent", ",", "*", "*", "resource", "[", "'value'", "]", ")", ")" ]
Get a resource :param resource_id: the resource ID :param include_deactivated: Include deactivated resources in response :returns: a SubResource instance :raises: SocketError, CouchException
[ "Get", "a", "resource" ]
python
train
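The `yield` plus `raise Return(...)` pattern above is Tornado's generator-coroutine idiom from before Python allowed `return` with a value inside generators. A self-contained sketch of the pattern (illustrative only, not perch code):

from tornado import gen
from tornado.ioloop import IOLoop

@gen.coroutine
def get_value():
    yield gen.sleep(0)    # stand-in for the asynchronous CouchDB view query
    raise gen.Return(42)  # equivalent to `return 42` on Python 3.3+

print(IOLoop.current().run_sync(get_value))  # 42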
Delgan/loguru
loguru/_logger.py
https://github.com/Delgan/loguru/blob/6571879c37904e3a18567e694d70651c6886b860/loguru/_logger.py#L1638-L1650
def stop(self, *args, **kwargs):
    """Deprecated function to |remove| an existing handler.

    Warnings
    --------
    .. deprecated:: 0.2.2
        ``stop()`` will be removed in Loguru 1.0.0, it is replaced by
        ``remove()`` which is a less confusing name.
    """
    warnings.warn(
        "The 'stop()' method is deprecated, please use 'remove()' instead",
        DeprecationWarning
    )
    return self.remove(*args, **kwargs)
[ "def", "stop", "(", "self", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "warnings", ".", "warn", "(", "\"The 'stop()' method is deprecated, please use 'remove()' instead\"", ",", "DeprecationWarning", ")", "return", "self", ".", "remove", "(", "*", "args", ",", "*", "*", "kwargs", ")" ]
Deprecated function to |remove| an existing handler. Warnings -------- .. deprecated:: 0.2.2 ``stop()`` will be removed in Loguru 1.0.0, it is replaced by ``remove()`` which is a less confusing name.
[ "Deprecated", "function", "to", "|remove|", "an", "existing", "handler", "." ]
python
train
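Current loguru code goes through `remove()` directly; a minimal example:

import sys
from loguru import logger

handler_id = logger.add(sys.stderr, level="INFO")  # add a handler, keep its id
logger.info("visible through the new handler")
logger.remove(handler_id)                          # detach it again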
blockstack/pybitcoin
pybitcoin/rpc/namecoind_client.py
https://github.com/blockstack/pybitcoin/blob/92c8da63c40f7418594b1ce395990c3f5a4787cc/pybitcoin/rpc/namecoind_client.py#L190-L215
def name_transfer(self, key, new_address, value=None):
    """ Check if this name exists and if it does, find the value field

        Note that the update command needs an arg of <new value>;
        in case we're simply transferring, we need to obtain the old value first.
    """
    key_details = self.name_show(key)
    if 'code' in key_details and key_details.get('code') == -4:
        return error_reply("Key does not exist")

    # get new 'value' if given, otherwise use the old 'value'
    if value is None:
        value = json.dumps(key_details['value'])

    if not self.unlock_wallet(self.passphrase):
        return error_reply("Error unlocking wallet", 403)

    # transfer the name (underlying call is still name_update)
    try:
        # update the 'value'
        reply = self.obj.name_update(key, value, new_address)
    except JSONRPCException as e:
        return e.error

    return reply
[ "def", "name_transfer", "(", "self", ",", "key", ",", "new_address", ",", "value", "=", "None", ")", ":", "key_details", "=", "self", ".", "name_show", "(", "key", ")", "if", "'code'", "in", "key_details", "and", "key_details", ".", "get", "(", "'code'", ")", "==", "-", "4", ":", "return", "error_reply", "(", "\"Key does not exist\"", ")", "# get new 'value' if given, otherwise use the old 'value'", "if", "value", "is", "None", ":", "value", "=", "json", ".", "dumps", "(", "key_details", "[", "'value'", "]", ")", "if", "not", "self", ".", "unlock_wallet", "(", "self", ".", "passphrase", ")", ":", "error_reply", "(", "\"Error unlocking wallet\"", ",", "403", ")", "# transfer the name (underlying call is still name_update)", "try", ":", "# update the 'value'", "reply", "=", "self", ".", "obj", ".", "name_update", "(", "key", ",", "value", ",", "new_address", ")", "except", "JSONRPCException", "as", "e", ":", "return", "e", ".", "error", "return", "reply" ]
Check if this name exists and if it does, find the value field note that update command needs an arg of <new value>. in case we're simply transferring, need to obtain old value first
[ "Check", "if", "this", "name", "exists", "and", "if", "it", "does", "find", "the", "value", "field", "note", "that", "update", "command", "needs", "an", "arg", "of", "<new", "value", ">", ".", "in", "case", "we", "re", "simply", "transferring", "need", "to", "obtain", "old", "value", "first" ]
python
train
gouthambs/Flask-Blogging
flask_blogging/sqlastorage.py
https://github.com/gouthambs/Flask-Blogging/blob/6636b8941175e9910f116a329521f96b8b05a9ac/flask_blogging/sqlastorage.py#L362-L397
def delete_post(self, post_id):
    """
    Delete the post defined by ``post_id``

    :param post_id: The identifier corresponding to a post
    :type post_id: int
    :return: Returns True if the post was successfully deleted
        and False otherwise.
    """
    status = False
    success = 0
    post_id = _as_int(post_id)
    with self._engine.begin() as conn:
        try:
            post_del_statement = self._post_table.delete().where(
                self._post_table.c.id == post_id)
            conn.execute(post_del_statement)
            success += 1
        except Exception as e:
            self._logger.exception(str(e))
        try:
            user_posts_del_statement = self._user_posts_table.delete().\
                where(self._user_posts_table.c.post_id == post_id)
            conn.execute(user_posts_del_statement)
            success += 1
        except Exception as e:
            self._logger.exception(str(e))
        try:
            tag_posts_del_statement = self._tag_posts_table.delete().\
                where(self._tag_posts_table.c.post_id == post_id)
            conn.execute(tag_posts_del_statement)
            success += 1
        except Exception as e:
            self._logger.exception(str(e))
    status = success == 3
    return status
[ "def", "delete_post", "(", "self", ",", "post_id", ")", ":", "status", "=", "False", "success", "=", "0", "post_id", "=", "_as_int", "(", "post_id", ")", "with", "self", ".", "_engine", ".", "begin", "(", ")", "as", "conn", ":", "try", ":", "post_del_statement", "=", "self", ".", "_post_table", ".", "delete", "(", ")", ".", "where", "(", "self", ".", "_post_table", ".", "c", ".", "id", "==", "post_id", ")", "conn", ".", "execute", "(", "post_del_statement", ")", "success", "+=", "1", "except", "Exception", "as", "e", ":", "self", ".", "_logger", ".", "exception", "(", "str", "(", "e", ")", ")", "try", ":", "user_posts_del_statement", "=", "self", ".", "_user_posts_table", ".", "delete", "(", ")", ".", "where", "(", "self", ".", "_user_posts_table", ".", "c", ".", "post_id", "==", "post_id", ")", "conn", ".", "execute", "(", "user_posts_del_statement", ")", "success", "+=", "1", "except", "Exception", "as", "e", ":", "self", ".", "_logger", ".", "exception", "(", "str", "(", "e", ")", ")", "try", ":", "tag_posts_del_statement", "=", "self", ".", "_tag_posts_table", ".", "delete", "(", ")", ".", "where", "(", "self", ".", "_tag_posts_table", ".", "c", ".", "post_id", "==", "post_id", ")", "conn", ".", "execute", "(", "tag_posts_del_statement", ")", "success", "+=", "1", "except", "Exception", "as", "e", ":", "self", ".", "_logger", ".", "exception", "(", "str", "(", "e", ")", ")", "status", "=", "success", "==", "3", "return", "status" ]
Delete the post defined by ``post_id`` :param post_id: The identifier corresponding to a post :type post_id: int :return: Returns True if the post was successfully deleted and False otherwise.
[ "Delete", "the", "post", "defined", "by", "post_id" ]
python
train
watson-developer-cloud/python-sdk
ibm_watson/discovery_v1.py
https://github.com/watson-developer-cloud/python-sdk/blob/4c2c9df4466fcde88975da9ecd834e6ba95eb353/ibm_watson/discovery_v1.py#L4427-L4474
def _to_dict(self):
    """Return a json dictionary representing this model."""
    _dict = {}
    if hasattr(self, 'credential_type') and self.credential_type is not None:
        _dict['credential_type'] = self.credential_type
    if hasattr(self, 'client_id') and self.client_id is not None:
        _dict['client_id'] = self.client_id
    if hasattr(self, 'enterprise_id') and self.enterprise_id is not None:
        _dict['enterprise_id'] = self.enterprise_id
    if hasattr(self, 'url') and self.url is not None:
        _dict['url'] = self.url
    if hasattr(self, 'username') and self.username is not None:
        _dict['username'] = self.username
    if hasattr(self, 'organization_url') and self.organization_url is not None:
        _dict['organization_url'] = self.organization_url
    if hasattr(self, 'site_collection_path') and self.site_collection_path is not None:
        _dict['site_collection.path'] = self.site_collection_path
    if hasattr(self, 'client_secret') and self.client_secret is not None:
        _dict['client_secret'] = self.client_secret
    if hasattr(self, 'public_key_id') and self.public_key_id is not None:
        _dict['public_key_id'] = self.public_key_id
    if hasattr(self, 'private_key') and self.private_key is not None:
        _dict['private_key'] = self.private_key
    if hasattr(self, 'passphrase') and self.passphrase is not None:
        _dict['passphrase'] = self.passphrase
    if hasattr(self, 'password') and self.password is not None:
        _dict['password'] = self.password
    if hasattr(self, 'gateway_id') and self.gateway_id is not None:
        _dict['gateway_id'] = self.gateway_id
    if hasattr(self, 'source_version') and self.source_version is not None:
        _dict['source_version'] = self.source_version
    if hasattr(self, 'web_application_url') and self.web_application_url is not None:
        _dict['web_application_url'] = self.web_application_url
    if hasattr(self, 'domain') and self.domain is not None:
        _dict['domain'] = self.domain
    if hasattr(self, 'endpoint') and self.endpoint is not None:
        _dict['endpoint'] = self.endpoint
    if hasattr(self, 'access_key_id') and self.access_key_id is not None:
        _dict['access_key_id'] = self.access_key_id
    if hasattr(self, 'secret_access_key') and self.secret_access_key is not None:
        _dict['secret_access_key'] = self.secret_access_key
    return _dict
[ "def", "_to_dict", "(", "self", ")", ":", "_dict", "=", "{", "}", "if", "hasattr", "(", "self", ",", "'credential_type'", ")", "and", "self", ".", "credential_type", "is", "not", "None", ":", "_dict", "[", "'credential_type'", "]", "=", "self", ".", "credential_type", "if", "hasattr", "(", "self", ",", "'client_id'", ")", "and", "self", ".", "client_id", "is", "not", "None", ":", "_dict", "[", "'client_id'", "]", "=", "self", ".", "client_id", "if", "hasattr", "(", "self", ",", "'enterprise_id'", ")", "and", "self", ".", "enterprise_id", "is", "not", "None", ":", "_dict", "[", "'enterprise_id'", "]", "=", "self", ".", "enterprise_id", "if", "hasattr", "(", "self", ",", "'url'", ")", "and", "self", ".", "url", "is", "not", "None", ":", "_dict", "[", "'url'", "]", "=", "self", ".", "url", "if", "hasattr", "(", "self", ",", "'username'", ")", "and", "self", ".", "username", "is", "not", "None", ":", "_dict", "[", "'username'", "]", "=", "self", ".", "username", "if", "hasattr", "(", "self", ",", "'organization_url'", ")", "and", "self", ".", "organization_url", "is", "not", "None", ":", "_dict", "[", "'organization_url'", "]", "=", "self", ".", "organization_url", "if", "hasattr", "(", "self", ",", "'site_collection_path'", ")", "and", "self", ".", "site_collection_path", "is", "not", "None", ":", "_dict", "[", "'site_collection.path'", "]", "=", "self", ".", "site_collection_path", "if", "hasattr", "(", "self", ",", "'client_secret'", ")", "and", "self", ".", "client_secret", "is", "not", "None", ":", "_dict", "[", "'client_secret'", "]", "=", "self", ".", "client_secret", "if", "hasattr", "(", "self", ",", "'public_key_id'", ")", "and", "self", ".", "public_key_id", "is", "not", "None", ":", "_dict", "[", "'public_key_id'", "]", "=", "self", ".", "public_key_id", "if", "hasattr", "(", "self", ",", "'private_key'", ")", "and", "self", ".", "private_key", "is", "not", "None", ":", "_dict", "[", "'private_key'", "]", "=", "self", ".", "private_key", "if", "hasattr", "(", "self", ",", "'passphrase'", ")", "and", "self", ".", "passphrase", "is", "not", "None", ":", "_dict", "[", "'passphrase'", "]", "=", "self", ".", "passphrase", "if", "hasattr", "(", "self", ",", "'password'", ")", "and", "self", ".", "password", "is", "not", "None", ":", "_dict", "[", "'password'", "]", "=", "self", ".", "password", "if", "hasattr", "(", "self", ",", "'gateway_id'", ")", "and", "self", ".", "gateway_id", "is", "not", "None", ":", "_dict", "[", "'gateway_id'", "]", "=", "self", ".", "gateway_id", "if", "hasattr", "(", "self", ",", "'source_version'", ")", "and", "self", ".", "source_version", "is", "not", "None", ":", "_dict", "[", "'source_version'", "]", "=", "self", ".", "source_version", "if", "hasattr", "(", "self", ",", "'web_application_url'", ")", "and", "self", ".", "web_application_url", "is", "not", "None", ":", "_dict", "[", "'web_application_url'", "]", "=", "self", ".", "web_application_url", "if", "hasattr", "(", "self", ",", "'domain'", ")", "and", "self", ".", "domain", "is", "not", "None", ":", "_dict", "[", "'domain'", "]", "=", "self", ".", "domain", "if", "hasattr", "(", "self", ",", "'endpoint'", ")", "and", "self", ".", "endpoint", "is", "not", "None", ":", "_dict", "[", "'endpoint'", "]", "=", "self", ".", "endpoint", "if", "hasattr", "(", "self", ",", "'access_key_id'", ")", "and", "self", ".", "access_key_id", "is", "not", "None", ":", "_dict", "[", "'access_key_id'", "]", "=", "self", ".", "access_key_id", "if", "hasattr", "(", "self", ",", "'secret_access_key'", ")", "and", 
"self", ".", "secret_access_key", "is", "not", "None", ":", "_dict", "[", "'secret_access_key'", "]", "=", "self", ".", "secret_access_key", "return", "_dict" ]
Return a json dictionary representing this model.
[ "Return", "a", "json", "dictionary", "representing", "this", "model", "." ]
python
train
pypa/pipenv
pipenv/vendor/dotenv/main.py
https://github.com/pypa/pipenv/blob/cae8d76c210b9777e90aab76e9c4b0e53bb19cde/pipenv/vendor/dotenv/main.py#L209-L232
def unset_key(dotenv_path, key_to_unset, quote_mode="always"):
    """
    Removes a given key from the given .env

    If the .env path given doesn't exist, fails
    If the given key doesn't exist in the .env, fails
    """
    if not os.path.exists(dotenv_path):
        warnings.warn("can't delete from %s - it doesn't exist." % dotenv_path)
        return None, key_to_unset

    removed = False
    with rewrite(dotenv_path) as (source, dest):
        for mapping in parse_stream(source):
            if mapping.key == key_to_unset:
                removed = True
            else:
                dest.write(mapping.original)

    if not removed:
        warnings.warn("key %s not removed from %s - key doesn't exist." % (key_to_unset, dotenv_path))
        return None, key_to_unset

    return removed, key_to_unset
[ "def", "unset_key", "(", "dotenv_path", ",", "key_to_unset", ",", "quote_mode", "=", "\"always\"", ")", ":", "if", "not", "os", ".", "path", ".", "exists", "(", "dotenv_path", ")", ":", "warnings", ".", "warn", "(", "\"can't delete from %s - it doesn't exist.\"", "%", "dotenv_path", ")", "return", "None", ",", "key_to_unset", "removed", "=", "False", "with", "rewrite", "(", "dotenv_path", ")", "as", "(", "source", ",", "dest", ")", ":", "for", "mapping", "in", "parse_stream", "(", "source", ")", ":", "if", "mapping", ".", "key", "==", "key_to_unset", ":", "removed", "=", "True", "else", ":", "dest", ".", "write", "(", "mapping", ".", "original", ")", "if", "not", "removed", ":", "warnings", ".", "warn", "(", "\"key %s not removed from %s - key doesn't exist.\"", "%", "(", "key_to_unset", ",", "dotenv_path", ")", ")", "return", "None", ",", "key_to_unset", "return", "removed", ",", "key_to_unset" ]
Removes a given key from the given .env If the .env path given doesn't exist, fails If the given key doesn't exist in the .env, fails
[ "Removes", "a", "given", "key", "from", "the", "given", ".", "env" ]
python
train
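A usage sketch against python-dotenv's public API, which exposes the same `set_key`/`unset_key` pair as the vendored copy above:

from dotenv import set_key, unset_key

open(".env", "a").close()                      # make sure the file exists
set_key(".env", "API_TOKEN", "s3cr3t")
removed, key = unset_key(".env", "API_TOKEN")
print(removed, key)                            # True API_TOKEN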
IBM/pyxcli
pyxcli/helpers/exceptool.py
https://github.com/IBM/pyxcli/blob/7d8ece1dcc16f50246740a447aa81b94a0dbced4/pyxcli/helpers/exceptool.py#L21-L39
def chained(wrapping_exc):  # pylint: disable=W0212
    """
    Embeds the current exception information into the given one
    (which will replace the current one). For example::

        try:
            ...
        except OSError as ex:
            raise chained(MyError("database not found!"))
    """
    t, v, tb = sys.exc_info()
    if not t:
        return wrapping_exc
    wrapping_exc._inner_exc = v
    lines = traceback.format_exception(t, v, tb)
    wrapping_exc._inner_tb = "".join(lines[1:])
    return wrapping_exc
[ "def", "chained", "(", "wrapping_exc", ")", ":", "# pylint: disable=W0212", "t", ",", "v", ",", "tb", "=", "sys", ".", "exc_info", "(", ")", "if", "not", "t", ":", "return", "wrapping_exc", "wrapping_exc", ".", "_inner_exc", "=", "v", "lines", "=", "traceback", ".", "format_exception", "(", "t", ",", "v", ",", "tb", ")", "wrapping_exc", ".", "_inner_tb", "=", "\"\"", ".", "join", "(", "lines", "[", "1", ":", "]", ")", "return", "wrapping_exc" ]
Embeds the current exception information into the given one (which will replace the current one). For example:: try: ... except OSError as ex: raise chained(MyError("database not found!"))
[ "Embeds", "the", "current", "exception", "information", "into", "the", "given", "one", "(", "which", "will", "replace", "the", "current", "one", ")", ".", "For", "example", "::" ]
python
train
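A usage sketch, assuming `chained` and its `sys`/`traceback` imports are in scope; on Python 3 the built-in `raise ... from ...` chaining covers the same need:

class MyError(Exception):
    pass

try:
    try:
        raise OSError("database file missing")
    except OSError:
        raise chained(MyError("database not found!"))
except MyError as ex:
    print(ex._inner_tb)  # formatted traceback of the original OSError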
martinblech/xmltodict
ez_setup.py
https://github.com/martinblech/xmltodict/blob/f3ab7e1740d37d585ffab0154edb4cb664afe4a9/ez_setup.py#L99-L129
@contextlib.contextmanager
def archive_context(filename):
    """
    Unzip filename to a temporary directory, set to the cwd.

    The unzipped target is cleaned up after.
    """
    tmpdir = tempfile.mkdtemp()
    log.warn('Extracting in %s', tmpdir)
    old_wd = os.getcwd()
    try:
        os.chdir(tmpdir)
        try:
            with ContextualZipFile(filename) as archive:
                archive.extractall()
        except zipfile.BadZipfile as err:
            if not err.args:
                err.args = ('', )
            err.args = err.args + (
                MEANINGFUL_INVALID_ZIP_ERR_MSG.format(filename),
            )
            raise

        # going in the directory
        subdir = os.path.join(tmpdir, os.listdir(tmpdir)[0])
        os.chdir(subdir)
        log.warn('Now working in %s', subdir)
        yield
    finally:
        os.chdir(old_wd)
        shutil.rmtree(tmpdir)
[ "def", "archive_context", "(", "filename", ")", ":", "tmpdir", "=", "tempfile", ".", "mkdtemp", "(", ")", "log", ".", "warn", "(", "'Extracting in %s'", ",", "tmpdir", ")", "old_wd", "=", "os", ".", "getcwd", "(", ")", "try", ":", "os", ".", "chdir", "(", "tmpdir", ")", "try", ":", "with", "ContextualZipFile", "(", "filename", ")", "as", "archive", ":", "archive", ".", "extractall", "(", ")", "except", "zipfile", ".", "BadZipfile", "as", "err", ":", "if", "not", "err", ".", "args", ":", "err", ".", "args", "=", "(", "''", ",", ")", "err", ".", "args", "=", "err", ".", "args", "+", "(", "MEANINGFUL_INVALID_ZIP_ERR_MSG", ".", "format", "(", "filename", ")", ",", ")", "raise", "# going in the directory", "subdir", "=", "os", ".", "path", ".", "join", "(", "tmpdir", ",", "os", ".", "listdir", "(", "tmpdir", ")", "[", "0", "]", ")", "os", ".", "chdir", "(", "subdir", ")", "log", ".", "warn", "(", "'Now working in %s'", ",", "subdir", ")", "yield", "finally", ":", "os", ".", "chdir", "(", "old_wd", ")", "shutil", ".", "rmtree", "(", "tmpdir", ")" ]
Unzip filename to a temporary directory, set to the cwd. The unzipped target is cleaned up after.
[ "Unzip", "filename", "to", "a", "temporary", "directory", "set", "to", "the", "cwd", "." ]
python
train
fdChasm/txCarbonClient
src/txCarbonClient/carbon_client_service.py
https://github.com/fdChasm/txCarbonClient/blob/c342eff1957d281cba3c83fc578f08c4bf9fcd03/src/txCarbonClient/carbon_client_service.py#L30-L44
def publish_metric(self, metric_name, metric_value, epoch_seconds=None):
    '''Record a single hit on a given metric.

    Args:
        metric_name: The name of the metric to record with Carbon.
        metric_value: The value to record with Carbon.
        epoch_seconds: Optionally specify the time for the metric hit.

    Returns:
        None
    '''
    if epoch_seconds is None:
        epoch_seconds = self._reactor.seconds()
    self._client_factory.publish_metric(metric_name, metric_value, int(epoch_seconds))
[ "def", "publish_metric", "(", "self", ",", "metric_name", ",", "metric_value", ",", "epoch_seconds", "=", "None", ")", ":", "if", "epoch_seconds", "is", "None", ":", "epoch_seconds", "=", "self", ".", "_reactor", ".", "seconds", "(", ")", "self", ".", "_client_factory", ".", "publish_metric", "(", "metric_name", ",", "metric_value", ",", "int", "(", "epoch_seconds", ")", ")" ]
Record a single hit on a given metric. Args: metric_name: The name of the metric to record with Carbon. metric_value: The value to record with Carbon. epoch_seconds: Optionally specify the time for the metric hit. Returns: None
[ "Record", "a", "single", "hit", "on", "a", "given", "metric", "." ]
python
train
tensorflow/mesh
mesh_tensorflow/layers.py
https://github.com/tensorflow/mesh/blob/3921196e5e43302e820da0a87329f25d7e2a3016/mesh_tensorflow/layers.py#L187-L220
def softmax_cross_entropy_with_logits(logits, targets, vocab_dim, z_loss=0.0):
    """Per-example softmax loss.

    if z_loss is nonzero, we add a loss equal to z_loss*log(z)^2, where z is the
    partition function.  Example value: z_loss=1e-4.  Two uses of z_loss are:
    - To keep the logits from drifting too far from zero, which can cause
      unacceptable roundoff errors in bfloat16.
    - To encourage the logits to be normalized log-probabilities.

    Args:
      logits: a mtf.Tensor whose shape contains vocab_dim
      targets: a mtf.Tensor with the same shape as logits
      vocab_dim: a mtf.Dimension
      z_loss: a float

    Returns:
      a mtf.Tensor whose shape is equal to logits.shape - vocab_dim

    Raises:
      ValueError: if the shapes do not match.
    """
    if logits.shape != targets.shape:
        raise ValueError(
            "logits shape must equal targets shape; "
            "logits=%s targets=%s" % (logits.to_string, targets.to_string))
    if vocab_dim not in logits.shape.dims:
        raise ValueError("vocab_dim must be in logits.shape.dims")
    log_z = mtf.reduce_logsumexp(logits, vocab_dim)
    log_softmax = logits - log_z
    loss = mtf.negative(
        mtf.reduce_sum(log_softmax * targets, reduced_dim=vocab_dim))
    if z_loss != 0:
        loss += z_loss * mtf.square(log_z)
    return loss
[ "def", "softmax_cross_entropy_with_logits", "(", "logits", ",", "targets", ",", "vocab_dim", ",", "z_loss", "=", "0.0", ")", ":", "if", "logits", ".", "shape", "!=", "targets", ".", "shape", ":", "raise", "ValueError", "(", "\"logits shape must equal targets shape\"", "\"logits=%s targets=%s\"", "%", "(", "logits", ".", "to_string", ",", "targets", ".", "to_string", ")", ")", "if", "vocab_dim", "not", "in", "logits", ".", "shape", ".", "dims", ":", "raise", "ValueError", "(", "\"vocab_dim must be in logits.shape.dims\"", ")", "log_z", "=", "mtf", ".", "reduce_logsumexp", "(", "logits", ",", "vocab_dim", ")", "log_softmax", "=", "logits", "-", "log_z", "loss", "=", "mtf", ".", "negative", "(", "mtf", ".", "reduce_sum", "(", "log_softmax", "*", "targets", ",", "reduced_dim", "=", "vocab_dim", ")", ")", "if", "z_loss", "!=", "0", ":", "loss", "+=", "z_loss", "*", "mtf", ".", "square", "(", "log_z", ")", "return", "loss" ]
Per-example softmax loss. if z_loss is nonzero, we add a loss equal to z_loss*log(z)^2, where z is the partition function. Example value: z_loss=1e-4. Two uses of z_loss are: - To keep the logits from drifting too far from zero, which can cause unacceptable roundoff errors in bfloat16. - To encourage the logits to be normalized log-probabilities. Args: logits: a mtf.Tensor whose shape contains vocab_dim targets: a mtf.Tensor with the same shape as logits vocab_dim: a mtf.Dimension z_loss: a float Returns: a mtf.Tensor whose shape is equal to logits.shape - vocab_dim Raises: ValueError: if the shapes do not match.
[ "Per", "-", "example", "softmax", "loss", "." ]
python
train
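A NumPy sketch of the same loss with the vocab dimension as the last axis (illustrative only; Mesh TensorFlow reduces over a named dimension instead, and a production version would subtract the max before exponentiating):

import numpy as np

def softmax_xent_with_z_loss(logits, targets, z_loss=0.0):
    # log of the partition function, kept per example
    log_z = np.log(np.sum(np.exp(logits), axis=-1))
    log_softmax = logits - log_z[..., None]
    loss = -np.sum(log_softmax * targets, axis=-1)
    if z_loss != 0:
        loss += z_loss * log_z ** 2  # penalize un-normalized logits
    return loss

logits = np.array([[2.0, 1.0, 0.1]])
targets = np.array([[1.0, 0.0, 0.0]])
print(softmax_xent_with_z_loss(logits, targets, z_loss=1e-4))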
google/grumpy
third_party/stdlib/collections.py
https://github.com/google/grumpy/blob/3ec87959189cfcdeae82eb68a47648ac25ceb10b/third_party/stdlib/collections.py#L538-L579
def update(*args, **kwds):
    '''Like dict.update() but add counts instead of replacing them.

    Source can be an iterable, a dictionary, or another Counter instance.

    >>> c = Counter('which')
    >>> c.update('witch')           # add elements from another iterable
    >>> d = Counter('watch')
    >>> c.update(d)                 # add elements from another counter
    >>> c['h']                      # four 'h' in which, witch, and watch
    4

    '''
    # The regular dict.update() operation makes no sense here because the
    # replace behavior results in some of the original untouched counts
    # being mixed-in with all of the other counts for a mishmash that
    # doesn't have a straight-forward interpretation in most counting
    # contexts.  Instead, we implement straight-addition.  Both the inputs
    # and outputs are allowed to contain zero and negative counts.

    if not args:
        raise TypeError("descriptor 'update' of 'Counter' object "
                        "needs an argument")
    self = args[0]
    args = args[1:]
    if len(args) > 1:
        raise TypeError('expected at most 1 arguments, got %d' % len(args))
    iterable = args[0] if args else None
    if iterable is not None:
        if isinstance(iterable, Mapping):
            if self:
                self_get = self.get
                for elem, count in iterable.iteritems():
                    self[elem] = self_get(elem, 0) + count
            else:
                super(Counter, self).update(iterable)  # fast path when counter is empty
        else:
            self_get = self.get
            for elem in iterable:
                self[elem] = self_get(elem, 0) + 1
    if kwds:
        self.update(kwds)
[ "def", "update", "(", "*", "args", ",", "*", "*", "kwds", ")", ":", "# The regular dict.update() operation makes no sense here because the", "# replace behavior results in the some of original untouched counts", "# being mixed-in with all of the other counts for a mismash that", "# doesn't have a straight-forward interpretation in most counting", "# contexts. Instead, we implement straight-addition. Both the inputs", "# and outputs are allowed to contain zero and negative counts.", "if", "not", "args", ":", "raise", "TypeError", "(", "\"descriptor 'update' of 'Counter' object \"", "\"needs an argument\"", ")", "self", "=", "args", "[", "0", "]", "args", "=", "args", "[", "1", ":", "]", "if", "len", "(", "args", ")", ">", "1", ":", "raise", "TypeError", "(", "'expected at most 1 arguments, got %d'", "%", "len", "(", "args", ")", ")", "iterable", "=", "args", "[", "0", "]", "if", "args", "else", "None", "if", "iterable", "is", "not", "None", ":", "if", "isinstance", "(", "iterable", ",", "Mapping", ")", ":", "if", "self", ":", "self_get", "=", "self", ".", "get", "for", "elem", ",", "count", "in", "iterable", ".", "iteritems", "(", ")", ":", "self", "[", "elem", "]", "=", "self_get", "(", "elem", ",", "0", ")", "+", "count", "else", ":", "super", "(", "Counter", ",", "self", ")", ".", "update", "(", "iterable", ")", "# fast path when counter is empty", "else", ":", "self_get", "=", "self", ".", "get", "for", "elem", "in", "iterable", ":", "self", "[", "elem", "]", "=", "self_get", "(", "elem", ",", "0", ")", "+", "1", "if", "kwds", ":", "self", ".", "update", "(", "kwds", ")" ]
Like dict.update() but add counts instead of replacing them. Source can be an iterable, a dictionary, or another Counter instance. >>> c = Counter('which') >>> c.update('witch') # add elements from another iterable >>> d = Counter('watch') >>> c.update(d) # add elements from another counter >>> c['h'] # four 'h' in which, witch, and watch 4
[ "Like", "dict", ".", "update", "()", "but", "add", "counts", "instead", "of", "replacing", "them", "." ]
python
valid
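The doctest above runs unchanged against the stdlib Counter (the sample itself is grumpy's vendored Python 2 copy, hence `iteritems`):

from collections import Counter

c = Counter('which')
c.update('witch')            # add counts from an iterable
c.update(Counter('watch'))   # add counts from another Counter
print(c['h'])                # 4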
openmicroscopy/yaclifw
yaclifw/version.py
https://github.com/openmicroscopy/yaclifw/blob/a01179fefb2c2c4260c75e6d1dc6e19de9979d64/yaclifw/version.py#L45-L57
def _lookup_version(module_file):
    """
    For the given module file (usually found by::

        from package import __file__ as module_file

    in the caller), return the location of the current
    RELEASE-VERSION file and the file itself.
    """
    version_dir = path.abspath(path.dirname(module_file))
    version_file = path.join(version_dir, "RELEASE-VERSION")
    return version_dir, version_file
[ "def", "_lookup_version", "(", "module_file", ")", ":", "version_dir", "=", "path", ".", "abspath", "(", "path", ".", "dirname", "(", "module_file", ")", ")", "version_file", "=", "path", ".", "join", "(", "version_dir", ",", "\"RELEASE-VERSION\"", ")", "return", "version_dir", ",", "version_file" ]
For the given module file (usually found by: from package import __file__ as module_file in the caller, return the location of the current RELEASE-VERSION file and the file itself.
[ "For", "the", "given", "module", "file", "(", "usually", "found", "by", ":" ]
python
test
fitnr/convertdate
convertdate/french_republican.py
https://github.com/fitnr/convertdate/blob/e920f168a87f99183b0aa7290d6c3af222582d43/convertdate/french_republican.py#L98-L115
def to_jd(year, month, day, method=None):
    '''Obtain Julian day from a given French Revolutionary calendar date.'''
    method = method or 'equinox'

    if day < 1 or day > 30:
        raise ValueError("Invalid day for this calendar")

    if month > 13:
        raise ValueError("Invalid month for this calendar")

    if month == 13 and day > 5 + leap(year, method=method):
        raise ValueError("Invalid day for this month in this calendar")

    if method == 'equinox':
        return _to_jd_equinox(year, month, day)
    else:
        return _to_jd_schematic(year, month, day, method)
[ "def", "to_jd", "(", "year", ",", "month", ",", "day", ",", "method", "=", "None", ")", ":", "method", "=", "method", "or", "'equinox'", "if", "day", "<", "1", "or", "day", ">", "30", ":", "raise", "ValueError", "(", "\"Invalid day for this calendar\"", ")", "if", "month", ">", "13", ":", "raise", "ValueError", "(", "\"Invalid month for this calendar\"", ")", "if", "month", "==", "13", "and", "day", ">", "5", "+", "leap", "(", "year", ",", "method", "=", "method", ")", ":", "raise", "ValueError", "(", "\"Invalid day for this month in this calendar\"", ")", "if", "method", "==", "'equinox'", ":", "return", "_to_jd_equinox", "(", "year", ",", "month", ",", "day", ")", "else", ":", "return", "_to_jd_schematic", "(", "year", ",", "month", ",", "day", ",", "method", ")" ]
Obtain Julian day from a given French Revolutionary calendar date.
[ "Obtain", "Julian", "day", "from", "a", "given", "French", "Revolutionary", "calendar", "date", "." ]
python
train
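A round-trip usage sketch with convertdate's public modules (the exact output is left unasserted, since the default equinox method depends on astronomical calculations):

from convertdate import french_republican, gregorian

jd = french_republican.to_jd(227, 1, 1)   # 1 Vendémiaire, an CCXXVII
print(gregorian.from_jd(jd))              # a (year, month, day) tuple in late September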
olitheolix/qtmacs
qtmacs/extensions/qtmacstextedit_widget.py
https://github.com/olitheolix/qtmacs/blob/36253b082b82590f183fe154b053eb3a1e741be2/qtmacs/extensions/qtmacstextedit_widget.py#L275-L298
def reverseCommit(self):
    """
    Remove the inserted character(s).
    """
    # Move the cursor to the right of the text to delete.
    tc = self.qteWidget.textCursor()

    # Delete as many characters as necessary. For an image that would
    # be exactly 1 even though the HTML code to embed that image is usually
    # longer. For text, it would be as many characters as the pasted text
    # was long.
    if self.isImage:
        dataLen = 1
    else:
        dataLen = len(self.data)

    tc.setPosition(self.selStart + dataLen, QtGui.QTextCursor.MoveAnchor)
    for ii in range(dataLen):
        tc.deletePreviousChar()

    # Add the previously selected text (this may be none).
    tc.insertHtml(self.selText)
    self.qteWidget.setTextCursor(tc)
[ "def", "reverseCommit", "(", "self", ")", ":", "# Move the cursor to the right of the text to delete.", "tc", "=", "self", ".", "qteWidget", ".", "textCursor", "(", ")", "# Delete as many characters as necessary. For an image that would", "# be exactly 1 even though the HTML code to embed that image is usually", "# longer. For text, it would be as many characters as the pasted text", "# was long.", "if", "self", ".", "isImage", ":", "dataLen", "=", "1", "else", ":", "dataLen", "=", "len", "(", "self", ".", "data", ")", "tc", ".", "setPosition", "(", "self", ".", "selStart", "+", "dataLen", ",", "QtGui", ".", "QTextCursor", ".", "MoveAnchor", ")", "for", "ii", "in", "range", "(", "dataLen", ")", ":", "tc", ".", "deletePreviousChar", "(", ")", "# Add the previously selected text (this may be none).", "tc", ".", "insertHtml", "(", "self", ".", "selText", ")", "self", ".", "qteWidget", ".", "setTextCursor", "(", "tc", ")" ]
Remove the inserted character(s).
[ "Remove", "the", "inserted", "character", "(", "s", ")", "." ]
python
train
zero-os/0-core
client/py-client/zeroos/core0/client/client.py
https://github.com/zero-os/0-core/blob/69f6ce845ab8b8ad805a79a415227e7ac566c218/client/py-client/zeroos/core0/client/client.py#L1967-L1976
def subvol_create(self, path):
    """
    Create a btrfs subvolume in the specified path

    :param path: path to create
    """
    args = {
        'path': path
    }
    self._subvol_chk.check(args)

    self._client.sync('btrfs.subvol_create', args)
[ "def", "subvol_create", "(", "self", ",", "path", ")", ":", "args", "=", "{", "'path'", ":", "path", "}", "self", ".", "_subvol_chk", ".", "check", "(", "args", ")", "self", ".", "_client", ".", "sync", "(", "'btrfs.subvol_create'", ",", "args", ")" ]
Create a btrfs subvolume in the specified path :param path: path to create
[ "Create", "a", "btrfs", "subvolume", "in", "the", "specified", "path", ":", "param", "path", ":", "path", "to", "create" ]
python
train
tcalmant/ipopo
pelix/threadpool.py
https://github.com/tcalmant/ipopo/blob/2f9ae0c44cd9c34ef1a9d50837b3254e75678eb1/pelix/threadpool.py#L105-L113
def raise_exception(self, exception):
    """
    Raises an exception in wait()

    :param exception: An Exception object
    """
    self.__data = None
    self.__exception = exception
    self.__event.set()
[ "def", "raise_exception", "(", "self", ",", "exception", ")", ":", "self", ".", "__data", "=", "None", "self", ".", "__exception", "=", "exception", "self", ".", "__event", ".", "set", "(", ")" ]
Raises an exception in wait() :param exception: An Exception object
[ "Raises", "an", "exception", "in", "wait", "()" ]
python
train
postlund/pyatv
pyatv/airplay/srp.py
https://github.com/postlund/pyatv/blob/655dfcda4e2f9d1c501540e18da4f480d8bf0e70/pyatv/airplay/srp.py#L63-L70
def get_common_session_key(self, premaster_secret):
    """K = H(S).

    Special implementation for Apple TV.
    """
    k_1 = self.hash(premaster_secret, b'\x00\x00\x00\x00', as_bytes=True)
    k_2 = self.hash(premaster_secret, b'\x00\x00\x00\x01', as_bytes=True)
    return k_1 + k_2
[ "def", "get_common_session_key", "(", "self", ",", "premaster_secret", ")", ":", "k_1", "=", "self", ".", "hash", "(", "premaster_secret", ",", "b'\\x00\\x00\\x00\\x00'", ",", "as_bytes", "=", "True", ")", "k_2", "=", "self", ".", "hash", "(", "premaster_secret", ",", "b'\\x00\\x00\\x00\\x01'", ",", "as_bytes", "=", "True", ")", "return", "k_1", "+", "k_2" ]
K = H(S). Special implementation for Apple TV.
[ "K", "=", "H", "(", "S", ")", "." ]
python
train
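A standalone sketch of the same construction, K = H(S || 0x00000000) || H(S || 0x00000001), using SHA-1 purely as a stand-in (the hash pyatv actually applies is whatever its SRP context is configured with):

import hashlib

def common_session_key(premaster_secret: bytes) -> bytes:
    k_1 = hashlib.sha1(premaster_secret + b'\x00\x00\x00\x00').digest()
    k_2 = hashlib.sha1(premaster_secret + b'\x00\x00\x00\x01').digest()
    return k_1 + k_2

print(len(common_session_key(b'S')))  # 40: two concatenated 20-byte digests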
twidi/py-dataql
dataql/solvers/resources.py
https://github.com/twidi/py-dataql/blob/5841a3fd559829193ed709c255166085bdde1c52/dataql/solvers/resources.py#L205-L233
def can_solve(cls, resource):
    """Tells if the solver is able to resolve the given resource.

    Arguments
    ---------
    resource : subclass of ``dataql.resources.Resource``
        The resource to check if it is solvable by the current solver class

    Returns
    -------
    boolean
        ``True`` if the current solver class can solve the given resource,
        ``False`` otherwise.

    Example
    -------

    >>> AttributeSolver.solvable_resources
    (<class 'dataql.resources.Field'>,)
    >>> AttributeSolver.can_solve(Field('foo'))
    True
    >>> AttributeSolver.can_solve(Object('bar'))
    False

    """
    for solvable_resource in cls.solvable_resources:
        if isinstance(resource, solvable_resource):
            return True
    return False
[ "def", "can_solve", "(", "cls", ",", "resource", ")", ":", "for", "solvable_resource", "in", "cls", ".", "solvable_resources", ":", "if", "isinstance", "(", "resource", ",", "solvable_resource", ")", ":", "return", "True", "return", "False" ]
Tells if the solver is able to resolve the given resource. Arguments --------- resource : subclass of ``dataql.resources.Resource`` The resource to check if it is solvable by the current solver class Returns ------- boolean ``True`` if the current solver class can solve the given resource, ``False`` otherwise. Example ------- >>> AttributeSolver.solvable_resources (<class 'dataql.resources.Field'>,) >>> AttributeSolver.can_solve(Field('foo')) True >>> AttributeSolver.can_solve(Object('bar')) False
[ "Tells", "if", "the", "solver", "is", "able", "to", "resolve", "the", "given", "resource", "." ]
python
train
Grokzen/pykwalify
pykwalify/core.py
https://github.com/Grokzen/pykwalify/blob/02b7e21eafb97926f17b7c33e2ee7b3ea67c3ef7/pykwalify/core.py#L919-L966
def _validate_range(self, max_, min_, max_ex, min_ex, value, path, prefix):
    """
    Validate that value is within range values.
    """
    if not isinstance(value, int) and not isinstance(value, float):
        raise CoreError("Value must be an integer or a float")

    log.debug(
        u"Validate range : %s : %s : %s : %s : %s : %s",
        max_, min_, max_ex, min_ex, value, path,
    )

    if max_ is not None and max_ < value:
        self.errors.append(SchemaError.SchemaErrorEntry(
            msg=u"Type '{prefix}' has size of '{value}', greater than max limit '{max_}'. Path: '{path}'",
            path=path,
            value=nativestr(value) if tt['str'](value) else value,
            prefix=prefix,
            max_=max_))

    if min_ is not None and min_ > value:
        self.errors.append(SchemaError.SchemaErrorEntry(
            msg=u"Type '{prefix}' has size of '{value}', less than min limit '{min_}'. Path: '{path}'",
            path=path,
            value=nativestr(value) if tt['str'](value) else value,
            prefix=prefix,
            min_=min_))

    if max_ex is not None and max_ex <= value:
        self.errors.append(SchemaError.SchemaErrorEntry(
            msg=u"Type '{prefix}' has size of '{value}', greater than or equals to max limit(exclusive) '{max_ex}'. Path: '{path}'",
            path=path,
            value=nativestr(value) if tt['str'](value) else value,
            prefix=prefix,
            max_ex=max_ex))

    if min_ex is not None and min_ex >= value:
        self.errors.append(SchemaError.SchemaErrorEntry(
            msg=u"Type '{prefix}' has size of '{value}', less than or equals to min limit(exclusive) '{min_ex}'. Path: '{path}'",
            path=path,
            value=nativestr(value) if tt['str'](value) else value,
            prefix=prefix,
            min_ex=min_ex))
[ "def", "_validate_range", "(", "self", ",", "max_", ",", "min_", ",", "max_ex", ",", "min_ex", ",", "value", ",", "path", ",", "prefix", ")", ":", "if", "not", "isinstance", "(", "value", ",", "int", ")", "and", "not", "isinstance", "(", "value", ",", "float", ")", ":", "raise", "CoreError", "(", "\"Value must be a integer type\"", ")", "log", ".", "debug", "(", "u\"Validate range : %s : %s : %s : %s : %s : %s\"", ",", "max_", ",", "min_", ",", "max_ex", ",", "min_ex", ",", "value", ",", "path", ",", ")", "if", "max_", "is", "not", "None", "and", "max_", "<", "value", ":", "self", ".", "errors", ".", "append", "(", "SchemaError", ".", "SchemaErrorEntry", "(", "msg", "=", "u\"Type '{prefix}' has size of '{value}', greater than max limit '{max_}'. Path: '{path}'\"", ",", "path", "=", "path", ",", "value", "=", "nativestr", "(", "value", ")", "if", "tt", "[", "'str'", "]", "(", "value", ")", "else", "value", ",", "prefix", "=", "prefix", ",", "max_", "=", "max_", ")", ")", "if", "min_", "is", "not", "None", "and", "min_", ">", "value", ":", "self", ".", "errors", ".", "append", "(", "SchemaError", ".", "SchemaErrorEntry", "(", "msg", "=", "u\"Type '{prefix}' has size of '{value}', less than min limit '{min_}'. Path: '{path}'\"", ",", "path", "=", "path", ",", "value", "=", "nativestr", "(", "value", ")", "if", "tt", "[", "'str'", "]", "(", "value", ")", "else", "value", ",", "prefix", "=", "prefix", ",", "min_", "=", "min_", ")", ")", "if", "max_ex", "is", "not", "None", "and", "max_ex", "<=", "value", ":", "self", ".", "errors", ".", "append", "(", "SchemaError", ".", "SchemaErrorEntry", "(", "msg", "=", "u\"Type '{prefix}' has size of '{value}', greater than or equals to max limit(exclusive) '{max_ex}'. Path: '{path}'\"", ",", "path", "=", "path", ",", "value", "=", "nativestr", "(", "value", ")", "if", "tt", "[", "'str'", "]", "(", "value", ")", "else", "value", ",", "prefix", "=", "prefix", ",", "max_ex", "=", "max_ex", ")", ")", "if", "min_ex", "is", "not", "None", "and", "min_ex", ">=", "value", ":", "self", ".", "errors", ".", "append", "(", "SchemaError", ".", "SchemaErrorEntry", "(", "msg", "=", "u\"Type '{prefix}' has size of '{value}', less than or equals to min limit(exclusive) '{min_ex}'. Path: '{path}'\"", ",", "path", "=", "path", ",", "value", "=", "nativestr", "(", "value", ")", "if", "tt", "[", "'str'", "]", "(", "value", ")", "else", "value", ",", "prefix", "=", "prefix", ",", "min_ex", "=", "min_ex", ")", ")" ]
Validate that value is within range values.
[ "Validate", "that", "value", "is", "within", "range", "values", "." ]
python
train
earlzo/hfut
hfut/parser.py
https://github.com/earlzo/hfut/blob/09270a9647fba79f26fd1a8a3c53c0678b5257a1/hfut/parser.py#L73-L87
def flatten_list(multiply_list):
    """
    Flatten a nested list::

        >>> a = [1, 2, [3, 4], [[5, 6], [7, 8]]]
        >>> flatten_list(a)
        [1, 2, 3, 4, 5, 6, 7, 8]

    :param multiply_list: an arbitrarily nested list
    :return: a flat, single-level list
    """
    if isinstance(multiply_list, list):
        return [rv for l in multiply_list for rv in flatten_list(l)]
    else:
        return [multiply_list]
[ "def", "flatten_list", "(", "multiply_list", ")", ":", "if", "isinstance", "(", "multiply_list", ",", "list", ")", ":", "return", "[", "rv", "for", "l", "in", "multiply_list", "for", "rv", "in", "flatten_list", "(", "l", ")", "]", "else", ":", "return", "[", "multiply_list", "]" ]
碾平 list:: >>> a = [1, 2, [3, 4], [[5, 6], [7, 8]]] >>> flatten_list(a) [1, 2, 3, 4, 5, 6, 7, 8] :param multiply_list: 混淆的多层列表 :return: 单层的 list
[ "碾平", "list", "::" ]
python
train
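For very deep nesting the recursive version above can hit Python's recursion limit; an equivalent iterative sketch (not part of hfut):

def flatten_list_iter(nested):
    out, stack = [], [nested]
    while stack:
        item = stack.pop()
        if isinstance(item, list):
            stack.extend(reversed(item))  # reversed keeps left-to-right order
        else:
            out.append(item)
    return out

assert flatten_list_iter([1, 2, [3, 4], [[5, 6], [7, 8]]]) == [1, 2, 3, 4, 5, 6, 7, 8]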
bitesofcode/projexui
projexui/widgets/xtreewidget/xtreewidgetitem.py
https://github.com/bitesofcode/projexui/blob/f18a73bec84df90b034ca69b9deea118dbedfc4d/projexui/widgets/xtreewidget/xtreewidgetitem.py#L502-L511
def setSizeHint(self, column, hint):
    """
    Sets the size hint for this item to the given size. This will also
    update the fixed height property if the height of the given hint
    is larger than the current fixed height.

    :param hint | <QtCore.QSize>
    """
    self._fixedHeight = max(hint.height(), self._fixedHeight)
    super(XTreeWidgetItem, self).setSizeHint(column, hint)
[ "def", "setSizeHint", "(", "self", ",", "column", ",", "hint", ")", ":", "self", ".", "_fixedHeight", "=", "max", "(", "hint", ".", "height", "(", ")", ",", "self", ".", "_fixedHeight", ")", "super", "(", "XTreeWidgetItem", ",", "self", ")", ".", "setSizeHint", "(", "column", ",", "hint", ")" ]
Sets the size hint for this item to the inputed size. This will also updated the fixed height property if the hieght of the inputed hint is larger than the current fixed height. :param hint | <QtCore.QSize>
[ "Sets", "the", "size", "hint", "for", "this", "item", "to", "the", "inputed", "size", ".", "This", "will", "also", "updated", "the", "fixed", "height", "property", "if", "the", "hieght", "of", "the", "inputed", "hint", "is", "larger", "than", "the", "current", "fixed", "height", ".", ":", "param", "hint", "|", "<QtCore", ".", "QSize", ">" ]
python
train
ddorn/GUI
GUI/vracabulous.py
https://github.com/ddorn/GUI/blob/e1fcb5286d24e0995f280d5180222e51895c368c/GUI/vracabulous.py#L107-L112
def on_unselect(self, item, action):
    """Register an action to run when an item is unfocused."""
    if not isinstance(item, int):
        item = self.items.index(item)
    self._on_unselect[item] = action
[ "def", "on_unselect", "(", "self", ",", "item", ",", "action", ")", ":", "if", "not", "isinstance", "(", "item", ",", "int", ")", ":", "item", "=", "self", ".", "items", ".", "index", "(", "item", ")", "self", ".", "_on_unselect", "[", "item", "]", "=", "action" ]
Add an action to make when an object is unfocused.
[ "Add", "an", "action", "to", "make", "when", "an", "object", "is", "unfocused", "." ]
python
train
quantumlib/Cirq
cirq/sim/density_matrix_simulator.py
https://github.com/quantumlib/Cirq/blob/0827da80dd7880e5b923eb69407e980ed9bc0bd2/cirq/sim/density_matrix_simulator.py#L322-L388
def compute_displays_sweep(
        self,
        program: Union[circuits.Circuit, schedules.Schedule],
        params: Optional[study.Sweepable] = None,
        qubit_order: ops.QubitOrderOrList = ops.QubitOrder.DEFAULT,
        initial_state: Union[int, np.ndarray] = 0,
) -> List[study.ComputeDisplaysResult]:
    """Computes displays in the supplied Circuit or Schedule.

    In contrast to `compute_displays`, this allows for sweeping
    over different parameter values.

    Args:
        program: The circuit or schedule to simulate.
        params: Parameters to run with the program.
        qubit_order: Determines the canonical ordering of the qubits used to
            define the order of amplitudes in the wave function.
        initial_state: If an int, the state is set to the computational
            basis state corresponding to this state. Otherwise if it is a
            np.ndarray it is the full initial state, either a pure state
            or the full density matrix. If it is the pure state it must be
            the correct size, be normalized (an L2 norm of 1), and be
            safely castable to an appropriate dtype for the simulator.
            If it is a mixed state it must be correctly sized and
            positive semidefinite with trace one.

    Returns:
        List of ComputeDisplaysResults for this run, one for each
        possible parameter resolver.
    """
    circuit = (program if isinstance(program, circuits.Circuit)
               else program.to_circuit())
    param_resolvers = study.to_resolvers(params or study.ParamResolver({}))
    qubit_order = ops.QubitOrder.as_qubit_order(qubit_order)
    qubits = qubit_order.order_for(circuit.all_qubits())

    compute_displays_results = []  # type: List[study.ComputeDisplaysResult]
    for param_resolver in param_resolvers:
        display_values = {}  # type: Dict[Hashable, Any]

        # Compute the displays in the first Moment
        moment = circuit[0]
        matrix = density_matrix_utils.to_valid_density_matrix(
            initial_state, num_qubits=len(qubits), dtype=self._dtype)
        qubit_map = {q: i for i, q in enumerate(qubits)}
        _enter_moment_display_values_into_dictionary(
            display_values, moment, matrix, qubit_order, qubit_map)

        # Compute the displays in the rest of the Moments
        all_step_results = self.simulate_moment_steps(
            circuit, param_resolver, qubit_order, initial_state)
        for step_result, moment in zip(all_step_results, circuit[1:]):
            _enter_moment_display_values_into_dictionary(
                display_values, moment, step_result.density_matrix(),
                qubit_order, step_result._qubit_map)

        compute_displays_results.append(study.ComputeDisplaysResult(
            params=param_resolver, display_values=display_values))

    return compute_displays_results
[ "def", "compute_displays_sweep", "(", "self", ",", "program", ":", "Union", "[", "circuits", ".", "Circuit", ",", "schedules", ".", "Schedule", "]", ",", "params", ":", "Optional", "[", "study", ".", "Sweepable", "]", "=", "None", ",", "qubit_order", ":", "ops", ".", "QubitOrderOrList", "=", "ops", ".", "QubitOrder", ".", "DEFAULT", ",", "initial_state", ":", "Union", "[", "int", ",", "np", ".", "ndarray", "]", "=", "0", ",", ")", "->", "List", "[", "study", ".", "ComputeDisplaysResult", "]", ":", "circuit", "=", "(", "program", "if", "isinstance", "(", "program", ",", "circuits", ".", "Circuit", ")", "else", "program", ".", "to_circuit", "(", ")", ")", "param_resolvers", "=", "study", ".", "to_resolvers", "(", "params", "or", "study", ".", "ParamResolver", "(", "{", "}", ")", ")", "qubit_order", "=", "ops", ".", "QubitOrder", ".", "as_qubit_order", "(", "qubit_order", ")", "qubits", "=", "qubit_order", ".", "order_for", "(", "circuit", ".", "all_qubits", "(", ")", ")", "compute_displays_results", "=", "[", "]", "# type: List[study.ComputeDisplaysResult]", "for", "param_resolver", "in", "param_resolvers", ":", "display_values", "=", "{", "}", "# type: Dict[Hashable, Any]", "# Compute the displays in the first Moment", "moment", "=", "circuit", "[", "0", "]", "matrix", "=", "density_matrix_utils", ".", "to_valid_density_matrix", "(", "initial_state", ",", "num_qubits", "=", "len", "(", "qubits", ")", ",", "dtype", "=", "self", ".", "_dtype", ")", "qubit_map", "=", "{", "q", ":", "i", "for", "i", ",", "q", "in", "enumerate", "(", "qubits", ")", "}", "_enter_moment_display_values_into_dictionary", "(", "display_values", ",", "moment", ",", "matrix", ",", "qubit_order", ",", "qubit_map", ")", "# Compute the displays in the rest of the Moments", "all_step_results", "=", "self", ".", "simulate_moment_steps", "(", "circuit", ",", "param_resolver", ",", "qubit_order", ",", "initial_state", ")", "for", "step_result", ",", "moment", "in", "zip", "(", "all_step_results", ",", "circuit", "[", "1", ":", "]", ")", ":", "_enter_moment_display_values_into_dictionary", "(", "display_values", ",", "moment", ",", "step_result", ".", "density_matrix", "(", ")", ",", "qubit_order", ",", "step_result", ".", "_qubit_map", ")", "compute_displays_results", ".", "append", "(", "study", ".", "ComputeDisplaysResult", "(", "params", "=", "param_resolver", ",", "display_values", "=", "display_values", ")", ")", "return", "compute_displays_results" ]
Computes displays in the supplied Circuit or Schedule. In contrast to `compute_displays`, this allows for sweeping over different parameter values. Args: program: The circuit or schedule to simulate. params: Parameters to run with the program. qubit_order: Determines the canonical ordering of the qubits used to define the order of amplitudes in the wave function. initial_state: If an int, the state is set to the computational basis state corresponding to this state. Otherwise if it is a np.ndarray it is the full initial state, either a pure state or the full density matrix. If it is the pure state it must be the correct size, be normalized (an L2 norm of 1), and be safely castable to an appropriate dtype for the simulator. If it is a mixed state it must be correctly sized and positive semidefinite with trace one. Returns: List of ComputeDisplaysResults for this run, one for each possible parameter resolver.
[ "Computes", "displays", "in", "the", "supplied", "Circuit", "or", "Schedule", "." ]
python
train
python-diamond/Diamond
src/diamond/handler/graphitepickle.py
https://github.com/python-diamond/Diamond/blob/0f3eb04327d6d3ed5e53a9967d6c9d2c09714a47/src/diamond/handler/graphitepickle.py#L46-L55
def get_default_config_help(self):
    """
    Returns the help text for the configuration options for this handler
    """
    config = super(GraphitePickleHandler, self).get_default_config_help()
    config.update({})
    return config
[ "def", "get_default_config_help", "(", "self", ")", ":", "config", "=", "super", "(", "GraphitePickleHandler", ",", "self", ")", ".", "get_default_config_help", "(", ")", "config", ".", "update", "(", "{", "}", ")", "return", "config" ]
Returns the help text for the configuration options for this handler
[ "Returns", "the", "help", "text", "for", "the", "configuration", "options", "for", "this", "handler" ]
python
train
nicolargo/glances
glances/plugins/glances_plugin.py
https://github.com/nicolargo/glances/blob/5bd4d587a736e0d2b03170b56926841d2a3eb7ee/glances/plugins/glances_plugin.py#L201-L215
def get_raw_history(self, item=None, nb=0):
    """Return the history (RAW format).

    - the stats history (dict of list) if item is None
    - the stats history for the given item (list) instead
    - None if item did not exist in the history
    """
    s = self.stats_history.get(nb=nb)
    if item is None:
        return s
    else:
        if item in s:
            return s[item]
        else:
            return None
[ "def", "get_raw_history", "(", "self", ",", "item", "=", "None", ",", "nb", "=", "0", ")", ":", "s", "=", "self", ".", "stats_history", ".", "get", "(", "nb", "=", "nb", ")", "if", "item", "is", "None", ":", "return", "s", "else", ":", "if", "item", "in", "s", ":", "return", "s", "[", "item", "]", "else", ":", "return", "None" ]
Return the history (RAW format). - the stats history (dict of list) if item is None - the stats history for the given item (list) instead - None if item did not exist in the history
[ "Return", "the", "history", "(", "RAW", "format", ")", "." ]
python
train
Erotemic/utool
utool/experimental/euler_tour_tree_avl.py
https://github.com/Erotemic/utool/blob/3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a/utool/experimental/euler_tour_tree_avl.py#L41-L73
def euler_tour(G, node=None, seen=None, visited=None):
    """
    definition from
    http://citeseerx.ist.psu.edu/viewdoc/download?doi=10.1.1.192.8615&rep=rep1&type=pdf

    Example:
        >>> # DISABLE_DOCTEST
        >>> from utool.experimental.euler_tour_tree_avl import *  # NOQA
        >>> edges = [
        >>>     ('R', 'A'), ('R', 'B'),
        >>>     ('B', 'C'), ('C', 'D'), ('C', 'E'),
        >>>     ('B', 'F'), ('B', 'G'),
        >>> ]
        >>> G = nx.Graph(edges)
        >>> node = list(G.nodes())[0]
        >>> et1 = euler_tour(G, node)
        >>> et2 = euler_tour_dfs(G, node)
    """
    if node is None:
        node = next(G.nodes())
    if visited is None:
        assert nx.is_tree(G)
        visited = []
    if seen is None:
        seen = set([])
    visited.append(node)
    for c in G.neighbors(node):
        if c in seen:
            continue
        seen.add(c)
        euler_tour(G, c, seen, visited)
        visited.append(node)
    return visited
[ "def", "euler_tour", "(", "G", ",", "node", "=", "None", ",", "seen", "=", "None", ",", "visited", "=", "None", ")", ":", "if", "node", "is", "None", ":", "node", "=", "next", "(", "G", ".", "nodes", "(", ")", ")", "if", "visited", "is", "None", ":", "assert", "nx", ".", "is_tree", "(", "G", ")", "visited", "=", "[", "]", "if", "seen", "is", "None", ":", "seen", "=", "set", "(", "[", "]", ")", "visited", ".", "append", "(", "node", ")", "for", "c", "in", "G", ".", "neighbors", "(", "node", ")", ":", "if", "c", "in", "seen", ":", "continue", "seen", ".", "add", "(", "c", ")", "euler_tour", "(", "G", ",", "c", ",", "seen", ",", "visited", ")", "visited", ".", "append", "(", "node", ")", "return", "visited" ]
definition from http://citeseerx.ist.psu.edu/viewdoc/download?doi=10.1.1.192.8615&rep=rep1&type=pdf Example: >>> # DISABLE_DOCTEST >>> from utool.experimental.euler_tour_tree_avl import * # NOQA >>> edges = [ >>> ('R', 'A'), ('R', 'B'), >>> ('B', 'C'), ('C', 'D'), ('C', 'E'), >>> ('B', 'F'), ('B', 'G'), >>> ] >>> G = nx.Graph(edges) >>> node = list(G.nodes())[0] >>> et1 = euler_tour(G, node) >>> et2 = euler_tour_dfs(G, node)
[ "definition", "from", "http", ":", "//", "citeseerx", ".", "ist", ".", "psu", ".", "edu", "/", "viewdoc", "/", "download?doi", "=", "10", ".", "1", ".", "1", ".", "192", ".", "8615&rep", "=", "rep1&type", "=", "pdf" ]
python
train
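Using the tree from the docstring, a minimal sketch of what the function produces; the exact visit order depends on networkx's adjacency iteration order, but the tour length for a tree with n nodes is always 2n - 1 (one entry per node plus one re-entry after each of the n - 1 edges).

import networkx as nx

edges = [('R', 'A'), ('R', 'B'),
         ('B', 'C'), ('C', 'D'), ('C', 'E'),
         ('B', 'F'), ('B', 'G')]
G = nx.Graph(edges)
tour = euler_tour(G, 'R')
# One possible result (neighbor order may vary):
# ['R', 'A', 'R', 'B', 'C', 'D', 'C', 'E', 'C', 'B', 'F', 'B', 'G', 'B', 'R']
assert len(tour) == 2 * G.number_of_nodes() - 1
assert tour[0] == tour[-1] == 'R'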
proycon/pynlpl
pynlpl/formats/folia.py
https://github.com/proycon/pynlpl/blob/7707f69a91caaa6cde037f0d0379f1d42500a68b/pynlpl/formats/folia.py#L4465-L4472
def copychildren(self, newdoc=None, idsuffix=""): """Generator creating a deep copy of the children of this element. If idsuffix is a string, it is appended to the IDs of the copies; if set to True, a random idsuffix will be generated including a random 32-bit hash""" if idsuffix is True: idsuffix = ".copy." + "%08x" % random.getrandbits(32) #random 32-bit hash for each copy, same one will be reused for all children for c in self: if isinstance(c, Word): yield WordReference(newdoc, id=c.id) else: yield c.copy(newdoc,idsuffix)
[ "def", "copychildren", "(", "self", ",", "newdoc", "=", "None", ",", "idsuffix", "=", "\"\"", ")", ":", "if", "idsuffix", "is", "True", ":", "idsuffix", "=", "\".copy.\"", "+", "\"%08x\"", "%", "random", ".", "getrandbits", "(", "32", ")", "#random 32-bit hash for each copy, same one will be reused for all children", "for", "c", "in", "self", ":", "if", "isinstance", "(", "c", ",", "Word", ")", ":", "yield", "WordReference", "(", "newdoc", ",", "id", "=", "c", ".", "id", ")", "else", ":", "yield", "c", ".", "copy", "(", "newdoc", ",", "idsuffix", ")" ]
Generator creating a deep copy of the children of this element. If idsuffix is a string, it is appended to the IDs of the copies; if set to True, a random idsuffix will be generated including a random 32-bit hash
[ "Generator", "creating", "a", "deep", "copy", "of", "the", "children", "of", "this", "element", ".", "If", "idsuffix", "is", "a", "string", "if", "set", "to", "True", "a", "random", "idsuffix", "will", "be", "generated", "including", "a", "random", "32", "-", "bit", "hash" ]
python
train
javipalanca/spade
spade/behaviour.py
https://github.com/javipalanca/spade/blob/59942bd1a1edae4c807d06cabb178d5630cbf61b/spade/behaviour.py#L118-L131
async def _start(self): """ Start coroutine. Runs the on_start coroutine and then the _step coroutine, where the body of the behaviour is called. """ self.agent._alive.wait() try: await self.on_start() except Exception as e: logger.error("Exception running on_start in behaviour {}: {}".format(self, e)) self.kill(exit_code=e) await self._step() self._is_done.clear()
[ "async", "def", "_start", "(", "self", ")", ":", "self", ".", "agent", ".", "_alive", ".", "wait", "(", ")", "try", ":", "await", "self", ".", "on_start", "(", ")", "except", "Exception", "as", "e", ":", "logger", ".", "error", "(", "\"Exception running on_start in behaviour {}: {}\"", ".", "format", "(", "self", ",", "e", ")", ")", "self", ".", "kill", "(", "exit_code", "=", "e", ")", "await", "self", ".", "_step", "(", ")", "self", ".", "_is_done", ".", "clear", "(", ")" ]
Start coroutine. Runs the on_start coroutine and then the _step coroutine, where the body of the behaviour is called.
[ "Start", "coroutine", ".", "runs", "on_start", "coroutine", "and", "then", "runs", "the", "_step", "coroutine", "where", "the", "body", "of", "the", "behaviour", "is", "called", "." ]
python
train
jacobtomlinson/datapoint-python
datapoint/Forecast.py
https://github.com/jacobtomlinson/datapoint-python/blob/1d3f596f21975f42c1484f5a9c3ff057de0b47ae/datapoint/Forecast.py#L82-L121
def future(self,in_days=None,in_hours=None,in_minutes=None,in_seconds=None): """ Function to return a future timestep """ future = None # Initialize variables to 0 dd, hh, mm, ss = [0 for i in range(4)] if in_days is not None: dd = dd + in_days if in_hours is not None: hh = hh + in_hours if in_minutes is not None: mm = mm + in_minutes if in_seconds is not None: ss = ss + in_seconds # Set the hours, minutes and seconds from now (minus the days) dnow = datetime.datetime.utcnow() # Now d = dnow + \ datetime.timedelta(hours=hh, minutes=mm, seconds = ss) # Time from midnight for_total_seconds = d - \ d.replace(hour=0, minute=0, second=0, microsecond=0) # Convert into minutes since midnight try: msm = for_total_seconds.total_seconds()/60. except AttributeError: # For versions before 2.7 msm = self.timedelta_total_seconds(for_total_seconds)/60. if (dd<len(self.days)): for timestep in self.days[dd].timesteps: if timestep.name >= msm: future = timestep return future else: print('ERROR: requested date is outside the forecast range selected, ' + str(len(self.days))) return False
[ "def", "future", "(", "self", ",", "in_days", "=", "None", ",", "in_hours", "=", "None", ",", "in_minutes", "=", "None", ",", "in_seconds", "=", "None", ")", ":", "future", "=", "None", "# Initialize variables to 0", "dd", ",", "hh", ",", "mm", ",", "ss", "=", "[", "0", "for", "i", "in", "range", "(", "4", ")", "]", "if", "(", "in_days", "!=", "None", ")", ":", "dd", "=", "dd", "+", "in_days", "if", "(", "in_hours", "!=", "None", ")", ":", "hh", "=", "hh", "+", "in_hours", "if", "(", "in_minutes", "!=", "None", ")", ":", "mm", "=", "mm", "+", "in_minutes", "if", "(", "in_seconds", "!=", "None", ")", ":", "ss", "=", "ss", "+", "in_seconds", "# Set the hours, minutes and seconds from now (minus the days)", "dnow", "=", "datetime", ".", "datetime", ".", "utcnow", "(", ")", "# Now", "d", "=", "dnow", "+", "datetime", ".", "timedelta", "(", "hours", "=", "hh", ",", "minutes", "=", "mm", ",", "seconds", "=", "ss", ")", "# Time from midnight", "for_total_seconds", "=", "d", "-", "d", ".", "replace", "(", "hour", "=", "0", ",", "minute", "=", "0", ",", "second", "=", "0", ",", "microsecond", "=", "0", ")", "# Convert into minutes since midnight", "try", ":", "msm", "=", "for_total_seconds", ".", "total_seconds", "(", ")", "/", "60.", "except", ":", "# For versions before 2.7", "msm", "=", "self", ".", "timedelta_total_seconds", "(", "for_total_seconds", ")", "/", "60.", "if", "(", "dd", "<", "len", "(", "self", ".", "days", ")", ")", ":", "for", "timestep", "in", "self", ".", "days", "[", "dd", "]", ".", "timesteps", ":", "if", "timestep", ".", "name", ">=", "msm", ":", "future", "=", "timestep", "return", "future", "else", ":", "print", "(", "'ERROR: requested date is outside the forecast range selected,'", "+", "str", "(", "len", "(", "self", ".", "days", ")", ")", ")", "return", "False" ]
Function to return a future timestep
[ "Function", "to", "return", "a", "future", "timestep" ]
python
train
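A hedged usage sketch: obtaining a populated Forecast requires a Met Office DataPoint API key and a site lookup, so `forecast` below is assumed to be such an object; the behaviour of both calls follows directly from the code above.

# Assuming `forecast` is a populated datapoint Forecast object:
step = forecast.future(in_hours=3)      # nearest timestep >= ~3 hours from now
if step:
    print(step.name)                    # timestep label, minutes since midnight
too_far = forecast.future(in_days=30)   # outside range: prints ERROR, returns False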
neithere/monk
monk/compat.py
https://github.com/neithere/monk/blob/4b2ee5152b081ac288ce8568422a027b5e7d2b1c/monk/compat.py#L63-L76
def safe_unicode(value): """ Returns: * a `unicode` instance in Python 2.x, or * a `str` instance in Python 3.x. """ if sys.version_info < (3,0): if isinstance(value, str): return value.decode('utf-8') else: return unicode(value) else: return str(value)
[ "def", "safe_unicode", "(", "value", ")", ":", "if", "sys", ".", "version_info", "<", "(", "3", ",", "0", ")", ":", "if", "isinstance", "(", "value", ",", "str", ")", ":", "return", "value", ".", "decode", "(", "'utf-8'", ")", "else", ":", "return", "unicode", "(", "value", ")", "else", ":", "return", "str", "(", "value", ")" ]
Returns: * a `unicode` instance in Python 2.x, or * a `str` instance in Python 3.x.
[ "Returns", ":" ]
python
train
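A quick illustration of the branches above:

# Python 2: byte strings are decoded as UTF-8, everything else goes
# through unicode(); Python 3: this is effectively just str().
print(safe_unicode('hello'))  # u'hello' on Python 2, 'hello' on Python 3
print(safe_unicode(42))       # u'42' / '42'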
sosreport/sos
sos/archive.py
https://github.com/sosreport/sos/blob/2ebc04da53dc871c8dd5243567afa4f8592dca29/sos/archive.py#L245-L318
def _check_path(self, src, path_type, dest=None, force=False): """Check a new destination path in the archive. Since it is possible for multiple plugins to collect the same paths, and since plugins can now run concurrently, it is possible for two threads to race in archive methods: historically the archive class only needed to test for the actual presence of a path, since it was impossible for another `Archive` client to enter the class while another method invocation was being dispatched. Deal with this by implementing a locking scheme for operations that modify the path structure of the archive, and by testing explicitly for conflicts with any existing content at the specified destination path. It is not an error to attempt to create a path that already exists in the archive so long as the type of the object to be added matches the type of object already found at the path. It is an error to attempt to re-create an existing path with a different path type (for example, creating a symbolic link at a path already occupied by a regular file). :param src: the source path to be copied to the archive :param path_type: the type of object to be copied :param dest: an optional destination path :param force: force file creation even if the path exists :returns: An absolute destination path if the path should be copied now or `None` otherwise """ dest = dest or self.dest_path(src) if path_type == P_DIR: dest_dir = dest else: dest_dir = os.path.split(dest)[0] if not dest_dir: return dest # Check containing directory presence and path type if os.path.exists(dest_dir) and not os.path.isdir(dest_dir): raise ValueError("path '%s' exists and is not a directory" % dest_dir) elif not os.path.exists(dest_dir): src_dir = src if path_type == P_DIR else os.path.split(src)[0] self._make_leading_paths(src_dir) def is_special(mode): return any([ stat.S_ISBLK(mode), stat.S_ISCHR(mode), stat.S_ISFIFO(mode), stat.S_ISSOCK(mode) ]) if force: return dest # Check destination path presence and type if os.path.exists(dest): # Use lstat: we care about the current object, not the referent. st = os.lstat(dest) ve_msg = "path '%s' exists and is not a %s" if path_type == P_FILE and not stat.S_ISREG(st.st_mode): raise ValueError(ve_msg % (dest, "regular file")) if path_type == P_LINK and not stat.S_ISLNK(st.st_mode): raise ValueError(ve_msg % (dest, "symbolic link")) if path_type == P_NODE and not is_special(st.st_mode): raise ValueError(ve_msg % (dest, "special file")) if path_type == P_DIR and not stat.S_ISDIR(st.st_mode): raise ValueError(ve_msg % (dest, "directory")) # Path has already been copied: skip return None return dest
[ "def", "_check_path", "(", "self", ",", "src", ",", "path_type", ",", "dest", "=", "None", ",", "force", "=", "False", ")", ":", "dest", "=", "dest", "or", "self", ".", "dest_path", "(", "src", ")", "if", "path_type", "==", "P_DIR", ":", "dest_dir", "=", "dest", "else", ":", "dest_dir", "=", "os", ".", "path", ".", "split", "(", "dest", ")", "[", "0", "]", "if", "not", "dest_dir", ":", "return", "dest", "# Check containing directory presence and path type", "if", "os", ".", "path", ".", "exists", "(", "dest_dir", ")", "and", "not", "os", ".", "path", ".", "isdir", "(", "dest_dir", ")", ":", "raise", "ValueError", "(", "\"path '%s' exists and is not a directory\"", "%", "dest_dir", ")", "elif", "not", "os", ".", "path", ".", "exists", "(", "dest_dir", ")", ":", "src_dir", "=", "src", "if", "path_type", "==", "P_DIR", "else", "os", ".", "path", ".", "split", "(", "src", ")", "[", "0", "]", "self", ".", "_make_leading_paths", "(", "src_dir", ")", "def", "is_special", "(", "mode", ")", ":", "return", "any", "(", "[", "stat", ".", "S_ISBLK", "(", "mode", ")", ",", "stat", ".", "S_ISCHR", "(", "mode", ")", ",", "stat", ".", "S_ISFIFO", "(", "mode", ")", ",", "stat", ".", "S_ISSOCK", "(", "mode", ")", "]", ")", "if", "force", ":", "return", "dest", "# Check destination path presence and type", "if", "os", ".", "path", ".", "exists", "(", "dest", ")", ":", "# Use lstat: we care about the current object, not the referent.", "st", "=", "os", ".", "lstat", "(", "dest", ")", "ve_msg", "=", "\"path '%s' exists and is not a %s\"", "if", "path_type", "==", "P_FILE", "and", "not", "stat", ".", "S_ISREG", "(", "st", ".", "st_mode", ")", ":", "raise", "ValueError", "(", "ve_msg", "%", "(", "dest", ",", "\"regular file\"", ")", ")", "if", "path_type", "==", "P_LINK", "and", "not", "stat", ".", "S_ISLNK", "(", "st", ".", "st_mode", ")", ":", "raise", "ValueError", "(", "ve_msg", "%", "(", "dest", ",", "\"symbolic link\"", ")", ")", "if", "path_type", "==", "P_NODE", "and", "not", "is_special", "(", "st", ".", "st_mode", ")", ":", "raise", "ValueError", "(", "ve_msg", "%", "(", "dest", ",", "\"special file\"", ")", ")", "if", "path_type", "==", "P_DIR", "and", "not", "stat", ".", "S_ISDIR", "(", "st", ".", "st_mode", ")", ":", "raise", "ValueError", "(", "ve_msg", "%", "(", "dest", ",", "\"directory\"", ")", ")", "# Path has already been copied: skip", "return", "None", "return", "dest" ]
Check a new destination path in the archive. Since it is possible for multiple plugins to collect the same paths, and since plugins can now run concurrently, it is possible for two threads to race in archive methods: historically the archive class only needed to test for the actual presence of a path, since it was impossible for another `Archive` client to enter the class while another method invocation was being dispatched. Deal with this by implementing a locking scheme for operations that modify the path structure of the archive, and by testing explicitly for conflicts with any existing content at the specified destination path. It is not an error to attempt to create a path that already exists in the archive so long as the type of the object to be added matches the type of object already found at the path. It is an error to attempt to re-create an existing path with a different path type (for example, creating a symbolic link at a path already occupied by a regular file). :param src: the source path to be copied to the archive :param path_type: the type of object to be copied :param dest: an optional destination path :param force: force file creation even if the path exists :returns: An absolute destination path if the path should be copied now or `None` otherwise
[ "Check", "a", "new", "destination", "path", "in", "the", "archive", "." ]
python
train
titusjan/argos
argos/utils/masks.py
https://github.com/titusjan/argos/blob/20d0a3cae26c36ea789a5d219c02ca7df21279dd/argos/utils/masks.py#L206-L224
def replaceMaskedValueWithNan(self): """ Replaces values where the mask is True with the replacement value. Will change the data type to float if the data is an integer. If the data is not a float (or int) the function does nothing. """ kind = self.data.dtype.kind if kind == 'i' or kind == 'u': # signed/unsigned int self.data = self.data.astype(np.float, casting='safe') if self.data.dtype.kind != 'f': return # only replace for floats if self.mask is False: pass elif self.mask is True: self.data[:] = np.NaN else: self.data[self.mask] = np.NaN
[ "def", "replaceMaskedValueWithNan", "(", "self", ")", ":", "kind", "=", "self", ".", "data", ".", "dtype", ".", "kind", "if", "kind", "==", "'i'", "or", "kind", "==", "'u'", ":", "# signed/unsigned int", "self", ".", "data", "=", "self", ".", "data", ".", "astype", "(", "np", ".", "float", ",", "casting", "=", "'safe'", ")", "if", "self", ".", "data", ".", "dtype", ".", "kind", "!=", "'f'", ":", "return", "# only replace for floats", "if", "self", ".", "mask", "is", "False", ":", "pass", "elif", "self", ".", "mask", "is", "True", ":", "self", ".", "data", "[", ":", "]", "=", "np", ".", "NaN", "else", ":", "self", ".", "data", "[", "self", ".", "mask", "]", "=", "np", ".", "NaN" ]
Replaces values where the mask is True with NaN. Will change the data type to float if the data is an integer. If the data is not a float (or int) the function does nothing.
[ "Replaces", "values", "where", "the", "mask", "is", "True", "with", "the", "replacement", "value", "." ]
python
train
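The core of the method is plain NumPy fancy indexing; a standalone sketch of the same pattern on a bare array/mask pair (the real method mutates self.data in place and first promotes integer data to float):

import numpy as np

data = np.array([1.0, 2.0, 3.0])
mask = np.array([False, True, False])
data[mask] = np.nan  # overwrite masked positions with NaN
print(data)          # [ 1. nan  3.]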
tonybaloney/wily
wily/cache.py
https://github.com/tonybaloney/wily/blob/bae259354a91b57d56603f0ca7403186f086a84c/wily/cache.py#L94-L139
def store(config, archiver, revision, stats): """ Store a revision record within an archiver folder. :param config: The configuration :type config: :class:`wily.config.WilyConfig` :param archiver: The name of the archiver type (e.g. 'git') :type archiver: ``str`` :param revision: The revision ID :type revision: ``str`` :param stats: The collected data :type stats: ``dict`` :return: The absolute path to the created file :rtype: ``str`` :rtype: `pathlib.Path` """ root = pathlib.Path(config.cache_path) / archiver.name if not root.exists(): logger.debug("Creating wily cache") root.mkdir() # fix absolute path references. if config.path != ".": for operator, operator_data in list(stats["operator_data"].items()): if operator_data: new_operator_data = operator_data.copy() for k, v in list(operator_data.items()): new_key = os.path.relpath(str(k), str(config.path)) del new_operator_data[k] new_operator_data[new_key] = v del stats["operator_data"][operator] stats["operator_data"][operator] = new_operator_data logger.debug(f"Creating {revision.key} output") filename = root / (revision.key + ".json") if filename.exists(): raise RuntimeError(f"File {filename} already exists, index may be corrupt.") with open(filename, "w") as out: out.write(json.dumps(stats, indent=2)) return filename
[ "def", "store", "(", "config", ",", "archiver", ",", "revision", ",", "stats", ")", ":", "root", "=", "pathlib", ".", "Path", "(", "config", ".", "cache_path", ")", "/", "archiver", ".", "name", "if", "not", "root", ".", "exists", "(", ")", ":", "logger", ".", "debug", "(", "\"Creating wily cache\"", ")", "root", ".", "mkdir", "(", ")", "# fix absolute path references.", "if", "config", ".", "path", "!=", "\".\"", ":", "for", "operator", ",", "operator_data", "in", "list", "(", "stats", "[", "\"operator_data\"", "]", ".", "items", "(", ")", ")", ":", "if", "operator_data", ":", "new_operator_data", "=", "operator_data", ".", "copy", "(", ")", "for", "k", ",", "v", "in", "list", "(", "operator_data", ".", "items", "(", ")", ")", ":", "new_key", "=", "os", ".", "path", ".", "relpath", "(", "str", "(", "k", ")", ",", "str", "(", "config", ".", "path", ")", ")", "del", "new_operator_data", "[", "k", "]", "new_operator_data", "[", "new_key", "]", "=", "v", "del", "stats", "[", "\"operator_data\"", "]", "[", "operator", "]", "stats", "[", "\"operator_data\"", "]", "[", "operator", "]", "=", "new_operator_data", "logger", ".", "debug", "(", "f\"Creating {revision.key} output\"", ")", "filename", "=", "root", "/", "(", "revision", ".", "key", "+", "\".json\"", ")", "if", "filename", ".", "exists", "(", ")", ":", "raise", "RuntimeError", "(", "f\"File {filename} already exists, index may be corrupt.\"", ")", "with", "open", "(", "filename", ",", "\"w\"", ")", "as", "out", ":", "out", ".", "write", "(", "json", ".", "dumps", "(", "stats", ",", "indent", "=", "2", ")", ")", "return", "filename" ]
Store a revision record within an archiver folder. :param config: The configuration :type config: :class:`wily.config.WilyConfig` :param archiver: The name of the archiver type (e.g. 'git') :type archiver: ``str`` :param revision: The revision ID :type revision: ``str`` :param stats: The collected data :type stats: ``dict`` :return: The absolute path to the created file :rtype: `pathlib.Path`
[ "Store", "a", "revision", "record", "within", "an", "archiver", "folder", "." ]
python
train
rocky/python3-trepan
trepan/lib/sighandler.py
https://github.com/rocky/python3-trepan/blob/14e91bc0acce090d67be145b1ac040cab92ac5f3/trepan/lib/sighandler.py#L275-L282
def check_and_adjust_sighandlers(self): """Check to see if any of the signal handlers we are interested in have changed or were not initially set. Change any that are not right. """ for signame in list(self.sigs.keys()): if not self.check_and_adjust_sighandler(signame, self.sigs): break pass return
[ "def", "check_and_adjust_sighandlers", "(", "self", ")", ":", "for", "signame", "in", "list", "(", "self", ".", "sigs", ".", "keys", "(", ")", ")", ":", "if", "not", "self", ".", "check_and_adjust_sighandler", "(", "signame", ",", "self", ".", "sigs", ")", ":", "break", "pass", "return" ]
Check to see if any of the signal handlers we are interested in have changed or were not initially set. Change any that are not right.
[ "Check", "to", "see", "if", "any", "of", "the", "signal", "handlers", "we", "are", "interested", "in", "have", "changed", "or", "is", "not", "initially", "set", ".", "Change", "any", "that", "are", "not", "right", "." ]
python
test
Cairnarvon/uptime
src/__init__.py
https://github.com/Cairnarvon/uptime/blob/1ddfd06bb300c00e6dc4bd2a9ddf9bf1aa27b1bb/src/__init__.py#L198-L214
def _uptime_plan9(): """Returns uptime in seconds or None, on Plan 9.""" # Apparently Plan 9 only has Python 2.2, which I'm not prepared to # support. Maybe some Linuxes implement /dev/time, though, someone was # talking about it somewhere. try: # The time file holds one 32-bit number representing the sec- # onds since start of epoch and three 64-bit numbers, repre- # senting nanoseconds since start of epoch, clock ticks, and # clock frequency. # -- cons(3) f = open('/dev/time', 'r') s, ns, ct, cf = f.read().split() f.close() return float(ct) / float(cf) except (IOError, ValueError): return None
[ "def", "_uptime_plan9", "(", ")", ":", "# Apparently Plan 9 only has Python 2.2, which I'm not prepared to", "# support. Maybe some Linuxes implement /dev/time, though, someone was", "# talking about it somewhere.", "try", ":", "# The time file holds one 32-bit number representing the sec-", "# onds since start of epoch and three 64-bit numbers, repre-", "# senting nanoseconds since start of epoch, clock ticks, and", "# clock frequency.", "# -- cons(3)", "f", "=", "open", "(", "'/dev/time'", ",", "'r'", ")", "s", ",", "ns", ",", "ct", ",", "cf", "=", "f", ".", "read", "(", ")", ".", "split", "(", ")", "f", ".", "close", "(", ")", "return", "float", "(", "ct", ")", "/", "float", "(", "cf", ")", "except", "(", "IOError", ",", "ValueError", ")", ":", "return", "None" ]
Returns uptime in seconds or None, on Plan 9.
[ "Returns", "uptime", "in", "seconds", "or", "None", "on", "Plan", "9", "." ]
python
valid
quantopian/pyfolio
pyfolio/bayesian.py
https://github.com/quantopian/pyfolio/blob/712716ab0cdebbec9fabb25eea3bf40e4354749d/pyfolio/bayesian.py#L426-L453
def compute_bayes_cone(preds, starting_value=1.): """ Compute 5, 25, 75 and 95 percentiles of cumulative returns, used for the Bayesian cone. Parameters ---------- preds : numpy.array Multiple (simulated) cumulative returns. starting_value : int (optional) Have cumulative returns start around this value. Default = 1. Returns ------- dict of percentiles over time Dictionary mapping percentiles (5, 25, 75, 95) to a timeseries. """ def scoreatpercentile(cum_preds, p): return [stats.scoreatpercentile( c, p) for c in cum_preds.T] cum_preds = np.cumprod(preds + 1, 1) * starting_value perc = {p: scoreatpercentile(cum_preds, p) for p in (5, 25, 75, 95)} return perc
[ "def", "compute_bayes_cone", "(", "preds", ",", "starting_value", "=", "1.", ")", ":", "def", "scoreatpercentile", "(", "cum_preds", ",", "p", ")", ":", "return", "[", "stats", ".", "scoreatpercentile", "(", "c", ",", "p", ")", "for", "c", "in", "cum_preds", ".", "T", "]", "cum_preds", "=", "np", ".", "cumprod", "(", "preds", "+", "1", ",", "1", ")", "*", "starting_value", "perc", "=", "{", "p", ":", "scoreatpercentile", "(", "cum_preds", ",", "p", ")", "for", "p", "in", "(", "5", ",", "25", ",", "75", ",", "95", ")", "}", "return", "perc" ]
Compute 5, 25, 75 and 95 percentiles of cumulative returns, used for the Bayesian cone. Parameters ---------- preds : numpy.array Multiple (simulated) cumulative returns. starting_value : int (optional) Have cumulative returns start around this value. Default = 1. Returns ------- dict of percentiles over time Dictionary mapping percentiles (5, 25, 75, 95) to a timeseries.
[ "Compute", "5", "25", "75", "and", "95", "percentiles", "of", "cumulative", "returns", "used", "for", "the", "Bayesian", "cone", "." ]
python
valid
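A minimal sketch of calling the function on simulated data (scipy's stats module is what the scoreatpercentile helper in the body relies on):

import numpy as np
from scipy import stats  # used inside compute_bayes_cone

preds = np.random.normal(loc=0.0, scale=0.01, size=(200, 60))  # 200 paths, 60 steps
perc = compute_bayes_cone(preds, starting_value=1.0)
print(sorted(perc))   # [5, 25, 75, 95]
print(len(perc[5]))   # 60 -- one percentile value per time step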
ikegami-yukino/jctconv
jctconv/jctconv.py
https://github.com/ikegami-yukino/jctconv/blob/cc9bcb25b69c681b01b5c85cacdfb56c1a285407/jctconv/jctconv.py#L146-L161
def normalize(text, mode='NFKC', ignore=''): u"""Convert Half-width (Hankaku) Katakana to Full-width (Zenkaku) Katakana, Full-width (Zenkaku) ASCII and DIGIT to Half-width (Hankaku) ASCII and DIGIT. Additionally, Full-width wave dash (〜) etc. are normalized Params: <unicode> text <unicode> ignore Return: <unicode> converted_text """ text = text.replace(u'〜', u'ー').replace(u'~', u'ー') text = text.replace(u"’", "'").replace(u'”', '"').replace(u'“', '``') text = text.replace(u'―', '-').replace(u'‐', u'-') return unicodedata.normalize(mode, text)
[ "def", "normalize", "(", "text", ",", "mode", "=", "'NFKC'", ",", "ignore", "=", "''", ")", ":", "text", "=", "text", ".", "replace", "(", "u'〜', ", "u", "ー').re", "p", "l", "ace(u'~", "'", ", u'ー'", ")", "", "", "text", "=", "text", ".", "replace", "(", "u\"’\", ", "\"", "\").", "r", "e", "place(u", "'", "”', '\"", "'", ".re", "p", "l", "ace(u'“", "'", ", '``'", ")", "", "", "text", "=", "text", ".", "replace", "(", "u'―', ", "'", "').", "r", "e", "place(u", "'", "‐', u'", "-", ")", "", "return", "unicodedata", ".", "normalize", "(", "mode", ",", "text", ")" ]
u"""Convert Half-width (Hankaku) Katakana to Full-width (Zenkaku) Katakana, Full-width (Zenkaku) ASCII and DIGIT to Half-width (Hankaku) ASCII and DIGIT. Additionally, Full-width wave dash (〜) etc. are normalized Params: <unicode> text <unicode> ignore Return: <unicode> converted_text
[ "u", "Convert", "Half", "-", "width", "(", "Hankaku", ")", "Katakana", "to", "Full", "-", "width", "(", "Zenkaku", ")", "Katakana", "Full", "-", "width", "(", "Zenkaku", ")", "ASCII", "and", "DIGIT", "to", "Half", "-", "width", "(", "Hankaku", ")", "ASCII", "and", "DIGIT", ".", "Additionally", "Full", "-", "width", "wave", "dash", "(", "〜", ")", "etc", ".", "are", "normalized" ]
python
train
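A short sketch of the normalization in action (the first call shows the wave-dash unification the function adds on top of NFKC; the second shows plain NFKC folding of full-width ASCII):

# -*- coding: utf-8 -*-
import unicodedata

print(normalize(u'〜テスト~'))   # u'ーテストー' -- both wave dashes become a long vowel mark
print(normalize(u'Ｐｙｔｈｏｎ'))   # u'Python' -- NFKC folds full-width ASCII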
PredixDev/predixpy
predix/security/acs.py
https://github.com/PredixDev/predixpy/blob/a0cb34cf40f716229351bb6d90d6ecace958c81f/predix/security/acs.py#L305-L311
def _put_policy_set(self, policy_set_id, body): """ Will create or update a policy set for the given policy set id. """ assert isinstance(body, (dict)), "PUT requires body to be a dict." uri = self._get_policy_set_uri(guid=policy_set_id) return self.service._put(uri, body)
[ "def", "_put_policy_set", "(", "self", ",", "policy_set_id", ",", "body", ")", ":", "assert", "isinstance", "(", "body", ",", "(", "dict", ")", ")", ",", "\"PUT requires body to be a dict.\"", "uri", "=", "self", ".", "_get_policy_set_uri", "(", "guid", "=", "policy_set_id", ")", "return", "self", ".", "service", ".", "_put", "(", "uri", ",", "body", ")" ]
Will create or update a policy set for the given policy set id.
[ "Will", "create", "or", "update", "a", "policy", "set", "for", "the", "given", "path", "." ]
python
train
tensorflow/tensor2tensor
tensor2tensor/utils/quantization.py
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/utils/quantization.py#L89-L134
def simulated_quantize(x, num_bits, noise): """Simulate quantization to num_bits bits, with externally-stored scale. num_bits is the number of bits used to store each value. noise is a float32 Tensor containing values in [0, 1). Each value in noise should take different values across different steps, approximating a uniform distribution over [0, 1). In the case of replicated TPU training, noise should be identical across replicas in order to keep the parameters identical across replicas. The natural choice for noise would be tf.random_uniform(), but this is not possible for TPU, since there is currently no way to seed the different cores to produce identical values across replicas. Instead we use noise_from_step_num() (see below). The quantization scheme is as follows: Compute the maximum absolute value by row (call this max_abs). Store this either in an auxiliary variable or in an extra column. Divide the parameters by (max_abs / (2^(num_bits-1)-1)). This gives a float32 value in the range [-2^(num_bits-1)-1, 2^(num_bits-1)-1] Unbiased randomized roundoff by adding noise and rounding down. This produces a signed integer with num_bits bits which can then be stored. Args: x: a float32 Tensor num_bits: an integer between 1 and 22 noise: a float Tensor broadcastable to the shape of x. Returns: a float32 Tensor """ shape = x.get_shape().as_list() if not (len(shape) >= 2 and shape[-1] > 1): return x max_abs = tf.reduce_max(tf.abs(x), -1, keepdims=True) + 1e-9 max_int = 2 ** (num_bits - 1) - 1 scale = max_abs / max_int x /= scale x = tf.floor(x + noise) # dequantize before storing (since this is a simulation) x *= scale return x
[ "def", "simulated_quantize", "(", "x", ",", "num_bits", ",", "noise", ")", ":", "shape", "=", "x", ".", "get_shape", "(", ")", ".", "as_list", "(", ")", "if", "not", "(", "len", "(", "shape", ")", ">=", "2", "and", "shape", "[", "-", "1", "]", ">", "1", ")", ":", "return", "x", "max_abs", "=", "tf", ".", "reduce_max", "(", "tf", ".", "abs", "(", "x", ")", ",", "-", "1", ",", "keepdims", "=", "True", ")", "+", "1e-9", "max_int", "=", "2", "**", "(", "num_bits", "-", "1", ")", "-", "1", "scale", "=", "max_abs", "/", "max_int", "x", "/=", "scale", "x", "=", "tf", ".", "floor", "(", "x", "+", "noise", ")", "# dequantize before storing (since this is a simulation)", "x", "*=", "scale", "return", "x" ]
Simulate quantization to num_bits bits, with externally-stored scale. num_bits is the number of bits used to store each value. noise is a float32 Tensor containing values in [0, 1). Each value in noise should take different values across different steps, approximating a uniform distribution over [0, 1). In the case of replicated TPU training, noise should be identical across replicas in order to keep the parameters identical across replicas. The natural choice for noise would be tf.random_uniform(), but this is not possible for TPU, since there is currently no way to seed the different cores to produce identical values across replicas. Instead we use noise_from_step_num() (see below). The quantization scheme is as follows: Compute the maximum absolute value by row (call this max_abs). Store this either in an auxiliary variable or in an extra column. Divide the parameters by (max_abs / (2^(num_bits-1)-1)). This gives a float32 value in the range [-2^(num_bits-1)-1, 2^(num_bits-1)-1] Unbiased randomized roundoff by adding noise and rounding down. This produces a signed integer with num_bits bits which can then be stored. Args: x: a float32 Tensor num_bits: an integer between 1 and 22 noise: a float Tensor broadcastable to the shape of x. Returns: a float32 Tensor
[ "Simulate", "quantization", "to", "num_bits", "bits", "with", "externally", "-", "stored", "scale", "." ]
python
train
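A NumPy re-sketch of the same scheme (not the TensorFlow code path above), handy for checking the arithmetic by hand; the scalar noise stands in for the per-step noise tensor:

import numpy as np

def simulated_quantize_np(x, num_bits, noise):
    # Row-wise scale, randomized roundoff, then dequantize (simulation only).
    max_abs = np.max(np.abs(x), axis=-1, keepdims=True) + 1e-9
    max_int = 2 ** (num_bits - 1) - 1
    scale = max_abs / max_int
    return np.floor(x / scale + noise) * scale

x = np.array([[0.5, -1.0, 0.25]])
print(simulated_quantize_np(x, num_bits=8, noise=0.5))
# ~[[ 0.496 -1.     0.252]] -- values snapped to multiples of max_abs/127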
aestrivex/bctpy
bct/algorithms/distance.py
https://github.com/aestrivex/bctpy/blob/4cb0e759eb4a038750b07e23bd29958c400684b8/bct/algorithms/distance.py#L326-L421
def distance_wei_floyd(adjacency, transform=None): """ Computes the topological length of the shortest possible path connecting every pair of nodes in the network. Parameters ---------- D : (N x N) array_like Weighted/unweighted, direct/undirected connection weight/length array transform : str, optional If `adjacency` is a connection weight array, specify a transform to map input connection weights to connection lengths. Options include ['log', 'inv'], where 'log' is `-np.log(adjacency)` and 'inv' is `1/adjacency`. Default: None Returns ------- SPL : (N x N) ndarray Weighted/unweighted shortest path-length array. If `D` is a directed graph, then `SPL` is not symmetric hops : (N x N) ndarray Number of edges in the shortest path array. If `D` is unweighted, `SPL` and `hops` are identical. Pmat : (N x N) ndarray Element `[i,j]` of this array indicates the next node in the shortest path between `i` and `j`. This array is used as an input argument for function `retrieve_shortest_path()`, which returns as output the sequence of nodes comprising the shortest path between a given pair of nodes. Notes ----- There may be more than one shortest path between any pair of nodes in the network. Non-unique shortest paths are termed shortest path degeneracies and are most likely to occur in unweighted networks. When the shortest-path is degenerate, the elements of `Pmat` correspond to the first shortest path discovered by the algorithm. The input array may be either a connection weight or length array. The connection length array is typically obtained with a mapping from weight to length, such that higher weights are mapped to shorter lengths (see argument `transform`, above). Originally written in Matlab by Andrea Avena-Koenigsberger (IU, 2012) References ---------- .. [1] Floyd, R. W. (1962). Algorithm 97: shortest path. Communications of the ACM, 5(6), 345. .. [2] Roy, B. (1959). Transitivite et connexite. Comptes Rendus Hebdomadaires Des Seances De L Academie Des Sciences, 249(2), 216-218. .. [3] Warshall, S. (1962). A theorem on boolean matrices. Journal of the ACM (JACM), 9(1), 11-12. .. [4] https://en.wikipedia.org/wiki/Floyd%E2%80%93Warshall_algorithm """ if transform is not None: if transform == 'log': if np.logical_or(adjacency > 1, adjacency < 0).any(): raise ValueError("Connection strengths must be in the " + "interval [0,1) to use the transform " + "-log(w_ij).") SPL = -np.log(adjacency) elif transform == 'inv': SPL = 1. / adjacency else: raise ValueError("Unexpected transform type. Only 'log' and " + "'inv' are accepted") else: SPL = adjacency.copy().astype('float') SPL[SPL == 0] = np.inf n = adjacency.shape[1] flag_find_paths = True hops = np.array(adjacency != 0).astype('float') Pmat = np.repeat(np.atleast_2d(np.arange(0, n)), n, 0) for k in range(n): i2k_k2j = np.repeat(SPL[:, [k]], n, 1) + np.repeat(SPL[[k], :], n, 0) if flag_find_paths: path = SPL > i2k_k2j i, j = np.where(path) hops[path] = hops[i, k] + hops[k, j] Pmat[path] = Pmat[i, k] SPL = np.min(np.stack([SPL, i2k_k2j], 2), 2) I = np.eye(n) > 0 SPL[I] = 0 if flag_find_paths: hops[I], Pmat[I] = 0, 0 return SPL, hops, Pmat
[ "def", "distance_wei_floyd", "(", "adjacency", ",", "transform", "=", "None", ")", ":", "if", "transform", "is", "not", "None", ":", "if", "transform", "==", "'log'", ":", "if", "np", ".", "logical_or", "(", "adjacency", ">", "1", ",", "adjacency", "<", "0", ")", ".", "any", "(", ")", ":", "raise", "ValueError", "(", "\"Connection strengths must be in the \"", "+", "\"interval [0,1) to use the transform \"", "+", "\"-log(w_ij).\"", ")", "SPL", "=", "-", "np", ".", "log", "(", "adjacency", ")", "elif", "transform", "==", "'inv'", ":", "SPL", "=", "1.", "/", "adjacency", "else", ":", "raise", "ValueError", "(", "\"Unexpected transform type. Only 'log' and \"", "+", "\"'inv' are accepted\"", ")", "else", ":", "SPL", "=", "adjacency", ".", "copy", "(", ")", ".", "astype", "(", "'float'", ")", "SPL", "[", "SPL", "==", "0", "]", "=", "np", ".", "inf", "n", "=", "adjacency", ".", "shape", "[", "1", "]", "flag_find_paths", "=", "True", "hops", "=", "np", ".", "array", "(", "adjacency", "!=", "0", ")", ".", "astype", "(", "'float'", ")", "Pmat", "=", "np", ".", "repeat", "(", "np", ".", "atleast_2d", "(", "np", ".", "arange", "(", "0", ",", "n", ")", ")", ",", "n", ",", "0", ")", "for", "k", "in", "range", "(", "n", ")", ":", "i2k_k2j", "=", "np", ".", "repeat", "(", "SPL", "[", ":", ",", "[", "k", "]", "]", ",", "n", ",", "1", ")", "+", "np", ".", "repeat", "(", "SPL", "[", "[", "k", "]", ",", ":", "]", ",", "n", ",", "0", ")", "if", "flag_find_paths", ":", "path", "=", "SPL", ">", "i2k_k2j", "i", ",", "j", "=", "np", ".", "where", "(", "path", ")", "hops", "[", "path", "]", "=", "hops", "[", "i", ",", "k", "]", "+", "hops", "[", "k", ",", "j", "]", "Pmat", "[", "path", "]", "=", "Pmat", "[", "i", ",", "k", "]", "SPL", "=", "np", ".", "min", "(", "np", ".", "stack", "(", "[", "SPL", ",", "i2k_k2j", "]", ",", "2", ")", ",", "2", ")", "I", "=", "np", ".", "eye", "(", "n", ")", ">", "0", "SPL", "[", "I", "]", "=", "0", "if", "flag_find_paths", ":", "hops", "[", "I", "]", ",", "Pmat", "[", "I", "]", "=", "0", ",", "0", "return", "SPL", ",", "hops", ",", "Pmat" ]
Computes the topological length of the shortest possible path connecting every pair of nodes in the network. Parameters ---------- D : (N x N) array_like Weighted/unweighted, direct/undirected connection weight/length array transform : str, optional If `adjacency` is a connection weight array, specify a transform to map input connection weights to connection lengths. Options include ['log', 'inv'], where 'log' is `-np.log(adjacency)` and 'inv' is `1/adjacency`. Default: None Returns ------- SPL : (N x N) ndarray Weighted/unweighted shortest path-length array. If `D` is a directed graph, then `SPL` is not symmetric hops : (N x N) ndarray Number of edges in the shortest path array. If `D` is unweighted, `SPL` and `hops` are identical. Pmat : (N x N) ndarray Element `[i,j]` of this array indicates the next node in the shortest path between `i` and `j`. This array is used as an input argument for function `retrieve_shortest_path()`, which returns as output the sequence of nodes comprising the shortest path between a given pair of nodes. Notes ----- There may be more than one shortest path between any pair of nodes in the network. Non-unique shortest paths are termed shortest path degeneracies and are most likely to occur in unweighted networks. When the shortest-path is degenerate, the elements of `Pmat` correspond to the first shortest path discovered by the algorithm. The input array may be either a connection weight or length array. The connection length array is typically obtained with a mapping from weight to length, such that higher weights are mapped to shorter lengths (see argument `transform`, above). Originally written in Matlab by Andrea Avena-Koenigsberger (IU, 2012) References ---------- .. [1] Floyd, R. W. (1962). Algorithm 97: shortest path. Communications of the ACM, 5(6), 345. .. [2] Roy, B. (1959). Transitivite et connexite. Comptes Rendus Hebdomadaires Des Seances De L Academie Des Sciences, 249(2), 216-218. .. [3] Warshall, S. (1962). A theorem on boolean matrices. Journal of the ACM (JACM), 9(1), 11-12. .. [4] https://en.wikipedia.org/wiki/Floyd%E2%80%93Warshall_algorithm
[ "Computes", "the", "topological", "length", "of", "the", "shortest", "possible", "path", "connecting", "every", "pair", "of", "nodes", "in", "the", "network", "." ]
python
train
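A small self-contained sketch: with the 'inv' transform, a strong two-hop route beats a weak direct connection. (NumPy warns about the 1/0 on the zero entries; those come out as inf, which is exactly what the algorithm wants for absent connections.)

import numpy as np

W = np.array([[0.0, 0.9, 0.1],
              [0.9, 0.0, 0.9],
              [0.1, 0.9, 0.0]])
SPL, hops, Pmat = distance_wei_floyd(W, transform='inv')
print(SPL[0, 2])   # ~2.22 (1/0.9 + 1/0.9), beating the direct 1/0.1 == 10
print(hops[0, 2])  # 2.0 -- the shortest path uses two edges
print(Pmat[0, 2])  # 1   -- next node after 0 on the way to 2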
bspaans/python-mingus
mingus/extra/musicxml.py
https://github.com/bspaans/python-mingus/blob/aa5a5d992d45ada61be0f9f86261380731bd7749/mingus/extra/musicxml.py#L303-L324
def write_Composition(composition, filename, zip=False): """Create an XML file (or MXL if compressed) for a given composition.""" text = from_Composition(composition) if not zip: f = open(filename + '.xml', 'w') f.write(text) f.close() else: import zipfile import os zf = zipfile.ZipFile(filename + '.mxl', mode='w', compression=zipfile.ZIP_DEFLATED) zi = zipfile.ZipInfo('META-INF' + os.sep + 'container.xml') zi.external_attr = 0660 << 16L zf.writestr(zi, "<?xml version='1.0' encoding='UTF-8'?>" "<container><rootfiles><rootfile full-path='{0}.xml'/>" "</rootfiles></container>".format(filename)) zi = zipfile.ZipInfo(filename + '.xml') zi.external_attr = 0660 << 16L zf.writestr(zi, text) zf.close()
[ "def", "write_Composition", "(", "composition", ",", "filename", ",", "zip", "=", "False", ")", ":", "text", "=", "from_Composition", "(", "composition", ")", "if", "not", "zip", ":", "f", "=", "open", "(", "filename", "+", "'.xml'", ",", "'w'", ")", "f", ".", "write", "(", "text", ")", "f", ".", "close", "(", ")", "else", ":", "import", "zipfile", "import", "os", "zf", "=", "zipfile", ".", "ZipFile", "(", "filename", "+", "'.mxl'", ",", "mode", "=", "'w'", ",", "compression", "=", "zipfile", ".", "ZIP_DEFLATED", ")", "zi", "=", "zipfile", ".", "ZipInfo", "(", "'META-INF'", "+", "os", ".", "sep", "+", "'container.xml'", ")", "zi", ".", "external_attr", "=", "0660", "<<", "16L", "zf", ".", "writestr", "(", "zi", ",", "\"<?xml version='1.0' encoding='UTF-8'?>\"", "\"<container><rootfiles><rootfile full-path='{0}.xml'/>\"", "\"</rootfiles></container>\"", ".", "format", "(", "filename", ")", ")", "zi", "=", "zipfile", ".", "ZipInfo", "(", "filename", "+", "'.xml'", ")", "zi", ".", "external_attr", "=", "0660", "<<", "16L", "zf", ".", "writestr", "(", "zi", ",", "text", ")", "zf", ".", "close", "(", ")" ]
Create an XML file (or MXL if compressed) for a given composition.
[ "Create", "an", "XML", "file", "(", "or", "MXL", "if", "compressed", ")", "for", "a", "given", "composition", "." ]
python
train
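A hedged usage sketch; note this module is Python 2 code (the octal literal 0660 and the 16L suffix will not parse on Python 3), and Composition plus its set_title method are recalled from the mingus.containers API, so treat the names as assumptions:

from mingus.containers import Composition

c = Composition()
c.set_title('Example')                      # assumed mingus API
write_Composition(c, 'example')             # writes example.xml
write_Composition(c, 'example', zip=True)   # writes compressed example.mxl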
foremast/foremast
src/foremast/awslambda/cloudwatch_log_event/destroy_cloudwatch_log_event/destroy_cloudwatch_log_event.py
https://github.com/foremast/foremast/blob/fb70f29b8ce532f061685a17d120486e47b215ba/src/foremast/awslambda/cloudwatch_log_event/destroy_cloudwatch_log_event/destroy_cloudwatch_log_event.py#L24-L42
def destroy_cloudwatch_log_event(app='', env='dev', region=''): """Destroy Cloudwatch log event. Args: app (str): Spinnaker Application name. env (str): Deployment environment. region (str): AWS region. Returns: bool: True upon successful completion. """ session = boto3.Session(profile_name=env, region_name=region) cloudwatch_client = session.client('logs') # FIXME: see below # TODO: Log group name is required, where do we get it if it is not in application-master-env.json? cloudwatch_client.delete_subscription_filter(logGroupName='/aws/lambda/awslimitchecker', filterName=app) return True
[ "def", "destroy_cloudwatch_log_event", "(", "app", "=", "''", ",", "env", "=", "'dev'", ",", "region", "=", "''", ")", ":", "session", "=", "boto3", ".", "Session", "(", "profile_name", "=", "env", ",", "region_name", "=", "region", ")", "cloudwatch_client", "=", "session", ".", "client", "(", "'logs'", ")", "# FIXME: see below", "# TODO: Log group name is required, where do we get it if it is not in application-master-env.json?", "cloudwatch_client", ".", "delete_subscription_filter", "(", "logGroupName", "=", "'/aws/lambda/awslimitchecker'", ",", "filterName", "=", "app", ")", "return", "True" ]
Destroy Cloudwatch log event. Args: app (str): Spinnaker Application name. env (str): Deployment environment. region (str): AWS region. Returns: bool: True upon successful completion.
[ "Destroy", "Cloudwatch", "log", "event", "." ]
python
train
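Usage is a single call; it needs AWS credentials under a profile named after the env, and the log group name is currently hard-coded (see the FIXME/TODO in the body). The app name below is hypothetical:

# Removes the subscription filter named after the app from the
# hard-coded /aws/lambda/awslimitchecker log group.
destroy_cloudwatch_log_event(app='myapp', env='dev', region='us-east-1')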
commx/python-rrdtool
setup.py
https://github.com/commx/python-rrdtool/blob/74b7dee35c17a2558da475369699ef63408b7b6c/setup.py#L42-L119
def compile_extensions(macros, compat=False): """ Compiler subroutine to test whether some functions are available on the target system. Since the rrdtool headers shipped with most packages do not disclose any versioning information, we cannot test whether a given function is available that way. Instead, use this to manually try to compile code and see if it works. Taken from http://stackoverflow.com/questions/28843765/setup-py-check-if-non-python-library-dependency-exists. """ import distutils.sysconfig import distutils.ccompiler import tempfile import shutil from textwrap import dedent # common vars libraries = ['rrd'] include_dirs = [package_dir, '/usr/local/include'] library_dirs = ['/usr/local/lib'] compiler_args = dict( libraries=libraries, include_dirs=include_dirs, library_dirs=library_dirs, define_macros=macros) exts = [Extension('rrdtool', sources=['rrdtoolmodule.c'], **compiler_args)] if compat: return exts # in non-compat mode, try to link to check if the new symbols are present in librrd c_code = dedent(''' #include <rrd.h> #include "rrdtoolmodule.h" int main(int argc, char *argv[]) { rrd_fetch_cb_register(NULL); /* exists in rrdtool >= 1.5.0 */ return 0; } ''') tmp_dir = tempfile.mkdtemp(prefix='tmp_python_rrdtool') bin_file_name = os.path.join(tmp_dir, 'test_rrdtool') file_name = bin_file_name + '.c' with open(file_name, 'w') as fp: fp.write(c_code) # try to compile it compiler = distutils.ccompiler.new_compiler() assert isinstance(compiler, distutils.ccompiler.CCompiler) for s in include_dirs: compiler.add_include_dir(s) for s in library_dirs: compiler.add_library_dir(s) for s in libraries: compiler.add_library(s) for s in macros: compiler.define_macro(*s) distutils.sysconfig.customize_compiler(compiler) try: compiler.link_executable( compiler.compile([file_name]), bin_file_name, libraries=libraries) except CompileError: sys.exit('Error: Unable to compile the binary module. Do you have the rrdtool header and libraries installed?') ret = None except LinkError as exc: shutil.rmtree(tmp_dir) raise # re-raise else: return exts # seems to be available, compile in regular way shutil.rmtree(tmp_dir) return ret
[ "def", "compile_extensions", "(", "macros", ",", "compat", "=", "False", ")", ":", "import", "distutils", ".", "sysconfig", "import", "distutils", ".", "ccompiler", "import", "tempfile", "import", "shutil", "from", "textwrap", "import", "dedent", "# common vars", "libraries", "=", "[", "'rrd'", "]", "include_dirs", "=", "[", "package_dir", ",", "'/usr/local/include'", "]", "library_dirs", "=", "[", "'/usr/local/lib'", "]", "compiler_args", "=", "dict", "(", "libraries", "=", "libraries", ",", "include_dirs", "=", "include_dirs", ",", "library_dirs", "=", "library_dirs", ",", "define_macros", "=", "macros", ")", "exts", "=", "[", "Extension", "(", "'rrdtool'", ",", "sources", "=", "[", "'rrdtoolmodule.c'", "]", ",", "*", "*", "compiler_args", ")", "]", "if", "compat", ":", "return", "exts", "# in non-compat mode, try to link to check if the new symbols are present in librrd", "c_code", "=", "dedent", "(", "'''\n #include <rrd.h>\n #include \"rrdtoolmodule.h\"\n\n int main(int argc, char *argv[]) {\n rrd_fetch_cb_register(NULL); /* exists in rrdtool >= 1.5.0 */\n return 0;\n }\n '''", ")", "tmp_dir", "=", "tempfile", ".", "mkdtemp", "(", "prefix", "=", "'tmp_python_rrdtool'", ")", "bin_file_name", "=", "os", ".", "path", ".", "join", "(", "tmp_dir", ",", "'test_rrdtool'", ")", "file_name", "=", "bin_file_name", "+", "'.c'", "with", "open", "(", "file_name", ",", "'w'", ")", "as", "fp", ":", "fp", ".", "write", "(", "c_code", ")", "# try to compile it", "compiler", "=", "distutils", ".", "ccompiler", ".", "new_compiler", "(", ")", "assert", "isinstance", "(", "compiler", ",", "distutils", ".", "ccompiler", ".", "CCompiler", ")", "for", "s", "in", "include_dirs", ":", "compiler", ".", "add_include_dir", "(", "s", ")", "for", "s", "in", "library_dirs", ":", "compiler", ".", "add_library_dir", "(", "s", ")", "for", "s", "in", "libraries", ":", "compiler", ".", "add_library", "(", "s", ")", "for", "s", "in", "macros", ":", "compiler", ".", "define_macro", "(", "*", "s", ")", "distutils", ".", "sysconfig", ".", "customize_compiler", "(", "compiler", ")", "try", ":", "compiler", ".", "link_executable", "(", "compiler", ".", "compile", "(", "[", "file_name", "]", ")", ",", "bin_file_name", ",", "libraries", "=", "libraries", ")", "except", "CompileError", ":", "sys", ".", "exit", "(", "'Error: Unable to compile the binary module. Do you have the rrdtool header and libraries installed?'", ")", "ret", "=", "None", "except", "LinkError", "as", "exc", ":", "shutil", ".", "rmtree", "(", "tmp_dir", ")", "raise", "# re-raise", "else", ":", "return", "exts", "# seems to be available, compile in regular way", "shutil", ".", "rmtree", "(", "tmp_dir", ")", "return", "ret" ]
Compiler subroutine to test whether some functions are available on the target system. Since the rrdtool headers shipped with most packages do not disclose any versioning information, we cannot test whether a given function is available that way. Instead, use this to manually try to compile code and see if it works. Taken from http://stackoverflow.com/questions/28843765/setup-py-check-if-non-python-library-dependency-exists.
[ "Compiler", "subroutine", "to", "test", "whether", "some", "functions", "are", "available", "on", "the", "target", "system", ".", "Since", "the", "rrdtool", "headers", "shipped", "with", "most", "packages", "do", "not", "disclose", "any", "versioning", "information", "we", "cannot", "test", "whether", "a", "given", "function", "is", "available", "that", "way", ".", "Instead", "use", "this", "to", "manually", "try", "to", "compile", "code", "and", "see", "if", "it", "works", "." ]
python
train
marcotcr/lime
lime/utils/generic_utils.py
https://github.com/marcotcr/lime/blob/08133d47df00ed918e22005e0c98f6eefd5a1d71/lime/utils/generic_utils.py#L6-L39
def has_arg(fn, arg_name): """Checks if a callable accepts a given keyword argument. Args: fn: callable to inspect arg_name: string, keyword argument name to check Returns: bool, whether `fn` accepts a `arg_name` keyword argument. """ if sys.version_info < (3,): if isinstance(fn, types.FunctionType) or isinstance(fn, types.MethodType): arg_spec = inspect.getargspec(fn) else: try: arg_spec = inspect.getargspec(fn.__call__) except AttributeError: return False return (arg_name in arg_spec.args) elif sys.version_info < (3, 6): arg_spec = inspect.getfullargspec(fn) return (arg_name in arg_spec.args or arg_name in arg_spec.kwonlyargs) else: try: signature = inspect.signature(fn) except ValueError: # handling Cython signature = inspect.signature(fn.__call__) parameter = signature.parameters.get(arg_name) if parameter is None: return False return (parameter.kind in (inspect.Parameter.POSITIONAL_OR_KEYWORD, inspect.Parameter.KEYWORD_ONLY))
[ "def", "has_arg", "(", "fn", ",", "arg_name", ")", ":", "if", "sys", ".", "version_info", "<", "(", "3", ",", ")", ":", "if", "isinstance", "(", "fn", ",", "types", ".", "FunctionType", ")", "or", "isinstance", "(", "fn", ",", "types", ".", "MethodType", ")", ":", "arg_spec", "=", "inspect", ".", "getargspec", "(", "fn", ")", "else", ":", "try", ":", "arg_spec", "=", "inspect", ".", "getargspec", "(", "fn", ".", "__call__", ")", "except", "AttributeError", ":", "return", "False", "return", "(", "arg_name", "in", "arg_spec", ".", "args", ")", "elif", "sys", ".", "version_info", "<", "(", "3", ",", "6", ")", ":", "arg_spec", "=", "inspect", ".", "getfullargspec", "(", "fn", ")", "return", "(", "arg_name", "in", "arg_spec", ".", "args", "or", "arg_name", "in", "arg_spec", ".", "kwonlyargs", ")", "else", ":", "try", ":", "signature", "=", "inspect", ".", "signature", "(", "fn", ")", "except", "ValueError", ":", "# handling Cython", "signature", "=", "inspect", ".", "signature", "(", "fn", ".", "__call__", ")", "parameter", "=", "signature", ".", "parameters", ".", "get", "(", "arg_name", ")", "if", "parameter", "is", "None", ":", "return", "False", "return", "(", "parameter", ".", "kind", "in", "(", "inspect", ".", "Parameter", ".", "POSITIONAL_OR_KEYWORD", ",", "inspect", ".", "Parameter", ".", "KEYWORD_ONLY", ")", ")" ]
Checks if a callable accepts a given keyword argument. Args: fn: callable to inspect arg_name: string, keyword argument name to check Returns: bool, whether `fn` accepts a `arg_name` keyword argument.
[ "Checks", "if", "a", "callable", "accepts", "a", "given", "keyword", "argument", "." ]
python
train
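A quick sketch of what the check reports; note it only matches explicitly named parameters, so **kwargs catch-alls do not count:

def scale(x, factor=2):
    return x * factor

print(has_arg(scale, 'factor'))                    # True
print(has_arg(scale, 'offset'))                    # False
print(has_arg(lambda *a, **kw: None, 'anything'))  # False -- VAR_KEYWORD is not counted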
brocade/pynos
pynos/versions/ver_6/ver_6_0_1/yang/brocade_ip_policy.py
https://github.com/brocade/pynos/blob/bd8a34e98f322de3fc06750827d8bbc3a0c00380/pynos/versions/ver_6/ver_6_0_1/yang/brocade_ip_policy.py#L256-L275
def hide_routemap_holder_route_map_content_match_extcommunity_extcommunity_num(self, **kwargs): """Auto Generated Code """ config = ET.Element("config") hide_routemap_holder = ET.SubElement(config, "hide-routemap-holder", xmlns="urn:brocade.com:mgmt:brocade-ip-policy") route_map = ET.SubElement(hide_routemap_holder, "route-map") name_key = ET.SubElement(route_map, "name") name_key.text = kwargs.pop('name') action_rm_key = ET.SubElement(route_map, "action-rm") action_rm_key.text = kwargs.pop('action_rm') instance_key = ET.SubElement(route_map, "instance") instance_key.text = kwargs.pop('instance') content = ET.SubElement(route_map, "content") match = ET.SubElement(content, "match") extcommunity = ET.SubElement(match, "extcommunity") extcommunity_num = ET.SubElement(extcommunity, "extcommunity-num") extcommunity_num.text = kwargs.pop('extcommunity_num') callback = kwargs.pop('callback', self._callback) return callback(config)
[ "def", "hide_routemap_holder_route_map_content_match_extcommunity_extcommunity_num", "(", "self", ",", "*", "*", "kwargs", ")", ":", "config", "=", "ET", ".", "Element", "(", "\"config\"", ")", "hide_routemap_holder", "=", "ET", ".", "SubElement", "(", "config", ",", "\"hide-routemap-holder\"", ",", "xmlns", "=", "\"urn:brocade.com:mgmt:brocade-ip-policy\"", ")", "route_map", "=", "ET", ".", "SubElement", "(", "hide_routemap_holder", ",", "\"route-map\"", ")", "name_key", "=", "ET", ".", "SubElement", "(", "route_map", ",", "\"name\"", ")", "name_key", ".", "text", "=", "kwargs", ".", "pop", "(", "'name'", ")", "action_rm_key", "=", "ET", ".", "SubElement", "(", "route_map", ",", "\"action-rm\"", ")", "action_rm_key", ".", "text", "=", "kwargs", ".", "pop", "(", "'action_rm'", ")", "instance_key", "=", "ET", ".", "SubElement", "(", "route_map", ",", "\"instance\"", ")", "instance_key", ".", "text", "=", "kwargs", ".", "pop", "(", "'instance'", ")", "content", "=", "ET", ".", "SubElement", "(", "route_map", ",", "\"content\"", ")", "match", "=", "ET", ".", "SubElement", "(", "content", ",", "\"match\"", ")", "extcommunity", "=", "ET", ".", "SubElement", "(", "match", ",", "\"extcommunity\"", ")", "extcommunity_num", "=", "ET", ".", "SubElement", "(", "extcommunity", ",", "\"extcommunity-num\"", ")", "extcommunity_num", ".", "text", "=", "kwargs", ".", "pop", "(", "'extcommunity_num'", ")", "callback", "=", "kwargs", ".", "pop", "(", "'callback'", ",", "self", ".", "_callback", ")", "return", "callback", "(", "config", ")" ]
Auto Generated Code
[ "Auto", "Generated", "Code" ]
python
train
stephenmcd/gnotty
gnotty/bots/base.py
https://github.com/stephenmcd/gnotty/blob/bea3762dc9cbc3cb21a5ae7224091cf027273c40/gnotty/bots/base.py#L57-L65
def _dispatcher(self, connection, event): """ This is the method in ``SimpleIRCClient`` that all IRC events get passed through. Here we map events to our own custom event handlers, and call them. """ super(BaseBot, self)._dispatcher(connection, event) for handler in self.events[event.eventtype()]: handler(self, connection, event)
[ "def", "_dispatcher", "(", "self", ",", "connection", ",", "event", ")", ":", "super", "(", "BaseBot", ",", "self", ")", ".", "_dispatcher", "(", "connection", ",", "event", ")", "for", "handler", "in", "self", ".", "events", "[", "event", ".", "eventtype", "(", ")", "]", ":", "handler", "(", "self", ",", "connection", ",", "event", ")" ]
This is the method in ``SimpleIRCClient`` that all IRC events get passed through. Here we map events to our own custom event handlers, and call them.
[ "This", "is", "the", "method", "in", "SimpleIRCClient", "that", "all", "IRC", "events", "get", "passed", "through", ".", "Here", "we", "map", "events", "to", "our", "own", "custom", "event", "handlers", "and", "call", "them", "." ]
python
train
crytic/slither
slither/core/declarations/function.py
https://github.com/crytic/slither/blob/04c147f7e50223c6af458ca430befae747ccd259/slither/core/declarations/function.py#L763-L773
def all_solidity_variables_used_as_args(self): """ Return the Solidity variables directly used in a call Use of the IR to filter index access Used to catch check(msg.sender) """ if self._all_solidity_variables_used_as_args is None: self._all_solidity_variables_used_as_args = self._explore_functions( lambda x: self._explore_func_nodes(x, self._solidity_variable_in_internal_calls)) return self._all_solidity_variables_used_as_args
[ "def", "all_solidity_variables_used_as_args", "(", "self", ")", ":", "if", "self", ".", "_all_solidity_variables_used_as_args", "is", "None", ":", "self", ".", "_all_solidity_variables_used_as_args", "=", "self", ".", "_explore_functions", "(", "lambda", "x", ":", "self", ".", "_explore_func_nodes", "(", "x", ",", "self", ".", "_solidity_variable_in_internal_calls", ")", ")", "return", "self", ".", "_all_solidity_variables_used_as_args" ]
Return the Solidity variables directly used in a call Use of the IR to filter index access Used to catch check(msg.sender)
[ "Return", "the", "Solidity", "variables", "directly", "used", "in", "a", "call" ]
python
train
apache/incubator-heron
heron/tools/admin/src/python/standalone.py
https://github.com/apache/incubator-heron/blob/ad10325a0febe89ad337e561ebcbe37ec5d9a5ac/heron/tools/admin/src/python/standalone.py#L554-L571
def wait_for_master_to_start(single_master): ''' Wait for a nomad master to start ''' i = 0 while True: try: r = requests.get("http://%s:4646/v1/status/leader" % single_master) if r.status_code == 200: break except: Log.debug(sys.exc_info()[0]) Log.info("Waiting for cluster to come up... %s" % i) time.sleep(1) if i > 10: Log.error("Failed to start Nomad Cluster!") sys.exit(-1) i = i + 1
[ "def", "wait_for_master_to_start", "(", "single_master", ")", ":", "i", "=", "0", "while", "True", ":", "try", ":", "r", "=", "requests", ".", "get", "(", "\"http://%s:4646/v1/status/leader\"", "%", "single_master", ")", "if", "r", ".", "status_code", "==", "200", ":", "break", "except", ":", "Log", ".", "debug", "(", "sys", ".", "exc_info", "(", ")", "[", "0", "]", ")", "Log", ".", "info", "(", "\"Waiting for cluster to come up... %s\"", "%", "i", ")", "time", ".", "sleep", "(", "1", ")", "if", "i", ">", "10", ":", "Log", ".", "error", "(", "\"Failed to start Nomad Cluster!\"", ")", "sys", ".", "exit", "(", "-", "1", ")", "i", "=", "i", "+", "1" ]
Wait for a nomad master to start
[ "Wait", "for", "a", "nomad", "master", "to", "start" ]
python
valid
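The Nomad readiness check above is a bounded-poll loop. A minimal standalone sketch of the same idea, with a hypothetical wait_for_http_ok helper (the URL, timeout, and retry counts below are assumptions, not Heron's actual values):

import time
import requests

def wait_for_http_ok(url, attempts=10, delay=1.0):
    """Poll url until it returns HTTP 200; give up after `attempts` tries."""
    for _ in range(attempts):
        try:
            if requests.get(url, timeout=2).status_code == 200:
                return True
        except requests.RequestException:
            pass  # endpoint not up yet; fall through to the sleep
        time.sleep(delay)
    return False

# e.g. wait_for_http_ok("http://127.0.0.1:4646/v1/status/leader")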
RJT1990/pyflux
pyflux/gas/gasllt.py
https://github.com/RJT1990/pyflux/blob/297f2afc2095acd97c12e827dd500e8ea5da0c0f/pyflux/gas/gasllt.py#L311-L356
def _mean_prediction(self, theta, theta_t, Y, scores, h, t_params): """ Creates a h-step ahead mean prediction Parameters ---------- theta : np.array The past local level theta_t : np.array The past local linear trend Y : np.array The past data scores : np.array The past scores h : int How many steps ahead for the prediction t_params : np.array A vector of (transformed) latent variables Returns ---------- Y_exp : np.array Vector of past values and predictions """ Y_exp = Y.copy() theta_exp = theta.copy() theta_t_exp = theta_t.copy() scores_exp = scores.copy() #(TODO: vectorize the inner construction here) for t in range(0,h): new_value1 = theta_t_exp[-1] + theta_exp[-1] + t_params[0]*scores_exp[-1] new_value2 = theta_t_exp[-1] + t_params[1]*scores_exp[-1] if self.model_name2 == "Exponential": Y_exp = np.append(Y_exp, [1.0/self.link(new_value1)]) else: Y_exp = np.append(Y_exp, [self.link(new_value1)]) theta_exp = np.append(theta_exp, [new_value1]) # For indexing consistency theta_t_exp = np.append(theta_t_exp, [new_value2]) scores_exp = np.append(scores_exp, [0]) # expectation of score is zero return Y_exp
[ "def", "_mean_prediction", "(", "self", ",", "theta", ",", "theta_t", ",", "Y", ",", "scores", ",", "h", ",", "t_params", ")", ":", "Y_exp", "=", "Y", ".", "copy", "(", ")", "theta_exp", "=", "theta", ".", "copy", "(", ")", "theta_t_exp", "=", "theta_t", ".", "copy", "(", ")", "scores_exp", "=", "scores", ".", "copy", "(", ")", "#(TODO: vectorize the inner construction here) ", "for", "t", "in", "range", "(", "0", ",", "h", ")", ":", "new_value1", "=", "theta_t_exp", "[", "-", "1", "]", "+", "theta_exp", "[", "-", "1", "]", "+", "t_params", "[", "0", "]", "*", "scores_exp", "[", "-", "1", "]", "new_value2", "=", "theta_t_exp", "[", "-", "1", "]", "+", "t_params", "[", "1", "]", "*", "scores_exp", "[", "-", "1", "]", "if", "self", ".", "model_name2", "==", "\"Exponential\"", ":", "Y_exp", "=", "np", ".", "append", "(", "Y_exp", ",", "[", "1.0", "/", "self", ".", "link", "(", "new_value1", ")", "]", ")", "else", ":", "Y_exp", "=", "np", ".", "append", "(", "Y_exp", ",", "[", "self", ".", "link", "(", "new_value1", ")", "]", ")", "theta_exp", "=", "np", ".", "append", "(", "theta_exp", ",", "[", "new_value1", "]", ")", "# For indexing consistency", "theta_t_exp", "=", "np", ".", "append", "(", "theta_t_exp", ",", "[", "new_value2", "]", ")", "scores_exp", "=", "np", ".", "append", "(", "scores_exp", ",", "[", "0", "]", ")", "# expectation of score is zero", "return", "Y_exp" ]
Creates a h-step ahead mean prediction Parameters ---------- theta : np.array The past local level theta_t : np.array The past local linear trend Y : np.array The past data scores : np.array The past scores h : int How many steps ahead for the prediction t_params : np.array A vector of (transformed) latent variables Returns ---------- Y_exp : np.array Vector of past values and predictions
[ "Creates", "a", "h", "-", "step", "ahead", "mean", "prediction" ]
python
train
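Because the expected score is zero, the h-step forecast above reduces to rolling the level/trend recursion forward. A simplified sketch of that recursion (it drops the first-step score contribution that the real method keeps, and the link function is assumed to default to identity):

import numpy as np

def ll_trend_forecast(theta, theta_t, h, link=lambda x: x):
    # Each step: new level = last trend + last level; the trend carries forward.
    theta, theta_t = list(theta), list(theta_t)
    preds = []
    for _ in range(h):
        level = theta_t[-1] + theta[-1]
        preds.append(link(level))
        theta.append(level)
        theta_t.append(theta_t[-1])
    return np.array(preds)

print(ll_trend_forecast([1.0], [0.5], h=3))  # [1.5 2.  2.5]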
annoviko/pyclustering
pyclustering/cluster/fcm.py
https://github.com/annoviko/pyclustering/blob/98aa0dd89fd36f701668fb1eb29c8fb5662bf7d0/pyclustering/cluster/fcm.py#L279-L287
def __calculate_changes(self, updated_centers): """! @brief Calculate changes between centers. @return (float) Maximum change between centers. """ changes = numpy.sum(numpy.square(self.__centers - updated_centers), axis=1).T return numpy.max(changes)
[ "def", "__calculate_changes", "(", "self", ",", "updated_centers", ")", ":", "changes", "=", "numpy", ".", "sum", "(", "numpy", ".", "square", "(", "self", ".", "__centers", "-", "updated_centers", ")", ",", "axis", "=", "1", ")", ".", "T", "return", "numpy", ".", "max", "(", "changes", ")" ]
! @brief Calculate changes between centers. @return (float) Maximum change between centers.
[ "!" ]
python
valid
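The convergence test above is just the maximum squared displacement of any center between iterations (note the trailing .T is a no-op on the resulting 1-D array). For instance:

import numpy as np

old = np.array([[0.0, 0.0], [2.0, 2.0]])
new = np.array([[0.1, 0.0], [2.0, 2.5]])

changes = np.sum(np.square(old - new), axis=1)  # per-center squared shift
print(np.max(changes))  # 0.25 -> stop iterating once below a tolerance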
autokey/autokey
lib/autokey/iomediator/_iomediator.py
https://github.com/autokey/autokey/blob/35decb72f286ce68cd2a1f09ace8891a520b58d1/lib/autokey/iomediator/_iomediator.py#L124-L161
def send_string(self, string: str): """ Sends the given string for output. """ if not string: return string = string.replace('\n', "<enter>") string = string.replace('\t', "<tab>") _logger.debug("Send via event interface") self.__clearModifiers() modifiers = [] for section in KEY_SPLIT_RE.split(string): if len(section) > 0: if Key.is_key(section[:-1]) and section[-1] == '+' and section[:-1] in MODIFIERS: # Section is a modifier application (modifier followed by '+') modifiers.append(section[:-1]) else: if len(modifiers) > 0: # Modifiers ready for application - send modified key if Key.is_key(section): self.interface.send_modified_key(section, modifiers) modifiers = [] else: self.interface.send_modified_key(section[0], modifiers) if len(section) > 1: self.interface.send_string(section[1:]) modifiers = [] else: # Normal string/key operation if Key.is_key(section): self.interface.send_key(section) else: self.interface.send_string(section) self.__reapplyModifiers()
[ "def", "send_string", "(", "self", ",", "string", ":", "str", ")", ":", "if", "not", "string", ":", "return", "string", "=", "string", ".", "replace", "(", "'\\n'", ",", "\"<enter>\"", ")", "string", "=", "string", ".", "replace", "(", "'\\t'", ",", "\"<tab>\"", ")", "_logger", ".", "debug", "(", "\"Send via event interface\"", ")", "self", ".", "__clearModifiers", "(", ")", "modifiers", "=", "[", "]", "for", "section", "in", "KEY_SPLIT_RE", ".", "split", "(", "string", ")", ":", "if", "len", "(", "section", ")", ">", "0", ":", "if", "Key", ".", "is_key", "(", "section", "[", ":", "-", "1", "]", ")", "and", "section", "[", "-", "1", "]", "==", "'+'", "and", "section", "[", ":", "-", "1", "]", "in", "MODIFIERS", ":", "# Section is a modifier application (modifier followed by '+')", "modifiers", ".", "append", "(", "section", "[", ":", "-", "1", "]", ")", "else", ":", "if", "len", "(", "modifiers", ")", ">", "0", ":", "# Modifiers ready for application - send modified key", "if", "Key", ".", "is_key", "(", "section", ")", ":", "self", ".", "interface", ".", "send_modified_key", "(", "section", ",", "modifiers", ")", "modifiers", "=", "[", "]", "else", ":", "self", ".", "interface", ".", "send_modified_key", "(", "section", "[", "0", "]", ",", "modifiers", ")", "if", "len", "(", "section", ")", ">", "1", ":", "self", ".", "interface", ".", "send_string", "(", "section", "[", "1", ":", "]", ")", "modifiers", "=", "[", "]", "else", ":", "# Normal string/key operation", "if", "Key", ".", "is_key", "(", "section", ")", ":", "self", ".", "interface", ".", "send_key", "(", "section", ")", "else", ":", "self", ".", "interface", ".", "send_string", "(", "section", ")", "self", ".", "__reapplyModifiers", "(", ")" ]
Sends the given string for output.
[ "Sends", "the", "given", "string", "for", "output", "." ]
python
train
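The splitting step above relies on KEY_SPLIT_RE keeping the <key> and <key>+ tokens as separate sections. A rough stand-in regex (hypothetical; autokey's real pattern may differ) shows the mechanics:

import re

KEY_SPLIT_RE = re.compile(r'(<[a-z_]+>\+?)')  # hypothetical approximation

parts = [p for p in KEY_SPLIT_RE.split('<ctrl>+c then <enter>') if p]
print(parts)  # ['<ctrl>+', 'c then ', '<enter>']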
mushkevych/scheduler
synergy/system/time_trigger_factory.py
https://github.com/mushkevych/scheduler/blob/6740331360f49083c208085fb5a60ce80ebf418b/synergy/system/time_trigger_factory.py#L36-L49
def format_time_trigger_string(timer_instance): """ :param timer_instance: either instance of RepeatTimer or EventClock :return: human-readable and editable string in one of two formats: - 'at Day_of_Week-HH:MM, ..., Day_of_Week-HH:MM' - 'every NNN' """ if isinstance(timer_instance, RepeatTimer): return TRIGGER_PREAMBLE_EVERY + str(timer_instance.interval_new) elif isinstance(timer_instance, EventClock): timestamps = [repr(x) for x in timer_instance.timestamps] return TRIGGER_PREAMBLE_AT + ','.join(timestamps) else: raise ValueError('Unknown timer instance type {0}'.format(timer_instance.__class__.__name__))
[ "def", "format_time_trigger_string", "(", "timer_instance", ")", ":", "if", "isinstance", "(", "timer_instance", ",", "RepeatTimer", ")", ":", "return", "TRIGGER_PREAMBLE_EVERY", "+", "str", "(", "timer_instance", ".", "interval_new", ")", "elif", "isinstance", "(", "timer_instance", ",", "EventClock", ")", ":", "timestamps", "=", "[", "repr", "(", "x", ")", "for", "x", "in", "timer_instance", ".", "timestamps", "]", "return", "TRIGGER_PREAMBLE_AT", "+", "','", ".", "join", "(", "timestamps", ")", "else", ":", "raise", "ValueError", "(", "'Unknown timer instance type {0}'", ".", "format", "(", "timer_instance", ".", "__class__", ".", "__name__", ")", ")" ]
:param timer_instance: either instance of RepeatTimer or EventClock :return: human-readable and editable string in one of two formats: - 'at Day_of_Week-HH:MM, ..., Day_of_Week-HH:MM' - 'every NNN'
[ ":", "param", "timer_instance", ":", "either", "instance", "of", "RepeatTimer", "or", "EventClock", ":", "return", ":", "human", "-", "readable", "and", "editable", "string", "in", "one", "of", "two", "formats", ":", "-", "at", "Day_of_Week", "-", "HH", ":", "MM", "...", "Day_of_Week", "-", "HH", ":", "MM", "-", "every", "NNN" ]
python
train
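Given the two branches above, the produced strings take the two shapes named in the docstring. The preamble values below are assumptions for illustration; only the 'every NNN' / 'at ...' formats come from the source:

TRIGGER_PREAMBLE_EVERY = 'every '  # assumed value
TRIGGER_PREAMBLE_AT = 'at '        # assumed value

print(TRIGGER_PREAMBLE_EVERY + str(300))                            # every 300
print(TRIGGER_PREAMBLE_AT + ','.join(['Mon-09:00', 'Fri-17:30']))   # at Mon-09:00,Fri-17:30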
phaethon/kamene
kamene/contrib/gsm_um.py
https://github.com/phaethon/kamene/blob/11d4064844f4f68ac5d7546f5633ac7d02082914/kamene/contrib/gsm_um.py#L1781-L1790
def facilityMsToNet(SsVersionIndicator_presence=0):
    """FACILITY Section 9.3.9.2"""
    a = TpPd(pd=0x3)
    b = MessageType(mesType=0x3a) # 00111010
    c = Facility()
    packet = a / b / c
    if SsVersionIndicator_presence == 1:
        d = SsVersionIndicatorHdr(ieiSVI=0x7F, eightBitSVI=0x0)
        packet = packet / d
    return packet
[ "def", "facilityMsToNet", "(", "SsVersionIndicator_presence", "=", "0", ")", ":", "a", "=", "TpPd", "(", "pd", "=", "0x3", ")", "b", "=", "MessageType", "(", "mesType", "=", "0x3a", ")", "# 00111010", "c", "=", "Facility", "(", ")", "packet", "=", "a", "/", "b", "/", "c", "if", "SsVersionIndicator_presence", "==", "1", ":", "d", "=", "SsVersionIndicatorHdr", "(", "ieiSVI", "=", "0x7F", ",", "eightBitSVI", "=", "0x0", ")", "packet", "=", "packet", "/", "d", "return", "packet" ]
FACILITY Section 9.3.9.2
[ "FACILITY", "Section", "9", ".", "3", ".", "9", ".", "2" ]
python
train
dacut/meterer
meterer/client.py
https://github.com/dacut/meterer/blob/441e7f021e3302597f56876948a40fe8799a3375/meterer/client.py#L151-L175
def set_limits_for_pool(self, pool, **kw):
    """
    meterer.set_limits_for_pool(pool, [time_period=value])

    Sets the limits for the given pool. The valid time_periods are:
    year, month, week, day, hour.
    """
    pool_limits = {}
    for time_period in ["year", "month", "week", "day", "hour"]:
        if time_period not in kw:
            continue
        limit = kw.pop(time_period)
        if limit is not None and not isinstance(limit, (float, int)):
            raise TypeError("%s must be a float or int or None" % time_period)
        pool_limits[time_period] = limit
    if kw:
        raise ValueError("Unknown time periods specified: %s" % ", ".join(kw.keys()))
    self.cache.set("LIMIT:%s" % pool, json_dumps(pool_limits))
    return
[ "def", "set_limits_for_pool", "(", "self", ",", "pool", ",", "*", "*", "kw", ")", ":", "pool_limits", "=", "{", "}", "for", "time_period", "in", "[", "\"year\"", ",", "\"month\"", ",", "\"week\"", ",", "\"day\"", ",", "\"hour\"", "]", ":", "if", "time_period", "not", "in", "kw", ":", "continue", "limit", "=", "kw", ".", "pop", "(", "time_period", ")", "if", "limit", "is", "not", "None", "and", "not", "isinstance", "(", "limit", ",", "(", "float", ",", "int", ")", ")", ":", "raise", "TypeError", "(", "\"%s must be a float or int or None\"", "%", "time_period", ")", "pool_limits", "[", "time_period", "]", "=", "limit", "if", "kw", ":", "raise", "ValueError", "(", "\"Unknown time periods specified: %s\"", "%", "\", \"", ".", "join", "(", "kw", ".", "keys", "(", ")", ")", ")", "self", ".", "cache", ".", "set", "(", "\"LIMIT:%s\"", "%", "pool", ",", "json_dumps", "(", "pool_limits", ")", ")", "return" ]
meterer.set_limits_for_pool(pool, [time_period=value]) Sets the limits for the given pool. The valid time_periods are: year, month, week, day, hour.
[ "meterer", ".", "set_limits_for_pool", "(", "pool", "[", "time_period", "=", "value", "]", ")" ]
python
train
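The pop-known-keys-then-reject-leftovers idiom used above is worth isolating; a self-contained sketch:

def validate_periods(**kw):
    limits = {}
    for period in ("year", "month", "week", "day", "hour"):
        if period in kw:
            limits[period] = kw.pop(period)
    if kw:  # anything left over was never a valid time period
        raise ValueError("Unknown time periods specified: %s" % ", ".join(kw))
    return limits

print(validate_periods(hour=100, day=1000))  # {'day': 1000, 'hour': 100}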
RI-imaging/qpsphere
qpsphere/models/_bhfield/wrap.py
https://github.com/RI-imaging/qpsphere/blob/3cfa0e9fb8e81be8c820abbeccd47242e7972ac1/qpsphere/models/_bhfield/wrap.py#L306-L316
def check_simulation(wdir): """ Check bhdebug.txt to make sure that you specify enough digits to overcome roundoff errors. """ wdir = pathlib.Path(wdir) field = wdir / "V_0Ereim.dat" if not (field.exists() and field.stat().st_size > 130): msg = "Output {} does not exist or is too small!".format(field) raise BHFIELDExecutionError(msg)
[ "def", "check_simulation", "(", "wdir", ")", ":", "wdir", "=", "pathlib", ".", "Path", "(", "wdir", ")", "field", "=", "wdir", "/", "\"V_0Ereim.dat\"", "if", "not", "(", "field", ".", "exists", "(", ")", "and", "field", ".", "stat", "(", ")", ".", "st_size", ">", "130", ")", ":", "msg", "=", "\"Output {} does not exist or is too small!\"", ".", "format", "(", "field", ")", "raise", "BHFIELDExecutionError", "(", "msg", ")" ]
Check bhdebug.txt to make sure that you specify enough digits to overcome roundoff errors.
[ "Check", "bhdebug", ".", "txt", "to", "make", "sure", "that", "you", "specify", "enough", "digits", "to", "overcome", "roundoff", "errors", "." ]
python
train
roman-neuhauser/py-impala
impala/__init__.py
https://github.com/roman-neuhauser/py-impala/blob/8a22def2744460d20c620beb24c00332e77125d5/impala/__init__.py#L119-L216
def load_module(ldr, fqname): '''Load `fqname` from under `ldr.fspath`. The `fqname` argument is the fully qualified module name, eg. "spam.eggs.ham". As explained above, when :: finder.find_module("spam.eggs.ham") is called, "spam.eggs" has already been imported and added to `sys.modules`. However, the `find_module()` method isn't necessarily always called during an actual import: meta tools that analyze import dependencies (such as freeze, Installer or py2exe) don't actually load modules, so a finder shouldn't depend on the parent package being available in `sys.modules`. The `load_module()` method has a few responsibilities that it must fulfill before it runs any code: * If there is an existing module object named 'fullname' in `sys.modules`, the loader must use that existing module. (Otherwise, the `reload()` builtin will not work correctly.) If a module named 'fullname' does not exist in `sys.modules`, the loader must create a new module object and add it to `sys.modules`. Note that the module object must be in `sys.modules` before the loader executes the module code. This is crucial because the module code may (directly or indirectly) import itself; adding it to `sys.modules` beforehand prevents unbounded recursion in the worst case and multiple loading in the best. If the load fails, the loader needs to remove any module it may have inserted into `sys.modules`. If the module was already in `sys.modules` then the loader should leave it alone. * The `__file__` attribute must be set. This must be a string, but it may be a dummy value, for example "<frozen>". The privilege of not having a `__file__` attribute at all is reserved for built-in modules. * The `__name__` attribute must be set. If one uses `imp.new_module()` then the attribute is set automatically. * If it's a package, the __path__ variable must be set. This must be a list, but may be empty if `__path__` has no further significance to the importer (more on this later). * The `__loader__` attribute must be set to the loader object. This is mostly for introspection and reloading, but can be used for importer-specific extras, for example getting data associated with an importer. The `__package__` attribute [8] must be set. If the module is a Python module (as opposed to a built-in module or a dynamically loaded extension), it should execute the module's code in the module's global name space (`module.__dict__`). [8] PEP 366: Main module explicit relative imports http://www.python.org/dev/peps/pep-0366/ ''' scope = ldr.scope.split('.') modpath = fqname.split('.') if scope != modpath[0:len(scope)]: raise AssertionError( "%s responsible for %s got request for %s" % ( ldr.__class__.__name__, ldr.scope, fqname, ) ) if fqname in sys.modules: mod = sys.modules[fqname] else: mod = sys.modules.setdefault(fqname, types.ModuleType(fqname)) mod.__loader__ = ldr fspath = ldr.path_to(fqname) mod.__file__ = str(fspath) if fs.is_package(fspath): mod.__path__ = [ldr.fspath] mod.__package__ = str(fqname) else: mod.__package__ = str(fqname.rpartition('.')[0]) exec(fs.get_code(fspath), mod.__dict__) return mod
[ "def", "load_module", "(", "ldr", ",", "fqname", ")", ":", "scope", "=", "ldr", ".", "scope", ".", "split", "(", "'.'", ")", "modpath", "=", "fqname", ".", "split", "(", "'.'", ")", "if", "scope", "!=", "modpath", "[", "0", ":", "len", "(", "scope", ")", "]", ":", "raise", "AssertionError", "(", "\"%s responsible for %s got request for %s\"", "%", "(", "ldr", ".", "__class__", ".", "__name__", ",", "ldr", ".", "scope", ",", "fqname", ",", ")", ")", "if", "fqname", "in", "sys", ".", "modules", ":", "mod", "=", "sys", ".", "modules", "[", "fqname", "]", "else", ":", "mod", "=", "sys", ".", "modules", ".", "setdefault", "(", "fqname", ",", "types", ".", "ModuleType", "(", "fqname", ")", ")", "mod", ".", "__loader__", "=", "ldr", "fspath", "=", "ldr", ".", "path_to", "(", "fqname", ")", "mod", ".", "__file__", "=", "str", "(", "fspath", ")", "if", "fs", ".", "is_package", "(", "fspath", ")", ":", "mod", ".", "__path__", "=", "[", "ldr", ".", "fspath", "]", "mod", ".", "__package__", "=", "str", "(", "fqname", ")", "else", ":", "mod", ".", "__package__", "=", "str", "(", "fqname", ".", "rpartition", "(", "'.'", ")", "[", "0", "]", ")", "exec", "(", "fs", ".", "get_code", "(", "fspath", ")", ",", "mod", ".", "__dict__", ")", "return", "mod" ]
Load `fqname` from under `ldr.fspath`. The `fqname` argument is the fully qualified module name, eg. "spam.eggs.ham". As explained above, when :: finder.find_module("spam.eggs.ham") is called, "spam.eggs" has already been imported and added to `sys.modules`. However, the `find_module()` method isn't necessarily always called during an actual import: meta tools that analyze import dependencies (such as freeze, Installer or py2exe) don't actually load modules, so a finder shouldn't depend on the parent package being available in `sys.modules`. The `load_module()` method has a few responsibilities that it must fulfill before it runs any code: * If there is an existing module object named 'fullname' in `sys.modules`, the loader must use that existing module. (Otherwise, the `reload()` builtin will not work correctly.) If a module named 'fullname' does not exist in `sys.modules`, the loader must create a new module object and add it to `sys.modules`. Note that the module object must be in `sys.modules` before the loader executes the module code. This is crucial because the module code may (directly or indirectly) import itself; adding it to `sys.modules` beforehand prevents unbounded recursion in the worst case and multiple loading in the best. If the load fails, the loader needs to remove any module it may have inserted into `sys.modules`. If the module was already in `sys.modules` then the loader should leave it alone. * The `__file__` attribute must be set. This must be a string, but it may be a dummy value, for example "<frozen>". The privilege of not having a `__file__` attribute at all is reserved for built-in modules. * The `__name__` attribute must be set. If one uses `imp.new_module()` then the attribute is set automatically. * If it's a package, the __path__ variable must be set. This must be a list, but may be empty if `__path__` has no further significance to the importer (more on this later). * The `__loader__` attribute must be set to the loader object. This is mostly for introspection and reloading, but can be used for importer-specific extras, for example getting data associated with an importer. The `__package__` attribute [8] must be set. If the module is a Python module (as opposed to a built-in module or a dynamically loaded extension), it should execute the module's code in the module's global name space (`module.__dict__`). [8] PEP 366: Main module explicit relative imports http://www.python.org/dev/peps/pep-0366/
[ "Load", "fqname", "from", "under", "ldr", ".", "fspath", "." ]
python
train
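The core contract the docstring describes -- reuse any existing entry in sys.modules and register the module before executing its code -- boils down to a few lines. A minimal illustration with a hypothetical module name:

import sys
import types

fqname = 'spam.eggs'  # hypothetical
# setdefault covers both cases: reuse the existing module, else register a fresh one.
mod = sys.modules.setdefault(fqname, types.ModuleType(fqname))
mod.__package__ = fqname.rpartition('.')[0]
print(fqname in sys.modules, mod.__package__)  # True spam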
data-8/datascience
datascience/maps.py
https://github.com/data-8/datascience/blob/4cee38266903ca169cea4a53b8cc39502d85c464/datascience/maps.py#L42-L44
def show(self): """Publish HTML.""" IPython.display.display(IPython.display.HTML(self.as_html()))
[ "def", "show", "(", "self", ")", ":", "IPython", ".", "display", ".", "display", "(", "IPython", ".", "display", ".", "HTML", "(", "self", ".", "as_html", "(", ")", ")", ")" ]
Publish HTML.
[ "Publish", "HTML", "." ]
python
train
saltstack/salt
salt/ext/ipaddress.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/ext/ipaddress.py#L251-L268
def _count_righthand_zero_bits(number, bits): """Count the number of zero bits on the right hand side. Args: number: an integer. bits: maximum number of bits to count. Returns: The number of zero bits on the right hand side of the number. """ if number == 0: return bits for i in range(bits): if (number >> i) & 1: return i # All bits of interest were zero, even if there are more in the number return bits
[ "def", "_count_righthand_zero_bits", "(", "number", ",", "bits", ")", ":", "if", "number", "==", "0", ":", "return", "bits", "for", "i", "in", "range", "(", "bits", ")", ":", "if", "(", "number", ">>", "i", ")", "&", "1", ":", "return", "i", "# All bits of interest were zero, even if there are more in the number", "return", "bits" ]
Count the number of zero bits on the right hand side. Args: number: an integer. bits: maximum number of bits to count. Returns: The number of zero bits on the right hand side of the number.
[ "Count", "the", "number", "of", "zero", "bits", "on", "the", "right", "hand", "side", "." ]
python
train
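Equivalent to the loop above is the classic lowest-set-bit trick; both count trailing zeros:

def trailing_zero_bits(n, bits=32):
    if n == 0:
        return bits
    return (n & -n).bit_length() - 1  # isolate lowest set bit, take its index

print(trailing_zero_bits(0b101000))  # 3, matching the loop version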
DerMitch/fritzbox-smarthome
fritzhome/__main__.py
https://github.com/DerMitch/fritzbox-smarthome/blob/84cbd7c1b33e6256add041b0395ff5fccc01f103/fritzhome/__main__.py#L40-L62
def actors(context): """Display a list of actors""" fritz = context.obj fritz.login() for actor in fritz.get_actors(): click.echo("{} ({} {}; AIN {} )".format( actor.name, actor.manufacturer, actor.productname, actor.actor_id, )) if actor.has_temperature: click.echo("Temp: act {} target {}; battery (low): {}".format( actor.temperature, actor.target_temperature, actor.battery_low, )) click.echo("Temp (via get): act {} target {}".format( actor.get_temperature(), actor.get_target_temperature(), ))
[ "def", "actors", "(", "context", ")", ":", "fritz", "=", "context", ".", "obj", "fritz", ".", "login", "(", ")", "for", "actor", "in", "fritz", ".", "get_actors", "(", ")", ":", "click", ".", "echo", "(", "\"{} ({} {}; AIN {} )\"", ".", "format", "(", "actor", ".", "name", ",", "actor", ".", "manufacturer", ",", "actor", ".", "productname", ",", "actor", ".", "actor_id", ",", ")", ")", "if", "actor", ".", "has_temperature", ":", "click", ".", "echo", "(", "\"Temp: act {} target {}; battery (low): {}\"", ".", "format", "(", "actor", ".", "temperature", ",", "actor", ".", "target_temperature", ",", "actor", ".", "battery_low", ",", ")", ")", "click", ".", "echo", "(", "\"Temp (via get): act {} target {}\"", ".", "format", "(", "actor", ".", "get_temperature", "(", ")", ",", "actor", ".", "get_target_temperature", "(", ")", ",", ")", ")" ]
Display a list of actors
[ "Display", "a", "list", "of", "actors" ]
python
train
carpedm20/fbchat
fbchat/_client.py
https://github.com/carpedm20/fbchat/blob/f480d68b5773473e6daba7f66075ee30e8d737a8/fbchat/_client.py#L3653-L3681
def onReactionAdded(
    self,
    mid=None,
    reaction=None,
    author_id=None,
    thread_id=None,
    thread_type=None,
    ts=None,
    msg=None,
):
    """
    Called when the client is listening, and somebody reacts to a message

    :param mid: Message ID, that user reacted to
    :param reaction: Reaction
    :param add_reaction: Whether user added or removed reaction
    :param author_id: The ID of the person who reacted to the message
    :param thread_id: Thread ID that the action was sent to. See :ref:`intro_threads`
    :param thread_type: Type of thread that the action was sent to. See :ref:`intro_threads`
    :param ts: A timestamp of the action
    :param msg: A full set of the data received
    :type reaction: models.MessageReaction
    :type thread_type: models.ThreadType
    """
    log.info(
        "{} reacted to message {} with {} in {} ({})".format(
            author_id, mid, reaction.name, thread_id, thread_type.name
        )
    )
[ "def", "onReactionAdded", "(", "self", ",", "mid", "=", "None", ",", "reaction", "=", "None", ",", "author_id", "=", "None", ",", "thread_id", "=", "None", ",", "thread_type", "=", "None", ",", "ts", "=", "None", ",", "msg", "=", "None", ",", ")", ":", "log", ".", "info", "(", "\"{} reacted to message {} with {} in {} ({})\"", ".", "format", "(", "author_id", ",", "mid", ",", "reaction", ".", "name", ",", "thread_id", ",", "thread_type", ".", "name", ")", ")" ]
Called when the client is listening, and somebody reacts to a message

:param mid: Message ID, that user reacted to
:param reaction: Reaction
:param add_reaction: Whether user added or removed reaction
:param author_id: The ID of the person who reacted to the message
:param thread_id: Thread ID that the action was sent to. See :ref:`intro_threads`
:param thread_type: Type of thread that the action was sent to. See :ref:`intro_threads`
:param ts: A timestamp of the action
:param msg: A full set of the data received
:type reaction: models.MessageReaction
:type thread_type: models.ThreadType
[ "Called", "when", "the", "client", "is", "listening", "and", "somebody", "reacts", "to", "a", "message" ]
python
train
instacart/lore
lore/env.py
https://github.com/instacart/lore/blob/0367bde9a52e69162832906acc61e8d65c5ec5d4/lore/env.py#L82-L123
def require(packages): """Ensures that a pypi package has been installed into the App's python environment. If not, the package will be installed and your env will be rebooted. Example: :: lore.env.require('pandas') # -> pandas is required. Dependencies added to requirements.txt :param packages: requirements.txt style name and versions of packages :type packages: [unicode] """ global INSTALLED_PACKAGES, _new_requirements if _new_requirements: INSTALLED_PACKAGES = None set_installed_packages() if not INSTALLED_PACKAGES: return if not isinstance(packages, list): packages = [packages] missing = [] for package in packages: name = re.split(r'[!<>=]', package)[0].lower() if name not in INSTALLED_PACKAGES: print(ansi.info() + ' %s is required.' % package) missing += [package] if missing: mode = 'a' if os.path.exists(REQUIREMENTS) else 'w' with open(REQUIREMENTS, mode) as requirements: requirements.write('\n' + '\n'.join(missing) + '\n') print(ansi.info() + ' Dependencies added to requirements.txt. Rebooting.') _new_requirements = True import lore.__main__ lore.__main__.install(None, None) reboot('--env-checked')
[ "def", "require", "(", "packages", ")", ":", "global", "INSTALLED_PACKAGES", ",", "_new_requirements", "if", "_new_requirements", ":", "INSTALLED_PACKAGES", "=", "None", "set_installed_packages", "(", ")", "if", "not", "INSTALLED_PACKAGES", ":", "return", "if", "not", "isinstance", "(", "packages", ",", "list", ")", ":", "packages", "=", "[", "packages", "]", "missing", "=", "[", "]", "for", "package", "in", "packages", ":", "name", "=", "re", ".", "split", "(", "r'[!<>=]'", ",", "package", ")", "[", "0", "]", ".", "lower", "(", ")", "if", "name", "not", "in", "INSTALLED_PACKAGES", ":", "print", "(", "ansi", ".", "info", "(", ")", "+", "' %s is required.'", "%", "package", ")", "missing", "+=", "[", "package", "]", "if", "missing", ":", "mode", "=", "'a'", "if", "os", ".", "path", ".", "exists", "(", "REQUIREMENTS", ")", "else", "'w'", "with", "open", "(", "REQUIREMENTS", ",", "mode", ")", "as", "requirements", ":", "requirements", ".", "write", "(", "'\\n'", "+", "'\\n'", ".", "join", "(", "missing", ")", "+", "'\\n'", ")", "print", "(", "ansi", ".", "info", "(", ")", "+", "' Dependencies added to requirements.txt. Rebooting.'", ")", "_new_requirements", "=", "True", "import", "lore", ".", "__main__", "lore", ".", "__main__", ".", "install", "(", "None", ",", "None", ")", "reboot", "(", "'--env-checked'", ")" ]
Ensures that a pypi package has been installed into the App's python environment. If not, the package will be installed and your env will be rebooted. Example: :: lore.env.require('pandas') # -> pandas is required. Dependencies added to requirements.txt :param packages: requirements.txt style name and versions of packages :type packages: [unicode]
[ "Ensures", "that", "a", "pypi", "package", "has", "been", "installed", "into", "the", "App", "s", "python", "environment", ".", "If", "not", "the", "package", "will", "be", "installed", "and", "your", "env", "will", "be", "rebooted", "." ]
python
train
apple/turicreate
deps/src/libxml2-2.9.1/python/libxml2.py
https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/deps/src/libxml2-2.9.1/python/libxml2.py#L3997-L4001
def htmlDocContentDumpOutput(self, buf, encoding):
    """Dump an HTML document. Formatting return/spaces are added. """
    if buf is None: buf__o = None
    else: buf__o = buf._o
    libxml2mod.htmlDocContentDumpOutput(buf__o, self._o, encoding)
[ "def", "htmlDocContentDumpOutput", "(", "self", ",", "buf", ",", "encoding", ")", ":", "if", "buf", "is", "None", ":", "buf__o", "=", "None", "else", ":", "buf__o", "=", "buf", ".", "_o", "libxml2mod", ".", "htmlDocContentDumpOutput", "(", "buf__o", ",", "self", ".", "_o", ",", "encoding", ")" ]
Dump an HTML document. Formatting return/spaces are added.
[ "Dump", "an", "HTML", "document", ".", "Formatting", "return", "/", "spaces", "are", "added", "." ]
python
train
projectshift/shift-boiler
boiler/user/util/oauth_providers.py
https://github.com/projectshift/shift-boiler/blob/8e6f3a3e4b9493fb6c8bd16bed160ede153bfb0b/boiler/user/util/oauth_providers.py#L57-L65
def register_token_getter(self, provider): """ Register callback to retrieve token from session """ app = oauth.remote_apps[provider] decorator = getattr(app, 'tokengetter') def getter(token=None): return self.token_getter(provider, token) decorator(getter)
[ "def", "register_token_getter", "(", "self", ",", "provider", ")", ":", "app", "=", "oauth", ".", "remote_apps", "[", "provider", "]", "decorator", "=", "getattr", "(", "app", ",", "'tokengetter'", ")", "def", "getter", "(", "token", "=", "None", ")", ":", "return", "self", ".", "token_getter", "(", "provider", ",", "token", ")", "decorator", "(", "getter", ")" ]
Register callback to retrieve token from session
[ "Register", "callback", "to", "retrieve", "token", "from", "session" ]
python
train
lsst-sqre/zenodio
zenodio/harvest.py
https://github.com/lsst-sqre/zenodio/blob/24283e84bee5714450e4f206ec024c4d32f2e761/zenodio/harvest.py#L258-L317
def _pluralize(value, item_key):
    """Force the value of a datacite3 key to be a list.

    >>> _pluralize(xml_input['authors'], 'author')
    ['Sick, Jonathan', 'Economou, Frossie']

    Background
    ----------
    When `xmltodict` processes metadata, it turns XML tags into new key-value
    pairs whenever possible, even if the value should semantically be treated
    as a `list`.

    For example

    .. code-block:: xml

       <authors>
          <author>Sick, Jonathan</author>
       </authors>

    Would be rendered by `xmltodict` as::

       {'authors': {'author': 'Sick, Jonathan'}}

    While

    .. code-block:: xml

       <authors>
          <author>Sick, Jonathan</author>
          <author>Economou, Frossie</author>
       </authors>

    is rendered by `xmltodict` as::

       {'authors': {'author': ['Sick, Jonathan', 'Economou, Frossie']}}

    This function ensures that values are *always* lists so that they can be
    treated uniformly.

    Parameters
    ----------
    value : obj
        The value of a key from datacite metadata extracted by `xmltodict`.
        For example, `xmldict['authors']`.
    item_key : str
        Name of the tag for each item; for example, with the `'authors'` key
        the item key is `'author'`.

    Returns
    -------
    item_values : list
        List of values of all items.
    """
    v = value[item_key]
    if not isinstance(v, list):
        # Force a singular value to be a list
        return [v]
    else:
        return v
[ "def", "_pluralize", "(", "value", ",", "item_key", ")", ":", "v", "=", "value", "[", "item_key", "]", "if", "not", "isinstance", "(", "v", ",", "list", ")", ":", "# Force a singular value to be a list", "return", "[", "v", "]", "else", ":", "return", "v" ]
Force the value of a datacite3 key to be a list.

>>> _pluralize(xml_input['authors'], 'author')
['Sick, Jonathan', 'Economou, Frossie']

Background
----------
When `xmltodict` processes metadata, it turns XML tags into new key-value
pairs whenever possible, even if the value should semantically be treated
as a `list`.

For example

.. code-block:: xml

   <authors>
      <author>Sick, Jonathan</author>
   </authors>

Would be rendered by `xmltodict` as::

   {'authors': {'author': 'Sick, Jonathan'}}

While

.. code-block:: xml

   <authors>
      <author>Sick, Jonathan</author>
      <author>Economou, Frossie</author>
   </authors>

is rendered by `xmltodict` as::

   {'authors': {'author': ['Sick, Jonathan', 'Economou, Frossie']}}

This function ensures that values are *always* lists so that they can be
treated uniformly.

Parameters
----------
value : obj
    The value of a key from datacite metadata extracted by `xmltodict`.
    For example, `xmldict['authors']`.
item_key : str
    Name of the tag for each item; for example, with the `'authors'` key
    the item key is `'author'`.

Returns
-------
item_values : list
    List of values of all items.
[ "Force", "the", "value", "of", "a", "datacite3", "key", "to", "be", "a", "list", "." ]
python
train
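A quick check of the two xmltodict shapes the function normalizes:

def pluralize(value, item_key):
    v = value[item_key]
    return v if isinstance(v, list) else [v]

print(pluralize({'author': 'Sick, Jonathan'}, 'author'))
# ['Sick, Jonathan']
print(pluralize({'author': ['Sick, Jonathan', 'Economou, Frossie']}, 'author'))
# ['Sick, Jonathan', 'Economou, Frossie']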
ChrisBeaumont/smother
smother/cli.py
https://github.com/ChrisBeaumont/smother/blob/65d1ea6ae0060d213b0dcbb983c5aa8e7fee07bb/smother/cli.py#L59-L65
def diff(ctx, branch): """ Determine which tests intersect a git diff. """ diff = GitDiffReporter(branch) regions = diff.changed_intervals() _report_from_regions(regions, ctx.obj, file_factory=diff.old_file)
[ "def", "diff", "(", "ctx", ",", "branch", ")", ":", "diff", "=", "GitDiffReporter", "(", "branch", ")", "regions", "=", "diff", ".", "changed_intervals", "(", ")", "_report_from_regions", "(", "regions", ",", "ctx", ".", "obj", ",", "file_factory", "=", "diff", ".", "old_file", ")" ]
Determine which tests intersect a git diff.
[ "Determine", "which", "tests", "intersect", "a", "git", "diff", "." ]
python
train
espenak/djangosenchatools
djangosenchatools/buildserver.py
https://github.com/espenak/djangosenchatools/blob/da1bca9365300de303e833de4b4bd57671c1d11a/djangosenchatools/buildserver.py#L23-L38
def run(self): """ Sets up the live server and databases, and then loops over handling http requests. """ server_address = (self.host, self.port) threading = True if threading: httpd_cls = type('WSGIServer', (ThreadingMixIn, WSGIServer), {}) else: httpd_cls = WSGIServer self.httpd = httpd_cls(server_address, WSGIRequestHandler, ipv6=False) wsgi_handler = get_internal_wsgi_application() self.httpd.set_app(wsgi_handler) self.is_ready.set() self.httpd.serve_forever()
[ "def", "run", "(", "self", ")", ":", "server_address", "=", "(", "self", ".", "host", ",", "self", ".", "port", ")", "threading", "=", "True", "if", "threading", ":", "httpd_cls", "=", "type", "(", "'WSGIServer'", ",", "(", "ThreadingMixIn", ",", "WSGIServer", ")", ",", "{", "}", ")", "else", ":", "httpd_cls", "=", "WSGIServer", "self", ".", "httpd", "=", "httpd_cls", "(", "server_address", ",", "WSGIRequestHandler", ",", "ipv6", "=", "False", ")", "wsgi_handler", "=", "get_internal_wsgi_application", "(", ")", "self", ".", "httpd", ".", "set_app", "(", "wsgi_handler", ")", "self", ".", "is_ready", ".", "set", "(", ")", "self", ".", "httpd", ".", "serve_forever", "(", ")" ]
Sets up the live server and databases, and then loops over handling http requests.
[ "Sets", "up", "the", "live", "server", "and", "databases", "and", "then", "loops", "over", "handling", "http", "requests", "." ]
python
train
manahl/arctic
arctic/date/_util.py
https://github.com/manahl/arctic/blob/57e110b6e182dbab00e7e214dc26f7d9ec47c120/arctic/date/_util.py#L22-L82
def string_to_daterange(str_range, delimiter='-', as_dates=False, interval=CLOSED_CLOSED): """ Convert a string to a DateRange type. If you put only one date, it generates the relevant range for just that date or datetime till 24 hours later. You can optionally use mixtures of []/() around the DateRange for OPEN/CLOSED interval behaviour. Parameters ---------- str_range : `String` The range as a string of dates separated by one delimiter. delimiter : `String` The separator between the dates, using '-' as default. as_dates : `Boolean` True if you want the date-range to use datetime.date rather than datetime.datetime. interval : `int` CLOSED_CLOSED, OPEN_CLOSED, CLOSED_OPEN or OPEN_OPEN. **Default is CLOSED_CLOSED**. Returns ------- `arctic.date.DateRange` : the DateRange parsed from the string. Examples -------- >>> from arctic.date import string_to_daterange >>> string_to_daterange('20111020', as_dates=True) DateRange(start=datetime.date(2011, 10, 20), end=datetime.date(2011, 10, 21)) >>> string_to_daterange('201110201030') DateRange(start=datetime.datetime(2011, 10, 20, 10, 30), end=datetime.datetime(2011, 10, 21, 10, 30)) >>> string_to_daterange('20111020-20120120', as_dates=True) DateRange(start=datetime.date(2011, 10, 20), end=datetime.date(2012, 1, 20)) >>> string_to_daterange('[20111020-20120120)', as_dates=True) DateRange(start=datetime.date(2011, 10, 20), end=datetime.date(2012, 1, 20)) """ num_dates = str_range.count(delimiter) + 1 if num_dates > 2: raise ValueError('Too many dates in input string [%s] with delimiter (%s)' % (str_range, delimiter)) # Allow the user to use the [date-date), etc. range syntax to specify the interval. range_mode = Ranges.get(str_range[0] + str_range[-1], None) if range_mode: return string_to_daterange(str_range[1:-1], delimiter, as_dates, interval=range_mode) if as_dates: parse_dt = lambda s: parse(s).date() if s else None else: parse_dt = lambda s: parse(s) if s else None if num_dates == 2: d = [parse_dt(x) for x in str_range.split(delimiter)] oc = interval else: start = parse_dt(str_range) d = [start, start + datetime.timedelta(1)] oc = CLOSED_OPEN # Always use closed-open for a single date/datetime. return DateRange(d[0], d[1], oc)
[ "def", "string_to_daterange", "(", "str_range", ",", "delimiter", "=", "'-'", ",", "as_dates", "=", "False", ",", "interval", "=", "CLOSED_CLOSED", ")", ":", "num_dates", "=", "str_range", ".", "count", "(", "delimiter", ")", "+", "1", "if", "num_dates", ">", "2", ":", "raise", "ValueError", "(", "'Too many dates in input string [%s] with delimiter (%s)'", "%", "(", "str_range", ",", "delimiter", ")", ")", "# Allow the user to use the [date-date), etc. range syntax to specify the interval.", "range_mode", "=", "Ranges", ".", "get", "(", "str_range", "[", "0", "]", "+", "str_range", "[", "-", "1", "]", ",", "None", ")", "if", "range_mode", ":", "return", "string_to_daterange", "(", "str_range", "[", "1", ":", "-", "1", "]", ",", "delimiter", ",", "as_dates", ",", "interval", "=", "range_mode", ")", "if", "as_dates", ":", "parse_dt", "=", "lambda", "s", ":", "parse", "(", "s", ")", ".", "date", "(", ")", "if", "s", "else", "None", "else", ":", "parse_dt", "=", "lambda", "s", ":", "parse", "(", "s", ")", "if", "s", "else", "None", "if", "num_dates", "==", "2", ":", "d", "=", "[", "parse_dt", "(", "x", ")", "for", "x", "in", "str_range", ".", "split", "(", "delimiter", ")", "]", "oc", "=", "interval", "else", ":", "start", "=", "parse_dt", "(", "str_range", ")", "d", "=", "[", "start", ",", "start", "+", "datetime", ".", "timedelta", "(", "1", ")", "]", "oc", "=", "CLOSED_OPEN", "# Always use closed-open for a single date/datetime.", "return", "DateRange", "(", "d", "[", "0", "]", ",", "d", "[", "1", "]", ",", "oc", ")" ]
Convert a string to a DateRange type. If you put only one date, it generates the relevant range for just that date or datetime till 24 hours later. You can optionally use mixtures of []/() around the DateRange for OPEN/CLOSED interval behaviour. Parameters ---------- str_range : `String` The range as a string of dates separated by one delimiter. delimiter : `String` The separator between the dates, using '-' as default. as_dates : `Boolean` True if you want the date-range to use datetime.date rather than datetime.datetime. interval : `int` CLOSED_CLOSED, OPEN_CLOSED, CLOSED_OPEN or OPEN_OPEN. **Default is CLOSED_CLOSED**. Returns ------- `arctic.date.DateRange` : the DateRange parsed from the string. Examples -------- >>> from arctic.date import string_to_daterange >>> string_to_daterange('20111020', as_dates=True) DateRange(start=datetime.date(2011, 10, 20), end=datetime.date(2011, 10, 21)) >>> string_to_daterange('201110201030') DateRange(start=datetime.datetime(2011, 10, 20, 10, 30), end=datetime.datetime(2011, 10, 21, 10, 30)) >>> string_to_daterange('20111020-20120120', as_dates=True) DateRange(start=datetime.date(2011, 10, 20), end=datetime.date(2012, 1, 20)) >>> string_to_daterange('[20111020-20120120)', as_dates=True) DateRange(start=datetime.date(2011, 10, 20), end=datetime.date(2012, 1, 20))
[ "Convert", "a", "string", "to", "a", "DateRange", "type", ".", "If", "you", "put", "only", "one", "date", "it", "generates", "the", "relevant", "range", "for", "just", "that", "date", "or", "datetime", "till", "24", "hours", "later", ".", "You", "can", "optionally", "use", "mixtures", "of", "[]", "/", "()", "around", "the", "DateRange", "for", "OPEN", "/", "CLOSED", "interval", "behaviour", "." ]
python
train
wtolson/gnsq
gnsq/nsqd.py
https://github.com/wtolson/gnsq/blob/0fd02578b2c9c5fa30626d78579db2a46c10edac/gnsq/nsqd.py#L580-L583
def empty_topic(self, topic): """Empty all the queued messages for an existing topic.""" nsq.assert_valid_topic_name(topic) return self._request('POST', '/topic/empty', fields={'topic': topic})
[ "def", "empty_topic", "(", "self", ",", "topic", ")", ":", "nsq", ".", "assert_valid_topic_name", "(", "topic", ")", "return", "self", ".", "_request", "(", "'POST'", ",", "'/topic/empty'", ",", "fields", "=", "{", "'topic'", ":", "topic", "}", ")" ]
Empty all the queued messages for an existing topic.
[ "Empty", "all", "the", "queued", "messages", "for", "an", "existing", "topic", "." ]
python
train
vertexproject/synapse
synapse/lib/agenda.py
https://github.com/vertexproject/synapse/blob/22e67c5a8f6d7caddbcf34b39ab1bd2d6c4a6e0b/synapse/lib/agenda.py#L536-L558
async def delete(self, iden): ''' Delete an appointment ''' appt = self.appts.get(iden) if appt is None: raise s_exc.NoSuchIden() try: heappos = self.apptheap.index(appt) except ValueError: pass # this is OK, just a non-recurring appt that has no more records else: # If we're already the last item, just delete it if heappos == len(self.apptheap) - 1: del self.apptheap[heappos] else: # put the last item at the current position and reheap self.apptheap[heappos] = self.apptheap.pop() heapq.heapify(self.apptheap) del self.appts[iden] await self._hivedict.pop(iden)
[ "async", "def", "delete", "(", "self", ",", "iden", ")", ":", "appt", "=", "self", ".", "appts", ".", "get", "(", "iden", ")", "if", "appt", "is", "None", ":", "raise", "s_exc", ".", "NoSuchIden", "(", ")", "try", ":", "heappos", "=", "self", ".", "apptheap", ".", "index", "(", "appt", ")", "except", "ValueError", ":", "pass", "# this is OK, just a non-recurring appt that has no more records", "else", ":", "# If we're already the last item, just delete it", "if", "heappos", "==", "len", "(", "self", ".", "apptheap", ")", "-", "1", ":", "del", "self", ".", "apptheap", "[", "heappos", "]", "else", ":", "# put the last item at the current position and reheap", "self", ".", "apptheap", "[", "heappos", "]", "=", "self", ".", "apptheap", ".", "pop", "(", ")", "heapq", ".", "heapify", "(", "self", ".", "apptheap", ")", "del", "self", ".", "appts", "[", "iden", "]", "await", "self", ".", "_hivedict", ".", "pop", "(", "iden", ")" ]
Delete an appointment
[ "Delete", "an", "appointment" ]
python
train
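The swap-with-last deletion used above is a general trick for removing an arbitrary item from a heapq list; a standalone sketch:

import heapq

def heap_remove(heap, item):
    pos = heap.index(item)        # ValueError if absent, like NoSuchIden above
    if pos == len(heap) - 1:
        heap.pop()                # already last: no reheap needed
    else:
        heap[pos] = heap.pop()    # move tail into the hole, then restore order
        heapq.heapify(heap)

h = [1, 3, 2, 7, 4]
heapq.heapify(h)
heap_remove(h, 3)
print(sorted(h))  # [1, 2, 4, 7]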
nerdvegas/rez
src/rez/vendor/amqp/method_framing.py
https://github.com/nerdvegas/rez/blob/1d3b846d53b5b5404edfe8ddb9083f9ceec8c5e7/src/rez/vendor/amqp/method_framing.py#L99-L130
def _next_method(self): """Read the next method from the source, once one complete method has been assembled it is placed in the internal queue.""" queue = self.queue put = self._quick_put read_frame = self.source.read_frame while not queue: try: frame_type, channel, payload = read_frame() except Exception as exc: # # Connection was closed? Framing Error? # put(exc) break self.bytes_recv += 1 if frame_type not in (self.expected_types[channel], 8): put(( channel, UnexpectedFrame( 'Received frame {0} while expecting type: {1}'.format( frame_type, self.expected_types[channel])))) elif frame_type == 1: self._process_method_frame(channel, payload) elif frame_type == 2: self._process_content_header(channel, payload) elif frame_type == 3: self._process_content_body(channel, payload) elif frame_type == 8: self._process_heartbeat(channel, payload)
[ "def", "_next_method", "(", "self", ")", ":", "queue", "=", "self", ".", "queue", "put", "=", "self", ".", "_quick_put", "read_frame", "=", "self", ".", "source", ".", "read_frame", "while", "not", "queue", ":", "try", ":", "frame_type", ",", "channel", ",", "payload", "=", "read_frame", "(", ")", "except", "Exception", "as", "exc", ":", "#", "# Connection was closed? Framing Error?", "#", "put", "(", "exc", ")", "break", "self", ".", "bytes_recv", "+=", "1", "if", "frame_type", "not", "in", "(", "self", ".", "expected_types", "[", "channel", "]", ",", "8", ")", ":", "put", "(", "(", "channel", ",", "UnexpectedFrame", "(", "'Received frame {0} while expecting type: {1}'", ".", "format", "(", "frame_type", ",", "self", ".", "expected_types", "[", "channel", "]", ")", ")", ")", ")", "elif", "frame_type", "==", "1", ":", "self", ".", "_process_method_frame", "(", "channel", ",", "payload", ")", "elif", "frame_type", "==", "2", ":", "self", ".", "_process_content_header", "(", "channel", ",", "payload", ")", "elif", "frame_type", "==", "3", ":", "self", ".", "_process_content_body", "(", "channel", ",", "payload", ")", "elif", "frame_type", "==", "8", ":", "self", ".", "_process_heartbeat", "(", "channel", ",", "payload", ")" ]
Read the next method from the source, once one complete method has been assembled it is placed in the internal queue.
[ "Read", "the", "next", "method", "from", "the", "source", "once", "one", "complete", "method", "has", "been", "assembled", "it", "is", "placed", "in", "the", "internal", "queue", "." ]
python
train
hydpy-dev/hydpy
hydpy/core/selectiontools.py
https://github.com/hydpy-dev/hydpy/blob/1bc6a82cf30786521d86b36e27900c6717d3348d/hydpy/core/selectiontools.py#L734-L743
def deselect_nodenames(self, *substrings: str) -> 'Selection': """Restrict the current selection to all nodes with a name not containing at least one of the given substrings (does not affect any elements). See the documentation on method |Selection.search_nodenames| for additional information. """ self.nodes -= self.search_nodenames(*substrings).nodes return self
[ "def", "deselect_nodenames", "(", "self", ",", "*", "substrings", ":", "str", ")", "->", "'Selection'", ":", "self", ".", "nodes", "-=", "self", ".", "search_nodenames", "(", "*", "substrings", ")", ".", "nodes", "return", "self" ]
Restrict the current selection to all nodes with a name not containing at least one of the given substrings (does not affect any elements). See the documentation on method |Selection.search_nodenames| for additional information.
[ "Restrict", "the", "current", "selection", "to", "all", "nodes", "with", "a", "name", "not", "containing", "at", "least", "one", "of", "the", "given", "substrings", "(", "does", "not", "affect", "any", "elements", ")", "." ]
python
train
Scoppio/RagnarokEngine3
Tutorials/Platforming Block - PyGame Release/Game/Code/Ragnarok.py
https://github.com/Scoppio/RagnarokEngine3/blob/4395d419ccd64fe9327c41f200b72ee0176ad896/Tutorials/Platforming Block - PyGame Release/Game/Code/Ragnarok.py#L1097-L1107
def request_object(self):
    """Grab an object from the pool. If the pool is empty, a new object will be generated and returned."""
    obj_to_return = None
    if self.queue.count > 0:
        obj_to_return = self.__dequeue()
    else:
        #The queue is empty, generate a new item.
        self.__init_object()
        obj_to_return = self.__dequeue()
    self.active_objects += 1
    return obj_to_return
[ "def", "request_object", "(", "self", ")", ":", "obj_to_return", "=", "None", "if", "self", ".", "queue", ".", "count", ">", "0", ":", "obj_to_return", "=", "self", ".", "__dequeue", "(", ")", "else", ":", "#The queue is empty, generate a new item.", "self", ".", "__init_object", "(", ")", "obj_to_return", "=", "self", ".", "__dequeue", "(", ")", "self", ".", "active_objects", "+=", "1", "return", "obj_to_return" ]
Grab an object from the pool. If the pool is empty, a new object will be generated and returned.
[ "Grab", "an", "object", "from", "the", "pool", ".", "If", "the", "pool", "is", "empty", "a", "new", "object", "will", "be", "generated", "and", "returned", "." ]
python
train
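A compact version of the same pool contract -- reuse released objects, lazily create when empty -- for reference; the release method here is an assumption about the wider class, not shown in the row above:

import collections

class Pool:
    def __init__(self, factory):
        self._factory = factory
        self._queue = collections.deque()
        self.active_objects = 0

    def request_object(self):
        obj = self._queue.popleft() if self._queue else self._factory()
        self.active_objects += 1
        return obj

    def release(self, obj):  # assumed counterpart
        self._queue.append(obj)
        self.active_objects -= 1

pool = Pool(dict)
a = pool.request_object()
pool.release(a)
print(a is pool.request_object())  # True: the released object is reused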
ga4gh/ga4gh-server
ga4gh/server/datamodel/obo_parser.py
https://github.com/ga4gh/ga4gh-server/blob/1aa18922ef136db8604f6f098cb1732cba6f2a76/ga4gh/server/datamodel/obo_parser.py#L191-L211
def _add_to_typedef(self, typedef_curr, line, lnum): """Add new fields to the current typedef.""" mtch = re.match(r'^(\S+):\s*(\S.*)$', line) if mtch: field_name = mtch.group(1) field_value = mtch.group(2).split('!')[0].rstrip() if field_name == "id": self._chk_none(typedef_curr.id, lnum) typedef_curr.id = field_value elif field_name == "name": self._chk_none(typedef_curr.name, lnum) typedef_curr.name = field_value elif field_name == "transitive_over": typedef_curr.transitive_over.append(field_value) elif field_name == "inverse_of": self._chk_none(typedef_curr.inverse_of, lnum) typedef_curr.inverse_of = field_value # Note: there are other tags that aren't imported here. else: self._die("UNEXPECTED FIELD CONTENT: {L}\n".format(L=line), lnum)
[ "def", "_add_to_typedef", "(", "self", ",", "typedef_curr", ",", "line", ",", "lnum", ")", ":", "mtch", "=", "re", ".", "match", "(", "r'^(\\S+):\\s*(\\S.*)$'", ",", "line", ")", "if", "mtch", ":", "field_name", "=", "mtch", ".", "group", "(", "1", ")", "field_value", "=", "mtch", ".", "group", "(", "2", ")", ".", "split", "(", "'!'", ")", "[", "0", "]", ".", "rstrip", "(", ")", "if", "field_name", "==", "\"id\"", ":", "self", ".", "_chk_none", "(", "typedef_curr", ".", "id", ",", "lnum", ")", "typedef_curr", ".", "id", "=", "field_value", "elif", "field_name", "==", "\"name\"", ":", "self", ".", "_chk_none", "(", "typedef_curr", ".", "name", ",", "lnum", ")", "typedef_curr", ".", "name", "=", "field_value", "elif", "field_name", "==", "\"transitive_over\"", ":", "typedef_curr", ".", "transitive_over", ".", "append", "(", "field_value", ")", "elif", "field_name", "==", "\"inverse_of\"", ":", "self", ".", "_chk_none", "(", "typedef_curr", ".", "inverse_of", ",", "lnum", ")", "typedef_curr", ".", "inverse_of", "=", "field_value", "# Note: there are other tags that aren't imported here.", "else", ":", "self", ".", "_die", "(", "\"UNEXPECTED FIELD CONTENT: {L}\\n\"", ".", "format", "(", "L", "=", "line", ")", ",", "lnum", ")" ]
Add new fields to the current typedef.
[ "Add", "new", "fields", "to", "the", "current", "typedef", "." ]
python
train
tanghaibao/jcvi
jcvi/variation/cnv.py
https://github.com/tanghaibao/jcvi/blob/d2e31a77b6ade7f41f3b321febc2b4744d1cdeca/jcvi/variation/cnv.py#L541-L547
def counter_format(counter): """ Pretty print a counter so that it appears as: "2:200,3:100,4:20" """ if not counter: return "na" return ",".join("{}:{}".format(*z) for z in sorted(counter.items()))
[ "def", "counter_format", "(", "counter", ")", ":", "if", "not", "counter", ":", "return", "\"na\"", "return", "\",\"", ".", "join", "(", "\"{}:{}\"", ".", "format", "(", "*", "z", ")", "for", "z", "in", "sorted", "(", "counter", ".", "items", "(", ")", ")", ")" ]
Pretty print a counter so that it appears as: "2:200,3:100,4:20"
[ "Pretty", "print", "a", "counter", "so", "that", "it", "appears", "as", ":", "2", ":", "200", "3", ":", "100", "4", ":", "20" ]
python
train
sassoo/goldman
goldman/resources/s3_model.py
https://github.com/sassoo/goldman/blob/b72540c9ad06b5c68aadb1b4fa8cb0b716260bf2/goldman/resources/s3_model.py#L84-L118
def on_post(self, req, resp, rid): """ Deserialize the file upload & save it to S3 File uploads are associated with a model of some kind. Ensure the associating model exists first & foremost. """ signals.pre_req.send(self.model) signals.pre_req_upload.send(self.model) props = req.deserialize(self.mimetypes) model = find(self.model, rid) signals.pre_upload.send(self.model, model=model) try: conn = s3_connect(self.key, self.secret) path = self._gen_s3_path(model, props) s3_url = s3_upload(self.acl, self.bucket, conn, props['content'], props['content-type'], path) except IOError: abort(ServiceUnavailable(**{ 'detail': 'The upload attempt failed unexpectedly', })) else: signals.post_upload.send(self.model, model=model, url=s3_url) resp.location = s3_url resp.status = falcon.HTTP_201 resp.serialize({'data': {'url': s3_url}}) signals.post_req.send(self.model) signals.post_req_upload.send(self.model)
[ "def", "on_post", "(", "self", ",", "req", ",", "resp", ",", "rid", ")", ":", "signals", ".", "pre_req", ".", "send", "(", "self", ".", "model", ")", "signals", ".", "pre_req_upload", ".", "send", "(", "self", ".", "model", ")", "props", "=", "req", ".", "deserialize", "(", "self", ".", "mimetypes", ")", "model", "=", "find", "(", "self", ".", "model", ",", "rid", ")", "signals", ".", "pre_upload", ".", "send", "(", "self", ".", "model", ",", "model", "=", "model", ")", "try", ":", "conn", "=", "s3_connect", "(", "self", ".", "key", ",", "self", ".", "secret", ")", "path", "=", "self", ".", "_gen_s3_path", "(", "model", ",", "props", ")", "s3_url", "=", "s3_upload", "(", "self", ".", "acl", ",", "self", ".", "bucket", ",", "conn", ",", "props", "[", "'content'", "]", ",", "props", "[", "'content-type'", "]", ",", "path", ")", "except", "IOError", ":", "abort", "(", "ServiceUnavailable", "(", "*", "*", "{", "'detail'", ":", "'The upload attempt failed unexpectedly'", ",", "}", ")", ")", "else", ":", "signals", ".", "post_upload", ".", "send", "(", "self", ".", "model", ",", "model", "=", "model", ",", "url", "=", "s3_url", ")", "resp", ".", "location", "=", "s3_url", "resp", ".", "status", "=", "falcon", ".", "HTTP_201", "resp", ".", "serialize", "(", "{", "'data'", ":", "{", "'url'", ":", "s3_url", "}", "}", ")", "signals", ".", "post_req", ".", "send", "(", "self", ".", "model", ")", "signals", ".", "post_req_upload", ".", "send", "(", "self", ".", "model", ")" ]
Deserialize the file upload & save it to S3 File uploads are associated with a model of some kind. Ensure the associating model exists first & foremost.
[ "Deserialize", "the", "file", "upload", "&", "save", "it", "to", "S3" ]
python
train
dcramer/django-ratings
djangoratings/templatetags/ratings.py
https://github.com/dcramer/django-ratings/blob/4d00dedc920a4e32d650dc12d5f480c51fc6216c/djangoratings/templatetags/ratings.py#L34-L52
def do_rating_by_request(parser, token): """ Retrieves the ``Vote`` cast by a user on a particular object and stores it in a context variable. If the user has not voted, the context variable will be 0. Example usage:: {% rating_by_request request on instance as vote %} """ bits = token.contents.split() if len(bits) != 6: raise template.TemplateSyntaxError("'%s' tag takes exactly five arguments" % bits[0]) if bits[2] != 'on': raise template.TemplateSyntaxError("second argument to '%s' tag must be 'on'" % bits[0]) if bits[4] != 'as': raise template.TemplateSyntaxError("fourth argument to '%s' tag must be 'as'" % bits[0]) return RatingByRequestNode(bits[1], bits[3], bits[5])
[ "def", "do_rating_by_request", "(", "parser", ",", "token", ")", ":", "bits", "=", "token", ".", "contents", ".", "split", "(", ")", "if", "len", "(", "bits", ")", "!=", "6", ":", "raise", "template", ".", "TemplateSyntaxError", "(", "\"'%s' tag takes exactly five arguments\"", "%", "bits", "[", "0", "]", ")", "if", "bits", "[", "2", "]", "!=", "'on'", ":", "raise", "template", ".", "TemplateSyntaxError", "(", "\"second argument to '%s' tag must be 'on'\"", "%", "bits", "[", "0", "]", ")", "if", "bits", "[", "4", "]", "!=", "'as'", ":", "raise", "template", ".", "TemplateSyntaxError", "(", "\"fourth argument to '%s' tag must be 'as'\"", "%", "bits", "[", "0", "]", ")", "return", "RatingByRequestNode", "(", "bits", "[", "1", "]", ",", "bits", "[", "3", "]", ",", "bits", "[", "5", "]", ")" ]
Retrieves the ``Vote`` cast by a user on a particular object and stores it in a context variable. If the user has not voted, the context variable will be 0. Example usage:: {% rating_by_request request on instance as vote %}
[ "Retrieves", "the", "Vote", "cast", "by", "a", "user", "on", "a", "particular", "object", "and", "stores", "it", "in", "a", "context", "variable", ".", "If", "the", "user", "has", "not", "voted", "the", "context", "variable", "will", "be", "0", ".", "Example", "usage", "::", "{", "%", "rating_by_request", "request", "on", "instance", "as", "vote", "%", "}" ]
python
train
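A standalone sketch of the token validation above; `contents` mimics Django's token.contents for `{% rating_by_request request on instance as vote %}`:

contents = "rating_by_request request on instance as vote"
bits = contents.split()
assert len(bits) == 6          # tag name plus five arguments
assert bits[2] == "on" and bits[4] == "as"
print(bits[1], bits[3], bits[5])  # request instance vote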
arne-cl/discoursegraphs
src/discoursegraphs/readwrite/geoff.py
https://github.com/arne-cl/discoursegraphs/blob/842f0068a3190be2c75905754521b176b25a54fb/src/discoursegraphs/readwrite/geoff.py#L37-L67
def edge2geoff(from_node, to_node, properties, edge_relationship_name, encoder): """converts a NetworkX edge into a Geoff string. Parameters ---------- from_node : str or int the ID of a NetworkX source node to_node : str or int the ID of a NetworkX target node properties : dict a dictionary of edge attributes edge_relationship_name : str string that describes the relationship between the two nodes encoder : json.JSONEncoder an instance of a JSON encoder (e.g. `json.JSONEncoder`) Returns ------- geoff : str a Geoff string """ edge_string = None if properties: args = [from_node, edge_relationship_name, encoder.encode(properties), to_node] edge_string = '({0})-[:{1} {2}]->({3})'.format(*args) else: args = [from_node, edge_relationship_name, to_node] edge_string = '({0})-[:{1}]->({2})'.format(*args) return edge_string
[ "def", "edge2geoff", "(", "from_node", ",", "to_node", ",", "properties", ",", "edge_relationship_name", ",", "encoder", ")", ":", "edge_string", "=", "None", "if", "properties", ":", "args", "=", "[", "from_node", ",", "edge_relationship_name", ",", "encoder", ".", "encode", "(", "properties", ")", ",", "to_node", "]", "edge_string", "=", "'({0})-[:{1} {2}]->({3})'", ".", "format", "(", "*", "args", ")", "else", ":", "args", "=", "[", "from_node", ",", "edge_relationship_name", ",", "to_node", "]", "edge_string", "=", "'({0})-[:{1}]->({2})'", ".", "format", "(", "*", "args", ")", "return", "edge_string" ]
converts a NetworkX edge into a Geoff string. Parameters ---------- from_node : str or int the ID of a NetworkX source node to_node : str or int the ID of a NetworkX target node properties : dict a dictionary of edge attributes edge_relationship_name : str string that describes the relationship between the two nodes encoder : json.JSONEncoder an instance of a JSON encoder (e.g. `json.JSONEncoder`) Returns ------- geoff : str a Geoff string
[ "converts", "a", "NetworkX", "edge", "into", "a", "Geoff", "string", "." ]
python
train
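The string assembly above, in isolation; node IDs, relationship name and properties here are hypothetical:

import json

encoder = json.JSONEncoder()
props = {"weight": 0.5}
args = ["n1", "dominates", encoder.encode(props), "n2"]
print('({0})-[:{1} {2}]->({3})'.format(*args))
# (n1)-[:dominates {"weight": 0.5}]->(n2)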
mattharrison/rst2odp
odplib/preso.py
https://github.com/mattharrison/rst2odp/blob/4adbf29b28c8207ec882f792ded07e98b1d3e7d0/odplib/preso.py#L1705-L1721
def style_node(self, additional_style_attrib=None): """ generate a style node (for automatic-styles) could specify additional attributes such as 'style:parent-style-name' or 'style:list-style-name' """ style_attrib = {"style:name": self.name, "style:family": self.FAMILY} if additional_style_attrib: style_attrib.update(additional_style_attrib) if self.PARENT_STYLE_DICT: style_attrib.update(self.PARENT_STYLE_DICT) node = el("style:style", attrib=style_attrib) props = sub_el(node, self.STYLE_PROP, attrib=self.styles) return node
[ "def", "style_node", "(", "self", ",", "additional_style_attrib", "=", "None", ")", ":", "style_attrib", "=", "{", "\"style:name\"", ":", "self", ".", "name", ",", "\"style:family\"", ":", "self", ".", "FAMILY", "}", "if", "additional_style_attrib", ":", "style_attrib", ".", "update", "(", "additional_style_attrib", ")", "if", "self", ".", "PARENT_STYLE_DICT", ":", "style_attrib", ".", "update", "(", "self", ".", "PARENT_STYLE_DICT", ")", "node", "=", "el", "(", "\"style:style\"", ",", "attrib", "=", "style_attrib", ")", "props", "=", "sub_el", "(", "node", ",", "self", ".", "STYLE_PROP", ",", "attrib", "=", "self", ".", "styles", ")", "return", "node" ]
generate a style node (for automatic-styles)

additional attributes such as 'style:parent-style-name' or 'style:list-style-name' may be specified
[ "generate", "a", "style", "node", "(", "for", "automatic", "-", "styles", ")" ]
python
train
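A rough ElementTree sketch of the element the method builds; the real el()/sub_el() helpers and the style names used here are assumptions, not odplib's API:

import xml.etree.ElementTree as ET

node = ET.Element("style:style", {"style:name": "P1", "style:family": "paragraph"})
ET.SubElement(node, "style:paragraph-properties", {"fo:text-align": "center"})
print(ET.tostring(node).decode())
# <style:style style:name="P1" style:family="paragraph"><style:paragraph-properties fo:text-align="center" /></style:style>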
numenta/htmresearch
htmresearch/regions/ColumnPoolerRegion.py
https://github.com/numenta/htmresearch/blob/70c096b09a577ea0432c3f3bfff4442d4871b7aa/htmresearch/regions/ColumnPoolerRegion.py#L421-L477
def compute(self, inputs, outputs): """ Run one iteration of compute. Note that if the reset signal is True (1) we assume this iteration represents the *end* of a sequence. The output will contain the representation to this point and any history will then be reset. The output at the next compute will start fresh, presumably with bursting columns. """ # Handle reset first (should be sent with an empty signal) if "resetIn" in inputs: assert len(inputs["resetIn"]) == 1 if inputs["resetIn"][0] != 0: # send empty output self.reset() outputs["feedForwardOutput"][:] = 0 outputs["activeCells"][:] = 0 return feedforwardInput = numpy.asarray(inputs["feedforwardInput"].nonzero()[0], dtype="uint32") if "feedforwardGrowthCandidates" in inputs: feedforwardGrowthCandidates = numpy.asarray( inputs["feedforwardGrowthCandidates"].nonzero()[0], dtype="uint32") else: feedforwardGrowthCandidates = feedforwardInput if "lateralInput" in inputs: lateralInputs = tuple(numpy.asarray(singleInput.nonzero()[0], dtype="uint32") for singleInput in numpy.split(inputs["lateralInput"], self.numOtherCorticalColumns)) else: lateralInputs = () if "predictedInput" in inputs: predictedInput = numpy.asarray( inputs["predictedInput"].nonzero()[0], dtype="uint32") else: predictedInput = None # Send the inputs into the Column Pooler. self._pooler.compute(feedforwardInput, lateralInputs, feedforwardGrowthCandidates, learn=self.learningMode, predictedInput = predictedInput) # Extract the active / predicted cells and put them into binary arrays. outputs["activeCells"][:] = 0 outputs["activeCells"][self._pooler.getActiveCells()] = 1 # Send appropriate output to feedForwardOutput. if self.defaultOutputType == "active": outputs["feedForwardOutput"][:] = outputs["activeCells"] else: raise Exception("Unknown outputType: " + self.defaultOutputType)
[ "def", "compute", "(", "self", ",", "inputs", ",", "outputs", ")", ":", "# Handle reset first (should be sent with an empty signal)", "if", "\"resetIn\"", "in", "inputs", ":", "assert", "len", "(", "inputs", "[", "\"resetIn\"", "]", ")", "==", "1", "if", "inputs", "[", "\"resetIn\"", "]", "[", "0", "]", "!=", "0", ":", "# send empty output", "self", ".", "reset", "(", ")", "outputs", "[", "\"feedForwardOutput\"", "]", "[", ":", "]", "=", "0", "outputs", "[", "\"activeCells\"", "]", "[", ":", "]", "=", "0", "return", "feedforwardInput", "=", "numpy", ".", "asarray", "(", "inputs", "[", "\"feedforwardInput\"", "]", ".", "nonzero", "(", ")", "[", "0", "]", ",", "dtype", "=", "\"uint32\"", ")", "if", "\"feedforwardGrowthCandidates\"", "in", "inputs", ":", "feedforwardGrowthCandidates", "=", "numpy", ".", "asarray", "(", "inputs", "[", "\"feedforwardGrowthCandidates\"", "]", ".", "nonzero", "(", ")", "[", "0", "]", ",", "dtype", "=", "\"uint32\"", ")", "else", ":", "feedforwardGrowthCandidates", "=", "feedforwardInput", "if", "\"lateralInput\"", "in", "inputs", ":", "lateralInputs", "=", "tuple", "(", "numpy", ".", "asarray", "(", "singleInput", ".", "nonzero", "(", ")", "[", "0", "]", ",", "dtype", "=", "\"uint32\"", ")", "for", "singleInput", "in", "numpy", ".", "split", "(", "inputs", "[", "\"lateralInput\"", "]", ",", "self", ".", "numOtherCorticalColumns", ")", ")", "else", ":", "lateralInputs", "=", "(", ")", "if", "\"predictedInput\"", "in", "inputs", ":", "predictedInput", "=", "numpy", ".", "asarray", "(", "inputs", "[", "\"predictedInput\"", "]", ".", "nonzero", "(", ")", "[", "0", "]", ",", "dtype", "=", "\"uint32\"", ")", "else", ":", "predictedInput", "=", "None", "# Send the inputs into the Column Pooler.", "self", ".", "_pooler", ".", "compute", "(", "feedforwardInput", ",", "lateralInputs", ",", "feedforwardGrowthCandidates", ",", "learn", "=", "self", ".", "learningMode", ",", "predictedInput", "=", "predictedInput", ")", "# Extract the active / predicted cells and put them into binary arrays.", "outputs", "[", "\"activeCells\"", "]", "[", ":", "]", "=", "0", "outputs", "[", "\"activeCells\"", "]", "[", "self", ".", "_pooler", ".", "getActiveCells", "(", ")", "]", "=", "1", "# Send appropriate output to feedForwardOutput.", "if", "self", ".", "defaultOutputType", "==", "\"active\"", ":", "outputs", "[", "\"feedForwardOutput\"", "]", "[", ":", "]", "=", "outputs", "[", "\"activeCells\"", "]", "else", ":", "raise", "Exception", "(", "\"Unknown outputType: \"", "+", "self", ".", "defaultOutputType", ")" ]
Run one iteration of compute. Note that if the reset signal is True (1) we assume this iteration represents the *end* of a sequence. The output will contain the representation to this point and any history will then be reset. The output at the next compute will start fresh, presumably with bursting columns.
[ "Run", "one", "iteration", "of", "compute", "." ]
python
train
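The dense-to-sparse conversion applied to each input above, in isolation:

import numpy as np

dense = np.array([0, 1, 0, 0, 1, 1])
active = np.asarray(dense.nonzero()[0], dtype="uint32")
print(active)  # [1 4 5]

out = np.zeros(6)  # writing active indices back into a binary output array
out[active] = 1
print(out)         # [0. 1. 0. 0. 1. 1.]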
richardchien/python-cqhttp
cqhttp_helper.py
https://github.com/richardchien/python-cqhttp/blob/1869819a8f89001e3f70668e31afc6c78f7f5bc2/cqhttp_helper.py#L321-L338
def send_msg_async(self, *, message_type, user_id=None, group_id=None, discuss_id=None, message,
                       auto_escape=False):
        """
        Send a message (asynchronous version)

        ------------

        :param str message_type: the message type; supports `private`, `group` and `discuss`, corresponding to private chats, groups and discussion groups
        :param int user_id: the target QQ number (required when the message type is `private`)
        :param int group_id: the group number (required when the message type is `group`)
        :param int discuss_id: the discussion group ID (obtained from a reported message; required when the message type is `discuss`)
        :param str | list[ dict[ str, unknown ] ] message: the content to send
        :param bool auto_escape: whether to send the message content as plain text (i.e. without parsing CQ codes); has no effect when `message` is of type `list`
        :return: None
        :rtype: None
        """
        return super().__getattr__('send_msg_async') \
            (message_type=message_type, user_id=user_id, group_id=group_id, discuss_id=discuss_id, message=message,
             auto_escape=auto_escape)
[ "def", "send_msg_async", "(", "self", ",", "*", ",", "message_type", ",", "user_id", "=", "None", ",", "group_id", "=", "None", ",", "discuss_id", "=", "None", ",", "message", ",", "auto_escape", "=", "False", ")", ":", "return", "super", "(", ")", ".", "__getattr__", "(", "'send_msg_async'", ")", "(", "message_type", "=", "message_type", ",", "user_id", "=", "user_id", ",", "group_id", "=", "group_id", ",", "discuss_id", "=", "discuss_id", ",", "message", "=", "message", ",", "auto_escape", "=", "auto_escape", ")" ]
Send a message (asynchronous version)

------------

:param str message_type: the message type; supports `private`, `group` and `discuss`, corresponding to private chats, groups and discussion groups
:param int user_id: the target QQ number (required when the message type is `private`)
:param int group_id: the group number (required when the message type is `group`)
:param int discuss_id: the discussion group ID (obtained from a reported message; required when the message type is `discuss`)
:param str | list[ dict[ str, unknown ] ] message: the content to send
:param bool auto_escape: whether to send the message content as plain text (i.e. without parsing CQ codes); has no effect when `message` is of type `list`
:return: None
:rtype: None
[ "Send", "a", "message", "(", "asynchronous", "version", ")" ]
python
valid
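A hedged usage sketch; the bot construction follows the library's README, while the API address and QQ number are hypothetical:

from cqhttp import CQHttp

bot = CQHttp(api_root='http://127.0.0.1:5700/')
bot.send_msg_async(message_type='private', user_id=123456, message='hello')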
HPAC/matchpy
matchpy/expressions/functions.py
https://github.com/HPAC/matchpy/blob/06b2ec50ee0efdf3dd183768c0ffdb51b7efc393/matchpy/expressions/functions.py#L103-L112
def get_variables(expression, variables=None): """Returns the set of variable names in the given expression.""" if variables is None: variables = set() if hasattr(expression, 'variable_name') and expression.variable_name is not None: variables.add(expression.variable_name) if isinstance(expression, Operation): for operand in op_iter(expression): get_variables(operand, variables) return variables
[ "def", "get_variables", "(", "expression", ",", "variables", "=", "None", ")", ":", "if", "variables", "is", "None", ":", "variables", "=", "set", "(", ")", "if", "hasattr", "(", "expression", ",", "'variable_name'", ")", "and", "expression", ".", "variable_name", "is", "not", "None", ":", "variables", ".", "add", "(", "expression", ".", "variable_name", ")", "if", "isinstance", "(", "expression", ",", "Operation", ")", ":", "for", "operand", "in", "op_iter", "(", "expression", ")", ":", "get_variables", "(", "operand", ",", "variables", ")", "return", "variables" ]
Returns the set of variable names in the given expression.
[ "Returns", "the", "set", "of", "variable", "names", "in", "the", "given", "expression", "." ]
python
train
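A self-contained sketch of the same traversal; Operation, Var and op_iter are minimal stand-ins written for this example, not matchpy's real classes:

class Operation:
    def __init__(self, *operands):
        self.operands = operands

def op_iter(expression):
    return iter(expression.operands)

class Var:
    def __init__(self, name):
        self.variable_name = name

def get_variables(expression, variables=None):
    if variables is None:
        variables = set()
    if getattr(expression, 'variable_name', None) is not None:
        variables.add(expression.variable_name)
    if isinstance(expression, Operation):
        for operand in op_iter(expression):
            get_variables(operand, variables)
    return variables

print(get_variables(Operation(Var('x'), Operation(Var('y'), Var('x')))))
# {'x', 'y'} (set order may vary)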
odlgroup/odl
odl/operator/tensor_ops.py
https://github.com/odlgroup/odl/blob/b8443f6aca90e191ba36c91d32253c5a36249a6c/odl/operator/tensor_ops.py#L1159-L1199
def adjoint(self): """Adjoint of the sampling operator, a `WeightedSumSamplingOperator`. If each sampling point occurs only once, the adjoint consists in inserting the given values into the output at the sampling points. Duplicate sampling points are weighted with their multiplicity. Examples -------- >>> space = odl.uniform_discr([-1, -1], [1, 1], shape=(2, 3)) >>> sampling_points = [[0, 1, 1, 0], ... [0, 1, 2, 0]] >>> op = odl.SamplingOperator(space, sampling_points) >>> x = space.element([[1, 2, 3], ... [4, 5, 6]]) >>> abs(op.adjoint(op(x)).inner(x) - op(x).inner(op(x))) < 1e-10 True The ``'integrate'`` variant adjoint puts ones at the indices in ``sampling_points``, multiplied by their multiplicity: >>> op = odl.SamplingOperator(space, sampling_points, ... variant='integrate') >>> op.adjoint(op.range.one()) # (0, 0) occurs twice uniform_discr([-1., -1.], [ 1., 1.], (2, 3)).element( [[ 2., 0., 0.], [ 0., 1., 1.]] ) >>> abs(op.adjoint(op(x)).inner(x) - op(x).inner(op(x))) < 1e-10 True """ if self.variant == 'point_eval': variant = 'dirac' elif self.variant == 'integrate': variant = 'char_fun' else: raise RuntimeError('bad variant {!r}'.format(self.variant)) return WeightedSumSamplingOperator(self.domain, self.sampling_points, variant)
[ "def", "adjoint", "(", "self", ")", ":", "if", "self", ".", "variant", "==", "'point_eval'", ":", "variant", "=", "'dirac'", "elif", "self", ".", "variant", "==", "'integrate'", ":", "variant", "=", "'char_fun'", "else", ":", "raise", "RuntimeError", "(", "'bad variant {!r}'", ".", "format", "(", "self", ".", "variant", ")", ")", "return", "WeightedSumSamplingOperator", "(", "self", ".", "domain", ",", "self", ".", "sampling_points", ",", "variant", ")" ]
Adjoint of the sampling operator, a `WeightedSumSamplingOperator`. If each sampling point occurs only once, the adjoint consists in inserting the given values into the output at the sampling points. Duplicate sampling points are weighted with their multiplicity. Examples -------- >>> space = odl.uniform_discr([-1, -1], [1, 1], shape=(2, 3)) >>> sampling_points = [[0, 1, 1, 0], ... [0, 1, 2, 0]] >>> op = odl.SamplingOperator(space, sampling_points) >>> x = space.element([[1, 2, 3], ... [4, 5, 6]]) >>> abs(op.adjoint(op(x)).inner(x) - op(x).inner(op(x))) < 1e-10 True The ``'integrate'`` variant adjoint puts ones at the indices in ``sampling_points``, multiplied by their multiplicity: >>> op = odl.SamplingOperator(space, sampling_points, ... variant='integrate') >>> op.adjoint(op.range.one()) # (0, 0) occurs twice uniform_discr([-1., -1.], [ 1., 1.], (2, 3)).element( [[ 2., 0., 0.], [ 0., 1., 1.]] ) >>> abs(op.adjoint(op(x)).inner(x) - op(x).inner(op(x))) < 1e-10 True
[ "Adjoint", "of", "the", "sampling", "operator", "a", "WeightedSumSamplingOperator", "." ]
python
train
RJT1990/pyflux
pyflux/garch/egarchm.py
https://github.com/RJT1990/pyflux/blob/297f2afc2095acd97c12e827dd500e8ea5da0c0f/pyflux/garch/egarchm.py#L250-L307
def _sim_prediction(self, lmda, Y, scores, h, t_params, simulations): """ Simulates a h-step ahead mean prediction Parameters ---------- lmda : np.array The past predicted values Y : np.array The past data scores : np.array The past scores h : int How many steps ahead for the prediction t_params : np.array A vector of (transformed) latent variables simulations : int How many simulations to perform Returns ---------- Matrix of simulations """ sim_vector = np.zeros([simulations,h]) for n in range(0,simulations): # Create arrays to iteratre over lmda_exp = lmda.copy() scores_exp = scores.copy() Y_exp = Y.copy() # Loop over h time periods for t in range(0,h): new_value = t_params[0] if self.p != 0: for j in range(1,self.p+1): new_value += t_params[j]*lmda_exp[-j] if self.q != 0: for k in range(1,self.q+1): new_value += t_params[k+self.p]*scores_exp[-k] if self.leverage is True: new_value += t_params[1+self.p+self.q]*np.sign(-(Y_exp[-1]-t_params[-2]-t_params[-1]*np.exp(lmda_exp[-1]/2.0)))*(scores_exp[-1]+1) lmda_exp = np.append(lmda_exp,[new_value]) # For indexing consistency scores_exp = np.append(scores_exp,scores[np.random.randint(scores.shape[0])]) # expectation of score is zero Y_exp = np.append(Y_exp,Y[np.random.randint(Y.shape[0])]) # bootstrap returns sim_vector[n] = lmda_exp[-h:] return np.transpose(sim_vector)
[ "def", "_sim_prediction", "(", "self", ",", "lmda", ",", "Y", ",", "scores", ",", "h", ",", "t_params", ",", "simulations", ")", ":", "sim_vector", "=", "np", ".", "zeros", "(", "[", "simulations", ",", "h", "]", ")", "for", "n", "in", "range", "(", "0", ",", "simulations", ")", ":", "# Create arrays to iteratre over ", "lmda_exp", "=", "lmda", ".", "copy", "(", ")", "scores_exp", "=", "scores", ".", "copy", "(", ")", "Y_exp", "=", "Y", ".", "copy", "(", ")", "# Loop over h time periods ", "for", "t", "in", "range", "(", "0", ",", "h", ")", ":", "new_value", "=", "t_params", "[", "0", "]", "if", "self", ".", "p", "!=", "0", ":", "for", "j", "in", "range", "(", "1", ",", "self", ".", "p", "+", "1", ")", ":", "new_value", "+=", "t_params", "[", "j", "]", "*", "lmda_exp", "[", "-", "j", "]", "if", "self", ".", "q", "!=", "0", ":", "for", "k", "in", "range", "(", "1", ",", "self", ".", "q", "+", "1", ")", ":", "new_value", "+=", "t_params", "[", "k", "+", "self", ".", "p", "]", "*", "scores_exp", "[", "-", "k", "]", "if", "self", ".", "leverage", "is", "True", ":", "new_value", "+=", "t_params", "[", "1", "+", "self", ".", "p", "+", "self", ".", "q", "]", "*", "np", ".", "sign", "(", "-", "(", "Y_exp", "[", "-", "1", "]", "-", "t_params", "[", "-", "2", "]", "-", "t_params", "[", "-", "1", "]", "*", "np", ".", "exp", "(", "lmda_exp", "[", "-", "1", "]", "/", "2.0", ")", ")", ")", "*", "(", "scores_exp", "[", "-", "1", "]", "+", "1", ")", "lmda_exp", "=", "np", ".", "append", "(", "lmda_exp", ",", "[", "new_value", "]", ")", "# For indexing consistency", "scores_exp", "=", "np", ".", "append", "(", "scores_exp", ",", "scores", "[", "np", ".", "random", ".", "randint", "(", "scores", ".", "shape", "[", "0", "]", ")", "]", ")", "# expectation of score is zero", "Y_exp", "=", "np", ".", "append", "(", "Y_exp", ",", "Y", "[", "np", ".", "random", ".", "randint", "(", "Y", ".", "shape", "[", "0", "]", ")", "]", ")", "# bootstrap returns", "sim_vector", "[", "n", "]", "=", "lmda_exp", "[", "-", "h", ":", "]", "return", "np", ".", "transpose", "(", "sim_vector", ")" ]
Simulates an h-step ahead mean prediction

Parameters
----------
lmda : np.array
    The past predicted values

Y : np.array
    The past data

scores : np.array
    The past scores

h : int
    How many steps ahead for the prediction

t_params : np.array
    A vector of (transformed) latent variables

simulations : int
    How many simulations to perform

Returns
----------
Matrix of simulations
[ "Simulates", "an", "h", "-", "step", "ahead", "mean", "prediction" ]
python
train
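The bootstrap-resampling idiom from the inner loop above, in isolation: past observations are drawn at random, with replacement, to extend a simulated path (the data here is hypothetical):

import numpy as np

Y = np.array([0.1, -0.2, 0.05, 0.3])  # hypothetical past returns
path = list(Y)
for _ in range(5):                    # h = 5 steps ahead
    path.append(Y[np.random.randint(Y.shape[0])])
print(path[-5:])                      # five bootstrapped future values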
resonai/ybt
yabt/buildcontext.py
https://github.com/resonai/ybt/blob/5b40df0922ef3383eb85f2b04a26a2db4b81b3fd/yabt/buildcontext.py#L350-L441
def run_in_buildenv( self, buildenv_target_name: str, cmd: list, cmd_env: dict=None, work_dir: str=None, auto_uid: bool=True, runtime: str=None, **kwargs): """Run a command in a named BuildEnv Docker image. :param buildenv_target_name: A named Docker image target in which the command should be run. :param cmd: The command to run, as you'd pass to subprocess.run() :param cmd_env: A dictionary of environment variables for the command. :param work_dir: A different work dir to run in. Either absolute path, or relative to project root. :param auto_uid: Whether to run as the active uid:gid, or as root. :param kwargs: Extra keyword arguments that are passed to the subprocess.run() call that runs the BuildEnv container (for, e.g. timeout arg, stdout/err redirection, etc.) :raises KeyError: If named BuildEnv is not a registered BuildEnv image """ buildenv_target = self.targets[buildenv_target_name] # TODO(itamar): Assert that buildenv_target is up to date redirection = any( stream_key in kwargs for stream_key in ('stdin', 'stdout', 'stderr', 'input')) docker_run = ['docker', 'run'] # if not self.conf.non_interactive: # docker_run.append('-i') if not redirection: docker_run.append('-t') project_vol = (self.conf.docker_volume if self.conf.docker_volume else self.conf.project_root) container_work_dir = PurePath('/project') if work_dir: container_work_dir /= work_dir if runtime: docker_run.extend([ '--runtime', runtime, ]) docker_run.extend([ '--rm', '-v', project_vol + ':/project', # TODO: windows containers? '-w', container_work_dir.as_posix(), ]) if cmd_env: for key, value in cmd_env.items(): # TODO(itamar): escaping docker_run.extend(['-e', '{}={}'.format(key, value)]) if platform.system() == 'Linux' and auto_uid: # Fix permissions for bind-mounted project dir # The fix is not needed when using Docker For Mac / Windows, # because it is somehow taken care of by the sharing mechanics docker_run.extend([ '-u', '{}:{}'.format(os.getuid(), os.getgid()), '-v', '/etc/shadow:/etc/shadow:ro', '-v', '/etc/group:/etc/group:ro', '-v', '/etc/passwd:/etc/passwd:ro', '-v', '/etc/sudoers:/etc/sudoers:ro', ]) docker_run.append(format_qualified_image_name(buildenv_target)) docker_run.extend(cmd) logger.info('Running command in build env "{}" using command {}', buildenv_target_name, docker_run) # TODO: Consider changing the PIPEs to temp files. if 'stderr' not in kwargs: kwargs['stderr'] = PIPE if 'stdout' not in kwargs: kwargs['stdout'] = PIPE result = run(docker_run, check=True, **kwargs) # TODO(Dana): Understand what is the right enconding and remove the # try except if kwargs['stdout'] is PIPE: try: sys.stdout.write(result.stdout.decode('utf-8')) except UnicodeEncodeError as e: sys.stderr.write('tried writing the stdout of {},\n but it ' 'has a problematic character:\n {}\n' 'hex dump of stdout:\n{}\n' .format(docker_run, str(e), codecs.encode( result.stdout, 'hex').decode('utf8'))) if kwargs['stderr'] is PIPE: try: sys.stderr.write(result.stderr.decode('utf-8')) except UnicodeEncodeError as e: sys.stderr.write('tried writing the stderr of {},\n but it ' 'has a problematic character:\n {}\n' 'hex dump of stderr:\n{}\n' .format(docker_run, str(e), codecs.encode( result.stderr, 'hex').decode('utf8'))) return result
[ "def", "run_in_buildenv", "(", "self", ",", "buildenv_target_name", ":", "str", ",", "cmd", ":", "list", ",", "cmd_env", ":", "dict", "=", "None", ",", "work_dir", ":", "str", "=", "None", ",", "auto_uid", ":", "bool", "=", "True", ",", "runtime", ":", "str", "=", "None", ",", "*", "*", "kwargs", ")", ":", "buildenv_target", "=", "self", ".", "targets", "[", "buildenv_target_name", "]", "# TODO(itamar): Assert that buildenv_target is up to date", "redirection", "=", "any", "(", "stream_key", "in", "kwargs", "for", "stream_key", "in", "(", "'stdin'", ",", "'stdout'", ",", "'stderr'", ",", "'input'", ")", ")", "docker_run", "=", "[", "'docker'", ",", "'run'", "]", "# if not self.conf.non_interactive:", "# docker_run.append('-i')", "if", "not", "redirection", ":", "docker_run", ".", "append", "(", "'-t'", ")", "project_vol", "=", "(", "self", ".", "conf", ".", "docker_volume", "if", "self", ".", "conf", ".", "docker_volume", "else", "self", ".", "conf", ".", "project_root", ")", "container_work_dir", "=", "PurePath", "(", "'/project'", ")", "if", "work_dir", ":", "container_work_dir", "/=", "work_dir", "if", "runtime", ":", "docker_run", ".", "extend", "(", "[", "'--runtime'", ",", "runtime", ",", "]", ")", "docker_run", ".", "extend", "(", "[", "'--rm'", ",", "'-v'", ",", "project_vol", "+", "':/project'", ",", "# TODO: windows containers?", "'-w'", ",", "container_work_dir", ".", "as_posix", "(", ")", ",", "]", ")", "if", "cmd_env", ":", "for", "key", ",", "value", "in", "cmd_env", ".", "items", "(", ")", ":", "# TODO(itamar): escaping", "docker_run", ".", "extend", "(", "[", "'-e'", ",", "'{}={}'", ".", "format", "(", "key", ",", "value", ")", "]", ")", "if", "platform", ".", "system", "(", ")", "==", "'Linux'", "and", "auto_uid", ":", "# Fix permissions for bind-mounted project dir", "# The fix is not needed when using Docker For Mac / Windows,", "# because it is somehow taken care of by the sharing mechanics", "docker_run", ".", "extend", "(", "[", "'-u'", ",", "'{}:{}'", ".", "format", "(", "os", ".", "getuid", "(", ")", ",", "os", ".", "getgid", "(", ")", ")", ",", "'-v'", ",", "'/etc/shadow:/etc/shadow:ro'", ",", "'-v'", ",", "'/etc/group:/etc/group:ro'", ",", "'-v'", ",", "'/etc/passwd:/etc/passwd:ro'", ",", "'-v'", ",", "'/etc/sudoers:/etc/sudoers:ro'", ",", "]", ")", "docker_run", ".", "append", "(", "format_qualified_image_name", "(", "buildenv_target", ")", ")", "docker_run", ".", "extend", "(", "cmd", ")", "logger", ".", "info", "(", "'Running command in build env \"{}\" using command {}'", ",", "buildenv_target_name", ",", "docker_run", ")", "# TODO: Consider changing the PIPEs to temp files.", "if", "'stderr'", "not", "in", "kwargs", ":", "kwargs", "[", "'stderr'", "]", "=", "PIPE", "if", "'stdout'", "not", "in", "kwargs", ":", "kwargs", "[", "'stdout'", "]", "=", "PIPE", "result", "=", "run", "(", "docker_run", ",", "check", "=", "True", ",", "*", "*", "kwargs", ")", "# TODO(Dana): Understand what is the right enconding and remove the", "# try except", "if", "kwargs", "[", "'stdout'", "]", "is", "PIPE", ":", "try", ":", "sys", ".", "stdout", ".", "write", "(", "result", ".", "stdout", ".", "decode", "(", "'utf-8'", ")", ")", "except", "UnicodeEncodeError", "as", "e", ":", "sys", ".", "stderr", ".", "write", "(", "'tried writing the stdout of {},\\n but it '", "'has a problematic character:\\n {}\\n'", "'hex dump of stdout:\\n{}\\n'", ".", "format", "(", "docker_run", ",", "str", "(", "e", ")", ",", "codecs", ".", "encode", "(", "result", ".", "stdout", ",", "'hex'", ")", ".", "decode", "(", "'utf8'", ")", ")", ")", "if", "kwargs", "[", "'stderr'", "]", "is", "PIPE", ":", "try", ":", "sys", ".", "stderr", ".", "write", "(", "result", ".", "stderr", ".", "decode", "(", "'utf-8'", ")", ")", "except", "UnicodeEncodeError", "as", "e", ":", "sys", ".", "stderr", ".", "write", "(", "'tried writing the stderr of {},\\n but it '", "'has a problematic character:\\n {}\\n'", "'hex dump of stderr:\\n{}\\n'", ".", "format", "(", "docker_run", ",", "str", "(", "e", ")", ",", "codecs", ".", "encode", "(", "result", ".", "stderr", ",", "'hex'", ")", ".", "decode", "(", "'utf8'", ")", ")", ")", "return", "result" ]
Run a command in a named BuildEnv Docker image. :param buildenv_target_name: A named Docker image target in which the command should be run. :param cmd: The command to run, as you'd pass to subprocess.run() :param cmd_env: A dictionary of environment variables for the command. :param work_dir: A different work dir to run in. Either absolute path, or relative to project root. :param auto_uid: Whether to run as the active uid:gid, or as root. :param kwargs: Extra keyword arguments that are passed to the subprocess.run() call that runs the BuildEnv container (for, e.g. timeout arg, stdout/err redirection, etc.) :raises KeyError: If named BuildEnv is not a registered BuildEnv image
[ "Run", "a", "command", "in", "a", "named", "BuildEnv", "Docker", "image", "." ]
python
train
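A sketch of how the docker invocation above is assembled as a flat argument list; the paths, image name and command here are hypothetical:

from pathlib import PurePath

docker_run = ['docker', 'run', '--rm',
              '-v', '/home/me/project:/project',
              '-w', (PurePath('/project') / 'src').as_posix()]
docker_run.extend(['-e', 'FOO=bar'])            # one -e pair per env var
docker_run.append('buildenv-image:latest')      # image goes before the command
docker_run.extend(['python', '--version'])
print(' '.join(docker_run))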
genialis/resolwe
resolwe/flow/managers/dispatcher.py
https://github.com/genialis/resolwe/blob/f7bb54932c81ec0cfc5b5e80d238fceaeaa48d86/resolwe/flow/managers/dispatcher.py#L343-L353
def _marshal_settings(self): """Marshal Django settings into a serializable object. :return: The serialized settings. :rtype: dict """ result = {} for key in dir(settings): if any(map(key.startswith, ['FLOW_', 'RESOLWE_', 'CELERY_'])): result[key] = getattr(settings, key) return result
[ "def", "_marshal_settings", "(", "self", ")", ":", "result", "=", "{", "}", "for", "key", "in", "dir", "(", "settings", ")", ":", "if", "any", "(", "map", "(", "key", ".", "startswith", ",", "[", "'FLOW_'", ",", "'RESOLWE_'", ",", "'CELERY_'", "]", ")", ")", ":", "result", "[", "key", "]", "=", "getattr", "(", "settings", ",", "key", ")", "return", "result" ]
Marshal Django settings into a serializable object. :return: The serialized settings. :rtype: dict
[ "Marshal", "Django", "settings", "into", "a", "serializable", "object", "." ]
python
train
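The prefix-filtering idiom above, applied to a stand-in settings object instead of Django's:

import types

settings = types.SimpleNamespace(FLOW_EXECUTOR='local', CELERY_BROKER='redis://', DEBUG=True)
result = {key: getattr(settings, key) for key in dir(settings)
          if any(map(key.startswith, ['FLOW_', 'RESOLWE_', 'CELERY_']))}
print(result)  # {'CELERY_BROKER': 'redis://', 'FLOW_EXECUTOR': 'local'}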
ekmmetering/ekmmeters
ekmmeters.py
https://github.com/ekmmetering/ekmmeters/blob/b3748bdf30263bfa46ea40157bdf8df2522e1904/ekmmeters.py#L3448-L3468
def request(self, send_terminator = False): """ Combined A and B read for V4 meter. Args: send_terminator (bool): Send termination string at end of read. Returns: bool: True on completion. """ try: retA = self.requestA() retB = self.requestB() if retA and retB: self.makeAB() self.calculateFields() self.updateObservers() return True except: ekm_log(traceback.format_exc(sys.exc_info())) return False
[ "def", "request", "(", "self", ",", "send_terminator", "=", "False", ")", ":", "try", ":", "retA", "=", "self", ".", "requestA", "(", ")", "retB", "=", "self", ".", "requestB", "(", ")", "if", "retA", "and", "retB", ":", "self", ".", "makeAB", "(", ")", "self", ".", "calculateFields", "(", ")", "self", ".", "updateObservers", "(", ")", "return", "True", "except", ":", "ekm_log", "(", "traceback", ".", "format_exc", "(", "sys", ".", "exc_info", "(", ")", ")", ")", "return", "False" ]
Combined A and B read for V4 meter. Args: send_terminator (bool): Send termination string at end of read. Returns: bool: True on completion.
[ "Combined", "A", "and", "B", "read", "for", "V4", "meter", "." ]
python
test
Alignak-monitoring/alignak
alignak/objects/satellitelink.py
https://github.com/Alignak-monitoring/alignak/blob/f3c145207e83159b799d3714e4241399c7740a64/alignak/objects/satellitelink.py#L439-L480
def manages(self, cfg_part): """Tell if the satellite is managing this configuration part The managed configuration is formed as a dictionary indexed on the link instance_id: { u'SchedulerLink_1': { u'hash': u'4d08630a3483e1eac7898e7a721bd5d7768c8320', u'push_flavor': u'4d08630a3483e1eac7898e7a721bd5d7768c8320', u'managed_conf_id': [u'Config_1'] } } Note that the managed configuration is a string array rather than a simple string... no special for this reason, probably due to the serialization when the configuration is pushed :/ :param cfg_part: configuration part as prepare by the Dispatcher :type cfg_part: Conf :return: True if the satellite manages this configuration :rtype: bool """ logger.debug("Do I (%s/%s) manage: %s, my managed configuration(s): %s", self.type, self.name, cfg_part, self.cfg_managed) # If we do not yet manage a configuration if not self.cfg_managed: logger.info("I (%s/%s) do not manage (yet) any configuration!", self.type, self.name) return False # Check in the schedulers list configurations for managed_cfg in list(self.cfg_managed.values()): # If not even the cfg_id in the managed_conf, bail out if managed_cfg['managed_conf_id'] == cfg_part.instance_id \ and managed_cfg['push_flavor'] == cfg_part.push_flavor: logger.debug("I do manage this configuration: %s", cfg_part) break else: logger.warning("I (%s/%s) do not manage this configuration: %s", self.type, self.name, cfg_part) return False return True
[ "def", "manages", "(", "self", ",", "cfg_part", ")", ":", "logger", ".", "debug", "(", "\"Do I (%s/%s) manage: %s, my managed configuration(s): %s\"", ",", "self", ".", "type", ",", "self", ".", "name", ",", "cfg_part", ",", "self", ".", "cfg_managed", ")", "# If we do not yet manage a configuration", "if", "not", "self", ".", "cfg_managed", ":", "logger", ".", "info", "(", "\"I (%s/%s) do not manage (yet) any configuration!\"", ",", "self", ".", "type", ",", "self", ".", "name", ")", "return", "False", "# Check in the schedulers list configurations", "for", "managed_cfg", "in", "list", "(", "self", ".", "cfg_managed", ".", "values", "(", ")", ")", ":", "# If not even the cfg_id in the managed_conf, bail out", "if", "managed_cfg", "[", "'managed_conf_id'", "]", "==", "cfg_part", ".", "instance_id", "and", "managed_cfg", "[", "'push_flavor'", "]", "==", "cfg_part", ".", "push_flavor", ":", "logger", ".", "debug", "(", "\"I do manage this configuration: %s\"", ",", "cfg_part", ")", "break", "else", ":", "logger", ".", "warning", "(", "\"I (%s/%s) do not manage this configuration: %s\"", ",", "self", ".", "type", ",", "self", ".", "name", ",", "cfg_part", ")", "return", "False", "return", "True" ]
Tell if the satellite is managing this configuration part

The managed configuration is formed as a dictionary indexed on the
link instance_id:
{
    u'SchedulerLink_1': {
        u'hash': u'4d08630a3483e1eac7898e7a721bd5d7768c8320',
        u'push_flavor': u'4d08630a3483e1eac7898e7a721bd5d7768c8320',
        u'managed_conf_id': [u'Config_1']
    }
}

Note that the managed configuration is a string array rather than a simple string...
no special reason for this, probably due to the serialization when the configuration
is pushed :/

:param cfg_part: configuration part as prepared by the Dispatcher
:type cfg_part: Conf
:return: True if the satellite manages this configuration
:rtype: bool
[ "Tell", "if", "the", "satellite", "is", "managing", "this", "configuration", "part" ]
python
train
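The for/else idiom from the loop above, in isolation — the else clause runs only when the loop finishes without hitting break (the configuration ids are hypothetical):

managed = [{'managed_conf_id': 'Config_1', 'push_flavor': 'abc'}]
wanted = ('Config_2', 'abc')
for cfg in managed:
    if (cfg['managed_conf_id'], cfg['push_flavor']) == wanted:
        break
else:
    print('not managed')  # printed here, since no break occurred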
saltstack/salt
salt/modules/lxc.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/lxc.py#L1070-L1122
def _get_base(**kwargs): ''' If the needed base does not exist, then create it, if it does exist create nothing and return the name of the base lxc container so it can be cloned. ''' profile = get_container_profile(copy.deepcopy(kwargs.get('profile'))) kw_overrides = copy.deepcopy(kwargs) def select(key, default=None): kw_overrides_match = kw_overrides.pop(key, _marker) profile_match = profile.pop(key, default) # let kwarg overrides be the preferred choice if kw_overrides_match is _marker: return profile_match return kw_overrides_match template = select('template') image = select('image') vgname = select('vgname') path = kwargs.get('path', None) # remove the above three variables from kwargs, if they exist, to avoid # duplicates if create() is invoked below. for param in ('path', 'image', 'vgname', 'template'): kwargs.pop(param, None) if image: proto = _urlparse(image).scheme img_tar = __salt__['cp.cache_file'](image) img_name = os.path.basename(img_tar) hash_ = salt.utils.hashutils.get_hash( img_tar, __salt__['config.get']('hash_type')) name = '__base_{0}_{1}_{2}'.format(proto, img_name, hash_) if not exists(name, path=path): create(name, template=template, image=image, path=path, vgname=vgname, **kwargs) if vgname: rootfs = os.path.join('/dev', vgname, name) edit_conf(info(name, path=path)['config'], out_format='commented', **{'lxc.rootfs': rootfs}) return name elif template: name = '__base_{0}'.format(template) if not exists(name, path=path): create(name, template=template, image=image, path=path, vgname=vgname, **kwargs) if vgname: rootfs = os.path.join('/dev', vgname, name) edit_conf(info(name, path=path)['config'], out_format='commented', **{'lxc.rootfs': rootfs}) return name return ''
[ "def", "_get_base", "(", "*", "*", "kwargs", ")", ":", "profile", "=", "get_container_profile", "(", "copy", ".", "deepcopy", "(", "kwargs", ".", "get", "(", "'profile'", ")", ")", ")", "kw_overrides", "=", "copy", ".", "deepcopy", "(", "kwargs", ")", "def", "select", "(", "key", ",", "default", "=", "None", ")", ":", "kw_overrides_match", "=", "kw_overrides", ".", "pop", "(", "key", ",", "_marker", ")", "profile_match", "=", "profile", ".", "pop", "(", "key", ",", "default", ")", "# let kwarg overrides be the preferred choice", "if", "kw_overrides_match", "is", "_marker", ":", "return", "profile_match", "return", "kw_overrides_match", "template", "=", "select", "(", "'template'", ")", "image", "=", "select", "(", "'image'", ")", "vgname", "=", "select", "(", "'vgname'", ")", "path", "=", "kwargs", ".", "get", "(", "'path'", ",", "None", ")", "# remove the above three variables from kwargs, if they exist, to avoid", "# duplicates if create() is invoked below.", "for", "param", "in", "(", "'path'", ",", "'image'", ",", "'vgname'", ",", "'template'", ")", ":", "kwargs", ".", "pop", "(", "param", ",", "None", ")", "if", "image", ":", "proto", "=", "_urlparse", "(", "image", ")", ".", "scheme", "img_tar", "=", "__salt__", "[", "'cp.cache_file'", "]", "(", "image", ")", "img_name", "=", "os", ".", "path", ".", "basename", "(", "img_tar", ")", "hash_", "=", "salt", ".", "utils", ".", "hashutils", ".", "get_hash", "(", "img_tar", ",", "__salt__", "[", "'config.get'", "]", "(", "'hash_type'", ")", ")", "name", "=", "'__base_{0}_{1}_{2}'", ".", "format", "(", "proto", ",", "img_name", ",", "hash_", ")", "if", "not", "exists", "(", "name", ",", "path", "=", "path", ")", ":", "create", "(", "name", ",", "template", "=", "template", ",", "image", "=", "image", ",", "path", "=", "path", ",", "vgname", "=", "vgname", ",", "*", "*", "kwargs", ")", "if", "vgname", ":", "rootfs", "=", "os", ".", "path", ".", "join", "(", "'/dev'", ",", "vgname", ",", "name", ")", "edit_conf", "(", "info", "(", "name", ",", "path", "=", "path", ")", "[", "'config'", "]", ",", "out_format", "=", "'commented'", ",", "*", "*", "{", "'lxc.rootfs'", ":", "rootfs", "}", ")", "return", "name", "elif", "template", ":", "name", "=", "'__base_{0}'", ".", "format", "(", "template", ")", "if", "not", "exists", "(", "name", ",", "path", "=", "path", ")", ":", "create", "(", "name", ",", "template", "=", "template", ",", "image", "=", "image", ",", "path", "=", "path", ",", "vgname", "=", "vgname", ",", "*", "*", "kwargs", ")", "if", "vgname", ":", "rootfs", "=", "os", ".", "path", ".", "join", "(", "'/dev'", ",", "vgname", ",", "name", ")", "edit_conf", "(", "info", "(", "name", ",", "path", "=", "path", ")", "[", "'config'", "]", ",", "out_format", "=", "'commented'", ",", "*", "*", "{", "'lxc.rootfs'", ":", "rootfs", "}", ")", "return", "name", "return", "''" ]
If the needed base does not exist, then create it; if it does exist,
create nothing and return the name of the base lxc container so
it can be cloned.
[ "If", "the", "needed", "base", "does", "not", "exist", "then", "create", "it", "if", "it", "does", "exist", "create", "nothing", "and", "return", "the", "name", "of", "the", "base", "lxc", "container", "so", "it", "can", "be", "cloned", "." ]
python
train
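The sentinel-based override used by select() above, in isolation — an explicit kwarg wins over the profile only when it was actually passed:

_marker = object()

profile = {'template': 'ubuntu'}
kw_overrides = {'template': 'debian'}

def select(key, default=None):
    kw_match = kw_overrides.pop(key, _marker)
    profile_match = profile.pop(key, default)
    return profile_match if kw_match is _marker else kw_match

print(select('template'))  # debian -- the kwarg overrides the profile
print(select('vgname'))    # None   -- neither source defines it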
pycontribs/pyrax
pyrax/object_storage.py
https://github.com/pycontribs/pyrax/blob/9ddfd5064b3a292d7337906f3b2d5dce95b50b99/pyrax/object_storage.py#L1626-L1636
def download(self, directory, structure=True): """ Fetches the object from storage, and writes it to the specified directory. The directory must exist before calling this method. If the object name represents a nested folder structure, such as "foo/bar/baz.txt", that folder structure will be created in the target directory by default. If you do not want the nested folders to be created, pass `structure=False` in the parameters. """ return self.manager.download(self, directory, structure=structure)
[ "def", "download", "(", "self", ",", "directory", ",", "structure", "=", "True", ")", ":", "return", "self", ".", "manager", ".", "download", "(", "self", ",", "directory", ",", "structure", "=", "structure", ")" ]
Fetches the object from storage, and writes it to the specified directory. The directory must exist before calling this method. If the object name represents a nested folder structure, such as "foo/bar/baz.txt", that folder structure will be created in the target directory by default. If you do not want the nested folders to be created, pass `structure=False` in the parameters.
[ "Fetches", "the", "object", "from", "storage", "and", "writes", "it", "to", "the", "specified", "directory", ".", "The", "directory", "must", "exist", "before", "calling", "this", "method", "." ]
python
train
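A hedged usage sketch; the credentials, container and object names are hypothetical, and the calls assume a configured pyrax Cloud Files setup:

import pyrax

pyrax.set_credentials("username", "api_key")
container = pyrax.cloudfiles.get_container("backups")
obj = container.get_object("reports/2014/summary.txt")
obj.download("/tmp")                   # recreates reports/2014/ under /tmp
obj.download("/tmp", structure=False)  # writes summary.txt directly into /tmp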
jedie/DragonPy
dragonpy/Dragon32/MC6821_PIA.py
https://github.com/jedie/DragonPy/blob/6659e5b5133aab26979a498ee7453495773a4f6c/dragonpy/Dragon32/MC6821_PIA.py#L346-L367
def read_PIA0_B_data(self, cpu_cycles, op_address, address): """ read from 0xff02 -> PIA 0 B side Data reg. bit 7 | PB7 | keyboard matrix column 8 bit 6 | PB6 | keyboard matrix column 7 / ram size output bit 5 | PB5 | keyboard matrix column 6 bit 4 | PB4 | keyboard matrix column 5 bit 3 | PB3 | keyboard matrix column 4 bit 2 | PB2 | keyboard matrix column 3 bit 1 | PB1 | keyboard matrix column 2 bit 0 | PB0 | keyboard matrix column 1 bits 0-7 also printer data lines """ value = self.pia_0_B_data.value # $ff02 log.debug( "%04x| read $%04x (PIA 0 B side Data reg.) send $%02x (%s) back.\t|%s", op_address, address, value, byte2bit_string(value), self.cfg.mem_info.get_shortest(op_address) ) return value
[ "def", "read_PIA0_B_data", "(", "self", ",", "cpu_cycles", ",", "op_address", ",", "address", ")", ":", "value", "=", "self", ".", "pia_0_B_data", ".", "value", "# $ff02", "log", ".", "debug", "(", "\"%04x| read $%04x (PIA 0 B side Data reg.) send $%02x (%s) back.\\t|%s\"", ",", "op_address", ",", "address", ",", "value", ",", "byte2bit_string", "(", "value", ")", ",", "self", ".", "cfg", ".", "mem_info", ".", "get_shortest", "(", "op_address", ")", ")", "return", "value" ]
read from 0xff02 -> PIA 0 B side Data reg. bit 7 | PB7 | keyboard matrix column 8 bit 6 | PB6 | keyboard matrix column 7 / ram size output bit 5 | PB5 | keyboard matrix column 6 bit 4 | PB4 | keyboard matrix column 5 bit 3 | PB3 | keyboard matrix column 4 bit 2 | PB2 | keyboard matrix column 3 bit 1 | PB1 | keyboard matrix column 2 bit 0 | PB0 | keyboard matrix column 1 bits 0-7 also printer data lines
[ "read", "from", "0xff02", "-", ">", "PIA", "0", "B", "side", "Data", "reg", "." ]
python
train
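A simple stand-in for the byte2bit_string() formatting used in the log call above:

value = 0xBF
print("${:02x} ({})".format(value, format(value, "08b")))  # $bf (10111111)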
bukun/TorCMS
torcms/model/user_model.py
https://github.com/bukun/TorCMS/blob/6567c7fe2604a1d646d4570c017840958630ed2b/torcms/model/user_model.py#L241-L250
def delete_by_user_name(user_name): ''' Delete user in the database by `user_name`. ''' try: del_count = TabMember.delete().where(TabMember.user_name == user_name) del_count.execute() return True except: return False
[ "def", "delete_by_user_name", "(", "user_name", ")", ":", "try", ":", "del_count", "=", "TabMember", ".", "delete", "(", ")", ".", "where", "(", "TabMember", ".", "user_name", "==", "user_name", ")", "del_count", ".", "execute", "(", ")", "return", "True", "except", ":", "return", "False" ]
Delete user in the database by `user_name`.
[ "Delete", "user", "in", "the", "database", "by", "user_name", "." ]
python
train
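A minimal peewee sketch of the delete-by-field pattern, against an in-memory SQLite database; this model is a stand-in defined for the example, not TorCMS's real TabMember:

from peewee import Model, CharField, SqliteDatabase

db = SqliteDatabase(':memory:')

class TabMember(Model):
    user_name = CharField()

    class Meta:
        database = db

db.create_tables([TabMember])
TabMember.create(user_name='alice')
TabMember.delete().where(TabMember.user_name == 'alice').execute()
print(TabMember.select().count())  # 0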
wmayner/pyphi
pyphi/actual.py
https://github.com/wmayner/pyphi/blob/deeca69a084d782a6fde7bf26f59e93b593c5d77/pyphi/actual.py#L387-L420
def find_causal_link(self, direction, mechanism, purviews=False, allow_neg=False): """Return the maximally irreducible cause or effect ratio for a mechanism. Args: direction (str): The temporal direction, specifying cause or effect. mechanism (tuple[int]): The mechanism to be tested for irreducibility. Keyword Args: purviews (tuple[int]): Optionally restrict the possible purviews to a subset of the subsystem. This may be useful for _e.g._ finding only concepts that are "about" a certain subset of nodes. Returns: CausalLink: The maximally-irreducible actual cause or effect. """ purviews = self.potential_purviews(direction, mechanism, purviews) # Find the maximal RIA over the remaining purviews. if not purviews: max_ria = _null_ac_ria(self.mechanism_state(direction), direction, mechanism, None) else: # This max should be most positive max_ria = max(self.find_mip(direction, mechanism, purview, allow_neg) for purview in purviews) # Construct the corresponding CausalLink return CausalLink(max_ria)
[ "def", "find_causal_link", "(", "self", ",", "direction", ",", "mechanism", ",", "purviews", "=", "False", ",", "allow_neg", "=", "False", ")", ":", "purviews", "=", "self", ".", "potential_purviews", "(", "direction", ",", "mechanism", ",", "purviews", ")", "# Find the maximal RIA over the remaining purviews.", "if", "not", "purviews", ":", "max_ria", "=", "_null_ac_ria", "(", "self", ".", "mechanism_state", "(", "direction", ")", ",", "direction", ",", "mechanism", ",", "None", ")", "else", ":", "# This max should be most positive", "max_ria", "=", "max", "(", "self", ".", "find_mip", "(", "direction", ",", "mechanism", ",", "purview", ",", "allow_neg", ")", "for", "purview", "in", "purviews", ")", "# Construct the corresponding CausalLink", "return", "CausalLink", "(", "max_ria", ")" ]
Return the maximally irreducible cause or effect ratio for a mechanism. Args: direction (str): The temporal direction, specifying cause or effect. mechanism (tuple[int]): The mechanism to be tested for irreducibility. Keyword Args: purviews (tuple[int]): Optionally restrict the possible purviews to a subset of the subsystem. This may be useful for _e.g._ finding only concepts that are "about" a certain subset of nodes. Returns: CausalLink: The maximally-irreducible actual cause or effect.
[ "Return", "the", "maximally", "irreducible", "cause", "or", "effect", "ratio", "for", "a", "mechanism", "." ]
python
train
halcy/Mastodon.py
mastodon/Mastodon.py
https://github.com/halcy/Mastodon.py/blob/35c43562dd3d34d6ebf7a0f757c09e8fcccc957c/mastodon/Mastodon.py#L2443-L2458
def __datetime_to_epoch(self, date_time): """ Converts a python datetime to unix epoch, accounting for time zones and such. Assumes UTC if timezone is not given. """ date_time_utc = None if date_time.tzinfo is None: date_time_utc = date_time.replace(tzinfo=pytz.utc) else: date_time_utc = date_time.astimezone(pytz.utc) epoch_utc = datetime.datetime.utcfromtimestamp(0).replace(tzinfo=pytz.utc) return (date_time_utc - epoch_utc).total_seconds()
[ "def", "__datetime_to_epoch", "(", "self", ",", "date_time", ")", ":", "date_time_utc", "=", "None", "if", "date_time", ".", "tzinfo", "is", "None", ":", "date_time_utc", "=", "date_time", ".", "replace", "(", "tzinfo", "=", "pytz", ".", "utc", ")", "else", ":", "date_time_utc", "=", "date_time", ".", "astimezone", "(", "pytz", ".", "utc", ")", "epoch_utc", "=", "datetime", ".", "datetime", ".", "utcfromtimestamp", "(", "0", ")", ".", "replace", "(", "tzinfo", "=", "pytz", ".", "utc", ")", "return", "(", "date_time_utc", "-", "epoch_utc", ")", ".", "total_seconds", "(", ")" ]
Converts a python datetime to unix epoch, accounting for time zones and such. Assumes UTC if timezone is not given.
[ "Converts", "a", "python", "datetime", "to", "unix", "epoch", "accounting", "for", "time", "zones", "and", "such", "." ]
python
train
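The same UTC-normalizing conversion, in isolation:

import datetime
import pytz

naive = datetime.datetime(1970, 1, 1, 1, 0)  # no tzinfo, so assumed UTC per the docstring
aware = naive.replace(tzinfo=pytz.utc)
epoch = datetime.datetime.utcfromtimestamp(0).replace(tzinfo=pytz.utc)
print((aware - epoch).total_seconds())  # 3600.0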
Terrance/SkPy
skpy/conn.py
https://github.com/Terrance/SkPy/blob/0f9489c94e8ec4d3effab4314497428872a80ad1/skpy/conn.py#L312-L327
def writeToken(self): """ Store details of the current connection in the named file. This can be used by :meth:`readToken` to re-authenticate at a later time. """ # Write token file privately. with os.fdopen(os.open(self.tokenFile, os.O_WRONLY | os.O_CREAT, 0o600), "w") as f: # When opening files via os, truncation must be done manually. f.truncate() f.write(self.userId + "\n") f.write(self.tokens["skype"] + "\n") f.write(str(int(time.mktime(self.tokenExpiry["skype"].timetuple()))) + "\n") f.write(self.tokens["reg"] + "\n") f.write(str(int(time.mktime(self.tokenExpiry["reg"].timetuple()))) + "\n") f.write(self.msgsHost + "\n")
[ "def", "writeToken", "(", "self", ")", ":", "# Write token file privately.", "with", "os", ".", "fdopen", "(", "os", ".", "open", "(", "self", ".", "tokenFile", ",", "os", ".", "O_WRONLY", "|", "os", ".", "O_CREAT", ",", "0o600", ")", ",", "\"w\"", ")", "as", "f", ":", "# When opening files via os, truncation must be done manually.", "f", ".", "truncate", "(", ")", "f", ".", "write", "(", "self", ".", "userId", "+", "\"\\n\"", ")", "f", ".", "write", "(", "self", ".", "tokens", "[", "\"skype\"", "]", "+", "\"\\n\"", ")", "f", ".", "write", "(", "str", "(", "int", "(", "time", ".", "mktime", "(", "self", ".", "tokenExpiry", "[", "\"skype\"", "]", ".", "timetuple", "(", ")", ")", ")", ")", "+", "\"\\n\"", ")", "f", ".", "write", "(", "self", ".", "tokens", "[", "\"reg\"", "]", "+", "\"\\n\"", ")", "f", ".", "write", "(", "str", "(", "int", "(", "time", ".", "mktime", "(", "self", ".", "tokenExpiry", "[", "\"reg\"", "]", ".", "timetuple", "(", ")", ")", ")", ")", "+", "\"\\n\"", ")", "f", ".", "write", "(", "self", ".", "msgsHost", "+", "\"\\n\"", ")" ]
Store details of the current connection in the named file. This can be used by :meth:`readToken` to re-authenticate at a later time.
[ "Store", "details", "of", "the", "current", "connection", "in", "the", "named", "file", "." ]
python
test
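The private-file idiom above, in isolation — os.open() with mode 0o600 makes the file owner-only before any data lands in it (the path is hypothetical; POSIX assumed):

import os

path = "/tmp/demo_token"
with os.fdopen(os.open(path, os.O_WRONLY | os.O_CREAT, 0o600), "w") as f:
    f.truncate()  # truncation must be done manually when opening via os
    f.write("user\ntoken\n")
print(oct(os.stat(path).st_mode & 0o777))  # 0o600 for a newly created file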
datosgobar/pydatajson
pydatajson/helpers.py
https://github.com/datosgobar/pydatajson/blob/3141082ffbaa295e2deaf6ffbbc5a59f5859960e/pydatajson/helpers.py#L384-L394
def find_ws_name(wb, name):
    """Search for a worksheet in a workbook, ignoring case."""
    if isinstance(wb, string_types):
        # FIXME: import or remove as appropriate
        wb = load_workbook(wb, read_only=True, data_only=True)

    for sheetname in wb.sheetnames:
        if sheetname.lower() == name.lower():
            return sheetname

    raise Exception("Sheet {} does not exist".format(name))
[ "def", "find_ws_name", "(", "wb", ",", "name", ")", ":", "if", "isinstance", "(", "wb", ",", "string_types", ")", ":", "# FIXME: import or remove as appropriate", "wb", "=", "load_workbook", "(", "wb", ",", "read_only", "=", "True", ",", "data_only", "=", "True", ")", "for", "sheetname", "in", "wb", ".", "sheetnames", ":", "if", "sheetname", ".", "lower", "(", ")", "==", "name", ".", "lower", "(", ")", ":", "return", "sheetname", "raise", "Exception", "(", "\"Sheet {} does not exist\"", ".", "format", "(", "name", ")", ")" ]
Search for a worksheet in a workbook, ignoring case.
[ "Search", "for", "a", "worksheet", "in", "a", "workbook", "ignoring", "case", "." ]
python
train
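The case-insensitive lookup above, in isolation; `sheetnames` stands in for an openpyxl workbook's wb.sheetnames:

sheetnames = ["Data", "Summary"]
name = "data"
match = next(s for s in sheetnames if s.lower() == name.lower())
print(match)  # Data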
EventRegistry/event-registry-python
eventregistry/EventForText.py
https://github.com/EventRegistry/event-registry-python/blob/534d20b616de02f5e1cd73665a02d189645dbeb6/eventregistry/EventForText.py#L42-L57
def compute(self, text, # text for which to find the most similar event lang = "eng"): # language in which the text is written """ compute the list of most similar events for the given text """ params = { "lang": lang, "text": text, "topClustersCount": self._nrOfEventsToReturn } res = self._er.jsonRequest("/json/getEventForText/enqueueRequest", params) requestId = res["requestId"] for i in range(10): time.sleep(1) # sleep for 1 second to wait for the clustering to perform computation res = self._er.jsonRequest("/json/getEventForText/testRequest", { "requestId": requestId }) if isinstance(res, list) and len(res) > 0: return res return None
[ "def", "compute", "(", "self", ",", "text", ",", "# text for which to find the most similar event", "lang", "=", "\"eng\"", ")", ":", "# language in which the text is written", "params", "=", "{", "\"lang\"", ":", "lang", ",", "\"text\"", ":", "text", ",", "\"topClustersCount\"", ":", "self", ".", "_nrOfEventsToReturn", "}", "res", "=", "self", ".", "_er", ".", "jsonRequest", "(", "\"/json/getEventForText/enqueueRequest\"", ",", "params", ")", "requestId", "=", "res", "[", "\"requestId\"", "]", "for", "i", "in", "range", "(", "10", ")", ":", "time", ".", "sleep", "(", "1", ")", "# sleep for 1 second to wait for the clustering to perform computation", "res", "=", "self", ".", "_er", ".", "jsonRequest", "(", "\"/json/getEventForText/testRequest\"", ",", "{", "\"requestId\"", ":", "requestId", "}", ")", "if", "isinstance", "(", "res", ",", "list", ")", "and", "len", "(", "res", ")", ">", "0", ":", "return", "res", "return", "None" ]
compute the list of most similar events for the given text
[ "compute", "the", "list", "of", "most", "similar", "events", "for", "the", "given", "text" ]
python
train
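The enqueue-then-poll shape of the method above, with the remote service stubbed out for the example (the stub returns a result on the third poll):

import time

def test_request(request_id, _state={'n': 0}):
    _state['n'] += 1
    return ['event-42'] if _state['n'] >= 3 else []

request_id = 'req-1'  # would come from the enqueue call
for _ in range(10):
    time.sleep(0.01)  # shortened from 1 second for the sketch
    res = test_request(request_id)
    if isinstance(res, list) and len(res) > 0:
        print(res)    # ['event-42']
        break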
SiLab-Bonn/basil
basil/HL/GPAC.py
https://github.com/SiLab-Bonn/basil/blob/99052482d9334dd1f5598eb2d2fb4d5399a32291/basil/HL/GPAC.py#L830-L837
def get_over_current(self, channel): '''Reading over current status of power channel ''' try: bit = self._ch_map[channel]['GPIOOC']['bit'] except KeyError: raise ValueError('get_over_current() not supported for channel %s' % channel) return not self._get_power_gpio_value(bit)
[ "def", "get_over_current", "(", "self", ",", "channel", ")", ":", "try", ":", "bit", "=", "self", ".", "_ch_map", "[", "channel", "]", "[", "'GPIOOC'", "]", "[", "'bit'", "]", "except", "KeyError", ":", "raise", "ValueError", "(", "'get_over_current() not supported for channel %s'", "%", "channel", ")", "return", "not", "self", ".", "_get_power_gpio_value", "(", "bit", ")" ]
Reading over-current status of power channel
[ "Reading", "over", "-", "current", "status", "of", "power", "channel" ]
python
train
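The KeyError-to-ValueError translation above, in isolation; the channel map and channel name are hypothetical:

_ch_map = {'PWR0': {'GPIOOC': {'bit': 3}}}

def oc_bit(channel):
    try:
        return _ch_map[channel]['GPIOOC']['bit']
    except KeyError:
        raise ValueError('get_over_current() not supported for channel %s' % channel)

print(oc_bit('PWR0'))  # 3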
eqcorrscan/EQcorrscan
eqcorrscan/utils/catalog_to_dd.py
https://github.com/eqcorrscan/EQcorrscan/blob/3121b4aca801ee5d38f56ca297ce1c0f9515d9ff/eqcorrscan/utils/catalog_to_dd.py#L194-L214
def sfiles_to_event(sfile_list): """ Write an event.dat file from a list of Seisan events :type sfile_list: list :param sfile_list: List of s-files to sort and put into the database :returns: List of tuples of event ID (int) and Sfile name """ event_list = [] sort_list = [(readheader(sfile).origins[0].time, sfile) for sfile in sfile_list] sort_list.sort(key=lambda tup: tup[0]) sfile_list = [sfile[1] for sfile in sort_list] catalog = Catalog() for i, sfile in enumerate(sfile_list): event_list.append((i, sfile)) catalog.append(readheader(sfile)) # Hand off to sister function write_event(catalog) return event_list
[ "def", "sfiles_to_event", "(", "sfile_list", ")", ":", "event_list", "=", "[", "]", "sort_list", "=", "[", "(", "readheader", "(", "sfile", ")", ".", "origins", "[", "0", "]", ".", "time", ",", "sfile", ")", "for", "sfile", "in", "sfile_list", "]", "sort_list", ".", "sort", "(", "key", "=", "lambda", "tup", ":", "tup", "[", "0", "]", ")", "sfile_list", "=", "[", "sfile", "[", "1", "]", "for", "sfile", "in", "sort_list", "]", "catalog", "=", "Catalog", "(", ")", "for", "i", ",", "sfile", "in", "enumerate", "(", "sfile_list", ")", ":", "event_list", ".", "append", "(", "(", "i", ",", "sfile", ")", ")", "catalog", ".", "append", "(", "readheader", "(", "sfile", ")", ")", "# Hand off to sister function", "write_event", "(", "catalog", ")", "return", "event_list" ]
Write an event.dat file from a list of Seisan events :type sfile_list: list :param sfile_list: List of s-files to sort and put into the database :returns: List of tuples of event ID (int) and Sfile name
[ "Write", "an", "event", ".", "dat", "file", "from", "a", "list", "of", "Seisan", "events" ]
python
train
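The sort-then-strip idiom above, in isolation — (origin_time, sfile) tuples order the files chronologically, after which only the names are kept and enumerated:

sort_list = [(2, 'b.sfile'), (1, 'a.sfile'), (3, 'c.sfile')]  # hypothetical (time, sfile) pairs
sort_list.sort(key=lambda tup: tup[0])
sfile_list = [pair[1] for pair in sort_list]
print(list(enumerate(sfile_list)))  # [(0, 'a.sfile'), (1, 'b.sfile'), (2, 'c.sfile')]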