Columns:

  repo              stringlengths 7-54
  path              stringlengths 4-192
  url               stringlengths 87-284
  code              stringlengths 78-104k
  code_tokens       sequence
  docstring         stringlengths 1-46.9k
  docstring_tokens  sequence
  language          stringclasses (1 value)
  partition         stringclasses (3 values)
openpermissions/chub
chub/handlers.py
https://github.com/openpermissions/chub/blob/00762aa17015f4b3010673d1570c708eab3c34ed/chub/handlers.py#L91-L101
def parse_response(response):
    """
    parse response and return a dictionary if the content type
    is application/json.

    :param response: HTTPRequest
    :return dictionary for json content type otherwise response body
    """
    if response.headers.get('Content-Type', JSON_TYPE).startswith(JSON_TYPE):
        return ResponseObject(json.loads(response.body))
    else:
        return response.body
[ "def", "parse_response", "(", "response", ")", ":", "if", "response", ".", "headers", ".", "get", "(", "'Content-Type'", ",", "JSON_TYPE", ")", ".", "startswith", "(", "JSON_TYPE", ")", ":", "return", "ResponseObject", "(", "json", ".", "loads", "(", "response", ".", "body", ")", ")", "else", ":", "return", "response", ".", "body" ]
parse response and return a dictionary if the content type
is application/json.

:param response: HTTPRequest
:return dictionary for json content type otherwise response body
[ "parse", "response", "and", "return", "a", "dictionary", "if", "the", "content", "type", ".", "is", "json", "/", "application", ".", ":", "param", "response", ":", "HTTPRequest", ":", "return", "dictionary", "for", "json", "content", "type", "otherwise", "response", "body" ]
python
train
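A minimal usage sketch for parse_response above. JSON_TYPE and ResponseObject are module-level names in chub that are not shown in this record; the constant value and the dict stand-in below are assumptions, and FakeResponse is a hypothetical stand-in for the HTTP response object.

    import json

    JSON_TYPE = 'application/json'  # assumed value of chub's module constant
    ResponseObject = dict           # stand-in for chub's ResponseObject wrapper

    class FakeResponse:
        # hypothetical stand-in exposing the two attributes parse_response reads
        def __init__(self, body, content_type):
            self.body = body
            self.headers = {'Content-Type': content_type}

    # JSON bodies come back parsed, anything else comes back raw:
    print(parse_response(FakeResponse('{"ok": true}', 'application/json')))  # {'ok': True}
    print(parse_response(FakeResponse('hello', 'text/plain')))               # hello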
adamcharnock/django-hordak
hordak/models/core.py
https://github.com/adamcharnock/django-hordak/blob/0ffcad1d3b388b860c8c47fde12aa40df213066f/hordak/models/core.py#L574-L600
def create_transaction(self, to_account):
    """Create a transaction for this statement amount and account, into to_account

    This will also set this StatementLine's ``transaction`` attribute to the
    newly created transaction.

    Args:
        to_account (Account): The account the transaction is into / out of.

    Returns:
        Transaction: The newly created (and committed) transaction.
    """
    from_account = self.statement_import.bank_account
    transaction = Transaction.objects.create()
    Leg.objects.create(
        transaction=transaction, account=from_account, amount=+(self.amount * -1)
    )
    Leg.objects.create(transaction=transaction, account=to_account, amount=-(self.amount * -1))
    transaction.date = self.date
    transaction.save()
    self.transaction = transaction
    self.save()
    return transaction
[ "def", "create_transaction", "(", "self", ",", "to_account", ")", ":", "from_account", "=", "self", ".", "statement_import", ".", "bank_account", "transaction", "=", "Transaction", ".", "objects", ".", "create", "(", ")", "Leg", ".", "objects", ".", "create", "(", "transaction", "=", "transaction", ",", "account", "=", "from_account", ",", "amount", "=", "+", "(", "self", ".", "amount", "*", "-", "1", ")", ")", "Leg", ".", "objects", ".", "create", "(", "transaction", "=", "transaction", ",", "account", "=", "to_account", ",", "amount", "=", "-", "(", "self", ".", "amount", "*", "-", "1", ")", ")", "transaction", ".", "date", "=", "self", ".", "date", "transaction", ".", "save", "(", ")", "self", ".", "transaction", "=", "transaction", "self", ".", "save", "(", ")", "return", "transaction" ]
Create a transaction for this statement amount and account, into to_account

This will also set this StatementLine's ``transaction`` attribute to the
newly created transaction.

Args:
    to_account (Account): The account the transaction is into / out of.

Returns:
    Transaction: The newly created (and committed) transaction.
[ "Create", "a", "transaction", "for", "this", "statement", "amount", "and", "account", "into", "to_account" ]
python
train
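The doubled negation in the two Leg amounts is easy to misread. A worked sketch of just the sign arithmetic, showing that the two legs always cancel, which is the double-entry invariant hordak expects of a transaction:

    from decimal import Decimal

    amount = Decimal("100")          # statement line amount (money in)
    bank_leg = +(amount * -1)        # -100, posted against the bank account
    dest_leg = -(amount * -1)        # +100, posted against to_account
    assert bank_leg + dest_leg == 0  # the legs of a transaction sum to zero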
aaugustin/websockets
src/websockets/extensions/permessage_deflate.py
https://github.com/aaugustin/websockets/blob/17b3f47549b6f752a1be07fa1ba3037cb59c7d56/src/websockets/extensions/permessage_deflate.py#L313-L323
def get_request_params(self) -> List[ExtensionParameter]:
    """
    Build request parameters.

    """
    return _build_parameters(
        self.server_no_context_takeover,
        self.client_no_context_takeover,
        self.server_max_window_bits,
        self.client_max_window_bits,
    )
[ "def", "get_request_params", "(", "self", ")", "->", "List", "[", "ExtensionParameter", "]", ":", "return", "_build_parameters", "(", "self", ".", "server_no_context_takeover", ",", "self", ".", "client_no_context_takeover", ",", "self", ".", "server_max_window_bits", ",", "self", ".", "client_max_window_bits", ",", ")" ]
Build request parameters.
[ "Build", "request", "parameters", "." ]
python
train
user-cont/colin
colin/core/colin.py
https://github.com/user-cont/colin/blob/00bb80e6e91522e15361935f813e8cf13d7e76dc/colin/core/colin.py#L135-L164
def _set_logging(
        logger_name="colin",
        level=logging.INFO,
        handler_class=logging.StreamHandler,
        handler_kwargs=None,
        format='%(asctime)s.%(msecs).03d %(filename)-17s %(levelname)-6s %(message)s',
        date_format='%H:%M:%S'):
    """
    Set personal logger for this library.

    :param logger_name: str, name of the logger
    :param level: int, see logging.{DEBUG,INFO,ERROR,...}: level of logger and handler
    :param handler_class: logging.Handler instance, default is StreamHandler (/dev/stderr)
    :param handler_kwargs: dict, keyword arguments to handler's constructor
    :param format: str, formatting style
    :param date_format: str, date style in the logs
    """
    if level != logging.NOTSET:
        logger = logging.getLogger(logger_name)
        logger.setLevel(level)

        # do not readd handlers if they are already present
        if not [x for x in logger.handlers if isinstance(x, handler_class)]:
            handler_kwargs = handler_kwargs or {}
            handler = handler_class(**handler_kwargs)
            handler.setLevel(level)
            formatter = logging.Formatter(format, date_format)
            handler.setFormatter(formatter)
            logger.addHandler(handler)
[ "def", "_set_logging", "(", "logger_name", "=", "\"colin\"", ",", "level", "=", "logging", ".", "INFO", ",", "handler_class", "=", "logging", ".", "StreamHandler", ",", "handler_kwargs", "=", "None", ",", "format", "=", "'%(asctime)s.%(msecs).03d %(filename)-17s %(levelname)-6s %(message)s'", ",", "date_format", "=", "'%H:%M:%S'", ")", ":", "if", "level", "!=", "logging", ".", "NOTSET", ":", "logger", "=", "logging", ".", "getLogger", "(", "logger_name", ")", "logger", ".", "setLevel", "(", "level", ")", "# do not readd handlers if they are already present", "if", "not", "[", "x", "for", "x", "in", "logger", ".", "handlers", "if", "isinstance", "(", "x", ",", "handler_class", ")", "]", ":", "handler_kwargs", "=", "handler_kwargs", "or", "{", "}", "handler", "=", "handler_class", "(", "*", "*", "handler_kwargs", ")", "handler", ".", "setLevel", "(", "level", ")", "formatter", "=", "logging", ".", "Formatter", "(", "format", ",", "date_format", ")", "handler", ".", "setFormatter", "(", "formatter", ")", "logger", ".", "addHandler", "(", "handler", ")" ]
Set personal logger for this library.

:param logger_name: str, name of the logger
:param level: int, see logging.{DEBUG,INFO,ERROR,...}: level of logger and handler
:param handler_class: logging.Handler instance, default is StreamHandler (/dev/stderr)
:param handler_kwargs: dict, keyword arguments to handler's constructor
:param format: str, formatting style
:param date_format: str, date style in the logs
[ "Set", "personal", "logger", "for", "this", "library", "." ]
python
train
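A usage sketch, assuming _set_logging is in scope (it is module-private in colin.core.colin). The isinstance check on existing handlers makes repeat calls idempotent:

    import logging

    _set_logging(level=logging.DEBUG)
    _set_logging(level=logging.DEBUG)  # no second StreamHandler is added

    logger = logging.getLogger("colin")
    assert len([h for h in logger.handlers
                if isinstance(h, logging.StreamHandler)]) == 1
    logger.debug("colin logging is configured")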
reingart/pyafipws
wslum.py
https://github.com/reingart/pyafipws/blob/ee87cfe4ac12285ab431df5fec257f103042d1ab/wslum.py#L239-L251
def AgregarUbicacionTambo(self, latitud, longitud, domicilio, cod_localidad,
                          cod_provincia, codigo_postal, nombre_partido_depto,
                          **kwargs):
    "Add the producer's data to the settlement."
    ubic_tambo = {'latitud': latitud, 'longitud': longitud,
                  'domicilio': domicilio,
                  'codLocalidad': cod_localidad,
                  'codProvincia': cod_provincia,
                  'nombrePartidoDepto': nombre_partido_depto,
                  'codigoPostal': codigo_postal}
    self.solicitud['tambo']['ubicacionTambo'] = ubic_tambo
    return True
[ "def", "AgregarUbicacionTambo", "(", "self", ",", "latitud", ",", "longitud", ",", "domicilio", ",", "cod_localidad", ",", "cod_provincia", ",", "codigo_postal", ",", "nombre_partido_depto", ",", "*", "*", "kwargs", ")", ":", "ubic_tambo", "=", "{", "'latitud'", ":", "latitud", ",", "'longitud'", ":", "longitud", ",", "'domicilio'", ":", "domicilio", ",", "'codLocalidad'", ":", "cod_localidad", ",", "'codProvincia'", ":", "cod_provincia", ",", "'nombrePartidoDepto'", ":", "nombre_partido_depto", ",", "'codigoPostal'", ":", "codigo_postal", "}", "self", ".", "solicitud", "[", "'tambo'", "]", "[", "'ubicacionTambo'", "]", "=", "ubic_tambo", "return", "True" ]
Add the producer's data to the settlement.
[ "Agrego", "los", "datos", "del", "productor", "a", "la", "liq", "." ]
python
train
openspending/babbage
babbage/validation.py
https://github.com/openspending/babbage/blob/9e03efe62e0be0cceabafd4de2a09cb8ec794b92/babbage/validation.py#L24-L39
def check_valid_hierarchies(instance):
    """ Additional check for the hierarchies model, to ensure
    that levels given are pointing to actual dimensions
    """
    hierarchies = instance.get('hierarchies', {}).values()
    dimensions = set(instance.get('dimensions', {}).keys())
    all_levels = set()
    for hierarcy in hierarchies:
        levels = set(hierarcy.get('levels', []))
        if len(all_levels.intersection(levels)) > 0:
            # Dimension appears in two different hierarchies
            return False
        all_levels = all_levels.union(levels)
        if not dimensions.issuperset(levels):
            # Level which is not in a dimension
            return False
    return True
[ "def", "check_valid_hierarchies", "(", "instance", ")", ":", "hierarchies", "=", "instance", ".", "get", "(", "'hierarchies'", ",", "{", "}", ")", ".", "values", "(", ")", "dimensions", "=", "set", "(", "instance", ".", "get", "(", "'dimensions'", ",", "{", "}", ")", ".", "keys", "(", ")", ")", "all_levels", "=", "set", "(", ")", "for", "hierarcy", "in", "hierarchies", ":", "levels", "=", "set", "(", "hierarcy", ".", "get", "(", "'levels'", ",", "[", "]", ")", ")", "if", "len", "(", "all_levels", ".", "intersection", "(", "levels", ")", ")", ">", "0", ":", "# Dimension appears in two different hierarchies", "return", "False", "all_levels", "=", "all_levels", ".", "union", "(", "levels", ")", "if", "not", "dimensions", ".", "issuperset", "(", "levels", ")", ":", "# Level which is not in a dimension", "return", "False", "return", "True" ]
Additional check for the hierarchies model, to ensure that levels given are pointing to actual dimensions
[ "Additional", "check", "for", "the", "hierarchies", "model", "to", "ensure", "that", "levels", "given", "are", "pointing", "to", "actual", "dimensions" ]
python
train
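Two hand-built model fragments make the rule concrete. Given check_valid_hierarchies as defined above: every hierarchy level must name a declared dimension, and no level may appear in two hierarchies.

    valid = {
        'dimensions': {'year': {}, 'month': {}, 'region': {}},
        'hierarchies': {
            'time': {'levels': ['year', 'month']},
            'geo': {'levels': ['region']},
        },
    }
    shared_level = {
        'dimensions': {'year': {}},
        'hierarchies': {
            'a': {'levels': ['year']},
            'b': {'levels': ['year']},  # 'year' reused across hierarchies
        },
    }
    assert check_valid_hierarchies(valid) is True
    assert check_valid_hierarchies(shared_level) is False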
openvax/varcode
varcode/variant_collection.py
https://github.com/openvax/varcode/blob/981633db45ca2b31f76c06894a7360ea5d70a9b8/varcode/variant_collection.py#L108-L122
def effects(self, raise_on_error=True):
    """
    Parameters
    ----------
    raise_on_error : bool, optional
        If exception is raised while determining effect of variant on a
        transcript, should it be raised? This default is True, meaning
        errors result in raised exceptions, otherwise they are only logged.
    """
    return EffectCollection([
        effect
        for variant in self
        for effect in variant.effects(raise_on_error=raise_on_error)
    ])
[ "def", "effects", "(", "self", ",", "raise_on_error", "=", "True", ")", ":", "return", "EffectCollection", "(", "[", "effect", "for", "variant", "in", "self", "for", "effect", "in", "variant", ".", "effects", "(", "raise_on_error", "=", "raise_on_error", ")", "]", ")" ]
Parameters
----------
raise_on_error : bool, optional
    If exception is raised while determining effect of variant on a
    transcript, should it be raised? This default is True, meaning
    errors result in raised exceptions, otherwise they are only logged.
[ "Parameters", "----------", "raise_on_error", ":", "bool", "optional", "If", "exception", "is", "raised", "while", "determining", "effect", "of", "variant", "on", "a", "transcript", "should", "it", "be", "raised?", "This", "default", "is", "True", "meaning", "errors", "result", "in", "raised", "exceptions", "otherwise", "they", "are", "only", "logged", "." ]
python
train
juju/juju-bundlelib
jujubundlelib/pyutils.py
https://github.com/juju/juju-bundlelib/blob/c2efa614f53675ed9526027776448bfbb0454ca6/jujubundlelib/pyutils.py#L11-L23
def string_class(cls):
    """Define __unicode__ and __str__ methods on the given class in Python 2.

    The given class must define a __str__ method returning a unicode string,
    otherwise a TypeError is raised.
    Under Python 3, the class is returned as is.
    """
    if not PY3:
        if '__str__' not in cls.__dict__:
            raise TypeError('the given class has no __str__ method')
        cls.__unicode__, cls.__string__ = (
            cls.__str__,
            lambda self: self.__unicode__().encode('utf-8'))
    return cls
[ "def", "string_class", "(", "cls", ")", ":", "if", "not", "PY3", ":", "if", "'__str__'", "not", "in", "cls", ".", "__dict__", ":", "raise", "TypeError", "(", "'the given class has no __str__ method'", ")", "cls", ".", "__unicode__", ",", "cls", ".", "__string__", "=", "(", "cls", ".", "__str__", ",", "lambda", "self", ":", "self", ".", "__unicode__", "(", ")", ".", "encode", "(", "'utf-8'", ")", ")", "return", "cls" ]
Define __unicode__ and __str__ methods on the given class in Python 2. The given class must define a __str__ method returning a unicode string, otherwise a TypeError is raised. Under Python 3, the class is returned as is.
[ "Define", "__unicode__", "and", "__str__", "methods", "on", "the", "given", "class", "in", "Python", "2", "." ]
python
train
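A usage sketch, given the decorator above; it only has an effect under Python 2, where jujubundlelib's PY3 flag is False:

    @string_class
    class Greeting(object):
        def __str__(self):
            return u'caf\xe9'

    # Python 2: __unicode__ is wired to the original __str__, so
    # unicode(Greeting()) == u'caf\xe9'.
    # Python 3: the class is returned unchanged.
    print(str(Greeting()))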
SheffieldML/GPy
GPy/util/datasets.py
https://github.com/SheffieldML/GPy/blob/54c32d79d289d622fb18b898aee65a2a431d90cf/GPy/util/datasets.py#L96-L112
def data_available(dataset_name=None):
    """Check if the data set is available on the local machine already."""
    try:
        from itertools import zip_longest
    except ImportError:
        from itertools import izip_longest as zip_longest
    dr = data_resources[dataset_name]
    zip_urls = (dr['files'], )
    if 'save_names' in dr:
        zip_urls += (dr['save_names'], )
    else:
        zip_urls += ([],)
    for file_list, save_list in zip_longest(*zip_urls, fillvalue=[]):
        for f, s in zip_longest(file_list, save_list, fillvalue=None):
            if s is not None:
                f = s  # If there is a save_name given, use that one
            if not os.path.exists(os.path.join(data_path, dataset_name, f)):
                return False
    return True
[ "def", "data_available", "(", "dataset_name", "=", "None", ")", ":", "try", ":", "from", "itertools", "import", "zip_longest", "except", "ImportError", ":", "from", "itertools", "import", "izip_longest", "as", "zip_longest", "dr", "=", "data_resources", "[", "dataset_name", "]", "zip_urls", "=", "(", "dr", "[", "'files'", "]", ",", ")", "if", "'save_names'", "in", "dr", ":", "zip_urls", "+=", "(", "dr", "[", "'save_names'", "]", ",", ")", "else", ":", "zip_urls", "+=", "(", "[", "]", ",", ")", "for", "file_list", ",", "save_list", "in", "zip_longest", "(", "*", "zip_urls", ",", "fillvalue", "=", "[", "]", ")", ":", "for", "f", ",", "s", "in", "zip_longest", "(", "file_list", ",", "save_list", ",", "fillvalue", "=", "None", ")", ":", "if", "s", "is", "not", "None", ":", "f", "=", "s", "# If there is a save_name given, use that one", "if", "not", "os", ".", "path", ".", "exists", "(", "os", ".", "path", ".", "join", "(", "data_path", ",", "dataset_name", ",", "f", ")", ")", ":", "return", "False", "return", "True" ]
Check if the data set is available on the local machine already.
[ "Check", "if", "the", "data", "set", "is", "available", "on", "the", "local", "machine", "already", "." ]
python
train
OpenTreeOfLife/peyotl
peyotl/api/taxomachine.py
https://github.com/OpenTreeOfLife/peyotl/blob/5e4e52a0fdbd17f490aa644ad79fda6ea2eda7c0/peyotl/api/taxomachine.py#L205-L228
def autocomplete(self, name, context_name=None, include_dubious=False):
    """Takes a name and optional context_name returns a list of matches.

    Each match is a dict with:
        'higher' boolean DEF???
        'exact' boolean for exact match
        'ottId' int
        'name' name (or uniqname???) for the taxon in OTT
        'nodeId' int ID of node in the taxomachine db. probably not of use
            to anyone...
    """
    if context_name and context_name not in self.valid_contexts:
        raise ValueError('"{}" is not a valid context name'.format(context_name))
    if self.use_v1:
        uri = '{p}/autocompleteBoxQuery'.format(p=self.prefix)
        data = {'queryString': name}
        if context_name:
            data['contextName'] = context_name
    else:
        uri = '{p}/autocomplete_name'.format(p=self.prefix)
        data = {'name': name}
        if context_name:
            data['context_name'] = context_name
        if include_dubious:
            data['include_dubious'] = True
    return self.json_http_post(uri, data=anyjson.dumps(data))
[ "def", "autocomplete", "(", "self", ",", "name", ",", "context_name", "=", "None", ",", "include_dubious", "=", "False", ")", ":", "if", "context_name", "and", "context_name", "not", "in", "self", ".", "valid_contexts", ":", "raise", "ValueError", "(", "'\"{}\" is not a valid context name'", ".", "format", "(", "context_name", ")", ")", "if", "self", ".", "use_v1", ":", "uri", "=", "'{p}/autocompleteBoxQuery'", ".", "format", "(", "p", "=", "self", ".", "prefix", ")", "data", "=", "{", "'queryString'", ":", "name", "}", "if", "context_name", ":", "data", "[", "'contextName'", "]", "=", "context_name", "else", ":", "uri", "=", "'{p}/autocomplete_name'", ".", "format", "(", "p", "=", "self", ".", "prefix", ")", "data", "=", "{", "'name'", ":", "name", "}", "if", "context_name", ":", "data", "[", "'context_name'", "]", "=", "context_name", "if", "include_dubious", ":", "data", "[", "'include_dubious'", "]", "=", "True", "return", "self", ".", "json_http_post", "(", "uri", ",", "data", "=", "anyjson", ".", "dumps", "(", "data", ")", ")" ]
Takes a name and optional context_name returns a list of matches.

Each match is a dict with:
    'higher' boolean DEF???
    'exact' boolean for exact match
    'ottId' int
    'name' name (or uniqname???) for the taxon in OTT
    'nodeId' int ID of node in the taxomachine db. probably not of use
        to anyone...
[ "Takes", "a", "name", "and", "optional", "context_name", "returns", "a", "list", "of", "matches", ".", "Each", "match", "is", "a", "dict", "with", ":", "higher", "boolean", "DEF???", "exact", "boolean", "for", "exact", "match", "ottId", "int", "name", "name", "(", "or", "uniqname???", ")", "for", "the", "taxon", "in", "OTT", "nodeId", "int", "ID", "of", "not", "in", "the", "taxomachine", "db", ".", "probably", "not", "of", "use", "to", "anyone", "..." ]
python
train
daler/metaseq
metaseq/results_table.py
https://github.com/daler/metaseq/blob/fa875d1f72317aa7ef95cb128b739956b16eef9f/metaseq/results_table.py#L131-L147
def reindex_to(self, x, attribute="Name"):
    """
    Returns a copy that only has rows corresponding to feature names in x.

    Parameters
    ----------
    x : str or pybedtools.BedTool
        BED, GFF, GTF, or VCF where the "Name" field (that is, the value
        returned by feature['Name']) or any arbitrary attribute

    attribute : str
        Attribute containing the name of the feature to use as the index.
    """
    names = [i[attribute] for i in x]
    new = self.copy()
    new.data = new.data.reindex(names)
    return new
[ "def", "reindex_to", "(", "self", ",", "x", ",", "attribute", "=", "\"Name\"", ")", ":", "names", "=", "[", "i", "[", "attribute", "]", "for", "i", "in", "x", "]", "new", "=", "self", ".", "copy", "(", ")", "new", ".", "data", "=", "new", ".", "data", ".", "reindex", "(", "names", ")", "return", "new" ]
Returns a copy that only has rows corresponding to feature names in x.

Parameters
----------
x : str or pybedtools.BedTool
    BED, GFF, GTF, or VCF where the "Name" field (that is, the value
    returned by feature['Name']) or any arbitrary attribute

attribute : str
    Attribute containing the name of the feature to use as the index.
[ "Returns", "a", "copy", "that", "only", "has", "rows", "corresponding", "to", "feature", "names", "in", "x", "." ]
python
train
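A usage sketch with hypothetical objects. Anything iterable whose items support feature[attribute] works, so plain dicts stand in for pybedtools features here; results is a hypothetical instance of the class this method belongs to.

    features = [{'Name': 'geneB'}, {'Name': 'geneA'}, {'Name': 'geneC'}]

    reordered = results.reindex_to(features)
    # reordered.data now has rows in the order geneB, geneA, geneC;
    # names absent from the original index become all-NaN rows (pandas reindex).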
RedFantom/ttkwidgets
ttkwidgets/frames/balloon.py
https://github.com/RedFantom/ttkwidgets/blob/02150322060f867b6e59a175522ef84b09168019/ttkwidgets/frames/balloon.py#L66-L70
def _grid_widgets(self):
    """Place the widgets in the Toplevel."""
    self._canvas.grid(sticky="nswe")
    self.header_label.grid(row=1, column=1, sticky="nswe", pady=5, padx=5)
    self.text_label.grid(row=3, column=1, sticky="nswe", pady=6, padx=5)
[ "def", "_grid_widgets", "(", "self", ")", ":", "self", ".", "_canvas", ".", "grid", "(", "sticky", "=", "\"nswe\"", ")", "self", ".", "header_label", ".", "grid", "(", "row", "=", "1", ",", "column", "=", "1", ",", "sticky", "=", "\"nswe\"", ",", "pady", "=", "5", ",", "padx", "=", "5", ")", "self", ".", "text_label", ".", "grid", "(", "row", "=", "3", ",", "column", "=", "1", ",", "sticky", "=", "\"nswe\"", ",", "pady", "=", "6", ",", "padx", "=", "5", ")" ]
Place the widgets in the Toplevel.
[ "Place", "the", "widgets", "in", "the", "Toplevel", "." ]
python
train
CSchoel/nolds
nolds/measures.py
https://github.com/CSchoel/nolds/blob/8a5ecc472d67ac08b571bd68967287668ca9058e/nolds/measures.py#L839-L874
def logmid_n(max_n, ratio=1/4.0, nsteps=15):
    """
    Creates an array of integers that lie evenly spaced in the "middle" of the
    logarithmic scale from 0 to log(max_n).

    If max_n is very small and/or nsteps is very large, this may lead to
    duplicate values which will be removed from the output.

    This function has benefits in hurst_rs, because it cuts away both very
    small and very large n, which both can cause problems, and still produces
    a logarithmically spaced sequence.

    Args:
        max_n (int):
            largest possible output value (should be the sequence length when
            used in hurst_rs)

    Kwargs:
        ratio (float):
            width of the "middle" of the logarithmic interval relative to
            log(max_n). For example, for ratio=1/2.0 the logarithm of the
            resulting values will lie between 0.25 * log(max_n) and
            0.75 * log(max_n).
        nsteps (float):
            (maximum) number of values to take from the specified range

    Returns:
        array of int:
            a logarithmically spaced sequence of at most nsteps values
            (may be less, because only unique values are returned)
    """
    l = np.log(max_n)
    span = l * ratio
    start = l * (1 - ratio) * 0.5
    midrange = start + 1.0*np.arange(nsteps)/nsteps*span
    nvals = np.round(np.exp(midrange)).astype("int32")
    return np.unique(nvals)
[ "def", "logmid_n", "(", "max_n", ",", "ratio", "=", "1", "/", "4.0", ",", "nsteps", "=", "15", ")", ":", "l", "=", "np", ".", "log", "(", "max_n", ")", "span", "=", "l", "*", "ratio", "start", "=", "l", "*", "(", "1", "-", "ratio", ")", "*", "0.5", "midrange", "=", "start", "+", "1.0", "*", "np", ".", "arange", "(", "nsteps", ")", "/", "nsteps", "*", "span", "nvals", "=", "np", ".", "round", "(", "np", ".", "exp", "(", "midrange", ")", ")", ".", "astype", "(", "\"int32\"", ")", "return", "np", ".", "unique", "(", "nvals", ")" ]
Creates an array of integers that lie evenly spaced in the "middle" of the
logarithmic scale from 0 to log(max_n).

If max_n is very small and/or nsteps is very large, this may lead to
duplicate values which will be removed from the output.

This function has benefits in hurst_rs, because it cuts away both very
small and very large n, which both can cause problems, and still produces
a logarithmically spaced sequence.

Args:
    max_n (int):
        largest possible output value (should be the sequence length when
        used in hurst_rs)

Kwargs:
    ratio (float):
        width of the "middle" of the logarithmic interval relative to
        log(max_n). For example, for ratio=1/2.0 the logarithm of the
        resulting values will lie between 0.25 * log(max_n) and
        0.75 * log(max_n).
    nsteps (float):
        (maximum) number of values to take from the specified range

Returns:
    array of int:
        a logarithmically spaced sequence of at most nsteps values
        (may be less, because only unique values are returned)
[ "Creates", "an", "array", "of", "integers", "that", "lie", "evenly", "spaced", "in", "the", "middle", "of", "the", "logarithmic", "scale", "from", "0", "to", "log", "(", "max_n", ")", "." ]
python
train
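A worked example, given logmid_n as defined above. For max_n=1000 and the defaults, the midrange runs from 0.375*log(1000) to just under 0.625*log(1000), so the values land in roughly 13..67 rather than spanning the full 1..1000:

    vals = logmid_n(1000)
    print(vals.min(), vals.max())   # 13 67
    assert 1 < vals.min() and vals.max() < 1000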
flowroute/txjason
txjason/service.py
https://github.com/flowroute/txjason/blob/4865bd716847dcbab99acc69daa0c44ae3cc5b89/txjason/service.py#L397-L410
def _get_params(self, rdata):
    """
    Returns a list of jsonrpc request's method parameters.
    """
    if 'params' in rdata:
        if isinstance(rdata['params'], dict) \
                or isinstance(rdata['params'], list) \
                or rdata['params'] is None:
            return rdata['params']
        else:
            # wrong type
            raise InvalidRequestError
    else:
        return None
[ "def", "_get_params", "(", "self", ",", "rdata", ")", ":", "if", "'params'", "in", "rdata", ":", "if", "isinstance", "(", "rdata", "[", "'params'", "]", ",", "dict", ")", "or", "isinstance", "(", "rdata", "[", "'params'", "]", ",", "list", ")", "or", "rdata", "[", "'params'", "]", "is", "None", ":", "return", "rdata", "[", "'params'", "]", "else", ":", "# wrong type", "raise", "InvalidRequestError", "else", ":", "return", "None" ]
Returns a list of jsonrpc request's method parameters.
[ "Returns", "a", "list", "of", "jsonrpc", "request", "s", "method", "parameters", "." ]
python
train
riga/tfdeploy
tfdeploy.py
https://github.com/riga/tfdeploy/blob/8481f657d6e3a51d76185a195b993e45f448828a/tfdeploy.py#L2224-L2229
def AvgPool3D(a, k, strides, padding):
    """
    Average 3D pooling op.
    """
    patches = _pool_patches(a, k, strides, padding.decode("ascii"))
    return np.average(patches, axis=tuple(range(-len(k), 0))),
[ "def", "AvgPool3D", "(", "a", ",", "k", ",", "strides", ",", "padding", ")", ":", "patches", "=", "_pool_patches", "(", "a", ",", "k", ",", "strides", ",", "padding", ".", "decode", "(", "\"ascii\"", ")", ")", "return", "np", ".", "average", "(", "patches", ",", "axis", "=", "tuple", "(", "range", "(", "-", "len", "(", "k", ")", ",", "0", ")", ")", ")", "," ]
Average 3D pooling op.
[ "Average", "3D", "pooling", "op", "." ]
python
train
inspirehep/harvesting-kit
harvestingkit/inspire_cds_package/from_cds.py
https://github.com/inspirehep/harvesting-kit/blob/33a7f8aa9dade1d863110c6d8b27dfd955cb471f/harvestingkit/inspire_cds_package/from_cds.py#L317-L327
def update_keywords(self):
    """653 Free Keywords."""
    for field in record_get_field_instances(self.record, '653', ind1='1'):
        subs = field_get_subfields(field)
        new_subs = []
        if 'a' in subs:
            for val in subs['a']:
                new_subs.extend([('9', 'author'), ('a', val)])
        new_field = create_field(subfields=new_subs, ind1='1')
        record_replace_field(
            self.record, '653', new_field, field_position_global=field[4])
[ "def", "update_keywords", "(", "self", ")", ":", "for", "field", "in", "record_get_field_instances", "(", "self", ".", "record", ",", "'653'", ",", "ind1", "=", "'1'", ")", ":", "subs", "=", "field_get_subfields", "(", "field", ")", "new_subs", "=", "[", "]", "if", "'a'", "in", "subs", ":", "for", "val", "in", "subs", "[", "'a'", "]", ":", "new_subs", ".", "extend", "(", "[", "(", "'9'", ",", "'author'", ")", ",", "(", "'a'", ",", "val", ")", "]", ")", "new_field", "=", "create_field", "(", "subfields", "=", "new_subs", ",", "ind1", "=", "'1'", ")", "record_replace_field", "(", "self", ".", "record", ",", "'653'", ",", "new_field", ",", "field_position_global", "=", "field", "[", "4", "]", ")" ]
653 Free Keywords.
[ "653", "Free", "Keywords", "." ]
python
valid
pazz/alot
alot/ui.py
https://github.com/pazz/alot/blob/d0297605c0ec1c6b65f541d0fd5b69ac5a0f4ded/alot/ui.py#L514-L579
def choice(self, message, choices=None, select=None, cancel=None,
           msg_position='above', choices_to_return=None):
    """
    prompt user to make a choice.

    :param message: string to display before list of choices
    :type message: unicode
    :param choices: dict of possible choices
    :type choices: dict: keymap->choice (both str)
    :param choices_to_return: dict of possible choices to return for the
        choices of the `choices` parameter
    :type choices: dict: keymap->choice key is str and value is any obj)
    :param select: choice to return if enter/return is hit. Ignored if set
        to `None`.
    :type select: str
    :param cancel: choice to return if escape is hit. Ignored if set to
        `None`.
    :type cancel: str
    :param msg_position: determines if `message` is above or left of the
        prompt. Must be `above` or `left`.
    :type msg_position: str
    :rtype: asyncio.Future
    """
    choices = choices or {'y': 'yes', 'n': 'no'}
    assert select is None or select in choices.values()
    assert cancel is None or cancel in choices.values()
    assert msg_position in ['left', 'above']

    fut = asyncio.get_event_loop().create_future()  # Create a returned future
    oldroot = self.mainloop.widget

    def select_or_cancel(text):
        """Restore the main screen and invoce the callback (delayed return)
        with the given text."""
        self.mainloop.widget = oldroot
        self._passall = False
        fut.set_result(text)

    # set up widgets
    msgpart = urwid.Text(message)
    choicespart = ChoiceWidget(choices,
                               choices_to_return=choices_to_return,
                               callback=select_or_cancel, select=select,
                               cancel=cancel)

    # build widget
    if msg_position == 'left':
        both = urwid.Columns(
            [
                ('fixed', len(message), msgpart),
                ('weight', 1, choicespart),
            ], dividechars=1)
    else:  # above
        both = urwid.Pile([msgpart, choicespart])
    att = settings.get_theming_attribute('global', 'prompt')
    both = urwid.AttrMap(both, att, att)

    # put promptwidget as overlay on main widget
    overlay = urwid.Overlay(both, oldroot,
                            ('fixed left', 0), ('fixed right', 0),
                            ('fixed bottom', 1), None)
    self.mainloop.widget = overlay
    self._passall = True
    return fut
[ "def", "choice", "(", "self", ",", "message", ",", "choices", "=", "None", ",", "select", "=", "None", ",", "cancel", "=", "None", ",", "msg_position", "=", "'above'", ",", "choices_to_return", "=", "None", ")", ":", "choices", "=", "choices", "or", "{", "'y'", ":", "'yes'", ",", "'n'", ":", "'no'", "}", "assert", "select", "is", "None", "or", "select", "in", "choices", ".", "values", "(", ")", "assert", "cancel", "is", "None", "or", "cancel", "in", "choices", ".", "values", "(", ")", "assert", "msg_position", "in", "[", "'left'", ",", "'above'", "]", "fut", "=", "asyncio", ".", "get_event_loop", "(", ")", ".", "create_future", "(", ")", "# Create a returned future", "oldroot", "=", "self", ".", "mainloop", ".", "widget", "def", "select_or_cancel", "(", "text", ")", ":", "\"\"\"Restore the main screen and invoce the callback (delayed return)\n with the given text.\"\"\"", "self", ".", "mainloop", ".", "widget", "=", "oldroot", "self", ".", "_passall", "=", "False", "fut", ".", "set_result", "(", "text", ")", "# set up widgets", "msgpart", "=", "urwid", ".", "Text", "(", "message", ")", "choicespart", "=", "ChoiceWidget", "(", "choices", ",", "choices_to_return", "=", "choices_to_return", ",", "callback", "=", "select_or_cancel", ",", "select", "=", "select", ",", "cancel", "=", "cancel", ")", "# build widget", "if", "msg_position", "==", "'left'", ":", "both", "=", "urwid", ".", "Columns", "(", "[", "(", "'fixed'", ",", "len", "(", "message", ")", ",", "msgpart", ")", ",", "(", "'weight'", ",", "1", ",", "choicespart", ")", ",", "]", ",", "dividechars", "=", "1", ")", "else", ":", "# above", "both", "=", "urwid", ".", "Pile", "(", "[", "msgpart", ",", "choicespart", "]", ")", "att", "=", "settings", ".", "get_theming_attribute", "(", "'global'", ",", "'prompt'", ")", "both", "=", "urwid", ".", "AttrMap", "(", "both", ",", "att", ",", "att", ")", "# put promptwidget as overlay on main widget", "overlay", "=", "urwid", ".", "Overlay", "(", "both", ",", "oldroot", ",", "(", "'fixed left'", ",", "0", ")", ",", "(", "'fixed right'", ",", "0", ")", ",", "(", "'fixed bottom'", ",", "1", ")", ",", "None", ")", "self", ".", "mainloop", ".", "widget", "=", "overlay", "self", ".", "_passall", "=", "True", "return", "fut" ]
prompt user to make a choice.

:param message: string to display before list of choices
:type message: unicode
:param choices: dict of possible choices
:type choices: dict: keymap->choice (both str)
:param choices_to_return: dict of possible choices to return for the
    choices of the `choices` parameter
:type choices: dict: keymap->choice key is str and value is any obj)
:param select: choice to return if enter/return is hit. Ignored if set to
    `None`.
:type select: str
:param cancel: choice to return if escape is hit. Ignored if set to `None`.
:type cancel: str
:param msg_position: determines if `message` is above or left of the prompt.
    Must be `above` or `left`.
:type msg_position: str
:rtype: asyncio.Future
[ "prompt", "user", "to", "make", "a", "choice", "." ]
python
train
open511/open511
open511/utils/schedule.py
https://github.com/open511/open511/blob/3d573f59d7efa06ff1b5419ea5ff4d90a90b3cf8/open511/utils/schedule.py#L58-L64
def next_interval(self, after=None):
    """Returns the next Period this event is in effect, or None if the event
    has no remaining periods."""
    if after is None:
        after = timezone.now()
    after = self.to_timezone(after)
    return next(self.intervals(range_start=after), None)
[ "def", "next_interval", "(", "self", ",", "after", "=", "None", ")", ":", "if", "after", "is", "None", ":", "after", "=", "timezone", ".", "now", "(", ")", "after", "=", "self", ".", "to_timezone", "(", "after", ")", "return", "next", "(", "self", ".", "intervals", "(", "range_start", "=", "after", ")", ",", "None", ")" ]
Returns the next Period this event is in effect, or None if the event has no remaining periods.
[ "Returns", "the", "next", "Period", "this", "event", "is", "in", "effect", "or", "None", "if", "the", "event", "has", "no", "remaining", "periods", "." ]
python
valid
fitnr/twitter_bot_utils
twitter_bot_utils/args.py
https://github.com/fitnr/twitter_bot_utils/blob/21f35afa5048cd3efa54db8cb87d405f69a78a62/twitter_bot_utils/args.py#L21-L53
def add_default_args(parser, version=None, include=None):
    '''
    Add default arguments to a parser. These are:
        - config: argument for specifying a configuration file.
        - user: argument for specifying a user.
        - dry-run: option for running without side effects.
        - verbose: option for running verbosely.
        - quiet: option for running quietly.
        - version: option for spitting out version information.

    Args:
        version (str): version to return on <cli> --version
        include (Sequence): default arguments to add to cli.
            Default: (config, user, dry-run, verbose, quiet)
    '''
    include = include or ('config', 'user', 'dry-run', 'verbose', 'quiet')

    if 'config' in include:
        parser.add_argument('-c', '--config', dest='config_file', metavar='PATH',
                            default=None, type=str,
                            help='bots config file (json or yaml)')

    if 'user' in include:
        parser.add_argument('-u', '--user', dest='screen_name', type=str,
                            help="Twitter screen name")

    if 'dry-run' in include:
        parser.add_argument('-n', '--dry-run', action='store_true',
                            help="Don't actually do anything")

    if 'verbose' in include:
        parser.add_argument('-v', '--verbose', action='store_true',
                            help="Run talkatively")

    if 'quiet' in include:
        parser.add_argument('-q', '--quiet', action='store_true',
                            help="Run quietly")

    if version:
        parser.add_argument('-V', '--version', action='version',
                            version="%(prog)s " + version)
[ "def", "add_default_args", "(", "parser", ",", "version", "=", "None", ",", "include", "=", "None", ")", ":", "include", "=", "include", "or", "(", "'config'", ",", "'user'", ",", "'dry-run'", ",", "'verbose'", ",", "'quiet'", ")", "if", "'config'", "in", "include", ":", "parser", ".", "add_argument", "(", "'-c'", ",", "'--config'", ",", "dest", "=", "'config_file'", ",", "metavar", "=", "'PATH'", ",", "default", "=", "None", ",", "type", "=", "str", ",", "help", "=", "'bots config file (json or yaml)'", ")", "if", "'user'", "in", "include", ":", "parser", ".", "add_argument", "(", "'-u'", ",", "'--user'", ",", "dest", "=", "'screen_name'", ",", "type", "=", "str", ",", "help", "=", "\"Twitter screen name\"", ")", "if", "'dry-run'", "in", "include", ":", "parser", ".", "add_argument", "(", "'-n'", ",", "'--dry-run'", ",", "action", "=", "'store_true'", ",", "help", "=", "\"Don't actually do anything\"", ")", "if", "'verbose'", "in", "include", ":", "parser", ".", "add_argument", "(", "'-v'", ",", "'--verbose'", ",", "action", "=", "'store_true'", ",", "help", "=", "\"Run talkatively\"", ")", "if", "'quiet'", "in", "include", ":", "parser", ".", "add_argument", "(", "'-q'", ",", "'--quiet'", ",", "action", "=", "'store_true'", ",", "help", "=", "\"Run quietly\"", ")", "if", "version", ":", "parser", ".", "add_argument", "(", "'-V'", ",", "'--version'", ",", "action", "=", "'version'", ",", "version", "=", "\"%(prog)s \"", "+", "version", ")" ]
Add default arguments to a parser. These are:
    - config: argument for specifying a configuration file.
    - user: argument for specifying a user.
    - dry-run: option for running without side effects.
    - verbose: option for running verbosely.
    - quiet: option for running quietly.
    - version: option for spitting out version information.

Args:
    version (str): version to return on <cli> --version
    include (Sequence): default arguments to add to cli.
        Default: (config, user, dry-run, verbose, quiet)
[ "Add", "default", "arguments", "to", "a", "parser", ".", "These", "are", ":", "-", "config", ":", "argument", "for", "specifying", "a", "configuration", "file", ".", "-", "user", ":", "argument", "for", "specifying", "a", "user", ".", "-", "dry", "-", "run", ":", "option", "for", "running", "without", "side", "effects", ".", "-", "verbose", ":", "option", "for", "running", "verbosely", ".", "-", "quiet", ":", "option", "for", "running", "quietly", ".", "-", "version", ":", "option", "for", "spitting", "out", "version", "information", "." ]
python
train
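A usage sketch wiring the defaults into a fresh parser; the program name and argument values below are made up:

    import argparse

    parser = argparse.ArgumentParser(prog='mybot')
    add_default_args(parser, version='1.0', include=('config', 'user', 'verbose'))

    args = parser.parse_args(['-c', 'bots.yaml', '-u', 'example_bot', '-v'])
    print(args.config_file, args.screen_name, args.verbose)  # bots.yaml example_bot True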
peerplays-network/python-peerplays
peerplays/peerplays.py
https://github.com/peerplays-network/python-peerplays/blob/188f04238e7e21d5f73e9b01099eea44289ef6b7/peerplays/peerplays.py#L874-L899
def sport_delete(self, sport_id="0.0.0", account=None, **kwargs):
    """ Remove a sport. This needs to be **proposed**.

        :param str sport_id: Sport ID to identify the Sport to be deleted
        :param str account: (optional) Account used to verify the operation
    """
    if not account:
        if "default_account" in config:
            account = config["default_account"]
    if not account:
        raise ValueError("You need to provide an account")
    account = Account(account)
    sport = Sport(sport_id)
    op = operations.Sport_delete(
        **{
            "fee": {"amount": 0, "asset_id": "1.3.0"},
            "sport_id": sport["id"],
            "prefix": self.prefix,
        }
    )
    return self.finalizeOp(op, account["name"], "active", **kwargs)
[ "def", "sport_delete", "(", "self", ",", "sport_id", "=", "\"0.0.0\"", ",", "account", "=", "None", ",", "*", "*", "kwargs", ")", ":", "if", "not", "account", ":", "if", "\"default_account\"", "in", "config", ":", "account", "=", "config", "[", "\"default_account\"", "]", "if", "not", "account", ":", "raise", "ValueError", "(", "\"You need to provide an account\"", ")", "account", "=", "Account", "(", "account", ")", "sport", "=", "Sport", "(", "sport_id", ")", "op", "=", "operations", ".", "Sport_delete", "(", "*", "*", "{", "\"fee\"", ":", "{", "\"amount\"", ":", "0", ",", "\"asset_id\"", ":", "\"1.3.0\"", "}", ",", "\"sport_id\"", ":", "sport", "[", "\"id\"", "]", ",", "\"prefix\"", ":", "self", ".", "prefix", ",", "}", ")", "return", "self", ".", "finalizeOp", "(", "op", ",", "account", "[", "\"name\"", "]", ",", "\"active\"", ",", "*", "*", "kwargs", ")" ]
Remove a sport. This needs to be **proposed**.

:param str sport_id: Sport ID to identify the Sport to be deleted
:param str account: (optional) Account used to verify the operation
[ "Remove", "a", "sport", ".", "This", "needs", "to", "be", "**", "proposed", "**", "." ]
python
train
brocade/pynos
pynos/versions/ver_6/ver_6_0_1/yang/brocade_mac_address_table.py
https://github.com/brocade/pynos/blob/bd8a34e98f322de3fc06750827d8bbc3a0c00380/pynos/versions/ver_6/ver_6_0_1/yang/brocade_mac_address_table.py#L250-L263
def get_mac_address_table_input_request_type_get_request_mac_address(self, **kwargs):
    """Auto Generated Code
    """
    config = ET.Element("config")
    get_mac_address_table = ET.Element("get_mac_address_table")
    config = get_mac_address_table
    input = ET.SubElement(get_mac_address_table, "input")
    request_type = ET.SubElement(input, "request-type")
    get_request = ET.SubElement(request_type, "get-request")
    mac_address = ET.SubElement(get_request, "mac-address")
    mac_address.text = kwargs.pop('mac_address')

    callback = kwargs.pop('callback', self._callback)
    return callback(config)
[ "def", "get_mac_address_table_input_request_type_get_request_mac_address", "(", "self", ",", "*", "*", "kwargs", ")", ":", "config", "=", "ET", ".", "Element", "(", "\"config\"", ")", "get_mac_address_table", "=", "ET", ".", "Element", "(", "\"get_mac_address_table\"", ")", "config", "=", "get_mac_address_table", "input", "=", "ET", ".", "SubElement", "(", "get_mac_address_table", ",", "\"input\"", ")", "request_type", "=", "ET", ".", "SubElement", "(", "input", ",", "\"request-type\"", ")", "get_request", "=", "ET", ".", "SubElement", "(", "request_type", ",", "\"get-request\"", ")", "mac_address", "=", "ET", ".", "SubElement", "(", "get_request", ",", "\"mac-address\"", ")", "mac_address", ".", "text", "=", "kwargs", ".", "pop", "(", "'mac_address'", ")", "callback", "=", "kwargs", ".", "pop", "(", "'callback'", ",", "self", ".", "_callback", ")", "return", "callback", "(", "config", ")" ]
Auto Generated Code
[ "Auto", "Generated", "Code" ]
python
train
rehandalal/buchner
buchner/project-template/manage.py
https://github.com/rehandalal/buchner/blob/dc22a61c493b9d4a74d76e8b42a319aa13e385f3/buchner/project-template/manage.py#L30-L36
def db_create():
    """Create the database"""
    try:
        migrate_api.version_control(url=db_url, repository=db_repo)
        db_upgrade()
    except DatabaseAlreadyControlledError:
        print 'ERROR: Database is already version controlled.'
[ "def", "db_create", "(", ")", ":", "try", ":", "migrate_api", ".", "version_control", "(", "url", "=", "db_url", ",", "repository", "=", "db_repo", ")", "db_upgrade", "(", ")", "except", "DatabaseAlreadyControlledError", ":", "print", "'ERROR: Database is already version controlled.'" ]
Create the database
[ "Create", "the", "database" ]
python
train
qubell/contrib-python-qubell-client
qubell/api/private/instance.py
https://github.com/qubell/contrib-python-qubell-client/blob/4586ea11d5103c2ff9607d3ed922b5a0991b8845/qubell/api/private/instance.py#L113-L121
def __collect_interfaces_return(interfaces):
    """Collect new style (44.1+) return values to old-style kv-list"""
    acc = []
    for (interfaceName, interfaceData) in interfaces.items():
        signalValues = interfaceData.get("signals", {})
        for (signalName, signalValue) in signalValues.items():
            pinName = "{0}.{1}".format(interfaceName, signalName)
            acc.append({'id': pinName, 'value': signalValue})
    return acc
[ "def", "__collect_interfaces_return", "(", "interfaces", ")", ":", "acc", "=", "[", "]", "for", "(", "interfaceName", ",", "interfaceData", ")", "in", "interfaces", ".", "items", "(", ")", ":", "signalValues", "=", "interfaceData", ".", "get", "(", "\"signals\"", ",", "{", "}", ")", "for", "(", "signalName", ",", "signalValue", ")", "in", "signalValues", ".", "items", "(", ")", ":", "pinName", "=", "\"{0}.{1}\"", ".", "format", "(", "interfaceName", ",", "signalName", ")", "acc", ".", "append", "(", "{", "'id'", ":", "pinName", ",", "'value'", ":", "signalValue", "}", ")", "return", "acc" ]
Collect new style (44.1+) return values to old-style kv-list
[ "Collect", "new", "style", "(", "44", ".", "1", "+", ")", "return", "values", "to", "old", "-", "style", "kv", "-", "list" ]
python
train
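A worked example of the flattening (the values are made up): nested interface/signal pairs become flat {'id': 'interface.signal', 'value': ...} entries, and interfaces without signals contribute nothing.

    interfaces = {
        'endpoint': {'signals': {'url': 'http://10.0.0.1', 'port': 8080}},
        'monitor': {},  # no "signals" key -> skipped via the .get default
    }
    # __collect_interfaces_return(interfaces) returns:
    # [{'id': 'endpoint.url', 'value': 'http://10.0.0.1'},
    #  {'id': 'endpoint.port', 'value': 8080}]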
mikedh/trimesh
trimesh/path/polygons.py
https://github.com/mikedh/trimesh/blob/25e059bf6d4caa74f62ffd58ce4f61a90ee4e518/trimesh/path/polygons.py#L376-L398
def random_polygon(segments=8, radius=1.0):
    """
    Generate a random polygon with a maximum number of sides and
    approximate radius.

    Parameters
    ---------
    segments: int, the maximum number of sides the random polygon will have
    radius:   float, the approximate radius of the polygon desired

    Returns
    ---------
    polygon: shapely.geometry.Polygon object with random exterior, and no
             interiors.
    """
    angles = np.sort(np.cumsum(np.random.random(
        segments) * np.pi * 2) % (np.pi * 2))
    radii = np.random.random(segments) * radius
    points = np.column_stack(
        (np.cos(angles), np.sin(angles))) * radii.reshape((-1, 1))
    points = np.vstack((points, points[0]))
    polygon = Polygon(points).buffer(0.0)
    if util.is_sequence(polygon):
        return polygon[0]
    return polygon
[ "def", "random_polygon", "(", "segments", "=", "8", ",", "radius", "=", "1.0", ")", ":", "angles", "=", "np", ".", "sort", "(", "np", ".", "cumsum", "(", "np", ".", "random", ".", "random", "(", "segments", ")", "*", "np", ".", "pi", "*", "2", ")", "%", "(", "np", ".", "pi", "*", "2", ")", ")", "radii", "=", "np", ".", "random", ".", "random", "(", "segments", ")", "*", "radius", "points", "=", "np", ".", "column_stack", "(", "(", "np", ".", "cos", "(", "angles", ")", ",", "np", ".", "sin", "(", "angles", ")", ")", ")", "*", "radii", ".", "reshape", "(", "(", "-", "1", ",", "1", ")", ")", "points", "=", "np", ".", "vstack", "(", "(", "points", ",", "points", "[", "0", "]", ")", ")", "polygon", "=", "Polygon", "(", "points", ")", ".", "buffer", "(", "0.0", ")", "if", "util", ".", "is_sequence", "(", "polygon", ")", ":", "return", "polygon", "[", "0", "]", "return", "polygon" ]
Generate a random polygon with a maximum number of sides and approximate radius. Parameters --------- segments: int, the maximum number of sides the random polygon will have radius: float, the approximate radius of the polygon desired Returns --------- polygon: shapely.geometry.Polygon object with random exterior, and no interiors.
[ "Generate", "a", "random", "polygon", "with", "a", "maximum", "number", "of", "sides", "and", "approximate", "radius", "." ]
python
train
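A usage sketch, given random_polygon as defined above. The buffer(0.0) call repairs any self-intersection from the random vertices, so the result is a valid Polygon whose vertices all lie within the requested radius of the origin:

    poly = random_polygon(segments=8, radius=1.0)
    assert poly.is_valid
    print(round(poly.area, 3), len(poly.exterior.coords))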
opendatateam/udata
udata/assets.py
https://github.com/opendatateam/udata/blob/f016585af94b0ff6bd73738c700324adc8ba7f8f/udata/assets.py#L27-L36
def register_manifest(app, filename='manifest.json'):
    '''Register an assets json manifest'''
    if current_app.config.get('TESTING'):
        return  # Do not spend time here when testing
    if not has_manifest(app, filename):
        msg = '{filename} not found for {app}'.format(**locals())
        raise ValueError(msg)
    manifest = _manifests.get(app, {})
    manifest.update(load_manifest(app, filename))
    _manifests[app] = manifest
[ "def", "register_manifest", "(", "app", ",", "filename", "=", "'manifest.json'", ")", ":", "if", "current_app", ".", "config", ".", "get", "(", "'TESTING'", ")", ":", "return", "# Do not spend time here when testing", "if", "not", "has_manifest", "(", "app", ",", "filename", ")", ":", "msg", "=", "'{filename} not found for {app}'", ".", "format", "(", "*", "*", "locals", "(", ")", ")", "raise", "ValueError", "(", "msg", ")", "manifest", "=", "_manifests", ".", "get", "(", "app", ",", "{", "}", ")", "manifest", ".", "update", "(", "load_manifest", "(", "app", ",", "filename", ")", ")", "_manifests", "[", "app", "]", "=", "manifest" ]
Register an assets json manifest
[ "Register", "an", "assets", "json", "manifest" ]
python
train
tcalmant/ipopo
pelix/remote/beans.py
https://github.com/tcalmant/ipopo/blob/2f9ae0c44cd9c34ef1a9d50837b3254e75678eb1/pelix/remote/beans.py#L605-L616
def matches(self, ldap_filter):
    # type: (Any[str, pelix.ldapfilter.LDAPFilter]) -> bool
    """
    Tests the properties of this EndpointDescription against the given filter

    :param ldap_filter: A filter
    :return: True if properties matches the filter
    """
    return pelix.ldapfilter.get_ldap_filter(ldap_filter).matches(
        self.__properties
    )
[ "def", "matches", "(", "self", ",", "ldap_filter", ")", ":", "# type: (Any[str, pelix.ldapfilter.LDAPFilter]) -> bool", "return", "pelix", ".", "ldapfilter", ".", "get_ldap_filter", "(", "ldap_filter", ")", ".", "matches", "(", "self", ".", "__properties", ")" ]
Tests the properties of this EndpointDescription against the given filter :param ldap_filter: A filter :return: True if properties matches the filter
[ "Tests", "the", "properties", "of", "this", "EndpointDescription", "against", "the", "given", "filter" ]
python
train
depop/python-flexisettings
flexisettings/__init__.py
https://github.com/depop/python-flexisettings/blob/36d08280ab7c45568fdf206fcdb4cf771d240c6b/flexisettings/__init__.py#L90-L114
def _load_config(initial_namespace=None, defaults=None):
    # type: (Optional[str], Optional[str]) -> ConfigLoader
    """
    Kwargs:
        initial_namespace:
        defaults:
    """
    # load defaults
    if defaults:
        config = ConfigLoader()
        config.update_from_object(defaults)

    namespace = getattr(config, 'CONFIG_NAMESPACE', initial_namespace)
    app_config = getattr(config, 'APP_CONFIG', None)

    # load customised config
    if app_config:
        if namespace is None:
            config.update_from_object(app_config)
        else:
            _temp = ConfigLoader()
            _temp.update_from_object(app_config,
                                     lambda key: key.startswith(namespace))
            config.update(_temp.namespace(namespace))

    return config
[ "def", "_load_config", "(", "initial_namespace", "=", "None", ",", "defaults", "=", "None", ")", ":", "# type: (Optional[str], Optional[str]) -> ConfigLoader", "# load defaults", "if", "defaults", ":", "config", "=", "ConfigLoader", "(", ")", "config", ".", "update_from_object", "(", "defaults", ")", "namespace", "=", "getattr", "(", "config", ",", "'CONFIG_NAMESPACE'", ",", "initial_namespace", ")", "app_config", "=", "getattr", "(", "config", ",", "'APP_CONFIG'", ",", "None", ")", "# load customised config", "if", "app_config", ":", "if", "namespace", "is", "None", ":", "config", ".", "update_from_object", "(", "app_config", ")", "else", ":", "_temp", "=", "ConfigLoader", "(", ")", "_temp", ".", "update_from_object", "(", "app_config", ",", "lambda", "key", ":", "key", ".", "startswith", "(", "namespace", ")", ")", "config", ".", "update", "(", "_temp", ".", "namespace", "(", "namespace", ")", ")", "return", "config" ]
Kwargs:
    initial_namespace:
    defaults:
[ "Kwargs", ":", "initial_namespace", ":", "defaults", ":" ]
python
train
BlendedSiteGenerator/Blended
blended/__main__.py
https://github.com/BlendedSiteGenerator/Blended/blob/e5865a8633e461a22c86ef6ee98cdd7051c412ac/blended/__main__.py#L348-L713
def build_files(outdir):
    """Build the files!"""
    # Make sure there is actually a configuration file
    config_file_dir = os.path.join(cwd, "config.py")
    if not os.path.exists(config_file_dir):
        sys.exit(
            "There dosen't seem to be a configuration file. Have you run the init command?")
    else:
        sys.path.insert(0, cwd)
        try:
            from config import website_name, website_description, website_language, home_page_list
        except:
            sys.exit(
                "ERROR: Some of the crucial configuration values could not be found! Maybe your config.py is too old. Run 'blended init' to fix.")
        try:
            from config import website_description_long, website_license, website_url, author_name, author_bio, plugins, minify_css, minify_js, custom_variables
        except:
            website_description_long = ""
            website_license = ""
            website_url = ""
            author_name = ""
            author_bio = ""
            plugins = []
            custom_variables = {}
            minify_css = False
            minify_js = False
            print("WARNING: Some of the optional configuration values could not be found! Maybe your config.py is too old. Run 'blended init' to fix.\n")

    # Create the build folder
    build_dir = os.path.join(cwd, outdir)
    if "." not in outdir and ".." not in outdir and "..." not in outdir and "...." not in outdir and "....." not in outdir:
        replace_folder(build_dir)

    # Make sure there is actually a header template file
    header_file_dir = os.path.join(cwd, "templates", "header.html")
    if not os.path.exists(header_file_dir):
        sys.exit(
            "There dosen't seem to be a header template file. You need one to generate.")

    # Make sure there is actually a footer template file
    footer_file_dir = os.path.join(cwd, "templates", "footer.html")
    if not os.path.exists(footer_file_dir):
        sys.exit(
            "There dosen't seem to be a footer template file. You need one to generate.")

    # Open the header and footer files for reading
    header_file = open(header_file_dir, "r")
    footer_file = open(footer_file_dir, "r")

    # Create the HTML page listing
    page_list_item_file = os.path.join(cwd, "templates", "page_list_item.html")
    if not os.path.exists(page_list_item_file):
        page_list = '<ul class="page-list">\n'
        for root, dirs, files in os.walk(os.path.join(cwd, "content")):
            for filename in files:
                top = os.path.dirname(os.path.join(root, filename))
                top2 = top.replace(os.path.join(cwd, "content"), "", 1)
                if platform != "win32":
                    subfolder = top2.replace("/", "", 1)
                else:
                    subfolder = top2.replace("\\", "", 1)
                if subfolder == "":
                    subfolder_link = ""
                else:
                    subfolder_link = subfolder + "/"
                file_modified = time.ctime(
                    os.path.getmtime(os.path.join(root, filename)))
                newFilename = get_html_filename(filename)
                newFilename2 = get_html_clear_filename(filename)
                page_list = page_list + '<li class="page-list-item"><a href="' + subfolder_link + newFilename + \
                    '">' + newFilename2 + '</a><span class="page-list-item-time"> - ' + \
                    str(file_modified) + '</span></li>\n'
        page_list = page_list + '</ul>'
    else:
        with open(page_list_item_file, 'r') as f:
            page_list_item = f.read()
        page_list = ""
        for root, dirs, files in os.walk(os.path.join(cwd, "content")):
            dirs[:] = [d for d in dirs if "_" not in d]
            for filename in files:
                p_content = convert_text(os.path.join(root, filename))
                top = os.path.dirname(os.path.join(root, filename))
                top2 = top.replace(os.path.join(cwd, "content"), "", 1)
                if platform != "win32":
                    subfolder = top2.replace("/", "", 1)
                else:
                    subfolder = top2.replace("\\", "", 1)
                if subfolder == "":
                    subfolder_link = ""
                else:
                    subfolder_link = subfolder + "/"
                file_modified = time.ctime(
                    os.path.getmtime(os.path.join(root, filename)))
                file_modified_day = str(datetime.strptime(
                    file_modified, "%a %b %d %H:%M:%S %Y"))[8:10]
                file_modified_year = str(datetime.strptime(
                    file_modified, "%a %b %d %H:%M:%S %Y"))[:4]
                file_modified_month = str(datetime.strptime(
                    file_modified, "%a %b %d %H:%M:%S %Y"))[5:7]
                month_name = calendar.month_name[int(file_modified_month)]
                newFilename = get_html_filename(filename)
                newFilename2 = get_html_clear_filename(filename)
                page_list = page_list + page_list_item.replace("{path}", subfolder_link + newFilename).replace("{name}", newFilename2).replace(
                    "{date}", str(file_modified)).replace("{content}", p_content).replace("{content_short}", p_content[:250] + "...").replace("{day}", file_modified_day).replace("{month}", file_modified_month).replace("{month_name}", month_name).replace("{year}", file_modified_year)

    if home_page_list == "yes" or home_page_list:
        # Open the home page file (index.html) for writing
        home_working_file = open(os.path.join(cwd, outdir, "index.html"), "w")
        home_working_file.write(header_file.read())

        # Make sure there is actually a home page template file
        home_templ_dir = os.path.join(cwd, "templates", "home_page.html")
        if os.path.exists(home_templ_dir):
            home_templ_file = open(home_templ_dir, "r")
            home_working_file.write(home_templ_file.read())
        else:
            print("\nNo home page template file found. Writing page list to index.html")
            home_working_file.write(page_list)

        home_working_file.write(footer_file.read())
        home_working_file.close()

    for root, dirs, files in os.walk(os.path.join(cwd, "content")):
        dirs[:] = [d for d in dirs if "_" not in d]
        for filename in files:
            if not filename.startswith("_"):
                header_file = open(header_file_dir, "r")
                footer_file = open(footer_file_dir, "r")
                newFilename = get_html_filename(filename)
                top = os.path.dirname(os.path.join(root, filename))
                top2 = top.replace(os.path.join(cwd, "content"), "", 1)
                if platform != "win32":
                    subfolder = top2.replace("/", "", 1)
                else:
                    subfolder = top2.replace("\\", "", 1)
                if subfolder == "":
                    currents_working_file = open(
                        os.path.join(cwd, outdir, newFilename), "w")
                else:
                    create_folder(os.path.join(cwd, outdir, subfolder))
                    currents_working_file = open(os.path.join(
                        cwd, outdir, subfolder, newFilename), "w")

                # Write the header
                currents_working_file.write(header_file.read())

                text_cont1 = convert_text(os.path.join(root, filename))
                if "+++++" in text_cont1.splitlines()[1]:
                    page_template_file = text_cont1.splitlines()[0]
                    text_cont1 = text_cont1.replace(
                        text_cont1.splitlines()[0], "")
                    text_cont1 = text_cont1.replace(
                        text_cont1.splitlines()[1], "")
                else:
                    page_template_file = "content_page"

                # Write the text content into the content template and onto
                # the build file
                content_templ_dir = os.path.join(
                    cwd, "templates", page_template_file + ".html")
                if os.path.exists(content_templ_dir):
                    content_templ_file = open(content_templ_dir, "r")
                    content_templ_file1 = content_templ_file.read()
                    content_templ_file2 = content_templ_file1.replace(
                        "{page_content}", text_cont1)
                    currents_working_file.write(content_templ_file2)
                else:
                    currents_working_file.write(text_cont1)

                # Write the footer to the build file
                currents_working_file.write("\n" + footer_file.read())

                # Close the build file
                currents_working_file.close()

    # Find all the nav(something) templates in the `templates` folder and
    # Read their content to the dict
    navs = {}
    for file in os.listdir(os.path.join(cwd, "templates")):
        if "nav" in file:
            nav_cont = open(os.path.join(cwd, "templates", file), "r")
            navs[file.replace(".html", "")] = nav_cont.read()
            nav_cont.close()

    forbidden_dirs = set(["assets", "templates"])
    blended_version_message = "Built with Blended v" + \
        str(app_version)
    build_date = str(datetime.now().date())
    build_time = str(datetime.now().time())
    build_datetime = str(datetime.now())

    # Replace global variables such as site name and language
    for root, dirs, files in os.walk(os.path.join(cwd, outdir)):
        dirs[:] = [d for d in dirs if d not in forbidden_dirs]
        for filename in files:
            if filename != "config.pyc" and filename != "config.py":
                newFilename = get_html_clear_filename(filename)
                page_file = filename.replace(".html", "")
                page_folder = os.path.basename(os.path.dirname(os.path.join(
                    root, filename))).replace("-", "").replace("_", "").title()
                page_folder_orig = os.path.basename(
                    os.path.dirname(os.path.join(root, filename)))
                top = os.path.dirname(os.path.join(root, filename))
                top2 = top.replace(os.path.join(cwd, outdir), "", 1)
                if platform != "win32":
                    subfolder = top2.replace("/", "", 1)
                else:
                    subfolder = top2.replace("\\", "", 1)
                if subfolder == "":
                    subfolder_folder = os.path.join(cwd, outdir, filename)
                else:
                    subfolder_folder = os.path.join(
                        cwd, outdir, subfolder, filename)
                file_modified = time.ctime(
                    os.path.getmtime(os.path.join(root, filename)))
                file_modified_day = str(datetime.strptime(
                    file_modified, "%a %b %d %H:%M:%S %Y"))[8:10]
                file_modified_year = str(datetime.strptime(
                    file_modified, "%a %b %d %H:%M:%S %Y"))[:4]
                file_modified_month = str(datetime.strptime(
                    file_modified, "%a %b %d %H:%M:%S %Y"))[5:7]
                month_name = calendar.month_name[int(file_modified_month)]

                # The Loop!
                for line in fileinput.input(subfolder_folder, inplace=1):
                    for var in custom_variables:
                        line = line.replace(
                            "{" + var + "}", custom_variables[var])
                    if len(plugins) != 0:
                        for i in range(len(plugins)):
                            if sys.version_info[0] < 2:
                                main = importlib.import_module(plugins[i])
                            elif sys.version_info[0] < 3:
                                main = __import__(plugins[i])
                            content = main.main()
                            line = line.replace(
                                "{" + plugins[i] + "}", content)
                    if "{nav" in line:
                        navname = line.split("{")[1].split("}")[0]
                        line = line.replace(
                            "{" + navname + "}", navs[(line.split("{"))[1].split("}")[0]])
                    line = line.replace(
                        "{website_description}", website_description)
                    line = line.replace(
                        "{website_description_long}", website_description_long)
                    line = line.replace("{website_license}", website_license)
                    line = line.replace("{website_language}", website_language)
                    line = line.replace("{website_url}", website_url)
                    line = line.replace("{author_name}", author_name)
                    line = line.replace("{author_bio}", author_bio)
                    line = line.replace("{random_number}",
                                        str(randint(0, 100000000)))
                    line = line.replace("{build_date}", build_date)
                    line = line.replace("{build_time}", build_time)
                    line = line.replace("{build_datetime}", build_datetime)
                    line = line.replace("{page_list}", page_list)
                    line = line.replace("{page_name}", newFilename)
                    line = line.replace("{page_filename}", page_file)
                    line = line.replace("{page_file}", filename)
                    line = line.replace("{" + filename + "_active}", "active")
                    if page_folder != outdir.title():
                        line = line.replace("{page_folder}", page_folder)
                    else:
                        line = line.replace("{page_folder}", "")
                    if page_folder_orig != outdir:
                        line = line.replace(
                            "{page_folder_orig}", page_folder_orig)
                    else:
                        line = line.replace("{page_folder_orig}", "")
                    line = line.replace("{page_date}", str(file_modified))
                    line = line.replace("{page_day}", str(file_modified_day))
                    line = line.replace("{page_year}", str(file_modified_year))
                    line = line.replace(
                        "{page_month}", str(file_modified_month))
                    line = line.replace(
                        "{page_month_name}", str(month_name))
                    line = line.replace("{blended_version}", str(app_version))
                    line = line.replace(
                        "{blended_version_message}", blended_version_message)
                    line = line.replace("{website_name}", website_name)
                    top = os.path.join(cwd, outdir)
                    startinglevel = top.count(os.sep)
                    relative_path = ""
                    level = root.count(os.sep) - startinglevel
                    for i in range(level):
                        relative_path = relative_path + "../"
                    line = line.replace("{relative_root}", relative_path)
                    print(line.rstrip('\n'))
                fileinput.close()

    # Copy the asset folder to the build folder
    if os.path.exists(os.path.join(cwd, "templates", "assets")):
        if os.path.exists(os.path.join(cwd, outdir, "assets")):
            shutil.rmtree(os.path.join(cwd, outdir, "assets"))
        shutil.copytree(os.path.join(cwd, "templates", "assets"),
                        os.path.join(cwd, outdir, "assets"))

    for root, dirs, files in os.walk(os.path.join(cwd, outdir, "assets")):
        for file in files:
            if not file.startswith("_"):
                if (file.endswith(".sass")) or (file.endswith(".scss")):
                    sass_text = open(os.path.join(root, file)).read()
                    text_file = open(os.path.join(
                        root, file[:-4] + "css"), "w")
                    if sass_text != "":
                        text_file.write(sass.compile(string=sass_text))
                    else:
                        print(file + " is empty! Not compiling Sass.")
                    text_file.close()
                if file.endswith(".less"):
                    less_text = open(os.path.join(root, file)).read()
                    text_file = open(os.path.join(
                        root, file[:-4] + "css"), "w")
                    if less_text != "":
                        text_file.write(lesscpy.compile(StringIO(less_text)))
                    else:
                        print(file + " is empty! Not compiling Less.")
                    text_file.close()
                if file.endswith(".styl"):
                    try:
                        styl_text = open(os.path.join(root, file)).read()
                        text_file = open(os.path.join(
                            root, file[:-4] + "css"), "w")
                        if styl_text != "":
                            text_file.write(Stylus().compile(styl_text))
                        else:
                            print(file + " is empty! Not compiling Styl.")
                        text_file.close()
                    except:
                        print("Not able to build with Stylus! Is it installed?")
                        try:
                            subprocess.call(["npm", "install", "-g", "stylus"])
                        except:
                            print("NPM (NodeJS) not working. Is it installed?")
                if file.endswith(".coffee"):
                    coffee_text = open(os.path.join(root, file)).read()
                    text_file = open(os.path.join(root, file[:-6] + "js"), "w")
                    if coffee_text != "":
                        text_file.write(coffeescript.compile(coffee_text))
                    else:
                        print(file + " is empty! Not compiling CoffeeScript.")
                    text_file.close()
                if minify_css:
                    if file.endswith(".css"):
                        css_text = open(os.path.join(root, file)).read()
                        text_file = open(os.path.join(root, file), "w")
                        if css_text != "":
                            text_file.write(cssmin(css_text))
                        text_file.close()
                if minify_js:
                    if file.endswith(".js"):
                        js_text = open(os.path.join(root, file)).read()
                        text_file = open(os.path.join(root, file), "w")
                        if js_text != "":
                            text_file.write(jsmin(js_text))
                        text_file.close()
[ "def", "build_files", "(", "outdir", ")", ":", "# Make sure there is actually a configuration file", "config_file_dir", "=", "os", ".", "path", ".", "join", "(", "cwd", ",", "\"config.py\"", ")", "if", "not", "os", ".", "path", ".", "exists", "(", "config_file_dir", ")", ":", "sys", ".", "exit", "(", "\"There dosen't seem to be a configuration file. Have you run the init command?\"", ")", "else", ":", "sys", ".", "path", ".", "insert", "(", "0", ",", "cwd", ")", "try", ":", "from", "config", "import", "website_name", ",", "website_description", ",", "website_language", ",", "home_page_list", "except", ":", "sys", ".", "exit", "(", "\"ERROR: Some of the crucial configuration values could not be found! Maybe your config.py is too old. Run 'blended init' to fix.\"", ")", "try", ":", "from", "config", "import", "website_description_long", ",", "website_license", ",", "website_url", ",", "author_name", ",", "author_bio", ",", "plugins", ",", "minify_css", ",", "minify_js", ",", "custom_variables", "except", ":", "website_description_long", "=", "\"\"", "website_license", "=", "\"\"", "website_url", "=", "\"\"", "author_name", "=", "\"\"", "author_bio", "=", "\"\"", "plugins", "=", "[", "]", "custom_variables", "=", "{", "}", "minify_css", "=", "False", "minify_js", "=", "False", "print", "(", "\"WARNING: Some of the optional configuration values could not be found! Maybe your config.py is too old. Run 'blended init' to fix.\\n\"", ")", "# Create the build folder", "build_dir", "=", "os", ".", "path", ".", "join", "(", "cwd", ",", "outdir", ")", "if", "\".\"", "not", "in", "outdir", "and", "\"..\"", "not", "in", "outdir", "and", "\"...\"", "not", "in", "outdir", "and", "\"....\"", "not", "in", "outdir", "and", "\".....\"", "not", "in", "outdir", ":", "replace_folder", "(", "build_dir", ")", "# Make sure there is actually a header template file", "header_file_dir", "=", "os", ".", "path", ".", "join", "(", "cwd", ",", "\"templates\"", ",", "\"header.html\"", ")", "if", "not", "os", ".", "path", ".", "exists", "(", "header_file_dir", ")", ":", "sys", ".", "exit", "(", "\"There dosen't seem to be a header template file. You need one to generate.\"", ")", "# Make sure there is actually a footer template file", "footer_file_dir", "=", "os", ".", "path", ".", "join", "(", "cwd", ",", "\"templates\"", ",", "\"footer.html\"", ")", "if", "not", "os", ".", "path", ".", "exists", "(", "footer_file_dir", ")", ":", "sys", ".", "exit", "(", "\"There dosen't seem to be a footer template file. 
You need one to generate.\"", ")", "# Open the header and footer files for reading", "header_file", "=", "open", "(", "header_file_dir", ",", "\"r\"", ")", "footer_file", "=", "open", "(", "footer_file_dir", ",", "\"r\"", ")", "# Create the HTML page listing", "page_list_item_file", "=", "os", ".", "path", ".", "join", "(", "cwd", ",", "\"templates\"", ",", "\"page_list_item.html\"", ")", "if", "not", "os", ".", "path", ".", "exists", "(", "page_list_item_file", ")", ":", "page_list", "=", "'<ul class=\"page-list\">\\n'", "for", "root", ",", "dirs", ",", "files", "in", "os", ".", "walk", "(", "os", ".", "path", ".", "join", "(", "cwd", ",", "\"content\"", ")", ")", ":", "for", "filename", "in", "files", ":", "top", "=", "os", ".", "path", ".", "dirname", "(", "os", ".", "path", ".", "join", "(", "root", ",", "filename", ")", ")", "top2", "=", "top", ".", "replace", "(", "os", ".", "path", ".", "join", "(", "cwd", ",", "\"content\"", ")", ",", "\"\"", ",", "1", ")", "if", "platform", "!=", "\"win32\"", ":", "subfolder", "=", "top2", ".", "replace", "(", "\"/\"", ",", "\"\"", ",", "1", ")", "else", ":", "subfolder", "=", "top2", ".", "replace", "(", "\"\\\\\"", ",", "\"\"", ",", "1", ")", "if", "subfolder", "==", "\"\"", ":", "subfolder_link", "=", "\"\"", "else", ":", "subfolder_link", "=", "subfolder", "+", "\"/\"", "file_modified", "=", "time", ".", "ctime", "(", "os", ".", "path", ".", "getmtime", "(", "os", ".", "path", ".", "join", "(", "root", ",", "filename", ")", ")", ")", "newFilename", "=", "get_html_filename", "(", "filename", ")", "newFilename2", "=", "get_html_clear_filename", "(", "filename", ")", "page_list", "=", "page_list", "+", "'<li class=\"page-list-item\"><a href=\"'", "+", "subfolder_link", "+", "newFilename", "+", "'\">'", "+", "newFilename2", "+", "'</a><span class=\"page-list-item-time\"> - '", "+", "str", "(", "file_modified", ")", "+", "'</span></li>\\n'", "page_list", "=", "page_list", "+", "'</ul>'", "else", ":", "with", "open", "(", "page_list_item_file", ",", "'r'", ")", "as", "f", ":", "page_list_item", "=", "f", ".", "read", "(", ")", "page_list", "=", "\"\"", "for", "root", ",", "dirs", ",", "files", "in", "os", ".", "walk", "(", "os", ".", "path", ".", "join", "(", "cwd", ",", "\"content\"", ")", ")", ":", "dirs", "[", ":", "]", "=", "[", "d", "for", "d", "in", "dirs", "if", "\"_\"", "not", "in", "d", "]", "for", "filename", "in", "files", ":", "p_content", "=", "convert_text", "(", "os", ".", "path", ".", "join", "(", "root", ",", "filename", ")", ")", "top", "=", "os", ".", "path", ".", "dirname", "(", "os", ".", "path", ".", "join", "(", "root", ",", "filename", ")", ")", "top2", "=", "top", ".", "replace", "(", "os", ".", "path", ".", "join", "(", "cwd", ",", "\"content\"", ")", ",", "\"\"", ",", "1", ")", "if", "platform", "!=", "\"win32\"", ":", "subfolder", "=", "top2", ".", "replace", "(", "\"/\"", ",", "\"\"", ",", "1", ")", "else", ":", "subfolder", "=", "top2", ".", "replace", "(", "\"\\\\\"", ",", "\"\"", ",", "1", ")", "if", "subfolder", "==", "\"\"", ":", "subfolder_link", "=", "\"\"", "else", ":", "subfolder_link", "=", "subfolder", "+", "\"/\"", "file_modified", "=", "time", ".", "ctime", "(", "os", ".", "path", ".", "getmtime", "(", "os", ".", "path", ".", "join", "(", "root", ",", "filename", ")", ")", ")", "file_modified_day", "=", "str", "(", "datetime", ".", "strptime", "(", "file_modified", ",", "\"%a %b %d %H:%M:%S %Y\"", ")", ")", "[", "8", ":", "10", "]", "file_modified_year", "=", "str", "(", "datetime", ".", "strptime", "(", 
"file_modified", ",", "\"%a %b %d %H:%M:%S %Y\"", ")", ")", "[", ":", "4", "]", "file_modified_month", "=", "str", "(", "datetime", ".", "strptime", "(", "file_modified", ",", "\"%a %b %d %H:%M:%S %Y\"", ")", ")", "[", "5", ":", "7", "]", "month_name", "=", "calendar", ".", "month_name", "[", "int", "(", "file_modified_month", ")", "]", "newFilename", "=", "get_html_filename", "(", "filename", ")", "newFilename2", "=", "get_html_clear_filename", "(", "filename", ")", "page_list", "=", "page_list", "+", "page_list_item", ".", "replace", "(", "\"{path}\"", ",", "subfolder_link", "+", "newFilename", ")", ".", "replace", "(", "\"{name}\"", ",", "newFilename2", ")", ".", "replace", "(", "\"{date}\"", ",", "str", "(", "file_modified", ")", ")", ".", "replace", "(", "\"{content}\"", ",", "p_content", ")", ".", "replace", "(", "\"{content_short}\"", ",", "p_content", "[", ":", "250", "]", "+", "\"...\"", ")", ".", "replace", "(", "\"{day}\"", ",", "file_modified_day", ")", ".", "replace", "(", "\"{month}\"", ",", "file_modified_month", ")", ".", "replace", "(", "\"{month_name}\"", ",", "month_name", ")", ".", "replace", "(", "\"{year}\"", ",", "file_modified_year", ")", "if", "home_page_list", "==", "\"yes\"", "or", "home_page_list", ":", "# Open the home page file (index.html) for writing", "home_working_file", "=", "open", "(", "os", ".", "path", ".", "join", "(", "cwd", ",", "outdir", ",", "\"index.html\"", ")", ",", "\"w\"", ")", "home_working_file", ".", "write", "(", "header_file", ".", "read", "(", ")", ")", "# Make sure there is actually a home page template file", "home_templ_dir", "=", "os", ".", "path", ".", "join", "(", "cwd", ",", "\"templates\"", ",", "\"home_page.html\"", ")", "if", "os", ".", "path", ".", "exists", "(", "home_templ_dir", ")", ":", "home_templ_file", "=", "open", "(", "home_templ_dir", ",", "\"r\"", ")", "home_working_file", ".", "write", "(", "home_templ_file", ".", "read", "(", ")", ")", "else", ":", "print", "(", "\"\\nNo home page template file found. 
Writing page list to index.html\"", ")", "home_working_file", ".", "write", "(", "page_list", ")", "home_working_file", ".", "write", "(", "footer_file", ".", "read", "(", ")", ")", "home_working_file", ".", "close", "(", ")", "for", "root", ",", "dirs", ",", "files", "in", "os", ".", "walk", "(", "os", ".", "path", ".", "join", "(", "cwd", ",", "\"content\"", ")", ")", ":", "dirs", "[", ":", "]", "=", "[", "d", "for", "d", "in", "dirs", "if", "\"_\"", "not", "in", "d", "]", "for", "filename", "in", "files", ":", "if", "not", "filename", ".", "startswith", "(", "\"_\"", ")", ":", "header_file", "=", "open", "(", "header_file_dir", ",", "\"r\"", ")", "footer_file", "=", "open", "(", "footer_file_dir", ",", "\"r\"", ")", "newFilename", "=", "get_html_filename", "(", "filename", ")", "top", "=", "os", ".", "path", ".", "dirname", "(", "os", ".", "path", ".", "join", "(", "root", ",", "filename", ")", ")", "top2", "=", "top", ".", "replace", "(", "os", ".", "path", ".", "join", "(", "cwd", ",", "\"content\"", ")", ",", "\"\"", ",", "1", ")", "if", "platform", "!=", "\"win32\"", ":", "subfolder", "=", "top2", ".", "replace", "(", "\"/\"", ",", "\"\"", ",", "1", ")", "else", ":", "subfolder", "=", "top2", ".", "replace", "(", "\"\\\\\"", ",", "\"\"", ",", "1", ")", "if", "subfolder", "==", "\"\"", ":", "currents_working_file", "=", "open", "(", "os", ".", "path", ".", "join", "(", "cwd", ",", "outdir", ",", "newFilename", ")", ",", "\"w\"", ")", "else", ":", "create_folder", "(", "os", ".", "path", ".", "join", "(", "cwd", ",", "outdir", ",", "subfolder", ")", ")", "currents_working_file", "=", "open", "(", "os", ".", "path", ".", "join", "(", "cwd", ",", "outdir", ",", "subfolder", ",", "newFilename", ")", ",", "\"w\"", ")", "# Write the header", "currents_working_file", ".", "write", "(", "header_file", ".", "read", "(", ")", ")", "text_cont1", "=", "convert_text", "(", "os", ".", "path", ".", "join", "(", "root", ",", "filename", ")", ")", "if", "\"+++++\"", "in", "text_cont1", ".", "splitlines", "(", ")", "[", "1", "]", ":", "page_template_file", "=", "text_cont1", ".", "splitlines", "(", ")", "[", "0", "]", "text_cont1", "=", "text_cont1", ".", "replace", "(", "text_cont1", ".", "splitlines", "(", ")", "[", "0", "]", ",", "\"\"", ")", "text_cont1", "=", "text_cont1", ".", "replace", "(", "text_cont1", ".", "splitlines", "(", ")", "[", "1", "]", ",", "\"\"", ")", "else", ":", "page_template_file", "=", "\"content_page\"", "# Write the text content into the content template and onto the", "# build file", "content_templ_dir", "=", "os", ".", "path", ".", "join", "(", "cwd", ",", "\"templates\"", ",", "page_template_file", "+", "\".html\"", ")", "if", "os", ".", "path", ".", "exists", "(", "content_templ_dir", ")", ":", "content_templ_file", "=", "open", "(", "content_templ_dir", ",", "\"r\"", ")", "content_templ_file1", "=", "content_templ_file", ".", "read", "(", ")", "content_templ_file2", "=", "content_templ_file1", ".", "replace", "(", "\"{page_content}\"", ",", "text_cont1", ")", "currents_working_file", ".", "write", "(", "content_templ_file2", ")", "else", ":", "currents_working_file", ".", "write", "(", "text_cont1", ")", "# Write the footer to the build file", "currents_working_file", ".", "write", "(", "\"\\n\"", "+", "footer_file", ".", "read", "(", ")", ")", "# Close the build file", "currents_working_file", ".", "close", "(", ")", "# Find all the nav(something) templates in the `templates` folder and", "# Read their content to the dict", "navs", "=", "{", "}", "for", "file", "in", 
"os", ".", "listdir", "(", "os", ".", "path", ".", "join", "(", "cwd", ",", "\"templates\"", ")", ")", ":", "if", "\"nav\"", "in", "file", ":", "nav_cont", "=", "open", "(", "os", ".", "path", ".", "join", "(", "cwd", ",", "\"templates\"", ",", "file", ")", ",", "\"r\"", ")", "navs", "[", "file", ".", "replace", "(", "\".html\"", ",", "\"\"", ")", "]", "=", "nav_cont", ".", "read", "(", ")", "nav_cont", ".", "close", "(", ")", "forbidden_dirs", "=", "set", "(", "[", "\"assets\"", ",", "\"templates\"", "]", ")", "blended_version_message", "=", "\"Built with Blended v\"", "+", "str", "(", "app_version", ")", "build_date", "=", "str", "(", "datetime", ".", "now", "(", ")", ".", "date", "(", ")", ")", "build_time", "=", "str", "(", "datetime", ".", "now", "(", ")", ".", "time", "(", ")", ")", "build_datetime", "=", "str", "(", "datetime", ".", "now", "(", ")", ")", "# Replace global variables such as site name and language", "for", "root", ",", "dirs", ",", "files", "in", "os", ".", "walk", "(", "os", ".", "path", ".", "join", "(", "cwd", ",", "outdir", ")", ")", ":", "dirs", "[", ":", "]", "=", "[", "d", "for", "d", "in", "dirs", "if", "d", "not", "in", "forbidden_dirs", "]", "for", "filename", "in", "files", ":", "if", "filename", "!=", "\"config.pyc\"", "and", "filename", "!=", "\"config.py\"", ":", "newFilename", "=", "get_html_clear_filename", "(", "filename", ")", "page_file", "=", "filename", ".", "replace", "(", "\".html\"", ",", "\"\"", ")", "page_folder", "=", "os", ".", "path", ".", "basename", "(", "os", ".", "path", ".", "dirname", "(", "os", ".", "path", ".", "join", "(", "root", ",", "filename", ")", ")", ")", ".", "replace", "(", "\"-\"", ",", "\"\"", ")", ".", "replace", "(", "\"_\"", ",", "\"\"", ")", ".", "title", "(", ")", "page_folder_orig", "=", "os", ".", "path", ".", "basename", "(", "os", ".", "path", ".", "dirname", "(", "os", ".", "path", ".", "join", "(", "root", ",", "filename", ")", ")", ")", "top", "=", "os", ".", "path", ".", "dirname", "(", "os", ".", "path", ".", "join", "(", "root", ",", "filename", ")", ")", "top2", "=", "top", ".", "replace", "(", "os", ".", "path", ".", "join", "(", "cwd", ",", "outdir", ")", ",", "\"\"", ",", "1", ")", "if", "platform", "!=", "\"win32\"", ":", "subfolder", "=", "top2", ".", "replace", "(", "\"/\"", ",", "\"\"", ",", "1", ")", "else", ":", "subfolder", "=", "top2", ".", "replace", "(", "\"\\\\\"", ",", "\"\"", ",", "1", ")", "if", "subfolder", "==", "\"\"", ":", "subfolder_folder", "=", "os", ".", "path", ".", "join", "(", "cwd", ",", "outdir", ",", "filename", ")", "else", ":", "subfolder_folder", "=", "os", ".", "path", ".", "join", "(", "cwd", ",", "outdir", ",", "subfolder", ",", "filename", ")", "file_modified", "=", "time", ".", "ctime", "(", "os", ".", "path", ".", "getmtime", "(", "os", ".", "path", ".", "join", "(", "root", ",", "filename", ")", ")", ")", "file_modified_day", "=", "str", "(", "datetime", ".", "strptime", "(", "file_modified", ",", "\"%a %b %d %H:%M:%S %Y\"", ")", ")", "[", "8", ":", "10", "]", "file_modified_year", "=", "str", "(", "datetime", ".", "strptime", "(", "file_modified", ",", "\"%a %b %d %H:%M:%S %Y\"", ")", ")", "[", ":", "4", "]", "file_modified_month", "=", "str", "(", "datetime", ".", "strptime", "(", "file_modified", ",", "\"%a %b %d %H:%M:%S %Y\"", ")", ")", "[", "5", ":", "7", "]", "month_name", "=", "calendar", ".", "month_name", "[", "int", "(", "file_modified_month", ")", "]", "# The Loop!", "for", "line", "in", "fileinput", ".", "input", "(", "subfolder_folder", ",", 
"inplace", "=", "1", ")", ":", "for", "var", "in", "custom_variables", ":", "line", "=", "line", ".", "replace", "(", "\"{\"", "+", "var", "+", "\"}\"", ",", "custom_variables", "[", "var", "]", ")", "if", "len", "(", "plugins", ")", "!=", "0", ":", "for", "i", "in", "range", "(", "len", "(", "plugins", ")", ")", ":", "if", "sys", ".", "version_info", "[", "0", "]", "<", "2", ":", "main", "=", "importlib", ".", "import_module", "(", "plugins", "[", "i", "]", ")", "elif", "sys", ".", "version_info", "[", "0", "]", "<", "3", ":", "main", "=", "__import__", "(", "plugins", "[", "i", "]", ")", "content", "=", "main", ".", "main", "(", ")", "line", "=", "line", ".", "replace", "(", "\"{\"", "+", "plugins", "[", "i", "]", "+", "\"}\"", ",", "content", ")", "if", "\"{nav\"", "in", "line", ":", "navname", "=", "line", ".", "split", "(", "\"{\"", ")", "[", "1", "]", ".", "split", "(", "\"}\"", ")", "[", "0", "]", "line", "=", "line", ".", "replace", "(", "\"{\"", "+", "navname", "+", "\"}\"", ",", "navs", "[", "(", "line", ".", "split", "(", "\"{\"", ")", ")", "[", "1", "]", ".", "split", "(", "\"}\"", ")", "[", "0", "]", "]", ")", "line", "=", "line", ".", "replace", "(", "\"{website_description}\"", ",", "website_description", ")", "line", "=", "line", ".", "replace", "(", "\"{website_description_long}\"", ",", "website_description_long", ")", "line", "=", "line", ".", "replace", "(", "\"{website_license}\"", ",", "website_license", ")", "line", "=", "line", ".", "replace", "(", "\"{website_language}\"", ",", "website_language", ")", "line", "=", "line", ".", "replace", "(", "\"{website_url}\"", ",", "website_url", ")", "line", "=", "line", ".", "replace", "(", "\"{author_name}\"", ",", "author_name", ")", "line", "=", "line", ".", "replace", "(", "\"{author_bio}\"", ",", "author_bio", ")", "line", "=", "line", ".", "replace", "(", "\"{random_number}\"", ",", "str", "(", "randint", "(", "0", ",", "100000000", ")", ")", ")", "line", "=", "line", ".", "replace", "(", "\"{build_date}\"", ",", "build_date", ")", "line", "=", "line", ".", "replace", "(", "\"{build_time}\"", ",", "build_time", ")", "line", "=", "line", ".", "replace", "(", "\"{build_datetime}\"", ",", "build_datetime", ")", "line", "=", "line", ".", "replace", "(", "\"{page_list}\"", ",", "page_list", ")", "line", "=", "line", ".", "replace", "(", "\"{page_name}\"", ",", "newFilename", ")", "line", "=", "line", ".", "replace", "(", "\"{page_filename}\"", ",", "page_file", ")", "line", "=", "line", ".", "replace", "(", "\"{page_file}\"", ",", "filename", ")", "line", "=", "line", ".", "replace", "(", "\"{\"", "+", "filename", "+", "\"_active}\"", ",", "\"active\"", ")", "if", "page_folder", "!=", "outdir", ".", "title", "(", ")", ":", "line", "=", "line", ".", "replace", "(", "\"{page_folder}\"", ",", "page_folder", ")", "else", ":", "line", "=", "line", ".", "replace", "(", "\"{page_folder}\"", ",", "\"\"", ")", "if", "page_folder_orig", "!=", "outdir", ":", "line", "=", "line", ".", "replace", "(", "\"{page_folder_orig}\"", ",", "page_folder_orig", ")", "else", ":", "line", "=", "line", ".", "replace", "(", "\"{page_folder_orig}\"", ",", "\"\"", ")", "line", "=", "line", ".", "replace", "(", "\"{page_date}\"", ",", "str", "(", "file_modified", ")", ")", "line", "=", "line", ".", "replace", "(", "\"{page_day}\"", ",", "str", "(", "file_modified_day", ")", ")", "line", "=", "line", ".", "replace", "(", "\"{page_year}\"", ",", "str", "(", "file_modified_year", ")", ")", "line", "=", "line", ".", "replace", "(", "\"{page_month}\"", ",", 
"str", "(", "file_modified_month", ")", ")", "line", "=", "line", ".", "replace", "(", "\"{page_month_name}\"", ",", "str", "(", "month_name", ")", ")", "line", "=", "line", ".", "replace", "(", "\"{blended_version}\"", ",", "str", "(", "app_version", ")", ")", "line", "=", "line", ".", "replace", "(", "\"{blended_version_message}\"", ",", "blended_version_message", ")", "line", "=", "line", ".", "replace", "(", "\"{website_name}\"", ",", "website_name", ")", "top", "=", "os", ".", "path", ".", "join", "(", "cwd", ",", "outdir", ")", "startinglevel", "=", "top", ".", "count", "(", "os", ".", "sep", ")", "relative_path", "=", "\"\"", "level", "=", "root", ".", "count", "(", "os", ".", "sep", ")", "-", "startinglevel", "for", "i", "in", "range", "(", "level", ")", ":", "relative_path", "=", "relative_path", "+", "\"../\"", "line", "=", "line", ".", "replace", "(", "\"{relative_root}\"", ",", "relative_path", ")", "print", "(", "line", ".", "rstrip", "(", "'\\n'", ")", ")", "fileinput", ".", "close", "(", ")", "# Copy the asset folder to the build folder", "if", "os", ".", "path", ".", "exists", "(", "os", ".", "path", ".", "join", "(", "cwd", ",", "\"templates\"", ",", "\"assets\"", ")", ")", ":", "if", "os", ".", "path", ".", "exists", "(", "os", ".", "path", ".", "join", "(", "cwd", ",", "outdir", ",", "\"assets\"", ")", ")", ":", "shutil", ".", "rmtree", "(", "os", ".", "path", ".", "join", "(", "cwd", ",", "outdir", ",", "\"assets\"", ")", ")", "shutil", ".", "copytree", "(", "os", ".", "path", ".", "join", "(", "cwd", ",", "\"templates\"", ",", "\"assets\"", ")", ",", "os", ".", "path", ".", "join", "(", "cwd", ",", "outdir", ",", "\"assets\"", ")", ")", "for", "root", ",", "dirs", ",", "files", "in", "os", ".", "walk", "(", "os", ".", "path", ".", "join", "(", "cwd", ",", "outdir", ",", "\"assets\"", ")", ")", ":", "for", "file", "in", "files", ":", "if", "not", "file", ".", "startswith", "(", "\"_\"", ")", ":", "if", "(", "file", ".", "endswith", "(", "\".sass\"", ")", ")", "or", "(", "file", ".", "endswith", "(", "\".scss\"", ")", ")", ":", "sass_text", "=", "open", "(", "os", ".", "path", ".", "join", "(", "root", ",", "file", ")", ")", ".", "read", "(", ")", "text_file", "=", "open", "(", "os", ".", "path", ".", "join", "(", "root", ",", "file", "[", ":", "-", "4", "]", "+", "\"css\"", ")", ",", "\"w\"", ")", "if", "sass_text", "!=", "\"\"", ":", "text_file", ".", "write", "(", "sass", ".", "compile", "(", "string", "=", "sass_text", ")", ")", "else", ":", "print", "(", "file", "+", "\" is empty! Not compiling Sass.\"", ")", "text_file", ".", "close", "(", ")", "if", "file", ".", "endswith", "(", "\".less\"", ")", ":", "less_text", "=", "open", "(", "os", ".", "path", ".", "join", "(", "root", ",", "file", ")", ")", ".", "read", "(", ")", "text_file", "=", "open", "(", "os", ".", "path", ".", "join", "(", "root", ",", "file", "[", ":", "-", "4", "]", "+", "\"css\"", ")", ",", "\"w\"", ")", "if", "less_text", "!=", "\"\"", ":", "text_file", ".", "write", "(", "lesscpy", ".", "compile", "(", "StringIO", "(", "less_text", ")", ")", ")", "else", ":", "print", "(", "file", "+", "\" is empty! 
Not compiling Less.\"", ")", "text_file", ".", "close", "(", ")", "if", "file", ".", "endswith", "(", "\".styl\"", ")", ":", "try", ":", "styl_text", "=", "open", "(", "os", ".", "path", ".", "join", "(", "root", ",", "file", ")", ")", ".", "read", "(", ")", "text_file", "=", "open", "(", "os", ".", "path", ".", "join", "(", "root", ",", "file", "[", ":", "-", "4", "]", "+", "\"css\"", ")", ",", "\"w\"", ")", "if", "styl_text", "!=", "\"\"", ":", "text_file", ".", "write", "(", "Stylus", "(", ")", ".", "compile", "(", "styl_text", ")", ")", "else", ":", "print", "(", "file", "+", "\" is empty! Not compiling Styl.\"", ")", "text_file", ".", "close", "(", ")", "except", ":", "print", "(", "\"Not able to build with Stylus! Is it installed?\"", ")", "try", ":", "subprocess", ".", "call", "[", "\"npm\"", ",", "\"install\"", ",", "\"-g\"", ",", "\"stylus\"", "]", "except", ":", "print", "(", "\"NPM (NodeJS) not working. Is it installed?\"", ")", "if", "file", ".", "endswith", "(", "\".coffee\"", ")", ":", "coffee_text", "=", "open", "(", "os", ".", "path", ".", "join", "(", "root", ",", "file", ")", ")", ".", "read", "(", ")", "text_file", "=", "open", "(", "os", ".", "path", ".", "join", "(", "root", ",", "file", "[", ":", "-", "6", "]", "+", "\"js\"", ")", ",", "\"w\"", ")", "if", "coffee_text", "!=", "\"\"", ":", "text_file", ".", "write", "(", "coffeescript", ".", "compile", "(", "coffee_text", ")", ")", "else", ":", "print", "(", "file", "+", "\" is empty! Not compiling CoffeeScript.\"", ")", "text_file", ".", "close", "(", ")", "if", "minify_css", ":", "if", "file", ".", "endswith", "(", "\".css\"", ")", ":", "css_text", "=", "open", "(", "os", ".", "path", ".", "join", "(", "root", ",", "file", ")", ")", ".", "read", "(", ")", "text_file", "=", "open", "(", "os", ".", "path", ".", "join", "(", "root", ",", "file", ")", ",", "\"w\"", ")", "if", "css_text", "!=", "\"\"", ":", "text_file", ".", "write", "(", "cssmin", "(", "css_text", ")", ")", "text_file", ".", "close", "(", ")", "if", "minify_js", ":", "if", "file", ".", "endswith", "(", "\".js\"", ")", ":", "js_text", "=", "open", "(", "os", ".", "path", ".", "join", "(", "root", ",", "file", ")", ")", ".", "read", "(", ")", "text_file", "=", "open", "(", "os", ".", "path", ".", "join", "(", "root", ",", "file", ")", ",", "\"w\"", ")", "if", "js_text", "!=", "\"\"", ":", "text_file", ".", "write", "(", "jsmin", "(", "js_text", ")", ")", "text_file", ".", "close", "(", ")" ]
Build the files!
[ "Build", "the", "files!" ]
python
train
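The core technique in build_files above is in-place placeholder substitution via fileinput: every `{variable}` token in a built page is replaced while the file is rewritten line by line. A minimal, self-contained sketch of just that pattern (the file and variable names below are made up for the demo):

import fileinput
import os
import tempfile

# Hypothetical page file containing one {placeholder}, created only for this demo.
path = os.path.join(tempfile.mkdtemp(), "index.html")
with open(path, "w") as f:
    f.write("<title>{website_name}</title>\n")

custom_variables = {"website_name": "Example Site"}  # stand-in for config.py values

# inplace=1 redirects stdout into the file, so print() rewrites each line.
for line in fileinput.input(path, inplace=1):
    for var in custom_variables:
        line = line.replace("{" + var + "}", custom_variables[var])
    print(line.rstrip("\n"))
fileinput.close()

with open(path) as f:
    print(f.read())  # <title>Example Site</title>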
StackStorm/pybind
pybind/nos/v6_0_2f/rbridge_id/vrf/address_family/ip/unicast/ip/route/__init__.py
https://github.com/StackStorm/pybind/blob/44c467e71b2b425be63867aba6e6fa28b2cfe7fb/pybind/nos/v6_0_2f/rbridge_id/vrf/address_family/ip/unicast/ip/route/__init__.py#L199-L220
def _set_static_route_oif(self, v, load=False):
    """
    Setter method for static_route_oif, mapped from YANG variable /rbridge_id/vrf/address_family/ip/unicast/ip/route/static_route_oif (list)
    If this variable is read-only (config: false) in the source YANG file, then
    _set_static_route_oif is considered as a private method. Backends looking to
    populate this variable should do so via calling thisObj._set_static_route_oif() directly.
    """
    if hasattr(v, "_utype"):
        v = v._utype(v)
    try:
        t = YANGDynClass(v,base=YANGListType("static_route_dest static_route_oif_type static_route_oif_name",static_route_oif.static_route_oif, yang_name="static-route-oif", rest_name="static-route-oif", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='static-route-dest static-route-oif-type static-route-oif-name', extensions={u'tailf-common': {u'info': u'Route with egress interface', u'cli-no-key-completion': None, u'cli-suppress-mode': None, u'cli-suppress-list-no': None, u'cli-full-no': None, u'cli-drop-node-name': None, u'callpoint': u'rtm-if-static-route'}}), is_container='list', yang_name="static-route-oif", rest_name="static-route-oif", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Route with egress interface', u'cli-no-key-completion': None, u'cli-suppress-mode': None, u'cli-suppress-list-no': None, u'cli-full-no': None, u'cli-drop-node-name': None, u'callpoint': u'rtm-if-static-route'}}, namespace='urn:brocade.com:mgmt:brocade-rtm', defining_module='brocade-rtm', yang_type='list', is_config=True)
    except (TypeError, ValueError):
        raise ValueError({
            'error-string': """static_route_oif must be of a type compatible with list""",
            'defined-type': "list",
            'generated-type': """YANGDynClass(base=YANGListType("static_route_dest static_route_oif_type static_route_oif_name",static_route_oif.static_route_oif, yang_name="static-route-oif", rest_name="static-route-oif", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='static-route-dest static-route-oif-type static-route-oif-name', extensions={u'tailf-common': {u'info': u'Route with egress interface', u'cli-no-key-completion': None, u'cli-suppress-mode': None, u'cli-suppress-list-no': None, u'cli-full-no': None, u'cli-drop-node-name': None, u'callpoint': u'rtm-if-static-route'}}), is_container='list', yang_name="static-route-oif", rest_name="static-route-oif", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Route with egress interface', u'cli-no-key-completion': None, u'cli-suppress-mode': None, u'cli-suppress-list-no': None, u'cli-full-no': None, u'cli-drop-node-name': None, u'callpoint': u'rtm-if-static-route'}}, namespace='urn:brocade.com:mgmt:brocade-rtm', defining_module='brocade-rtm', yang_type='list', is_config=True)""",
        })

    self.__static_route_oif = t
    if hasattr(self, '_set'):
        self._set()
[ "def", "_set_static_route_oif", "(", "self", ",", "v", ",", "load", "=", "False", ")", ":", "if", "hasattr", "(", "v", ",", "\"_utype\"", ")", ":", "v", "=", "v", ".", "_utype", "(", "v", ")", "try", ":", "t", "=", "YANGDynClass", "(", "v", ",", "base", "=", "YANGListType", "(", "\"static_route_dest static_route_oif_type static_route_oif_name\"", ",", "static_route_oif", ".", "static_route_oif", ",", "yang_name", "=", "\"static-route-oif\"", ",", "rest_name", "=", "\"static-route-oif\"", ",", "parent", "=", "self", ",", "is_container", "=", "'list'", ",", "user_ordered", "=", "False", ",", "path_helper", "=", "self", ".", "_path_helper", ",", "yang_keys", "=", "'static-route-dest static-route-oif-type static-route-oif-name'", ",", "extensions", "=", "{", "u'tailf-common'", ":", "{", "u'info'", ":", "u'Route with egress interface'", ",", "u'cli-no-key-completion'", ":", "None", ",", "u'cli-suppress-mode'", ":", "None", ",", "u'cli-suppress-list-no'", ":", "None", ",", "u'cli-full-no'", ":", "None", ",", "u'cli-drop-node-name'", ":", "None", ",", "u'callpoint'", ":", "u'rtm-if-static-route'", "}", "}", ")", ",", "is_container", "=", "'list'", ",", "yang_name", "=", "\"static-route-oif\"", ",", "rest_name", "=", "\"static-route-oif\"", ",", "parent", "=", "self", ",", "path_helper", "=", "self", ".", "_path_helper", ",", "extmethods", "=", "self", ".", "_extmethods", ",", "register_paths", "=", "True", ",", "extensions", "=", "{", "u'tailf-common'", ":", "{", "u'info'", ":", "u'Route with egress interface'", ",", "u'cli-no-key-completion'", ":", "None", ",", "u'cli-suppress-mode'", ":", "None", ",", "u'cli-suppress-list-no'", ":", "None", ",", "u'cli-full-no'", ":", "None", ",", "u'cli-drop-node-name'", ":", "None", ",", "u'callpoint'", ":", "u'rtm-if-static-route'", "}", "}", ",", "namespace", "=", "'urn:brocade.com:mgmt:brocade-rtm'", ",", "defining_module", "=", "'brocade-rtm'", ",", "yang_type", "=", "'list'", ",", "is_config", "=", "True", ")", "except", "(", "TypeError", ",", "ValueError", ")", ":", "raise", "ValueError", "(", "{", "'error-string'", ":", "\"\"\"static_route_oif must be of a type compatible with list\"\"\"", ",", "'defined-type'", ":", "\"list\"", ",", "'generated-type'", ":", "\"\"\"YANGDynClass(base=YANGListType(\"static_route_dest static_route_oif_type static_route_oif_name\",static_route_oif.static_route_oif, yang_name=\"static-route-oif\", rest_name=\"static-route-oif\", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='static-route-dest static-route-oif-type static-route-oif-name', extensions={u'tailf-common': {u'info': u'Route with egress interface', u'cli-no-key-completion': None, u'cli-suppress-mode': None, u'cli-suppress-list-no': None, u'cli-full-no': None, u'cli-drop-node-name': None, u'callpoint': u'rtm-if-static-route'}}), is_container='list', yang_name=\"static-route-oif\", rest_name=\"static-route-oif\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Route with egress interface', u'cli-no-key-completion': None, u'cli-suppress-mode': None, u'cli-suppress-list-no': None, u'cli-full-no': None, u'cli-drop-node-name': None, u'callpoint': u'rtm-if-static-route'}}, namespace='urn:brocade.com:mgmt:brocade-rtm', defining_module='brocade-rtm', yang_type='list', is_config=True)\"\"\"", ",", "}", ")", "self", ".", "__static_route_oif", "=", "t", "if", "hasattr", "(", "self", ",", "'_set'", ")", ":", "self", ".", "_set", "(", ")" ]
Setter method for static_route_oif, mapped from YANG variable /rbridge_id/vrf/address_family/ip/unicast/ip/route/static_route_oif (list)
If this variable is read-only (config: false) in the source YANG file, then
_set_static_route_oif is considered as a private method. Backends looking to
populate this variable should do so via calling thisObj._set_static_route_oif() directly.
[ "Setter", "method", "for", "static_route_oif", "mapped", "from", "YANG", "variable", "/", "rbridge_id", "/", "vrf", "/", "address_family", "/", "ip", "/", "unicast", "/", "ip", "/", "route", "/", "static_route_oif", "(", "list", ")", "If", "this", "variable", "is", "read", "-", "only", "(", "config", ":", "false", ")", "in", "the", "source", "YANG", "file", "then", "_set_static_route_oif", "is", "considered", "as", "a", "private", "method", ".", "Backends", "looking", "to", "populate", "this", "variable", "should", "do", "so", "via", "calling", "thisObj", ".", "_set_static_route_oif", "()", "directly", "." ]
python
train
pyblish/pyblish-qml
pyblish_qml/models.py
https://github.com/pyblish/pyblish-qml/blob/6095d18b2ec0afd0409a9b1a17e53b0658887283/pyblish_qml/models.py#L821-L828
def _add_rule(self, group, role, value):
    """Implementation detail"""
    if role not in group:
        group[role] = list()
    group[role].append(value)
    self.invalidate()
[ "def", "_add_rule", "(", "self", ",", "group", ",", "role", ",", "value", ")", ":", "if", "role", "not", "in", "group", ":", "group", "[", "role", "]", "=", "list", "(", ")", "group", "[", "role", "]", ".", "append", "(", "value", ")", "self", ".", "invalidate", "(", ")" ]
Implementation detail
[ "Implementation", "detail" ]
python
train
Parsl/parsl
parsl/dataflow/usage_tracking/usage.py
https://github.com/Parsl/parsl/blob/d7afb3bc37f50dcf224ae78637944172edb35dac/parsl/dataflow/usage_tracking/usage.py#L156-L176
def construct_start_message(self):
    """Collect preliminary run info at the start of the DFK.

    Returns :
          - Message dict dumped as json string, ready for UDP
    """
    uname = getpass.getuser().encode('latin1')
    hashed_username = hashlib.sha256(uname).hexdigest()[0:10]
    hname = socket.gethostname().encode('latin1')
    hashed_hostname = hashlib.sha256(hname).hexdigest()[0:10]
    message = {'uuid': self.uuid,
               'uname': hashed_username,
               'hname': hashed_hostname,
               'test': self.test_mode,
               'parsl_v': self.parsl_version,
               'python_v': self.python_version,
               'os': platform.system(),
               'os_v': platform.release(),
               'start': time.time()}

    return json.dumps(message)
[ "def", "construct_start_message", "(", "self", ")", ":", "uname", "=", "getpass", ".", "getuser", "(", ")", ".", "encode", "(", "'latin1'", ")", "hashed_username", "=", "hashlib", ".", "sha256", "(", "uname", ")", ".", "hexdigest", "(", ")", "[", "0", ":", "10", "]", "hname", "=", "socket", ".", "gethostname", "(", ")", ".", "encode", "(", "'latin1'", ")", "hashed_hostname", "=", "hashlib", ".", "sha256", "(", "hname", ")", ".", "hexdigest", "(", ")", "[", "0", ":", "10", "]", "message", "=", "{", "'uuid'", ":", "self", ".", "uuid", ",", "'uname'", ":", "hashed_username", ",", "'hname'", ":", "hashed_hostname", ",", "'test'", ":", "self", ".", "test_mode", ",", "'parsl_v'", ":", "self", ".", "parsl_version", ",", "'python_v'", ":", "self", ".", "python_version", ",", "'os'", ":", "platform", ".", "system", "(", ")", ",", "'os_v'", ":", "platform", ".", "release", "(", ")", ",", "'start'", ":", "time", ".", "time", "(", ")", "}", "return", "json", ".", "dumps", "(", "message", ")" ]
Collect preliminary run info at the start of the DFK.

Returns :
      - Message dict dumped as json string, ready for UDP
[ "Collect", "preliminary", "run", "info", "at", "the", "start", "of", "the", "DFK", "." ]
python
valid
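The anonymisation step above is easy to demonstrate on its own: identifiers are reduced to the first 10 hex characters of a SHA-256 digest before being reported. A runnable sketch of just that step:

import getpass
import hashlib
import socket

# Same hashing as construct_start_message: encode, digest, keep 10 hex chars.
uname = getpass.getuser().encode('latin1')
hname = socket.gethostname().encode('latin1')
print(hashlib.sha256(uname).hexdigest()[0:10])  # short fingerprint, not the raw name
print(hashlib.sha256(hname).hexdigest()[0:10])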
baruwa-enterprise/BaruwaAPI
BaruwaAPI/resource.py
https://github.com/baruwa-enterprise/BaruwaAPI/blob/53335b377ccfd388e42f4f240f181eed72f51180/BaruwaAPI/resource.py#L151-L156
def update_domain(self, domainid, data):
    """Update a domain"""
    return self.api_call(
        ENDPOINTS['domains']['update'],
        dict(domainid=domainid), body=data)
[ "def", "update_domain", "(", "self", ",", "domainid", ",", "data", ")", ":", "return", "self", ".", "api_call", "(", "ENDPOINTS", "[", "'domains'", "]", "[", "'update'", "]", ",", "dict", "(", "domainid", "=", "domainid", ")", ",", "body", "=", "data", ")" ]
Update a domain
[ "Update", "a", "domain" ]
python
train
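A dry-run sketch of how update_domain composes its api_call. The ENDPOINTS shape and the stub client below are assumptions, made only so the example runs without a Baruwa server:

# Assumed endpoint shape; the real ENDPOINTS mapping lives in BaruwaAPI.
ENDPOINTS = {'domains': {'update': {'name': '/domains/%(domainid)s', 'method': 'PUT'}}}

class StubClient:
    def api_call(self, endpoint, params, body=None):
        # The real client performs an HTTP request here.
        return endpoint['method'], endpoint['name'] % params, body

    def update_domain(self, domainid, data):
        return self.api_call(
            ENDPOINTS['domains']['update'],
            dict(domainid=domainid), body=data)

print(StubClient().update_domain(7, {'name': 'example.com'}))
# ('PUT', '/domains/7', {'name': 'example.com'})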
cloudera/cm_api
python/src/cm_api/endpoints/clusters.py
https://github.com/cloudera/cm_api/blob/5d2512375bd94684b4da36df9e0d9177865ffcbb/python/src/cm_api/endpoints/clusters.py#L59-L66
def get_all_clusters(resource_root, view=None):
    """
    Get all clusters

    @param resource_root: The root Resource object.
    @return: A list of ApiCluster objects.
    """
    return call(resource_root.get, CLUSTERS_PATH, ApiCluster, True,
                params=view and dict(view=view) or None)
[ "def", "get_all_clusters", "(", "resource_root", ",", "view", "=", "None", ")", ":", "return", "call", "(", "resource_root", ".", "get", ",", "CLUSTERS_PATH", ",", "ApiCluster", ",", "True", ",", "params", "=", "view", "and", "dict", "(", "view", "=", "view", ")", "or", "None", ")" ]
Get all clusters

@param resource_root: The root Resource object.
@return: A list of ApiCluster objects.
[ "Get", "all", "clusters" ]
python
train
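A hedged usage sketch: ApiResource is cm_api's documented root Resource object, but the host and credentials here are placeholders.

from cm_api.api_client import ApiResource

api = ApiResource('cm-host.example.com', username='admin', password='admin')
# ApiResource exposes a get_all_clusters() wrapper around the module function above.
for cluster in api.get_all_clusters():
    print(cluster.name)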
shmir/PyIxExplorer
ixexplorer/ixe_app.py
https://github.com/shmir/PyIxExplorer/blob/d6946b9ce0e8961507cc912062e10c365d4beee2/ixexplorer/ixe_app.py#L187-L195
def stop_transmit(self, *ports):
    """ Stop traffic on ports.

    :param ports: list of ports to stop traffic on, if empty, stop on all ports.
    """

    port_list = self.set_ports_list(*ports)
    self.api.call_rc('ixStopTransmit {}'.format(port_list))
    time.sleep(0.2)
[ "def", "stop_transmit", "(", "self", ",", "*", "ports", ")", ":", "port_list", "=", "self", ".", "set_ports_list", "(", "*", "ports", ")", "self", ".", "api", ".", "call_rc", "(", "'ixStopTransmit {}'", ".", "format", "(", "port_list", ")", ")", "time", ".", "sleep", "(", "0.2", ")" ]
Stop traffic on ports.

:param ports: list of ports to stop traffic on, if empty, stop on all ports.
[ "Stop", "traffic", "on", "ports", "." ]
python
train
saltstack/salt
salt/runners/venafiapi.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/runners/venafiapi.py#L629-L640
def list_domain_cache():
    '''
    List domains that have been cached

    CLI Example:

    .. code-block:: bash

        salt-run venafi.list_domain_cache
    '''
    cache = salt.cache.Cache(__opts__, syspaths.CACHE_DIR)
    return cache.list('venafi/domains')
[ "def", "list_domain_cache", "(", ")", ":", "cache", "=", "salt", ".", "cache", ".", "Cache", "(", "__opts__", ",", "syspaths", ".", "CACHE_DIR", ")", "return", "cache", ".", "list", "(", "'venafi/domains'", ")" ]
List domains that have been cached

CLI Example:

.. code-block:: bash

    salt-run venafi.list_domain_cache
[ "List", "domains", "that", "have", "been", "cached" ]
python
train
twisted/vertex
vertex/conncache.py
https://github.com/twisted/vertex/blob/feb591aa1b9a3b2b8fdcf53e4962dad2a0bc38ca/vertex/conncache.py#L103-L115
def shutdown(self):
    """
    Disconnect all cached connections.

    @returns: a deferred that fires once all connections are disconnected.
    @rtype: L{Deferred}
    """
    self._shuttingDown = {key: Deferred()
                          for key in self.cachedConnections.keys()}
    return DeferredList(
        [maybeDeferred(p.transport.loseConnection)
         for p in self.cachedConnections.values()]
        + self._shuttingDown.values())
[ "def", "shutdown", "(", "self", ")", ":", "self", ".", "_shuttingDown", "=", "{", "key", ":", "Deferred", "(", ")", "for", "key", "in", "self", ".", "cachedConnections", ".", "keys", "(", ")", "}", "return", "DeferredList", "(", "[", "maybeDeferred", "(", "p", ".", "transport", ".", "loseConnection", ")", "for", "p", "in", "self", ".", "cachedConnections", ".", "values", "(", ")", "]", "+", "self", ".", "_shuttingDown", ".", "values", "(", ")", ")" ]
Disconnect all cached connections.

@returns: a deferred that fires once all connections are disconnected.
@rtype: L{Deferred}
[ "Disconnect", "all", "cached", "connections", "." ]
python
train
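shutdown() hinges on DeferredList: one Deferred per connection being closed, aggregated into a single Deferred that fires when every one of them has fired. A minimal demo of that aggregation pattern (requires Twisted; the keys are made up):

from twisted.internet.defer import Deferred, DeferredList

pending = {key: Deferred() for key in ('host-a', 'host-b')}
done = DeferredList(list(pending.values()))
done.addCallback(lambda results: print('all disconnected:', results))

# Simulate each connection reporting that it has closed.
for d in pending.values():
    d.callback(None)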
saltstack/salt
salt/cloud/clouds/clc.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/cloud/clouds/clc.py#L285-L304
def get_build_status(req_id, nodename):
    '''
    get the build status from CLC to make sure we don't return too early
    '''
    counter = 0
    req_id = six.text_type(req_id)
    while counter < 10:
        queue = clc.v1.Blueprint.GetStatus(request_id=(req_id))
        if queue["PercentComplete"] == 100:
            server_name = queue["Servers"][0]
            creds = get_creds()
            clc.v2.SetCredentials(creds["user"], creds["password"])
            ip_addresses = clc.v2.Server(server_name).ip_addresses
            internal_ip_address = ip_addresses[0]["internal"]
            return internal_ip_address
        else:
            counter = counter + 1
            log.info('Creating Cloud VM %s Time out in %s minutes',
                     nodename, six.text_type(10 - counter))
            time.sleep(60)
[ "def", "get_build_status", "(", "req_id", ",", "nodename", ")", ":", "counter", "=", "0", "req_id", "=", "six", ".", "text_type", "(", "req_id", ")", "while", "counter", "<", "10", ":", "queue", "=", "clc", ".", "v1", ".", "Blueprint", ".", "GetStatus", "(", "request_id", "=", "(", "req_id", ")", ")", "if", "queue", "[", "\"PercentComplete\"", "]", "==", "100", ":", "server_name", "=", "queue", "[", "\"Servers\"", "]", "[", "0", "]", "creds", "=", "get_creds", "(", ")", "clc", ".", "v2", ".", "SetCredentials", "(", "creds", "[", "\"user\"", "]", ",", "creds", "[", "\"password\"", "]", ")", "ip_addresses", "=", "clc", ".", "v2", ".", "Server", "(", "server_name", ")", ".", "ip_addresses", "internal_ip_address", "=", "ip_addresses", "[", "0", "]", "[", "\"internal\"", "]", "return", "internal_ip_address", "else", ":", "counter", "=", "counter", "+", "1", "log", ".", "info", "(", "'Creating Cloud VM %s Time out in %s minutes'", ",", "nodename", ",", "six", ".", "text_type", "(", "10", "-", "counter", ")", ")", "time", ".", "sleep", "(", "60", ")" ]
get the build status from CLC to make sure we don't return too early
[ "get", "the", "build", "status", "from", "CLC", "to", "make", "sure", "we", "dont", "return", "to", "early" ]
python
train
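The control flow above is a bounded polling loop: query status up to ten times, a minute apart, and return as soon as the blueprint reports 100% complete. A reduced, dependency-free model of that loop (the fake status queue is invented for the demo):

fake_statuses = iter([
    {"PercentComplete": 40},
    {"PercentComplete": 100, "Servers": ["web01"]},
])

def wait_for_build(max_tries=10):
    for _ in range(max_tries):
        queue = next(fake_statuses)  # stands in for clc.v1.Blueprint.GetStatus
        if queue["PercentComplete"] == 100:
            return queue["Servers"][0]
        # the real loop sleeps 60 seconds here
    return None

print(wait_for_build())  # web01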
numenta/nupic
src/nupic/data/generators/pattern_machine.py
https://github.com/numenta/nupic/blob/5922fafffdccc8812e72b3324965ad2f7d4bbdad/src/nupic/data/generators/pattern_machine.py#L115-L135
def numberMapForBits(self, bits):
    """
    Return a map from number to matching on bits,
    for all numbers that match a set of bits.

    @param bits (set) Indices of bits

    @return (dict) Mapping from number => on bits.
    """
    numberMap = dict()

    for bit in bits:
        numbers = self.numbersForBit(bit)

        for number in numbers:
            if not number in numberMap:
                numberMap[number] = set()

            numberMap[number].add(bit)

    return numberMap
[ "def", "numberMapForBits", "(", "self", ",", "bits", ")", ":", "numberMap", "=", "dict", "(", ")", "for", "bit", "in", "bits", ":", "numbers", "=", "self", ".", "numbersForBit", "(", "bit", ")", "for", "number", "in", "numbers", ":", "if", "not", "number", "in", "numberMap", ":", "numberMap", "[", "number", "]", "=", "set", "(", ")", "numberMap", "[", "number", "]", ".", "add", "(", "bit", ")", "return", "numberMap" ]
Return a map from number to matching on bits,
for all numbers that match a set of bits.

@param bits (set) Indices of bits

@return (dict) Mapping from number => on bits.
[ "Return", "a", "map", "from", "number", "to", "matching", "on", "bits", "for", "all", "numbers", "that", "match", "a", "set", "of", "bits", "." ]
python
valid
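numberMapForBits builds an inverted index: for each on bit, look up every pattern number containing it, then group the bits per number. A self-contained demo with a tiny, made-up pattern set:

# TinyPatternMachine is a stand-in for nupic's PatternMachine, built only for this demo.
class TinyPatternMachine:
    def __init__(self, patterns):
        self.patterns = patterns  # number -> set of on bits

    def numbersForBit(self, bit):
        return {n for n, bits in self.patterns.items() if bit in bits}

    def numberMapForBits(self, bits):
        numberMap = dict()
        for bit in bits:
            for number in self.numbersForBit(bit):
                numberMap.setdefault(number, set()).add(bit)
        return numberMap

pm = TinyPatternMachine({0: {1, 2}, 1: {2, 3}})
print(pm.numberMapForBits({2, 3}))  # {0: {2}, 1: {2, 3}}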
mosdef-hub/mbuild
mbuild/coordinate_transform.py
https://github.com/mosdef-hub/mbuild/blob/dcb80a2becd5d0e6a7e3e7bcb1b59793c46a2dd3/mbuild/coordinate_transform.py#L577-L597
def _spin(coordinates, theta, around):
    """Rotate a set of coordinates in place around an arbitrary vector.

    Parameters
    ----------
    coordinates : np.ndarray, shape=(n,3), dtype=float
        The coordinates being spun.
    theta : float
        The angle by which to spin the coordinates, in radians.
    around : np.ndarray, shape=(3,), dtype=float
        The axis about which to spin the coordinates.
    """
    around = np.asarray(around).reshape(3)
    if np.array_equal(around, np.zeros(3)):
        raise ValueError('Cannot spin around a zero vector')
    center_pos = np.mean(coordinates, axis=0)
    coordinates -= center_pos
    coordinates = _rotate(coordinates, theta, around)
    coordinates += center_pos
    return coordinates
[ "def", "_spin", "(", "coordinates", ",", "theta", ",", "around", ")", ":", "around", "=", "np", ".", "asarray", "(", "around", ")", ".", "reshape", "(", "3", ")", "if", "np", ".", "array_equal", "(", "around", ",", "np", ".", "zeros", "(", "3", ")", ")", ":", "raise", "ValueError", "(", "'Cannot spin around a zero vector'", ")", "center_pos", "=", "np", ".", "mean", "(", "coordinates", ",", "axis", "=", "0", ")", "coordinates", "-=", "center_pos", "coordinates", "=", "_rotate", "(", "coordinates", ",", "theta", ",", "around", ")", "coordinates", "+=", "center_pos", "return", "coordinates" ]
Rotate a set of coordinates in place around an arbitrary vector.

Parameters
----------
coordinates : np.ndarray, shape=(n,3), dtype=float
    The coordinates being spun.
theta : float
    The angle by which to spin the coordinates, in radians.
around : np.ndarray, shape=(3,), dtype=float
    The axis about which to spin the coordinates.
[ "Rotate", "a", "set", "of", "coordinates", "in", "place", "around", "an", "arbitrary", "vector", "." ]
python
train
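An illustrative re-creation of the spin-about-centroid idea: translate to the centroid, rotate, translate back. mbuild's internal _rotate helper is not shown in the record, so an explicit Rodrigues rotation stands in for it here:

import numpy as np

def rotate_about_axis(coords, theta, axis):
    # Normalise the axis, then apply Rodrigues' formula:
    # v_rot = v*cos(theta) + (k x v)*sin(theta) + k*(k.v)*(1 - cos(theta))
    k = np.asarray(axis, dtype=float)
    k /= np.linalg.norm(k)
    return (coords * np.cos(theta)
            + np.cross(k, coords) * np.sin(theta)
            + np.outer(coords @ k, k) * (1 - np.cos(theta)))

coords = np.array([[0.0, 0.0, 0.0], [1.0, 0.0, 0.0]])
center = coords.mean(axis=0)
spun = rotate_about_axis(coords - center, np.pi / 2, [0, 0, 1]) + center
print(spun)  # both points rotated 90 degrees about z through their centroid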
spotify/luigi
luigi/contrib/redshift.py
https://github.com/spotify/luigi/blob/c5eca1c3c3ee2a7eb612486192a0da146710a1e9/luigi/contrib/redshift.py#L386-L408
def copy(self, cursor, f):
    """
    Defines copying from s3 into redshift.

    If both key-based and role-based credentials are provided, role-based will be used.
    """
    logger.info("Inserting file: %s", f)
    colnames = ''
    if self.columns and len(self.columns) > 0:
        colnames = ",".join([x[0] for x in self.columns])
        colnames = '({})'.format(colnames)

    cursor.execute("""
 COPY {table} {colnames} from '{source}'
 CREDENTIALS '{creds}'
 {options}
 ;""".format(
        table=self.table,
        colnames=colnames,
        source=f,
        creds=self._credentials(),
        options=self.copy_options)
    )
[ "def", "copy", "(", "self", ",", "cursor", ",", "f", ")", ":", "logger", ".", "info", "(", "\"Inserting file: %s\"", ",", "f", ")", "colnames", "=", "''", "if", "self", ".", "columns", "and", "len", "(", "self", ".", "columns", ")", ">", "0", ":", "colnames", "=", "\",\"", ".", "join", "(", "[", "x", "[", "0", "]", "for", "x", "in", "self", ".", "columns", "]", ")", "colnames", "=", "'({})'", ".", "format", "(", "colnames", ")", "cursor", ".", "execute", "(", "\"\"\"\n COPY {table} {colnames} from '{source}'\n CREDENTIALS '{creds}'\n {options}\n ;\"\"\"", ".", "format", "(", "table", "=", "self", ".", "table", ",", "colnames", "=", "colnames", ",", "source", "=", "f", ",", "creds", "=", "self", ".", "_credentials", "(", ")", ",", "options", "=", "self", ".", "copy_options", ")", ")" ]
Defines copying from s3 into redshift.

If both key-based and role-based credentials are provided, role-based will be used.
[ "Defines", "copying", "from", "s3", "into", "redshift", "." ]
python
train
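A dry-run sketch that makes the rendered COPY statement visible: a fake cursor simply prints the SQL, and every value below is a placeholder rather than a real table or credential:

class EchoCursor:
    def execute(self, sql):
        print(sql)

columns = [("id", "INT"), ("name", "TEXT")]  # invented schema
colnames = '({})'.format(",".join(x[0] for x in columns))
EchoCursor().execute("""
 COPY {table} {colnames} from '{source}'
 CREDENTIALS '{creds}'
 {options}
 ;""".format(table="my_table", colnames=colnames,
             source="s3://bucket/part-00000",
             creds="aws_iam_role=arn:aws:iam::0:role/redshift",  # placeholder
             options="CSV"))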
incuna/django-wkhtmltopdf
wkhtmltopdf/views.py
https://github.com/incuna/django-wkhtmltopdf/blob/4e73f604c48f7f449c916c4257a72af59517322c/wkhtmltopdf/views.py#L64-L82
def rendered_content(self):
    """Returns the freshly rendered content for the template and context
    described by the PDFResponse.

    This *does not* set the final content of the response. To set the
    response content, you must either call render(), or set the
    content explicitly using the value of this property.
    """
    cmd_options = self.cmd_options.copy()
    return render_pdf_from_template(
        self.resolve_template(self.template_name),
        self.resolve_template(self.header_template),
        self.resolve_template(self.footer_template),
        context=self.resolve_context(self.context_data),
        request=self._request,
        cmd_options=cmd_options,
        cover_template=self.resolve_template(self.cover_template)
    )
[ "def", "rendered_content", "(", "self", ")", ":", "cmd_options", "=", "self", ".", "cmd_options", ".", "copy", "(", ")", "return", "render_pdf_from_template", "(", "self", ".", "resolve_template", "(", "self", ".", "template_name", ")", ",", "self", ".", "resolve_template", "(", "self", ".", "header_template", ")", ",", "self", ".", "resolve_template", "(", "self", ".", "footer_template", ")", ",", "context", "=", "self", ".", "resolve_context", "(", "self", ".", "context_data", ")", ",", "request", "=", "self", ".", "_request", ",", "cmd_options", "=", "cmd_options", ",", "cover_template", "=", "self", ".", "resolve_template", "(", "self", ".", "cover_template", ")", ")" ]
Returns the freshly rendered content for the template and context
described by the PDFResponse.

This *does not* set the final content of the response. To set the
response content, you must either call render(), or set the
content explicitly using the value of this property.
[ "Returns", "the", "freshly", "rendered", "content", "for", "the", "template", "and", "context", "described", "by", "the", "PDFResponse", "." ]
python
test
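A hedged configuration sketch for the view class this property belongs to; the template names and the option value are placeholders:

from wkhtmltopdf.views import PDFTemplateView

class InvoicePDF(PDFTemplateView):
    template_name = 'invoice.html'       # body template
    header_template = 'pdf_header.html'  # resolved separately, as above
    footer_template = 'pdf_footer.html'
    cmd_options = {'margin-top': 10}     # copied before each render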
kwikteam/phy
phy/io/array.py
https://github.com/kwikteam/phy/blob/7e9313dc364304b7d2bd03b92938347343703003/phy/io/array.py#L163-L180
def _get_padded(data, start, end):
    """Return `data[start:end]` filling in with zeros outside array bounds

    Assumes that either `start<0` or `end>len(data)` but not both.
    """
    if start < 0 and end > data.shape[0]:
        raise RuntimeError()
    if start < 0:
        start_zeros = np.zeros((-start, data.shape[1]),
                               dtype=data.dtype)
        return np.vstack((start_zeros, data[:end]))
    elif end > data.shape[0]:
        end_zeros = np.zeros((end - data.shape[0], data.shape[1]),
                             dtype=data.dtype)
        return np.vstack((data[start:], end_zeros))
    else:
        return data[start:end]
[ "def", "_get_padded", "(", "data", ",", "start", ",", "end", ")", ":", "if", "start", "<", "0", "and", "end", ">", "data", ".", "shape", "[", "0", "]", ":", "raise", "RuntimeError", "(", ")", "if", "start", "<", "0", ":", "start_zeros", "=", "np", ".", "zeros", "(", "(", "-", "start", ",", "data", ".", "shape", "[", "1", "]", ")", ",", "dtype", "=", "data", ".", "dtype", ")", "return", "np", ".", "vstack", "(", "(", "start_zeros", ",", "data", "[", ":", "end", "]", ")", ")", "elif", "end", ">", "data", ".", "shape", "[", "0", "]", ":", "end_zeros", "=", "np", ".", "zeros", "(", "(", "end", "-", "data", ".", "shape", "[", "0", "]", ",", "data", ".", "shape", "[", "1", "]", ")", ",", "dtype", "=", "data", ".", "dtype", ")", "return", "np", ".", "vstack", "(", "(", "data", "[", "start", ":", "]", ",", "end_zeros", ")", ")", "else", ":", "return", "data", "[", "start", ":", "end", "]" ]
Return `data[start:end]` filling in with zeros outside array bounds

Assumes that either `start<0` or `end>len(data)` but not both.
[ "Return", "data", "[", "start", ":", "end", "]", "filling", "in", "with", "zeros", "outside", "array", "bounds" ]
python
train
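A runnable demo of the padding behaviour, using a local copy of the function so the example has no dependency on phy itself:

import numpy as np

def get_padded(data, start, end):
    # Same logic as phy's _get_padded, reproduced here for a standalone run.
    if start < 0 and end > data.shape[0]:
        raise RuntimeError()
    if start < 0:
        return np.vstack((np.zeros((-start, data.shape[1]), dtype=data.dtype),
                          data[:end]))
    elif end > data.shape[0]:
        return np.vstack((data[start:],
                          np.zeros((end - data.shape[0], data.shape[1]), dtype=data.dtype)))
    return data[start:end]

data = np.arange(6).reshape(3, 2)
print(get_padded(data, -1, 2))  # one zero row prepended, then data[0:2]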
facebook/pyre-check
sapp/sapp/base_parser.py
https://github.com/facebook/pyre-check/blob/4a9604d943d28ef20238505a51acfb1f666328d7/sapp/sapp/base_parser.py#L224-L231
def compute_diff_handle(filename, old_line, code):
    """Uses the absolute line and ignores the callable/character offsets.
    Used only in determining whether new issues are old issues.
    """
    key = "{filename}:{old_line}:{code}".format(
        filename=filename, old_line=old_line, code=code
    )
    return BaseParser.compute_handle_from_key(key)
[ "def", "compute_diff_handle", "(", "filename", ",", "old_line", ",", "code", ")", ":", "key", "=", "\"{filename}:{old_line}:{code}\"", ".", "format", "(", "filename", "=", "filename", ",", "old_line", "=", "old_line", ",", "code", "=", "code", ")", "return", "BaseParser", ".", "compute_handle_from_key", "(", "key", ")" ]
Uses the absolute line and ignores the callable/character offsets.
Used only in determining whether new issues are old issues.
[ "Uses", "the", "absolute", "line", "and", "ignores", "the", "callable", "/", "character", "offsets", ".", "Used", "only", "in", "determining", "whether", "new", "issues", "are", "old", "issues", "." ]
python
train
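The key layout comes straight from the record above; compute_handle_from_key is not shown, so a SHA-1 digest stands in for the final hashing step, and the filename, line and code are invented:

import hashlib

key = "{filename}:{old_line}:{code}".format(
    filename="app/views.py", old_line=42, code=5001)
print(key)                                     # app/views.py:42:5001
print(hashlib.sha1(key.encode()).hexdigest())  # stand-in for the real handle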
Lagg/steamodd
steam/api.py
https://github.com/Lagg/steamodd/blob/2e9ced4e7a6dbe3e09d5a648450bafc12b937b95/steam/api.py#L248-L259
def call(self):
    """ Make the API call again and fetch fresh data. """
    data = self._downloader.download()

    # Only try to pass errors arg if supported
    if sys.version >= "2.7":
        data = data.decode("utf-8", errors="ignore")
    else:
        data = data.decode("utf-8")

    self.update(json.loads(data))
    self._fetched = True
[ "def", "call", "(", "self", ")", ":", "data", "=", "self", ".", "_downloader", ".", "download", "(", ")", "# Only try to pass errors arg if supported", "if", "sys", ".", "version", ">=", "\"2.7\"", ":", "data", "=", "data", ".", "decode", "(", "\"utf-8\"", ",", "errors", "=", "\"ignore\"", ")", "else", ":", "data", "=", "data", ".", "decode", "(", "\"utf-8\"", ")", "self", ".", "update", "(", "json", ".", "loads", "(", "data", ")", ")", "self", ".", "_fetched", "=", "True" ]
Make the API call again and fetch fresh data.
[ "Make", "the", "API", "call", "again", "and", "fetch", "fresh", "data", "." ]
python
train
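A quick, standalone illustration of the decode step: when the errors="ignore" argument is available, undecodable bytes are silently dropped instead of raising:

raw = b"price: 9\xf4\x80 coins"              # contains a truncated UTF-8 sequence
print(raw.decode("utf-8", errors="ignore"))  # -> 'price: 9 coins'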
atlassian-api/atlassian-python-api
atlassian/service_desk.py
https://github.com/atlassian-api/atlassian-python-api/blob/540d269905c3e7547b666fe30c647b2d512cf358/atlassian/service_desk.py#L90-L100
def get_customer_request_status(self, issue_id_or_key):
    """
    Get customer request status name

    :param issue_id_or_key: str
    :return: Status name
    """
    request = self.get('rest/servicedeskapi/request/{}/status'.format(issue_id_or_key)).get('values')
    status = request[0].get('status')
    return status
[ "def", "get_customer_request_status", "(", "self", ",", "issue_id_or_key", ")", ":", "request", "=", "self", ".", "get", "(", "'rest/servicedeskapi/request/{}/status'", ".", "format", "(", "issue_id_or_key", ")", ")", ".", "get", "(", "'values'", ")", "status", "=", "request", "[", "0", "]", ".", "get", "(", "'status'", ")", "return", "status" ]
Get customer request status name

:param issue_id_or_key: str
:return: Status name
[ "Get", "customer", "request", "status", "name" ]
python
train
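A hedged usage sketch; ServiceDesk is the package's documented client class, while the URL, credentials and issue key below are placeholders:

from atlassian import ServiceDesk

sd = ServiceDesk(url='https://jira.example.com',
                 username='svc-account', password='secret')
print(sd.get_customer_request_status('SD-42'))  # e.g. 'Waiting for support'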
influxdata/influxdb-python
influxdb/influxdb08/client.py
https://github.com/influxdata/influxdb-python/blob/d5d12499f3755199d5eedd8b363450f1cf4073bd/influxdb/influxdb08/client.py#L815-L825
def delete_database_user(self, username):
    """Delete database user."""
    url = "db/{0}/users/{1}".format(self._database, username)

    self.request(
        url=url,
        method='DELETE',
        expected_response_code=200
    )

    return True
[ "def", "delete_database_user", "(", "self", ",", "username", ")", ":", "url", "=", "\"db/{0}/users/{1}\"", ".", "format", "(", "self", ".", "_database", ",", "username", ")", "self", ".", "request", "(", "url", "=", "url", ",", "method", "=", "'DELETE'", ",", "expected_response_code", "=", "200", ")", "return", "True" ]
Delete database user.
[ "Delete", "database", "user", "." ]
python
train
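A hedged usage sketch against the legacy 0.8 client; the connection details and user name are placeholders:

from influxdb.influxdb08 import InfluxDBClient

client = InfluxDBClient('localhost', 8086, 'root', 'root', 'mydb')
if client.delete_database_user('olduser'):  # True once the server returns 200
    print('user removed')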
Julian/jsonschema
jsonschema/validators.py
https://github.com/Julian/jsonschema/blob/a72332004cdc3ba456de7918bc32059822b2f69a/jsonschema/validators.py#L154-L364
def create( meta_schema, validators=(), version=None, default_types=None, type_checker=None, id_of=_id_of, ): """ Create a new validator class. Arguments: meta_schema (collections.Mapping): the meta schema for the new validator class validators (collections.Mapping): a mapping from names to callables, where each callable will validate the schema property with the given name. Each callable should take 4 arguments: 1. a validator instance, 2. the value of the property being validated within the instance 3. the instance 4. the schema version (str): an identifier for the version that this validator class will validate. If provided, the returned validator class will have its ``__name__`` set to include the version, and also will have `jsonschema.validators.validates` automatically called for the given version. type_checker (jsonschema.TypeChecker): a type checker, used when applying the :validator:`type` validator. If unprovided, a `jsonschema.TypeChecker` will be created with a set of default types typical of JSON Schema drafts. default_types (collections.Mapping): .. deprecated:: 3.0.0 Please use the type_checker argument instead. If set, it provides mappings of JSON types to Python types that will be converted to functions and redefined in this object's `jsonschema.TypeChecker`. id_of (callable): A function that given a schema, returns its ID. Returns: a new `jsonschema.IValidator` class """ if default_types is not None: if type_checker is not None: raise TypeError( "Do not specify default_types when providing a type checker.", ) _created_with_default_types = True warn( ( "The default_types argument is deprecated. " "Use the type_checker argument instead." ), DeprecationWarning, stacklevel=2, ) type_checker = _types.TypeChecker( type_checkers=_generate_legacy_type_checks(default_types), ) else: default_types = _DEPRECATED_DEFAULT_TYPES if type_checker is None: _created_with_default_types = False type_checker = _TYPE_CHECKER_FOR_DEPRECATED_DEFAULT_TYPES elif type_checker is _TYPE_CHECKER_FOR_DEPRECATED_DEFAULT_TYPES: _created_with_default_types = False else: _created_with_default_types = None @add_metaclass(_DefaultTypesDeprecatingMetaClass) class Validator(object): VALIDATORS = dict(validators) META_SCHEMA = dict(meta_schema) TYPE_CHECKER = type_checker ID_OF = staticmethod(id_of) DEFAULT_TYPES = property(_DEFAULT_TYPES) _DEFAULT_TYPES = dict(default_types) _CREATED_WITH_DEFAULT_TYPES = _created_with_default_types def __init__( self, schema, types=(), resolver=None, format_checker=None, ): if types: warn( ( "The types argument is deprecated. Provide " "a type_checker to jsonschema.validators.extend " "instead." 
), DeprecationWarning, stacklevel=2, ) self.TYPE_CHECKER = self.TYPE_CHECKER.redefine_many( _generate_legacy_type_checks(types), ) if resolver is None: resolver = RefResolver.from_schema(schema, id_of=id_of) self.resolver = resolver self.format_checker = format_checker self.schema = schema @classmethod def check_schema(cls, schema): for error in cls(cls.META_SCHEMA).iter_errors(schema): raise exceptions.SchemaError.create_from(error) def iter_errors(self, instance, _schema=None): if _schema is None: _schema = self.schema if _schema is True: return elif _schema is False: yield exceptions.ValidationError( "False schema does not allow %r" % (instance,), validator=None, validator_value=None, instance=instance, schema=_schema, ) return scope = id_of(_schema) if scope: self.resolver.push_scope(scope) try: ref = _schema.get(u"$ref") if ref is not None: validators = [(u"$ref", ref)] else: validators = iteritems(_schema) for k, v in validators: validator = self.VALIDATORS.get(k) if validator is None: continue errors = validator(self, v, instance, _schema) or () for error in errors: # set details if not already set by the called fn error._set( validator=k, validator_value=v, instance=instance, schema=_schema, ) if k != u"$ref": error.schema_path.appendleft(k) yield error finally: if scope: self.resolver.pop_scope() def descend(self, instance, schema, path=None, schema_path=None): for error in self.iter_errors(instance, schema): if path is not None: error.path.appendleft(path) if schema_path is not None: error.schema_path.appendleft(schema_path) yield error def validate(self, *args, **kwargs): for error in self.iter_errors(*args, **kwargs): raise error def is_type(self, instance, type): try: return self.TYPE_CHECKER.is_type(instance, type) except exceptions.UndefinedTypeCheck: raise exceptions.UnknownType(type, instance, self.schema) def is_valid(self, instance, _schema=None): error = next(self.iter_errors(instance, _schema), None) return error is None if version is not None: Validator = validates(version)(Validator) Validator.__name__ = version.title().replace(" ", "") + "Validator" return Validator
[ "def", "create", "(", "meta_schema", ",", "validators", "=", "(", ")", ",", "version", "=", "None", ",", "default_types", "=", "None", ",", "type_checker", "=", "None", ",", "id_of", "=", "_id_of", ",", ")", ":", "if", "default_types", "is", "not", "None", ":", "if", "type_checker", "is", "not", "None", ":", "raise", "TypeError", "(", "\"Do not specify default_types when providing a type checker.\"", ",", ")", "_created_with_default_types", "=", "True", "warn", "(", "(", "\"The default_types argument is deprecated. \"", "\"Use the type_checker argument instead.\"", ")", ",", "DeprecationWarning", ",", "stacklevel", "=", "2", ",", ")", "type_checker", "=", "_types", ".", "TypeChecker", "(", "type_checkers", "=", "_generate_legacy_type_checks", "(", "default_types", ")", ",", ")", "else", ":", "default_types", "=", "_DEPRECATED_DEFAULT_TYPES", "if", "type_checker", "is", "None", ":", "_created_with_default_types", "=", "False", "type_checker", "=", "_TYPE_CHECKER_FOR_DEPRECATED_DEFAULT_TYPES", "elif", "type_checker", "is", "_TYPE_CHECKER_FOR_DEPRECATED_DEFAULT_TYPES", ":", "_created_with_default_types", "=", "False", "else", ":", "_created_with_default_types", "=", "None", "@", "add_metaclass", "(", "_DefaultTypesDeprecatingMetaClass", ")", "class", "Validator", "(", "object", ")", ":", "VALIDATORS", "=", "dict", "(", "validators", ")", "META_SCHEMA", "=", "dict", "(", "meta_schema", ")", "TYPE_CHECKER", "=", "type_checker", "ID_OF", "=", "staticmethod", "(", "id_of", ")", "DEFAULT_TYPES", "=", "property", "(", "_DEFAULT_TYPES", ")", "_DEFAULT_TYPES", "=", "dict", "(", "default_types", ")", "_CREATED_WITH_DEFAULT_TYPES", "=", "_created_with_default_types", "def", "__init__", "(", "self", ",", "schema", ",", "types", "=", "(", ")", ",", "resolver", "=", "None", ",", "format_checker", "=", "None", ",", ")", ":", "if", "types", ":", "warn", "(", "(", "\"The types argument is deprecated. 
Provide \"", "\"a type_checker to jsonschema.validators.extend \"", "\"instead.\"", ")", ",", "DeprecationWarning", ",", "stacklevel", "=", "2", ",", ")", "self", ".", "TYPE_CHECKER", "=", "self", ".", "TYPE_CHECKER", ".", "redefine_many", "(", "_generate_legacy_type_checks", "(", "types", ")", ",", ")", "if", "resolver", "is", "None", ":", "resolver", "=", "RefResolver", ".", "from_schema", "(", "schema", ",", "id_of", "=", "id_of", ")", "self", ".", "resolver", "=", "resolver", "self", ".", "format_checker", "=", "format_checker", "self", ".", "schema", "=", "schema", "@", "classmethod", "def", "check_schema", "(", "cls", ",", "schema", ")", ":", "for", "error", "in", "cls", "(", "cls", ".", "META_SCHEMA", ")", ".", "iter_errors", "(", "schema", ")", ":", "raise", "exceptions", ".", "SchemaError", ".", "create_from", "(", "error", ")", "def", "iter_errors", "(", "self", ",", "instance", ",", "_schema", "=", "None", ")", ":", "if", "_schema", "is", "None", ":", "_schema", "=", "self", ".", "schema", "if", "_schema", "is", "True", ":", "return", "elif", "_schema", "is", "False", ":", "yield", "exceptions", ".", "ValidationError", "(", "\"False schema does not allow %r\"", "%", "(", "instance", ",", ")", ",", "validator", "=", "None", ",", "validator_value", "=", "None", ",", "instance", "=", "instance", ",", "schema", "=", "_schema", ",", ")", "return", "scope", "=", "id_of", "(", "_schema", ")", "if", "scope", ":", "self", ".", "resolver", ".", "push_scope", "(", "scope", ")", "try", ":", "ref", "=", "_schema", ".", "get", "(", "u\"$ref\"", ")", "if", "ref", "is", "not", "None", ":", "validators", "=", "[", "(", "u\"$ref\"", ",", "ref", ")", "]", "else", ":", "validators", "=", "iteritems", "(", "_schema", ")", "for", "k", ",", "v", "in", "validators", ":", "validator", "=", "self", ".", "VALIDATORS", ".", "get", "(", "k", ")", "if", "validator", "is", "None", ":", "continue", "errors", "=", "validator", "(", "self", ",", "v", ",", "instance", ",", "_schema", ")", "or", "(", ")", "for", "error", "in", "errors", ":", "# set details if not already set by the called fn", "error", ".", "_set", "(", "validator", "=", "k", ",", "validator_value", "=", "v", ",", "instance", "=", "instance", ",", "schema", "=", "_schema", ",", ")", "if", "k", "!=", "u\"$ref\"", ":", "error", ".", "schema_path", ".", "appendleft", "(", "k", ")", "yield", "error", "finally", ":", "if", "scope", ":", "self", ".", "resolver", ".", "pop_scope", "(", ")", "def", "descend", "(", "self", ",", "instance", ",", "schema", ",", "path", "=", "None", ",", "schema_path", "=", "None", ")", ":", "for", "error", "in", "self", ".", "iter_errors", "(", "instance", ",", "schema", ")", ":", "if", "path", "is", "not", "None", ":", "error", ".", "path", ".", "appendleft", "(", "path", ")", "if", "schema_path", "is", "not", "None", ":", "error", ".", "schema_path", ".", "appendleft", "(", "schema_path", ")", "yield", "error", "def", "validate", "(", "self", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "for", "error", "in", "self", ".", "iter_errors", "(", "*", "args", ",", "*", "*", "kwargs", ")", ":", "raise", "error", "def", "is_type", "(", "self", ",", "instance", ",", "type", ")", ":", "try", ":", "return", "self", ".", "TYPE_CHECKER", ".", "is_type", "(", "instance", ",", "type", ")", "except", "exceptions", ".", "UndefinedTypeCheck", ":", "raise", "exceptions", ".", "UnknownType", "(", "type", ",", "instance", ",", "self", ".", "schema", ")", "def", "is_valid", "(", "self", ",", "instance", ",", "_schema", "=", "None", ")", 
":", "error", "=", "next", "(", "self", ".", "iter_errors", "(", "instance", ",", "_schema", ")", ",", "None", ")", "return", "error", "is", "None", "if", "version", "is", "not", "None", ":", "Validator", "=", "validates", "(", "version", ")", "(", "Validator", ")", "Validator", ".", "__name__", "=", "version", ".", "title", "(", ")", ".", "replace", "(", "\" \"", ",", "\"\"", ")", "+", "\"Validator\"", "return", "Validator" ]
Create a new validator class. Arguments: meta_schema (collections.Mapping): the meta schema for the new validator class validators (collections.Mapping): a mapping from names to callables, where each callable will validate the schema property with the given name. Each callable should take 4 arguments: 1. a validator instance, 2. the value of the property being validated within the instance 3. the instance 4. the schema version (str): an identifier for the version that this validator class will validate. If provided, the returned validator class will have its ``__name__`` set to include the version, and also will have `jsonschema.validators.validates` automatically called for the given version. type_checker (jsonschema.TypeChecker): a type checker, used when applying the :validator:`type` validator. If unprovided, a `jsonschema.TypeChecker` will be created with a set of default types typical of JSON Schema drafts. default_types (collections.Mapping): .. deprecated:: 3.0.0 Please use the type_checker argument instead. If set, it provides mappings of JSON types to Python types that will be converted to functions and redefined in this object's `jsonschema.TypeChecker`. id_of (callable): A function that given a schema, returns its ID. Returns: a new `jsonschema.IValidator` class
[ "Create", "a", "new", "validator", "class", "." ]
python
train
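A minimal sketch of using create() to build a tiny validator class; the min_length callable below is illustrative, but it follows the 4-argument validator signature the docstring describes, and the empty meta-schema simply means check_schema() accepts anything.

from jsonschema import validators, exceptions

def min_length(validator, mL, instance, schema):
    # Validator callables receive (validator, property value, instance, schema).
    if validator.is_type(instance, "string") and len(instance) < mL:
        yield exceptions.ValidationError("%r is too short" % (instance,))

TinyValidator = validators.create(
    meta_schema={},
    validators={"minLength": min_length},
)

v = TinyValidator(schema={"minLength": 3})
print(v.is_valid("ab"))   # False
print(v.is_valid("abc"))  # True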
PSPC-SPAC-buyandsell/von_anchor
von_anchor/op/setnym.py
https://github.com/PSPC-SPAC-buyandsell/von_anchor/blob/78ac1de67be42a676274f4bf71fe12f66e72f309/von_anchor/op/setnym.py#L185-L211
def main(args: Sequence[str] = None) -> int: """ Main line for script: check arguments and dispatch operation to set nym. :param args: command-line arguments :return: 0 for OK, 1 for failure """ logging.basicConfig( level=logging.INFO, format='%(asctime)-15s | %(levelname)-8s | %(message)s', datefmt='%Y-%m-%d %H:%M:%S') logging.getLogger('von_anchor').setLevel(logging.WARNING) logging.getLogger('indy').setLevel(logging.ERROR) if args is None: args = sys.argv[1:] if len(args) == 1: try: return do_wait(setnym(args[0])) except VonAnchorError as vax: print(str(vax)) return 1 else: usage() return 1
[ "def", "main", "(", "args", ":", "Sequence", "[", "str", "]", "=", "None", ")", "->", "int", ":", "logging", ".", "basicConfig", "(", "level", "=", "logging", ".", "INFO", ",", "format", "=", "'%(asctime)-15s | %(levelname)-8s | %(message)s'", ",", "datefmt", "=", "'%Y-%m-%d %H:%M:%S'", ")", "logging", ".", "getLogger", "(", "'von_anchor'", ")", ".", "setLevel", "(", "logging", ".", "WARNING", ")", "logging", ".", "getLogger", "(", "'indy'", ")", ".", "setLevel", "(", "logging", ".", "ERROR", ")", "if", "args", "is", "None", ":", "args", "=", "sys", ".", "argv", "[", "1", ":", "]", "if", "len", "(", "sys", ".", "argv", ")", "==", "2", ":", "try", ":", "return", "do_wait", "(", "setnym", "(", "sys", ".", "argv", "[", "1", "]", ")", ")", "except", "VonAnchorError", "as", "vax", ":", "print", "(", "str", "(", "vax", ")", ")", "return", "1", "else", ":", "usage", "(", ")", "return", "1" ]
Main line for script: check arguments and dispatch operation to set nym. :param args: command-line arguments :return: 0 for OK, 1 for failure
[ "Main", "line", "for", "script", ":", "check", "arguments", "and", "dispatch", "operation", "to", "set", "nym", "." ]
python
train
ray-project/ray
python/ray/log_monitor.py
https://github.com/ray-project/ray/blob/4eade036a0505e244c976f36aaa2d64386b5129b/python/ray/log_monitor.py#L210-L223
def run(self): """Run the log monitor. This will repeatedly query Redis to check if there are new log files to monitor, and will store those log files in Redis. When nothing new was published, it sleeps briefly to avoid busy-waiting. """ while True: self.update_log_filenames() self.open_closed_files() anything_published = self.check_log_files_and_publish_updates() # If nothing was published, then wait a little bit before checking # for logs to avoid using too much CPU. if not anything_published: time.sleep(0.05)
[ "def", "run", "(", "self", ")", ":", "while", "True", ":", "self", ".", "update_log_filenames", "(", ")", "self", ".", "open_closed_files", "(", ")", "anything_published", "=", "self", ".", "check_log_files_and_publish_updates", "(", ")", "# If nothing was published, then wait a little bit before checking", "# for logs to avoid using too much CPU.", "if", "not", "anything_published", ":", "time", ".", "sleep", "(", "0.05", ")" ]
Run the log monitor. This will repeatedly query Redis to check if there are new log files to monitor, and will store those log files in Redis. When nothing new was published, it sleeps briefly to avoid busy-waiting.
[ "Run", "the", "log", "monitor", "." ]
python
train
JamesPHoughton/pysd
pysd/py_backend/functions.py
https://github.com/JamesPHoughton/pysd/blob/bf1b1d03954e9ba5acac9ba4f1ada7cd93352eda/pysd/py_backend/functions.py#L935-L943
def lookup_discrete(x, xs, ys): """ Intermediate values take on the value associated with the next lower x-coordinate (also called a step-wise function). The last two points of a discrete graphical function must have the same y value. Out-of-range values are the same as the closest endpoint (i.e., no extrapolation is performed). """ for index in range(0, len(xs)): if x < xs[index]: return ys[index - 1] if index > 0 else ys[index] return ys[len(ys) - 1]
[ "def", "lookup_discrete", "(", "x", ",", "xs", ",", "ys", ")", ":", "for", "index", "in", "range", "(", "0", ",", "len", "(", "xs", ")", ")", ":", "if", "x", "<", "xs", "[", "index", "]", ":", "return", "ys", "[", "index", "-", "1", "]", "if", "index", ">", "0", "else", "ys", "[", "index", "]", "return", "ys", "[", "len", "(", "ys", ")", "-", "1", "]" ]
Intermediate values take on the value associated with the next lower x-coordinate (also called a step-wise function). The last two points of a discrete graphical function must have the same y value. Out-of-range values are the same as the closest endpoint (i.e., no extrapolation is performed).
[ "Intermediate", "values", "take", "on", "the", "value", "associated", "with", "the", "next", "lower", "x", "-", "coordinate", "(", "also", "called", "a", "step", "-", "wise", "function", ")", ".", "The", "last", "two", "points", "of", "a", "discrete", "graphical", "function", "must", "have", "the", "same", "y", "value", ".", "Out", "-", "of", "-", "range", "values", "are", "the", "same", "as", "the", "closest", "endpoint", "(", "i", ".", "e", "no", "extrapolation", "is", "performed", ")", "." ]
python
train
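A quick self-contained check of the step-wise behaviour described above, assuming lookup_discrete is in scope; the coordinate lists are made up for illustration.

xs = [0, 1, 2, 3]
ys = [10, 20, 30, 30]  # last two y values match, per the stated convention

print(lookup_discrete(1.5, xs, ys))  # 20 -- value at the next lower x (1)
print(lookup_discrete(-5, xs, ys))   # 10 -- clamped to the left endpoint
print(lookup_discrete(99, xs, ys))   # 30 -- clamped to the right endpoint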
lucasmaystre/choix
choix/ep.py
https://github.com/lucasmaystre/choix/blob/05a57a10bb707338113a9d91601ca528ead7a881/choix/ep.py#L157-L176
def _log_phi(z): """Stable computation of the log of the Normal CDF and its derivative.""" # Adapted from the GPML function `logphi.m`. if z * z < 0.0492: # First case: z close to zero. coef = -z / SQRT2PI val = functools.reduce(lambda acc, c: coef * (c + acc), CS, 0) res = -2 * val - log(2) dres = exp(-(z * z) / 2 - res) / SQRT2PI elif z < -11.3137: # Second case: z very small. num = functools.reduce( lambda acc, r: -z * acc / SQRT2 + r, RS, 0.5641895835477550741) den = functools.reduce(lambda acc, q: -z * acc / SQRT2 + q, QS, 1.0) res = log(num / (2 * den)) - (z * z) / 2 dres = abs(den / num) * sqrt(2.0 / pi) else: res = log(normal_cdf(z)) dres = exp(-(z * z) / 2 - res) / SQRT2PI return res, dres
[ "def", "_log_phi", "(", "z", ")", ":", "# Adapted from the GPML function `logphi.m`.", "if", "z", "*", "z", "<", "0.0492", ":", "# First case: z close to zero.", "coef", "=", "-", "z", "/", "SQRT2PI", "val", "=", "functools", ".", "reduce", "(", "lambda", "acc", ",", "c", ":", "coef", "*", "(", "c", "+", "acc", ")", ",", "CS", ",", "0", ")", "res", "=", "-", "2", "*", "val", "-", "log", "(", "2", ")", "dres", "=", "exp", "(", "-", "(", "z", "*", "z", ")", "/", "2", "-", "res", ")", "/", "SQRT2PI", "elif", "z", "<", "-", "11.3137", ":", "# Second case: z very small.", "num", "=", "functools", ".", "reduce", "(", "lambda", "acc", ",", "r", ":", "-", "z", "*", "acc", "/", "SQRT2", "+", "r", ",", "RS", ",", "0.5641895835477550741", ")", "den", "=", "functools", ".", "reduce", "(", "lambda", "acc", ",", "q", ":", "-", "z", "*", "acc", "/", "SQRT2", "+", "q", ",", "QS", ",", "1.0", ")", "res", "=", "log", "(", "num", "/", "(", "2", "*", "den", ")", ")", "-", "(", "z", "*", "z", ")", "/", "2", "dres", "=", "abs", "(", "den", "/", "num", ")", "*", "sqrt", "(", "2.0", "/", "pi", ")", "else", ":", "res", "=", "log", "(", "normal_cdf", "(", "z", ")", ")", "dres", "=", "exp", "(", "-", "(", "z", "*", "z", ")", "/", "2", "-", "res", ")", "/", "SQRT2PI", "return", "res", ",", "dres" ]
Stable computation of the log of the Normal CDF and its derivative.
[ "Stable", "computation", "of", "the", "log", "of", "the", "Normal", "CDF", "and", "its", "derivative", "." ]
python
train
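A sketch comparing the stable computation above against SciPy's reference implementation; it assumes _log_phi and the module constants (CS, RS, QS, SQRT2, SQRT2PI) are in scope.

from scipy.stats import norm

# res should track norm.logcdf(z) in all three branches, including the
# deep negative tail where a naive log(normal_cdf(z)) underflows to -inf.
for z in (-20.0, -0.1, 2.0):
    res, dres = _log_phi(z)
    print(z, res, norm.logcdf(z))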
jcrist/skein
skein/objects.py
https://github.com/jcrist/skein/blob/16f8b1d3b3d9f79f36e2f152e45893339a1793e8/skein/objects.py#L243-L245
def to_json(self, skip_nulls=True): """Convert object to a json string""" return json.dumps(self.to_dict(skip_nulls=skip_nulls))
[ "def", "to_json", "(", "self", ",", "skip_nulls", "=", "True", ")", ":", "return", "json", ".", "dumps", "(", "self", ".", "to_dict", "(", "skip_nulls", "=", "skip_nulls", ")", ")" ]
Convert object to a json string
[ "Convert", "object", "to", "a", "json", "string" ]
python
train
diging/tethne
tethne/classes/graphcollection.py
https://github.com/diging/tethne/blob/ba10eeb264b7a3f2dbcce71cfd5cb2d6bbf7055f/tethne/classes/graphcollection.py#L190-L211
def nodes(self, data=False, native=True): """ Returns a list of all nodes in the :class:`.GraphCollection`\. Parameters ---------- data : bool (default: False) If True, returns a list of 2-tuples containing node labels and attributes. Returns ------- nodes : list """ nodes = self.master_graph.nodes(data=data) if native: if data: nodes = [(self.node_index[n], attrs) for n, attrs in nodes] else: nodes = [self.node_index[n] for n in nodes] return nodes
[ "def", "nodes", "(", "self", ",", "data", "=", "False", ",", "native", "=", "True", ")", ":", "nodes", "=", "self", ".", "master_graph", ".", "nodes", "(", "data", "=", "data", ")", "if", "native", ":", "if", "data", ":", "nodes", "=", "[", "(", "self", ".", "node_index", "[", "n", "]", ",", "attrs", ")", "for", "n", ",", "attrs", "in", "nodes", "]", "else", ":", "nodes", "=", "[", "self", ".", "node_index", "[", "n", "]", "for", "n", "in", "nodes", "]", "return", "nodes" ]
Returns a list of all nodes in the :class:`.GraphCollection`\. Parameters ---------- data : bool (default: False) If True, returns a list of 2-tuples containing node labels and attributes. Returns ------- nodes : list
[ "Returns", "a", "list", "of", "all", "nodes", "in", "the", ":", "class", ":", ".", "GraphCollection", "\\", "." ]
python
train
O365/python-o365
O365/excel.py
https://github.com/O365/python-o365/blob/02a71cf3775cc6a3c042e003365d6a07c8c75a73/O365/excel.py#L1186-L1208
def get_columns(self, *, top=None, skip=None): """ Return the columns of this table :param int top: specify n columns to retrieve :param int skip: specify n columns to skip """ url = self.build_url(self._endpoints.get('get_columns')) params = {} if top is not None: params['$top'] = top if skip is not None: params['$skip'] = skip params = None if not params else params response = self.session.get(url, params=params) if not response: return iter(()) data = response.json() return (self.column_constructor(parent=self, **{self._cloud_data_key: column}) for column in data.get('value', []))
[ "def", "get_columns", "(", "self", ",", "*", ",", "top", "=", "None", ",", "skip", "=", "None", ")", ":", "url", "=", "self", ".", "build_url", "(", "self", ".", "_endpoints", ".", "get", "(", "'get_columns'", ")", ")", "params", "=", "{", "}", "if", "top", "is", "not", "None", ":", "params", "[", "'$top'", "]", "=", "top", "if", "skip", "is", "not", "None", ":", "params", "[", "'$skip'", "]", "=", "skip", "params", "=", "None", "if", "not", "params", "else", "params", "response", "=", "self", ".", "session", ".", "get", "(", "url", ",", "params", "=", "params", ")", "if", "not", "response", ":", "return", "iter", "(", "(", ")", ")", "data", "=", "response", ".", "json", "(", ")", "return", "(", "self", ".", "column_constructor", "(", "parent", "=", "self", ",", "*", "*", "{", "self", ".", "_cloud_data_key", ":", "column", "}", ")", "for", "column", "in", "data", ".", "get", "(", "'value'", ",", "[", "]", ")", ")" ]
Return the columns of this table :param int top: specify n columns to retrieve :param int skip: specify n columns to skip
[ "Return", "the", "columns", "of", "this", "table", ":", "param", "int", "top", ":", "specify", "n", "columns", "to", "retrieve", ":", "param", "int", "skip", ":", "specify", "n", "columns", "to", "skip" ]
python
train
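A hedged usage sketch: table stands for a Table object already obtained through the O365 account/workbook chain, which is elided here.

# `table` is assumed to be an O365 excel Table obtained elsewhere, e.g.
# account -> workbook -> worksheet -> get_table(...).
for column in table.get_columns(top=5):
    # get_columns() yields constructed column objects lazily, or an
    # empty iterator when the request fails.
    print(column)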
LeastAuthority/txkube
src/txkube/_swagger.py
https://github.com/LeastAuthority/txkube/blob/a7e555d00535ff787d4b1204c264780da40cf736/src/txkube/_swagger.py#L158-L197
def pclass_for_definition(self, name): """ Get a ``pyrsistent.PClass`` subclass representing the Swagger definition in this specification which corresponds to the given name. :param unicode name: The name of the definition to use. :return: A Python class which can be used to represent the Swagger definition of the given name. """ while True: try: cls = self._pclasses[name] except KeyError: try: original_definition = self.definitions[name] except KeyError: raise NoSuchDefinition(name) if "$ref" in original_definition: # Identify definitions that are merely a reference to # another and restart processing. There is some # duplication of logic between this and the $ref handling # in _ClassModel. It would be nice to eliminate this # duplication. name = original_definition[u"$ref"] assert name.startswith(u"#/definitions/") name = name[len(u"#/definitions/"):] continue definition = self.transform_definition(name, original_definition) kind = self._identify_kind(definition) if kind is None: raise NotClassLike(name, definition) generator = getattr(self, "_model_for_{}".format(kind)) model = generator(name, definition) bases = tuple(self._behaviors.get(name, [])) cls = model.pclass(bases) self._pclasses[name] = cls return cls
[ "def", "pclass_for_definition", "(", "self", ",", "name", ")", ":", "while", "True", ":", "try", ":", "cls", "=", "self", ".", "_pclasses", "[", "name", "]", "except", "KeyError", ":", "try", ":", "original_definition", "=", "self", ".", "definitions", "[", "name", "]", "except", "KeyError", ":", "raise", "NoSuchDefinition", "(", "name", ")", "if", "\"$ref\"", "in", "original_definition", ":", "# Identify definitions that are merely a reference to", "# another and restart processing. There is some", "# duplication of logic between this and the $ref handling", "# in _ClassModel. It would be nice to eliminate this", "# duplication.", "name", "=", "original_definition", "[", "u\"$ref\"", "]", "assert", "name", ".", "startswith", "(", "u\"#/definitions/\"", ")", "name", "=", "name", "[", "len", "(", "u\"#/definitions/\"", ")", ":", "]", "continue", "definition", "=", "self", ".", "transform_definition", "(", "name", ",", "original_definition", ")", "kind", "=", "self", ".", "_identify_kind", "(", "definition", ")", "if", "kind", "is", "None", ":", "raise", "NotClassLike", "(", "name", ",", "definition", ")", "generator", "=", "getattr", "(", "self", ",", "\"_model_for_{}\"", ".", "format", "(", "kind", ")", ")", "model", "=", "generator", "(", "name", ",", "definition", ")", "bases", "=", "tuple", "(", "self", ".", "_behaviors", ".", "get", "(", "name", ",", "[", "]", ")", ")", "cls", "=", "model", ".", "pclass", "(", "bases", ")", "self", ".", "_pclasses", "[", "name", "]", "=", "cls", "return", "cls" ]
Get a ``pyrsistent.PClass`` subclass representing the Swagger definition in this specification which corresponds to the given name. :param unicode name: The name of the definition to use. :return: A Python class which can be used to represent the Swagger definition of the given name.
[ "Get", "a", "pyrsistent", ".", "PClass", "subclass", "representing", "the", "Swagger", "definition", "in", "this", "specification", "which", "corresponds", "to", "the", "given", "name", "." ]
python
train
google/grr
grr/server/grr_response_server/flows/cron/system.py
https://github.com/google/grr/blob/5cef4e8e2f0d5df43ea4877e9c798e0bf60bfe74/grr/server/grr_response_server/flows/cron/system.py#L167-L193
def Run(self): """Retrieve all the clients for the AbstractClientStatsCollectors.""" try: self.stats = {} self.BeginProcessing() processed_count = 0 for client_info_batch in _IterateAllClients( recency_window=self.recency_window): for client_info in client_info_batch: self.ProcessClientFullInfo(client_info) processed_count += len(client_info_batch) self.Log("Processed %d clients.", processed_count) self.HeartBeat() self.FinishProcessing() for fd in itervalues(self.stats): fd.Close() logging.info("%s: processed %d clients.", self.__class__.__name__, processed_count) except Exception as e: # pylint: disable=broad-except logging.exception("Error while calculating stats: %s", e) raise
[ "def", "Run", "(", "self", ")", ":", "try", ":", "self", ".", "stats", "=", "{", "}", "self", ".", "BeginProcessing", "(", ")", "processed_count", "=", "0", "for", "client_info_batch", "in", "_IterateAllClients", "(", "recency_window", "=", "self", ".", "recency_window", ")", ":", "for", "client_info", "in", "client_info_batch", ":", "self", ".", "ProcessClientFullInfo", "(", "client_info", ")", "processed_count", "+=", "len", "(", "client_info_batch", ")", "self", ".", "Log", "(", "\"Processed %d clients.\"", ",", "processed_count", ")", "self", ".", "HeartBeat", "(", ")", "self", ".", "FinishProcessing", "(", ")", "for", "fd", "in", "itervalues", "(", "self", ".", "stats", ")", ":", "fd", ".", "Close", "(", ")", "logging", ".", "info", "(", "\"%s: processed %d clients.\"", ",", "self", ".", "__class__", ".", "__name__", ",", "processed_count", ")", "except", "Exception", "as", "e", ":", "# pylint: disable=broad-except", "logging", ".", "exception", "(", "\"Error while calculating stats: %s\"", ",", "e", ")", "raise" ]
Retrieve all the clients for the AbstractClientStatsCollectors.
[ "Retrieve", "all", "the", "clients", "for", "the", "AbstractClientStatsCollectors", "." ]
python
train
cloud-custodian/cloud-custodian
tools/c7n_logexporter/c7n_logexporter/exporter.py
https://github.com/cloud-custodian/cloud-custodian/blob/52ef732eb3d7bc939d1579faf519314814695c08/tools/c7n_logexporter/c7n_logexporter/exporter.py#L372-L388
def filter_creation_date(groups, start, end): """Filter log groups by their creation date. Also sets a group-specific export start ('exportStart') to the later of the group's creation date or start. """ results = [] for g in groups: created = datetime.fromtimestamp(g['creationTime'] / 1000.0) if created > end: continue if created > start: g['exportStart'] = created else: g['exportStart'] = start results.append(g) return results
[ "def", "filter_creation_date", "(", "groups", ",", "start", ",", "end", ")", ":", "results", "=", "[", "]", "for", "g", "in", "groups", ":", "created", "=", "datetime", ".", "fromtimestamp", "(", "g", "[", "'creationTime'", "]", "/", "1000.0", ")", "if", "created", ">", "end", ":", "continue", "if", "created", ">", "start", ":", "g", "[", "'exportStart'", "]", "=", "created", "else", ":", "g", "[", "'exportStart'", "]", "=", "start", "results", ".", "append", "(", "g", ")", "return", "results" ]
Filter log groups by their creation date. Also sets a group-specific export start ('exportStart') to the later of the group's creation date or start.
[ "Filter", "log", "groups", "by", "their", "creation", "date", "." ]
python
train
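A self-contained check of the window logic above, assuming filter_creation_date is in scope; the timestamps are invented and expressed in milliseconds, matching the creationTime field the code divides by 1000.

from datetime import datetime

start, end = datetime(2019, 1, 10), datetime(2019, 1, 20)
groups = [
    {'logGroupName': 'old', 'creationTime': datetime(2019, 1, 1).timestamp() * 1000},
    {'logGroupName': 'mid', 'creationTime': datetime(2019, 1, 15).timestamp() * 1000},
    {'logGroupName': 'new', 'creationTime': datetime(2019, 1, 25).timestamp() * 1000},
]

for g in filter_creation_date(groups, start, end):
    print(g['logGroupName'], g['exportStart'])
# 'old' keeps exportStart == start, 'mid' gets its own creation date,
# and 'new' is dropped because it was created after the window ends.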
logston/py3s3
py3s3/storage.py
https://github.com/logston/py3s3/blob/1910ca60c53a53d839d6f7b09c05b555f3bfccf4/py3s3/storage.py#L276-L298
def _put_file(self, file): """Send PUT request to S3 with file contents""" post_params = { 'file_size': file.size, 'file_hash': file.md5hash(), 'content_type': self._get_content_type(file), } headers = self._request_headers('PUT', file.prefixed_name, post_params=post_params) with closing(HTTPConnection(self.netloc)) as conn: conn.request('PUT', file.prefixed_name, file.read(), headers=headers) response = conn.getresponse() if response.status not in (200,): raise S3IOError( 'py3s3 PUT error. ' 'Response status: {}. ' 'Reason: {}. ' 'Response Text: \n' '{}'.format(response.status, response.reason, response.read()))
[ "def", "_put_file", "(", "self", ",", "file", ")", ":", "post_params", "=", "{", "'file_size'", ":", "file", ".", "size", ",", "'file_hash'", ":", "file", ".", "md5hash", "(", ")", ",", "'content_type'", ":", "self", ".", "_get_content_type", "(", "file", ")", ",", "}", "headers", "=", "self", ".", "_request_headers", "(", "'PUT'", ",", "file", ".", "prefixed_name", ",", "post_params", "=", "post_params", ")", "with", "closing", "(", "HTTPConnection", "(", "self", ".", "netloc", ")", ")", "as", "conn", ":", "conn", ".", "request", "(", "'PUT'", ",", "file", ".", "prefixed_name", ",", "file", ".", "read", "(", ")", ",", "headers", "=", "headers", ")", "response", "=", "conn", ".", "getresponse", "(", ")", "if", "response", ".", "status", "not", "in", "(", "200", ",", ")", ":", "raise", "S3IOError", "(", "'py3s3 PUT error. '", "'Response status: {}. '", "'Reason: {}. '", "'Response Text: \\n'", "'{}'", ".", "format", "(", "response", ".", "status", ",", "response", ".", "reason", ",", "response", ".", "read", "(", ")", ")", ")" ]
Send PUT request to S3 with file contents
[ "Send", "PUT", "request", "to", "S3", "with", "file", "contents" ]
python
train
Miserlou/SoundScrape
soundscrape/soundscrape.py
https://github.com/Miserlou/SoundScrape/blob/efc63b99ce7e78b352e2ba22d5e51f83445546d7/soundscrape/soundscrape.py#L731-L774
def scrape_mixcloud_url(mc_url, num_tracks=sys.maxsize, folders=False, custom_path=''): """ Returns: list: filenames to open """ try: data = get_mixcloud_data(mc_url) except Exception as e: puts_safe(colored.red("Problem downloading ") + mc_url) print(e) return [] filenames = [] track_artist = sanitize_filename(data['artist']) track_title = sanitize_filename(data['title']) track_filename = track_artist + ' - ' + track_title + data['mp3_url'][-4:] if folders: track_artist_path = join(custom_path, track_artist) if not exists(track_artist_path): mkdir(track_artist_path) track_filename = join(track_artist_path, track_filename) if exists(track_filename): puts_safe(colored.yellow("Skipping") + colored.white(': ' + data['title'] + " - it already exists!")) return [] else: track_filename = join(custom_path, track_filename) puts_safe(colored.green("Downloading") + colored.white( ': ' + data['artist'] + " - " + data['title'] + " (" + track_filename[-4:] + ")")) download_file(data['mp3_url'], track_filename) if track_filename[-4:] == '.mp3': tag_file(track_filename, artist=data['artist'], title=data['title'], year=data['year'], genre="Mix", artwork_url=data['artwork_url']) filenames.append(track_filename) return filenames
[ "def", "scrape_mixcloud_url", "(", "mc_url", ",", "num_tracks", "=", "sys", ".", "maxsize", ",", "folders", "=", "False", ",", "custom_path", "=", "''", ")", ":", "try", ":", "data", "=", "get_mixcloud_data", "(", "mc_url", ")", "except", "Exception", "as", "e", ":", "puts_safe", "(", "colored", ".", "red", "(", "\"Problem downloading \"", ")", "+", "mc_url", ")", "print", "(", "e", ")", "return", "[", "]", "filenames", "=", "[", "]", "track_artist", "=", "sanitize_filename", "(", "data", "[", "'artist'", "]", ")", "track_title", "=", "sanitize_filename", "(", "data", "[", "'title'", "]", ")", "track_filename", "=", "track_artist", "+", "' - '", "+", "track_title", "+", "data", "[", "'mp3_url'", "]", "[", "-", "4", ":", "]", "if", "folders", ":", "track_artist_path", "=", "join", "(", "custom_path", ",", "track_artist", ")", "if", "not", "exists", "(", "track_artist_path", ")", ":", "mkdir", "(", "track_artist_path", ")", "track_filename", "=", "join", "(", "track_artist_path", ",", "track_filename", ")", "if", "exists", "(", "track_filename", ")", ":", "puts_safe", "(", "colored", ".", "yellow", "(", "\"Skipping\"", ")", "+", "colored", ".", "white", "(", "': '", "+", "data", "[", "'title'", "]", "+", "\" - it already exists!\"", ")", ")", "return", "[", "]", "else", ":", "track_filename", "=", "join", "(", "custom_path", ",", "track_filename", ")", "puts_safe", "(", "colored", ".", "green", "(", "\"Downloading\"", ")", "+", "colored", ".", "white", "(", "': '", "+", "data", "[", "'artist'", "]", "+", "\" - \"", "+", "data", "[", "'title'", "]", "+", "\" (\"", "+", "track_filename", "[", "-", "4", ":", "]", "+", "\")\"", ")", ")", "download_file", "(", "data", "[", "'mp3_url'", "]", ",", "track_filename", ")", "if", "track_filename", "[", "-", "4", ":", "]", "==", "'.mp3'", ":", "tag_file", "(", "track_filename", ",", "artist", "=", "data", "[", "'artist'", "]", ",", "title", "=", "data", "[", "'title'", "]", ",", "year", "=", "data", "[", "'year'", "]", ",", "genre", "=", "\"Mix\"", ",", "artwork_url", "=", "data", "[", "'artwork_url'", "]", ")", "filenames", ".", "append", "(", "track_filename", ")", "return", "filenames" ]
Returns: list: filenames to open
[ "Returns", ":", "list", ":", "filenames", "to", "open" ]
python
train
ScriptSmith/socialreaper
socialreaper/tools.py
https://github.com/ScriptSmith/socialreaper/blob/87fcc3b74bbed6c4f8e7f49a5f0eb8a616cf38da/socialreaper/tools.py#L132-L194
def to_csv(data, field_names=None, filename='data.csv', overwrite=True, write_headers=True, append=False, flat=True, primary_fields=None, sort_fields=True): """ DEPRECATED Write a list of dicts to a csv file :param data: List of dicts :param field_names: The list column names :param filename: The name of the file :param overwrite: Overwrite the file if exists :param write_headers: Write the headers to the csv file :param append: Write new rows if the file exists :param flat: Flatten the dictionary before saving :param primary_fields: The first columns of the csv file :param sort_fields: Sort the field names alphabetically :return: None """ # Don't overwrite if not specified if not overwrite and path.isfile(filename): raise FileExistsError('The file already exists') # Replace file if append not specified write_type = 'w' if not append else 'a' # Flatten if flat is specified, or there are no predefined field names if flat or not field_names: data = [flatten(datum) for datum in data] # Fill in gaps between dicts with empty string if not field_names: field_names, data = fill_gaps(data) # Sort fields if specified if sort_fields: field_names.sort() # If there are primary fields, move the field names to the front and sort # based on first field if primary_fields: for key in primary_fields[::-1]: field_names.insert(0, field_names.pop(field_names.index(key))) data = sorted(data, key=lambda k: k[field_names[0]], reverse=True) # Write the file with open(filename, write_type, encoding='utf-8') as f: writer = csv.DictWriter(f, fieldnames=field_names, lineterminator='\n') if not append or write_headers: writer.writeheader() # Write rows containing fields in field names for datum in data: for key in list(datum.keys()): if key not in field_names: del datum[key] elif type(datum[key]) is str: datum[key] = datum[key].strip() datum[key] = str(datum[key]) writer.writerow(datum)
[ "def", "to_csv", "(", "data", ",", "field_names", "=", "None", ",", "filename", "=", "'data.csv'", ",", "overwrite", "=", "True", ",", "write_headers", "=", "True", ",", "append", "=", "False", ",", "flat", "=", "True", ",", "primary_fields", "=", "None", ",", "sort_fields", "=", "True", ")", ":", "# Don't overwrite if not specified\r", "if", "not", "overwrite", "and", "path", ".", "isfile", "(", "filename", ")", ":", "raise", "FileExistsError", "(", "'The file already exists'", ")", "# Replace file if append not specified\r", "write_type", "=", "'w'", "if", "not", "append", "else", "'a'", "# Flatten if flat is specified, or there are no predefined field names\r", "if", "flat", "or", "not", "field_names", ":", "data", "=", "[", "flatten", "(", "datum", ")", "for", "datum", "in", "data", "]", "# Fill in gaps between dicts with empty string\r", "if", "not", "field_names", ":", "field_names", ",", "data", "=", "fill_gaps", "(", "data", ")", "# Sort fields if specified\r", "if", "sort_fields", ":", "field_names", ".", "sort", "(", ")", "# If there are primary fields, move the field names to the front and sort\r", "# based on first field\r", "if", "primary_fields", ":", "for", "key", "in", "primary_fields", "[", ":", ":", "-", "1", "]", ":", "field_names", ".", "insert", "(", "0", ",", "field_names", ".", "pop", "(", "field_names", ".", "index", "(", "key", ")", ")", ")", "data", "=", "sorted", "(", "data", ",", "key", "=", "lambda", "k", ":", "k", "[", "field_names", "[", "0", "]", "]", ",", "reverse", "=", "True", ")", "# Write the file\r", "with", "open", "(", "filename", ",", "write_type", ",", "encoding", "=", "'utf-8'", ")", "as", "f", ":", "writer", "=", "csv", ".", "DictWriter", "(", "f", ",", "fieldnames", "=", "field_names", ",", "lineterminator", "=", "'\\n'", ")", "if", "not", "append", "or", "write_headers", ":", "writer", ".", "writeheader", "(", ")", "# Write rows containing fields in field names\r", "for", "datum", "in", "data", ":", "for", "key", "in", "list", "(", "datum", ".", "keys", "(", ")", ")", ":", "if", "key", "not", "in", "field_names", ":", "del", "datum", "[", "key", "]", "elif", "type", "(", "datum", "[", "key", "]", ")", "is", "str", ":", "datum", "[", "key", "]", "=", "datum", "[", "key", "]", ".", "strip", "(", ")", "datum", "[", "key", "]", "=", "str", "(", "datum", "[", "key", "]", ")", "writer", ".", "writerow", "(", "datum", ")" ]
DEPRECATED Write a list of dicts to a csv file :param data: List of dicts :param field_names: The list column names :param filename: The name of the file :param overwrite: Overwrite the file if exists :param write_headers: Write the headers to the csv file :param append: Write new rows if the file exists :param flat: Flatten the dictionary before saving :param primary_fields: The first columns of the csv file :param sort_fields: Sort the field names alphabetically :return: None
[ "DEPRECATED", "Write", "a", "list", "of", "dicts", "to", "a", "csv", "file", ":", "param", "data", ":", "List", "of", "dicts", ":", "param", "field_names", ":", "The", "list", "column", "names", ":", "param", "filename", ":", "The", "name", "of", "the", "file", ":", "param", "overwrite", ":", "Overwrite", "the", "file", "if", "exists", ":", "param", "write_headers", ":", "Write", "the", "headers", "to", "the", "csv", "file", ":", "param", "append", ":", "Write", "new", "rows", "if", "the", "file", "exists", ":", "param", "flat", ":", "Flatten", "the", "dictionary", "before", "saving", ":", "param", "primary_fields", ":", "The", "first", "columns", "of", "the", "csv", "file", ":", "param", "sort_fields", ":", "Sort", "the", "field", "names", "alphabetically", ":", "return", ":", "None" ]
python
valid
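A hedged usage sketch for the deprecated helper above; it assumes the module's flatten and fill_gaps utilities are importable alongside to_csv.

rows = [
    {'id': 1, 'user': {'name': 'ada'}},
    {'id': 2, 'user': {'name': 'lin'}, 'extra': 'x'},
]

# Nested dicts are flattened and gaps between the two row shapes are
# filled with empty strings before the sorted header row is written.
to_csv(rows, filename='users.csv', overwrite=True)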
lucaslamounier/USGSDownload
usgsdownload/usgs.py
https://github.com/lucaslamounier/USGSDownload/blob/0969483ea9f9648aa17b099f36d2e1010488b2a4/usgsdownload/usgs.py#L86-L92
def validate_sceneInfo(self): """Check that the scene name carries a known satellite prefix. Raises WrongSceneNameError if the scene name is wrong. """ if self.sceneInfo.prefix not in self.__satellitesMap: raise WrongSceneNameError('USGS Downloader: Prefix of %s (%s) is invalid' % (self.sceneInfo.name, self.sceneInfo.prefix))
[ "def", "validate_sceneInfo", "(", "self", ")", ":", "if", "self", ".", "sceneInfo", ".", "prefix", "not", "in", "self", ".", "__satellitesMap", ":", "raise", "WrongSceneNameError", "(", "'USGS Downloader: Prefix of %s (%s) is invalid'", "%", "(", "self", ".", "sceneInfo", ".", "name", ",", "self", ".", "sceneInfo", ".", "prefix", ")", ")" ]
Check that the scene name carries a known satellite prefix. Raises WrongSceneNameError if the scene name is wrong.
[ "Check", "scene", "name", "and", "whether", "remote", "file", "exists", ".", "Raises", "WrongSceneNameError", "if", "the", "scene", "name", "is", "wrong", "." ]
python
test
pyrogram/pyrogram
pyrogram/client/client.py
https://github.com/pyrogram/pyrogram/blob/e7258a341ba905cfa86264c22040654db732ec1c/pyrogram/client/client.py#L467-L484
def remove_handler(self, handler: Handler, group: int = 0): """Removes a previously-added update handler. Make sure to provide the right group that the handler was added in. You can use the return value of the :meth:`add_handler` method, a tuple of (handler, group), and pass it directly. Args: handler (``Handler``): The handler to be removed. group (``int``, *optional*): The group identifier, defaults to 0. """ if isinstance(handler, DisconnectHandler): self.disconnect_handler = None else: self.dispatcher.remove_handler(handler, group)
[ "def", "remove_handler", "(", "self", ",", "handler", ":", "Handler", ",", "group", ":", "int", "=", "0", ")", ":", "if", "isinstance", "(", "handler", ",", "DisconnectHandler", ")", ":", "self", ".", "disconnect_handler", "=", "None", "else", ":", "self", ".", "dispatcher", ".", "remove_handler", "(", "handler", ",", "group", ")" ]
Removes a previously-added update handler. Make sure to provide the right group that the handler was added in. You can use the return value of the :meth:`add_handler` method, a tuple of (handler, group), and pass it directly. Args: handler (``Handler``): The handler to be removed. group (``int``, *optional*): The group identifier, defaults to 0.
[ "Removes", "a", "previously", "-", "added", "update", "handler", "." ]
python
train
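A sketch of the add/remove round trip the docstring describes; the session name and handler body are placeholders.

from pyrogram import Client, MessageHandler

app = Client("my_account")  # placeholder session name

def on_message(client, message):
    print(message.text)

# add_handler returns the (handler, group) tuple mentioned above, so it
# can be unpacked straight back into remove_handler.
handler = app.add_handler(MessageHandler(on_message), group=1)
app.remove_handler(*handler)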
pysathq/pysat
pysat/solvers.py
https://github.com/pysathq/pysat/blob/522742e8f2d4c6ac50ecd9087f7a346206774c67/pysat/solvers.py#L1876-L1882
def conf_budget(self, budget): """ Set limit on the number of conflicts. """ if self.maplesat: pysolvers.maplechrono_cbudget(self.maplesat, budget)
[ "def", "conf_budget", "(", "self", ",", "budget", ")", ":", "if", "self", ".", "maplesat", ":", "pysolvers", ".", "maplechrono_cbudget", "(", "self", ".", "maplesat", ",", "budget", ")" ]
Set limit on the number of conflicts.
[ "Set", "limit", "on", "the", "number", "of", "conflicts", "." ]
python
train
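A sketch of budget-limited solving around the method above; 'mcb' is the pysat name for the MapleChrono backend this snippet targets.

from pysat.solvers import Solver

with Solver(name='mcb') as s:
    s.add_clause([-1, 2])
    s.add_clause([-2, 3])
    s.conf_budget(2000)  # stop after at most 2000 conflicts
    # solve_limited() returns True/False normally, or None when the
    # conflict budget ran out before a verdict was reached.
    print(s.solve_limited())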
devoperate/chronos
chronos/cli.py
https://github.com/devoperate/chronos/blob/5ae6047c4f13db9f5e85a0c72a3dc47f05a8d7bd/chronos/cli.py#L91-L111
def bump(args: argparse.Namespace) -> None: """ :args: An argparse.Namespace object. This function is bound to the 'bump' sub-command. It increments the version integer of the user's choice ('major', 'minor', or 'patch'). """ try: last_tag = last_git_release_tag(git_tags()) except NoGitTagsException: print(SemVer(0, 1, 0)) exit(0) last_ver = git_tag_to_semver(last_tag) if args.type == 'patch': print(last_ver.bump_patch()) elif args.type == 'minor': print(last_ver.bump_minor()) elif args.type == 'major': print(last_ver.bump_major())
[ "def", "bump", "(", "args", ":", "argparse", ".", "Namespace", ")", "->", "None", ":", "try", ":", "last_tag", "=", "last_git_release_tag", "(", "git_tags", "(", ")", ")", "except", "NoGitTagsException", ":", "print", "(", "SemVer", "(", "0", ",", "1", ",", "0", ")", ")", "exit", "(", "0", ")", "last_ver", "=", "git_tag_to_semver", "(", "last_tag", ")", "if", "args", ".", "type", "==", "'patch'", ":", "print", "(", "last_ver", ".", "bump_patch", "(", ")", ")", "elif", "args", ".", "type", "==", "'minor'", ":", "print", "(", "last_ver", ".", "bump_minor", "(", ")", ")", "elif", "args", ".", "type", "==", "'major'", ":", "print", "(", "last_ver", ".", "bump_major", "(", ")", ")" ]
:args: An argparse.Namespace object. This function is bound to the 'bump' sub-command. It increments the version integer of the user's choice ('major', 'minor', or 'patch').
[ ":", "args", ":", "An", "argparse", ".", "Namespace", "object", "." ]
python
train
bcbio/bcbio-nextgen
bcbio/workflow/template.py
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/workflow/template.py#L421-L461
def _add_metadata(item, metadata, remotes, only_metadata=False): """Add metadata information from CSV file to current item. Retrieves metadata based on 'description' parsed from input CSV file. Adds to object and handles special keys: - `description`: A new description for the item. Used to relabel items based on the pre-determined description from fastq name or BAM read groups. - Keys matching supported names in the algorithm section map to key/value pairs there instead of metadata. """ for check_key in [item["description"]] + _get_file_keys(item) + _get_vrn_keys(item): item_md = metadata.get(check_key) if item_md: break if not item_md: item_md = _find_glob_metadata(item["files"], metadata) if remotes.get("region"): item["algorithm"]["variant_regions"] = remotes["region"] TOP_LEVEL = set(["description", "genome_build", "lane", "vrn_file", "files", "analysis"]) keep_sample = True if item_md and len(item_md) > 0: if "metadata" not in item: item["metadata"] = {} for k, v in item_md.items(): if v: if k in TOP_LEVEL: item[k] = v elif k in run_info.ALGORITHM_KEYS: v = _handle_special_yaml_cases(v) item["algorithm"][k] = v else: v = _handle_special_yaml_cases(v) item["metadata"][k] = v elif len(metadata) > 0: warn = "Dropped sample" if only_metadata else "Added minimal sample information" print("WARNING: %s: metadata not found for %s, %s" % (warn, item["description"], [os.path.basename(f) for f in item["files"]])) keep_sample = not only_metadata if tz.get_in(["metadata", "ped"], item): item["metadata"] = _add_ped_metadata(item["description"], item["metadata"]) return item if keep_sample else None
[ "def", "_add_metadata", "(", "item", ",", "metadata", ",", "remotes", ",", "only_metadata", "=", "False", ")", ":", "for", "check_key", "in", "[", "item", "[", "\"description\"", "]", "]", "+", "_get_file_keys", "(", "item", ")", "+", "_get_vrn_keys", "(", "item", ")", ":", "item_md", "=", "metadata", ".", "get", "(", "check_key", ")", "if", "item_md", ":", "break", "if", "not", "item_md", ":", "item_md", "=", "_find_glob_metadata", "(", "item", "[", "\"files\"", "]", ",", "metadata", ")", "if", "remotes", ".", "get", "(", "\"region\"", ")", ":", "item", "[", "\"algorithm\"", "]", "[", "\"variant_regions\"", "]", "=", "remotes", "[", "\"region\"", "]", "TOP_LEVEL", "=", "set", "(", "[", "\"description\"", ",", "\"genome_build\"", ",", "\"lane\"", ",", "\"vrn_file\"", ",", "\"files\"", ",", "\"analysis\"", "]", ")", "keep_sample", "=", "True", "if", "item_md", "and", "len", "(", "item_md", ")", ">", "0", ":", "if", "\"metadata\"", "not", "in", "item", ":", "item", "[", "\"metadata\"", "]", "=", "{", "}", "for", "k", ",", "v", "in", "item_md", ".", "items", "(", ")", ":", "if", "v", ":", "if", "k", "in", "TOP_LEVEL", ":", "item", "[", "k", "]", "=", "v", "elif", "k", "in", "run_info", ".", "ALGORITHM_KEYS", ":", "v", "=", "_handle_special_yaml_cases", "(", "v", ")", "item", "[", "\"algorithm\"", "]", "[", "k", "]", "=", "v", "else", ":", "v", "=", "_handle_special_yaml_cases", "(", "v", ")", "item", "[", "\"metadata\"", "]", "[", "k", "]", "=", "v", "elif", "len", "(", "metadata", ")", ">", "0", ":", "warn", "=", "\"Dropped sample\"", "if", "only_metadata", "else", "\"Added minimal sample information\"", "print", "(", "\"WARNING: %s: metadata not found for %s, %s\"", "%", "(", "warn", ",", "item", "[", "\"description\"", "]", ",", "[", "os", ".", "path", ".", "basename", "(", "f", ")", "for", "f", "in", "item", "[", "\"files\"", "]", "]", ")", ")", "keep_sample", "=", "not", "only_metadata", "if", "tz", ".", "get_in", "(", "[", "\"metadata\"", ",", "\"ped\"", "]", ",", "item", ")", ":", "item", "[", "\"metadata\"", "]", "=", "_add_ped_metadata", "(", "item", "[", "\"description\"", "]", ",", "item", "[", "\"metadata\"", "]", ")", "return", "item", "if", "keep_sample", "else", "None" ]
Add metadata information from CSV file to current item. Retrieves metadata based on 'description' parsed from input CSV file. Adds to object and handles special keys: - `description`: A new description for the item. Used to relabel items based on the pre-determined description from fastq name or BAM read groups. - Keys matching supported names in the algorithm section map to key/value pairs there instead of metadata.
[ "Add", "metadata", "information", "from", "CSV", "file", "to", "current", "item", "." ]
python
train
benzrf/parthial
parthial/context.py
https://github.com/benzrf/parthial/blob/ab1e316aec87ed34dda0ec0e145fe0c8cc8e907f/parthial/context.py#L51-L59
def new_scope(self, new_scope={}): """Add a new innermost scope for the duration of the with block. Args: new_scope (dict-like): The scope to add. """ old_scopes, self.scopes = self.scopes, self.scopes.new_child(new_scope) yield self.scopes = old_scopes
[ "def", "new_scope", "(", "self", ",", "new_scope", "=", "{", "}", ")", ":", "old_scopes", ",", "self", ".", "scopes", "=", "self", ".", "scopes", ",", "self", ".", "scopes", ".", "new_child", "(", "new_scope", ")", "yield", "self", ".", "scopes", "=", "old_scopes" ]
Add a new innermost scope for the duration of the with block. Args: new_scope (dict-like): The scope to add.
[ "Add", "a", "new", "innermost", "scope", "for", "the", "duration", "of", "the", "with", "block", "." ]
python
train
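A self-contained sketch of the same ChainMap scoping pattern outside the parthial class; the Env class here is illustrative. Wrapping the yield in try/finally (which the original omits) keeps the outer scopes restored even if the with block raises.

from collections import ChainMap
from contextlib import contextmanager

class Env:
    def __init__(self):
        self.scopes = ChainMap({'x': 1})

    @contextmanager
    def new_scope(self, new_scope={}):
        old, self.scopes = self.scopes, self.scopes.new_child(new_scope)
        try:
            yield
        finally:
            self.scopes = old  # restored even on exceptions

env = Env()
with env.new_scope({'x': 2}):
    print(env.scopes['x'])  # 2 -- the innermost scope shadows the outer
print(env.scopes['x'])      # 1 -- outer scope restored on exit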
genepattern/genepattern-python
gp/modules.py
https://github.com/genepattern/genepattern-python/blob/9478ea65362b91c72a94f7300c3de8d710bebb71/gp/modules.py#L665-L691
def register(self, task_spec): """ Registers a module specification with the LSID authority. Validates that it possesses an LSID assigned by the authority. Raises an exception if registration wasn't successful. :param task_spec: :return: boolean - True if registration was successful """ if self.validate(task_spec.lsid): # Add the module name to the map self.registered_modules[task_spec.lsid] = task_spec.name # Increment module count self.module_count += 1 # Write the updated LSID authority file and reload with open(self.authority, 'w') as authority_file: json.dump({ 'base_lsid': self.base_lsid, 'module_count': self.module_count, 'registered_modules': self.registered_modules, }, authority_file, sort_keys=True, indent=4, separators=(',', ': ')) self._load_lsid_authority() else: raise RuntimeError("Module LSID id not valid: " + str(task_spec.lsid)) return True
[ "def", "register", "(", "self", ",", "task_spec", ")", ":", "if", "self", ".", "validate", "(", "task_spec", ".", "lsid", ")", ":", "# Add the module name to the map", "self", ".", "registered_modules", "[", "task_spec", ".", "lsid", "]", "=", "task_spec", ".", "name", "# Increment module count", "self", ".", "module_count", "+=", "1", "# Write the updated LSID authority file and reload", "with", "open", "(", "self", ".", "authority", ",", "'w'", ")", "as", "authority_file", ":", "json", ".", "dump", "(", "{", "'base_lsid'", ":", "self", ".", "base_lsid", ",", "'module_count'", ":", "self", ".", "module_count", ",", "'registered_modules'", ":", "self", ".", "registered_modules", ",", "}", ",", "authority_file", ",", "sort_keys", "=", "True", ",", "indent", "=", "4", ",", "separators", "=", "(", "','", ",", "': '", ")", ")", "self", ".", "_load_lsid_authority", "(", ")", "else", ":", "raise", "RuntimeError", "(", "\"Module LSID id not valid: \"", "+", "str", "(", "task_spec", ".", "lsid", ")", ")", "return", "True" ]
Registers a module specification with the LSID authority. Validates that it possesses an LSID assigned by the authority. Raises an exception if registration wasn't successful. :param task_spec: :return: boolean - True if registration was successful
[ "Registers", "a", "module", "specification", "with", "the", "LSID", "authority", ".", "Validates", "that", "it", "possesses", "an", "LSID", "assigned", "by", "the", "authority", ".", "Raises", "an", "exception", "if", "registration", "wasn", "t", "successful", ".", ":", "param", "task_spec", ":", ":", "return", ":", "boolean", "-", "True", "if", "registration", "was", "successful" ]
python
train
numenta/htmresearch
htmresearch/algorithms/column_pooler.py
https://github.com/numenta/htmresearch/blob/70c096b09a577ea0432c3f3bfff4442d4871b7aa/htmresearch/algorithms/column_pooler.py#L643-L655
def _sampleRange(rng, start, end, step, k): """ Equivalent to: random.sample(xrange(start, end, step), k) except it uses our random number generator. This wouldn't need to create the arange if it were implemented in C. """ array = numpy.empty(k, dtype="uint32") rng.sample(numpy.arange(start, end, step, dtype="uint32"), array) return array
[ "def", "_sampleRange", "(", "rng", ",", "start", ",", "end", ",", "step", ",", "k", ")", ":", "array", "=", "numpy", ".", "empty", "(", "k", ",", "dtype", "=", "\"uint32\"", ")", "rng", ".", "sample", "(", "numpy", ".", "arange", "(", "start", ",", "end", ",", "step", ",", "dtype", "=", "\"uint32\"", ")", ",", "array", ")", "return", "array" ]
Equivalent to: random.sample(xrange(start, end, step), k) except it uses our random number generator. This wouldn't need to create the arange if it were implemented in C.
[ "Equivalent", "to", ":" ]
python
train
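A sketch of the stdlib equivalent named in the docstring; the nupic-style rng used upstream is replaced by random.sample here, so the draws differ but the contract is the same.

import random

start, end, step, k = 0, 100, 5, 4
# Four distinct values drawn without replacement from range(0, 100, 5).
print(sorted(random.sample(range(start, end, step), k)))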
gmr/rejected
rejected/consumer.py
https://github.com/gmr/rejected/blob/610a3e1401122ecb98d891b6795cca0255e5b044/rejected/consumer.py#L532-L550
def require_setting(self, name, feature='this feature'): """Raises an exception if the given app setting is not defined. As a generalization, this method should be called from a Consumer's :py:meth:`~rejected.consumer.Consumer.initialize` method. If a required setting is not found, this method will cause the consumer to shut down prior to receiving any messages from RabbitMQ. :param name: The parameter name :type name: :class:`str` :param feature: A friendly name for the setting feature :type feature: :class:`str` :raises: :exc:`~rejected.errors.ConfigurationException` """ if name not in self.settings: raise ConfigurationException( "You must define the '{}' setting in your " "application to use {}".format(name, feature))
[ "def", "require_setting", "(", "self", ",", "name", ",", "feature", "=", "'this feature'", ")", ":", "if", "name", "not", "in", "self", ".", "settings", ":", "raise", "ConfigurationException", "(", "\"You must define the '{}' setting in your \"", "\"application to use {}\"", ".", "format", "(", "name", ",", "feature", ")", ")" ]
Raises an exception if the given app setting is not defined. As a generalization, this method should be called from a Consumer's :py:meth:`~rejected.consumer.Consumer.initialize` method. If a required setting is not found, this method will cause the consumer to shut down prior to receiving any messages from RabbitMQ. :param name: The parameter name :type name: :class:`str` :param feature: A friendly name for the setting feature :type feature: :class:`str` :raises: :exc:`~rejected.errors.ConfigurationException`
[ "Raises", "an", "exception", "if", "the", "given", "app", "setting", "is", "not", "defined", "." ]
python
train
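A hedged sketch of the initialize-time guard the docstring recommends; the setting name and feature label are placeholders.

from rejected import consumer

class MyConsumer(consumer.Consumer):
    def initialize(self):
        # Shuts the consumer down before any message is consumed if the
        # 'api_key' setting is absent from the application config.
        self.require_setting('api_key', 'the upstream API client')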
googleapis/oauth2client
oauth2client/contrib/keyring_storage.py
https://github.com/googleapis/oauth2client/blob/50d20532a748f18e53f7d24ccbe6647132c979a9/oauth2client/contrib/keyring_storage.py#L62-L78
def locked_get(self): """Retrieve Credential from file. Returns: oauth2client.client.Credentials """ credentials = None content = keyring.get_password(self._service_name, self._user_name) if content is not None: try: credentials = client.Credentials.new_from_json(content) credentials.set_store(self) except ValueError: pass return credentials
[ "def", "locked_get", "(", "self", ")", ":", "credentials", "=", "None", "content", "=", "keyring", ".", "get_password", "(", "self", ".", "_service_name", ",", "self", ".", "_user_name", ")", "if", "content", "is", "not", "None", ":", "try", ":", "credentials", "=", "client", ".", "Credentials", ".", "new_from_json", "(", "content", ")", "credentials", ".", "set_store", "(", "self", ")", "except", "ValueError", ":", "pass", "return", "credentials" ]
Retrieve Credential from file. Returns: oauth2client.client.Credentials
[ "Retrieve", "Credential", "from", "file", "." ]
python
valid
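A usage sketch for the keyring-backed Storage this method belongs to; the service and user names are placeholders.

from oauth2client.contrib.keyring_storage import Storage

store = Storage('my-app', 'alice@example.com')
credentials = store.get()  # acquires the lock, then calls locked_get()
if credentials is None:
    print('no stored credential; run the OAuth flow first')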
theislab/scanpy
scanpy/neighbors/__init__.py
https://github.com/theislab/scanpy/blob/9e4e5ee02e04cf618872d9b098e24f0542e8b227/scanpy/neighbors/__init__.py#L579-L661
def compute_neighbors( self, n_neighbors: int = 30, knn: bool = True, n_pcs: Optional[int] = None, use_rep: Optional[str] = None, method: str = 'umap', random_state: Optional[Union[RandomState, int]] = 0, write_knn_indices: bool = False, metric: str = 'euclidean', metric_kwds: Mapping[str, Any] = {} ) -> None: """\ Compute distances and connectivities of neighbors. Parameters ---------- n_neighbors Use this number of nearest neighbors. knn Restrict result to `n_neighbors` nearest neighbors. {n_pcs} {use_rep} Returns ------- Writes sparse graph attributes `.distances` and `.connectivities`. Also writes `.knn_indices` and `.knn_distances` if `write_knn_indices==True`. """ if n_neighbors > self._adata.shape[0]: # very small datasets n_neighbors = 1 + int(0.5*self._adata.shape[0]) logg.warn('n_obs too small: adjusting to `n_neighbors = {}`' .format(n_neighbors)) if method == 'umap' and not knn: raise ValueError('`method = \'umap\' only with `knn = True`.') if method not in {'umap', 'gauss'}: raise ValueError('`method` needs to be \'umap\' or \'gauss\'.') if self._adata.shape[0] >= 10000 and not knn: logg.warn( 'Using high n_obs without `knn=True` takes a lot of memory...') self.n_neighbors = n_neighbors self.knn = knn X = choose_representation(self._adata, use_rep=use_rep, n_pcs=n_pcs) # neighbor search use_dense_distances = (metric == 'euclidean' and X.shape[0] < 8192) or knn == False if use_dense_distances: _distances = pairwise_distances(X, metric=metric, **metric_kwds) knn_indices, knn_distances = get_indices_distances_from_dense_matrix( _distances, n_neighbors) if knn: self._distances = get_sparse_matrix_from_indices_distances_numpy( knn_indices, knn_distances, X.shape[0], n_neighbors) else: self._distances = _distances else: # non-euclidean case and approx nearest neighbors if X.shape[0] < 4096: X = pairwise_distances(X, metric=metric, **metric_kwds) metric = 'precomputed' knn_indices, knn_distances, _ = compute_neighbors_umap( X, n_neighbors, random_state, metric=metric, metric_kwds=metric_kwds) #self._rp_forest = _make_forest_dict(forest) # write indices as attributes if write_knn_indices: self.knn_indices = knn_indices self.knn_distances = knn_distances logg.msg('computed neighbors', t=True, v=4) if not use_dense_distances or method == 'umap': # we need self._distances also for method == 'gauss' if we didn't # use dense distances self._distances, self._connectivities = compute_connectivities_umap( knn_indices, knn_distances, self._adata.shape[0], self.n_neighbors) # overwrite the umap connectivities if method is 'gauss' # self._distances is unaffected by this if method == 'gauss': self._compute_connectivities_diffmap() logg.msg('computed connectivities', t=True, v=4) self._number_connected_components = 1 if issparse(self._connectivities): from scipy.sparse.csgraph import connected_components self._connected_components = connected_components(self._connectivities) self._number_connected_components = self._connected_components[0]
[ "def", "compute_neighbors", "(", "self", ",", "n_neighbors", ":", "int", "=", "30", ",", "knn", ":", "bool", "=", "True", ",", "n_pcs", ":", "Optional", "[", "int", "]", "=", "None", ",", "use_rep", ":", "Optional", "[", "str", "]", "=", "None", ",", "method", ":", "str", "=", "'umap'", ",", "random_state", ":", "Optional", "[", "Union", "[", "RandomState", ",", "int", "]", "]", "=", "0", ",", "write_knn_indices", ":", "bool", "=", "False", ",", "metric", ":", "str", "=", "'euclidean'", ",", "metric_kwds", ":", "Mapping", "[", "str", ",", "Any", "]", "=", "{", "}", ")", "->", "None", ":", "if", "n_neighbors", ">", "self", ".", "_adata", ".", "shape", "[", "0", "]", ":", "# very small datasets", "n_neighbors", "=", "1", "+", "int", "(", "0.5", "*", "self", ".", "_adata", ".", "shape", "[", "0", "]", ")", "logg", ".", "warn", "(", "'n_obs too small: adjusting to `n_neighbors = {}`'", ".", "format", "(", "n_neighbors", ")", ")", "if", "method", "==", "'umap'", "and", "not", "knn", ":", "raise", "ValueError", "(", "'`method = \\'umap\\' only with `knn = True`.'", ")", "if", "method", "not", "in", "{", "'umap'", ",", "'gauss'", "}", ":", "raise", "ValueError", "(", "'`method` needs to be \\'umap\\' or \\'gauss\\'.'", ")", "if", "self", ".", "_adata", ".", "shape", "[", "0", "]", ">=", "10000", "and", "not", "knn", ":", "logg", ".", "warn", "(", "'Using high n_obs without `knn=True` takes a lot of memory...'", ")", "self", ".", "n_neighbors", "=", "n_neighbors", "self", ".", "knn", "=", "knn", "X", "=", "choose_representation", "(", "self", ".", "_adata", ",", "use_rep", "=", "use_rep", ",", "n_pcs", "=", "n_pcs", ")", "# neighbor search", "use_dense_distances", "=", "(", "metric", "==", "'euclidean'", "and", "X", ".", "shape", "[", "0", "]", "<", "8192", ")", "or", "knn", "==", "False", "if", "use_dense_distances", ":", "_distances", "=", "pairwise_distances", "(", "X", ",", "metric", "=", "metric", ",", "*", "*", "metric_kwds", ")", "knn_indices", ",", "knn_distances", "=", "get_indices_distances_from_dense_matrix", "(", "_distances", ",", "n_neighbors", ")", "if", "knn", ":", "self", ".", "_distances", "=", "get_sparse_matrix_from_indices_distances_numpy", "(", "knn_indices", ",", "knn_distances", ",", "X", ".", "shape", "[", "0", "]", ",", "n_neighbors", ")", "else", ":", "self", ".", "_distances", "=", "_distances", "else", ":", "# non-euclidean case and approx nearest neighbors", "if", "X", ".", "shape", "[", "0", "]", "<", "4096", ":", "X", "=", "pairwise_distances", "(", "X", ",", "metric", "=", "metric", ",", "*", "*", "metric_kwds", ")", "metric", "=", "'precomputed'", "knn_indices", ",", "knn_distances", ",", "_", "=", "compute_neighbors_umap", "(", "X", ",", "n_neighbors", ",", "random_state", ",", "metric", "=", "metric", ",", "metric_kwds", "=", "metric_kwds", ")", "#self._rp_forest = _make_forest_dict(forest)", "# write indices as attributes", "if", "write_knn_indices", ":", "self", ".", "knn_indices", "=", "knn_indices", "self", ".", "knn_distances", "=", "knn_distances", "logg", ".", "msg", "(", "'computed neighbors'", ",", "t", "=", "True", ",", "v", "=", "4", ")", "if", "not", "use_dense_distances", "or", "method", "==", "'umap'", ":", "# we need self._distances also for method == 'gauss' if we didn't", "# use dense distances", "self", ".", "_distances", ",", "self", ".", "_connectivities", "=", "compute_connectivities_umap", "(", "knn_indices", ",", "knn_distances", ",", "self", ".", "_adata", ".", "shape", "[", "0", "]", ",", "self", ".", "n_neighbors", ")", "# overwrite 
the umap connectivities if method is 'gauss'", "# self._distances is unaffected by this", "if", "method", "==", "'gauss'", ":", "self", ".", "_compute_connectivities_diffmap", "(", ")", "logg", ".", "msg", "(", "'computed connectivities'", ",", "t", "=", "True", ",", "v", "=", "4", ")", "self", ".", "_number_connected_components", "=", "1", "if", "issparse", "(", "self", ".", "_connectivities", ")", ":", "from", "scipy", ".", "sparse", ".", "csgraph", "import", "connected_components", "self", ".", "_connected_components", "=", "connected_components", "(", "self", ".", "_connectivities", ")", "self", ".", "_number_connected_components", "=", "self", ".", "_connected_components", "[", "0", "]" ]
\ Compute distances and connectivities of neighbors. Parameters ---------- n_neighbors Use this number of nearest neighbors. knn Restrict result to `n_neighbors` nearest neighbors. {n_pcs} {use_rep} Returns ------- Writes sparse graph attributes `.distances` and `.connectivities`. Also writes `.knn_indices` and `.knn_distances` if `write_knn_indices==True`.
[ "\\", "Compute", "distances", "and", "connectivities", "of", "neighbors", "." ]
python
train
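For context, a hedged usage sketch: in scanpy this method is normally reached through the public wrapper sc.pp.neighbors rather than called directly (the dataset loader below is one of scanpy's bundled examples):

import scanpy as sc

adata = sc.datasets.pbmc68k_reduced()                   # small bundled AnnData object
sc.pp.neighbors(adata, n_neighbors=15, method='umap')   # drives compute_neighbors internally
# distances and connectivities end up as sparse graph attributes on the AnnData object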
nathancahill/mimicdb
mimicdb/s3/bucket.py
https://github.com/nathancahill/mimicdb/blob/9d0e8ebcba31d937f73752f9b88e5a4fec860765/mimicdb/s3/bucket.py#L102-L127
def list(self, *args, **kwargs): """Return an iterable of keys from MimicDB. :param boolean force: If true, API call is forced to S3 """ if kwargs.pop('force', None): headers = kwargs.get('headers', args[4] if len(args) > 4 else None) or dict() headers['force'] = True kwargs['headers'] = headers for key in super(Bucket, self).list(*args, **kwargs): yield key else: prefix = kwargs.get('prefix', args[0] if args else '') for key in mimicdb.backend.smembers(tpl.bucket % self.name): if key.startswith(prefix): k = Key(self, key) meta = mimicdb.backend.hgetall(tpl.key % (self.name, key)) if meta: k._load_meta(meta['size'], meta['md5']) yield k
[ "def", "list", "(", "self", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "if", "kwargs", ".", "pop", "(", "'force'", ",", "None", ")", ":", "headers", "=", "kwargs", ".", "get", "(", "'headers'", ",", "args", "[", "4", "]", "if", "len", "(", "args", ")", ">", "4", "else", "None", ")", "or", "dict", "(", ")", "headers", "[", "'force'", "]", "=", "True", "kwargs", "[", "'headers'", "]", "=", "headers", "for", "key", "in", "super", "(", "Bucket", ",", "self", ")", ".", "list", "(", "*", "args", ",", "*", "*", "kwargs", ")", ":", "yield", "key", "else", ":", "prefix", "=", "kwargs", ".", "get", "(", "'prefix'", ",", "args", "[", "0", "]", "if", "args", "else", "''", ")", "for", "key", "in", "mimicdb", ".", "backend", ".", "smembers", "(", "tpl", ".", "bucket", "%", "self", ".", "name", ")", ":", "if", "key", ".", "startswith", "(", "prefix", ")", ":", "k", "=", "Key", "(", "self", ",", "key", ")", "meta", "=", "mimicdb", ".", "backend", ".", "hgetall", "(", "tpl", ".", "key", "%", "(", "self", ".", "name", ",", "key", ")", ")", "if", "meta", ":", "k", ".", "_load_meta", "(", "meta", "[", "'size'", "]", ",", "meta", "[", "'md5'", "]", ")", "yield", "k" ]
Return an iterable of keys from MimicDB. :param boolean force: If true, API call is forced to S3
[ "Return", "an", "iterable", "of", "keys", "from", "MimicDB", "." ]
python
valid
pantsbuild/pants
src/python/pants/cache/cache_setup.py
https://github.com/pantsbuild/pants/blob/b72e650da0df685824ffdcc71988b8c282d0962d/src/python/pants/cache/cache_setup.py#L175-L185
def get_write_cache(self): """Returns the write cache for this setup, creating it if necessary. Returns None if no write cache is configured. """ if self._options.write_to and not self._write_cache: cache_spec = self._resolve(self._sanitize_cache_spec(self._options.write_to)) if cache_spec: with self._cache_setup_lock: self._write_cache = self._do_create_artifact_cache(cache_spec, 'will write to') return self._write_cache
[ "def", "get_write_cache", "(", "self", ")", ":", "if", "self", ".", "_options", ".", "write_to", "and", "not", "self", ".", "_write_cache", ":", "cache_spec", "=", "self", ".", "_resolve", "(", "self", ".", "_sanitize_cache_spec", "(", "self", ".", "_options", ".", "write_to", ")", ")", "if", "cache_spec", ":", "with", "self", ".", "_cache_setup_lock", ":", "self", ".", "_write_cache", "=", "self", ".", "_do_create_artifact_cache", "(", "cache_spec", ",", "'will write to'", ")", "return", "self", ".", "_write_cache" ]
Returns the write cache for this setup, creating it if necessary. Returns None if no write cache is configured.
[ "Returns", "the", "write", "cache", "for", "this", "setup", "creating", "it", "if", "necessary", "." ]
python
train
openstack/networking-cisco
networking_cisco/apps/saf/agent/topo_disc/topo_disc.py
https://github.com/openstack/networking-cisco/blob/aa58a30aec25b86f9aa5952b0863045975debfa9/networking_cisco/apps/saf/agent/topo_disc/topo_disc.py#L135-L140
def remote_sys_desc_uneq_store(self, remote_system_desc): """This function saves the system desc, if different from stored. """ if remote_system_desc != self.remote_system_desc: self.remote_system_desc = remote_system_desc return True return False
[ "def", "remote_sys_desc_uneq_store", "(", "self", ",", "remote_system_desc", ")", ":", "if", "remote_system_desc", "!=", "self", ".", "remote_system_desc", ":", "self", ".", "remote_system_desc", "=", "remote_system_desc", "return", "True", "return", "False" ]
This function saves the system desc, if different from stored.
[ "This", "function", "saves", "the", "system", "desc", "if", "different", "from", "stored", "." ]
python
train
mixcloud/django-experiments
experiments/admin_utils.py
https://github.com/mixcloud/django-experiments/blob/1f45e9f8a108b51e44918daa647269b2b8d43f1d/experiments/admin_utils.py#L61-L82
def points_with_surrounding_gaps(points): """ This function makes sure that any gaps in the sequence provided have stopper points at their beginning and end so a graph will be drawn with correct 0 ranges. This is more efficient than filling in all points up to the maximum value. For example: input: [1,2,3,10,11,13] output [1,2,3,4,9,10,11,12,13] """ points_with_gaps = [] last_point = -1 for point in points: if last_point + 1 == point: pass elif last_point + 2 == point: points_with_gaps.append(last_point + 1) else: points_with_gaps.append(last_point + 1) points_with_gaps.append(point - 1) points_with_gaps.append(point) last_point = point return points_with_gaps
[ "def", "points_with_surrounding_gaps", "(", "points", ")", ":", "points_with_gaps", "=", "[", "]", "last_point", "=", "-", "1", "for", "point", "in", "points", ":", "if", "last_point", "+", "1", "==", "point", ":", "pass", "elif", "last_point", "+", "2", "==", "point", ":", "points_with_gaps", ".", "append", "(", "last_point", "+", "1", ")", "else", ":", "points_with_gaps", ".", "append", "(", "last_point", "+", "1", ")", "points_with_gaps", ".", "append", "(", "point", "-", "1", ")", "points_with_gaps", ".", "append", "(", "point", ")", "last_point", "=", "point", "return", "points_with_gaps" ]
This function makes sure that any gaps in the sequence provided have stopper points at their beginning and end so a graph will be drawn with correct 0 ranges. This is more efficient than filling in all points up to the maximum value. For example: input: [1,2,3,10,11,13] output [1,2,3,4,9,10,11,12,13]
[ "This", "function", "makes", "sure", "that", "any", "gaps", "in", "the", "sequence", "provided", "have", "stopper", "points", "at", "their", "beginning", "and", "end", "so", "a", "graph", "will", "be", "drawn", "with", "correct", "0", "ranges", ".", "This", "is", "more", "efficient", "than", "filling", "in", "all", "points", "up", "to", "the", "maximum", "value", ".", "For", "example", ":" ]
python
train
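A quick check of the gap-stopper behaviour, assuming the function above is importable from experiments.admin_utils (its path in this record); note that the initial last_point of -1 also emits a stopper at 0 when the sequence does not start at 0, so the result carries one more leading point than the docstring example shows:

from experiments.admin_utils import points_with_surrounding_gaps

# the gap between 3 and 10 gains stoppers 4 and 9; the jump from 11 to 13 gains 12
print(points_with_surrounding_gaps([1, 2, 3, 10, 11, 13]))
# -> [0, 1, 2, 3, 4, 9, 10, 11, 12, 13]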
tonybaloney/wily
wily/cache.py
https://github.com/tonybaloney/wily/blob/bae259354a91b57d56603f0ca7403186f086a84c/wily/cache.py#L236-L252
def get_archiver_index(config, archiver): """ Get the contents of the archiver index file. :param config: The configuration :type config: :class:`wily.config.WilyConfig` :param archiver: The name of the archiver type (e.g. 'git') :type archiver: ``str`` :return: The index data :rtype: ``dict`` """ root = pathlib.Path(config.cache_path) / archiver with (root / "index.json").open("r") as index_f: index = json.load(index_f) return index
[ "def", "get_archiver_index", "(", "config", ",", "archiver", ")", ":", "root", "=", "pathlib", ".", "Path", "(", "config", ".", "cache_path", ")", "/", "archiver", "with", "(", "root", "/", "\"index.json\"", ")", ".", "open", "(", "\"r\"", ")", "as", "index_f", ":", "index", "=", "json", ".", "load", "(", "index_f", ")", "return", "index" ]
Get the contents of the archiver index file. :param config: The configuration :type config: :class:`wily.config.WilyConfig` :param archiver: The name of the archiver type (e.g. 'git') :type archiver: ``str`` :return: The index data :rtype: ``dict``
[ "Get", "the", "contents", "of", "the", "archiver", "index", "file", "." ]
python
train
twilio/twilio-python
twilio/rest/preview/hosted_numbers/authorization_document/dependent_hosted_number_order.py
https://github.com/twilio/twilio-python/blob/c867895f55dcc29f522e6e8b8868d0d18483132f/twilio/rest/preview/hosted_numbers/authorization_document/dependent_hosted_number_order.py#L194-L207
def get_instance(self, payload): """ Build an instance of DependentHostedNumberOrderInstance :param dict payload: Payload response from the API :returns: twilio.rest.preview.hosted_numbers.authorization_document.dependent_hosted_number_order.DependentHostedNumberOrderInstance :rtype: twilio.rest.preview.hosted_numbers.authorization_document.dependent_hosted_number_order.DependentHostedNumberOrderInstance """ return DependentHostedNumberOrderInstance( self._version, payload, signing_document_sid=self._solution['signing_document_sid'], )
[ "def", "get_instance", "(", "self", ",", "payload", ")", ":", "return", "DependentHostedNumberOrderInstance", "(", "self", ".", "_version", ",", "payload", ",", "signing_document_sid", "=", "self", ".", "_solution", "[", "'signing_document_sid'", "]", ",", ")" ]
Build an instance of DependentHostedNumberOrderInstance :param dict payload: Payload response from the API :returns: twilio.rest.preview.hosted_numbers.authorization_document.dependent_hosted_number_order.DependentHostedNumberOrderInstance :rtype: twilio.rest.preview.hosted_numbers.authorization_document.dependent_hosted_number_order.DependentHostedNumberOrderInstance
[ "Build", "an", "instance", "of", "DependentHostedNumberOrderInstance" ]
python
train
emlazzarin/acrylic
acrylic/datatable.py
https://github.com/emlazzarin/acrylic/blob/08c6702d73b9660ead1024653f4fa016f6340e46/acrylic/datatable.py#L486-L507
def apply(self, func, *fields): """ Applies the function, `func`, to every row in the DataTable. If no fields are supplied, the entire row is passed to `func`. If fields are supplied, the values at all of those fields are passed into func in that order. --- data['diff'] = data.apply(short_diff, 'old_count', 'new_count') """ results = [] for row in self: if not fields: results.append(func(row)) else: if any(field not in self for field in fields): for field in fields: if field not in self: raise Exception("Column `%s` does not exist " "in DataTable" % field) results.append(func(*[row[field] for field in fields])) return results
[ "def", "apply", "(", "self", ",", "func", ",", "*", "fields", ")", ":", "results", "=", "[", "]", "for", "row", "in", "self", ":", "if", "not", "fields", ":", "results", ".", "append", "(", "func", "(", "row", ")", ")", "else", ":", "if", "any", "(", "field", "not", "in", "self", "for", "field", "in", "fields", ")", ":", "for", "field", "in", "fields", ":", "if", "field", "not", "in", "self", ":", "raise", "Exception", "(", "\"Column `%s` does not exist \"", "\"in DataTable\"", "%", "field", ")", "results", ".", "append", "(", "func", "(", "*", "[", "row", "[", "field", "]", "for", "field", "in", "fields", "]", ")", ")", "return", "results" ]
Applies the function, `func`, to every row in the DataTable. If no fields are supplied, the entire row is passed to `func`. If fields are supplied, the values at all of those fields are passed into func in that order. --- data['diff'] = data.apply(short_diff, 'old_count', 'new_count')
[ "Applies", "the", "function", "func", "to", "every", "row", "in", "the", "DataTable", "." ]
python
train
anomaly/prestans
prestans/parser/parameter_set.py
https://github.com/anomaly/prestans/blob/13f5b2467bfd403dcd2d085f15cbf4644044f105/prestans/parser/parameter_set.py#L56-L90
def blueprint(self): """ blueprint support, returns a partial dictionary """ blueprint = dict() blueprint['type'] = "%s.%s" % (self.__module__, self.__class__.__name__) # Fields fields = dict() # inspects the attributes of a parameter set and tries to validate the input for attribute_name, type_instance in self.getmembers(): # must be one of the following types if not isinstance(type_instance, String) and \ not isinstance(type_instance, Float) and \ not isinstance(type_instance, Integer) and \ not isinstance(type_instance, Date) and \ not isinstance(type_instance, DateTime) and \ not isinstance(type_instance, Array): raise TypeError("%s should be instance of\ prestans.types.String/Integer/Float/Date/DateTime/Array" % attribute_name) if isinstance(type_instance, Array): if not isinstance(type_instance.element_template, String) and \ not isinstance(type_instance.element_template, Float) and \ not isinstance(type_instance.element_template, Integer): raise TypeError("%s should be instance of \ prestans.types.String/Integer/Float/Array" % attribute_name) fields[attribute_name] = type_instance.blueprint() blueprint['fields'] = fields return blueprint
[ "def", "blueprint", "(", "self", ")", ":", "blueprint", "=", "dict", "(", ")", "blueprint", "[", "'type'", "]", "=", "\"%s.%s\"", "%", "(", "self", ".", "__module__", ",", "self", ".", "__class__", ".", "__name__", ")", "# Fields", "fields", "=", "dict", "(", ")", "# inspects the attributes of a parameter set and tries to validate the input", "for", "attribute_name", ",", "type_instance", "in", "self", ".", "getmembers", "(", ")", ":", "# must be one of the following types", "if", "not", "isinstance", "(", "type_instance", ",", "String", ")", "and", "not", "isinstance", "(", "type_instance", ",", "Float", ")", "and", "not", "isinstance", "(", "type_instance", ",", "Integer", ")", "and", "not", "isinstance", "(", "type_instance", ",", "Date", ")", "and", "not", "isinstance", "(", "type_instance", ",", "DateTime", ")", "and", "not", "isinstance", "(", "type_instance", ",", "Array", ")", ":", "raise", "TypeError", "(", "\"%s should be instance of\\\n prestans.types.String/Integer/Float/Date/DateTime/Array\"", "%", "attribute_name", ")", "if", "isinstance", "(", "type_instance", ",", "Array", ")", ":", "if", "not", "isinstance", "(", "type_instance", ".", "element_template", ",", "String", ")", "and", "not", "isinstance", "(", "type_instance", ".", "element_template", ",", "Float", ")", "and", "not", "isinstance", "(", "type_instance", ".", "element_template", ",", "Integer", ")", ":", "raise", "TypeError", "(", "\"%s should be instance of \\\n prestans.types.String/Integer/Float/Array\"", "%", "attribute_name", ")", "fields", "[", "attribute_name", "]", "=", "type_instance", ".", "blueprint", "(", ")", "blueprint", "[", "'fields'", "]", "=", "fields", "return", "blueprint" ]
blueprint support, returns a partial dictionary
[ "blueprint", "support", "returns", "a", "partial", "dictionary" ]
python
train
ulf1/oxyba
oxyba/clean_german_date.py
https://github.com/ulf1/oxyba/blob/b3043116050de275124365cb11e7df91fb40169d/oxyba/clean_german_date.py#L2-L61
def clean_german_date(x):
    """Convert a string with a German date 'DD.MM.YYYY' to Datetime objects

    Parameters
    ----------
    x : str, list, tuple, numpy.ndarray, pandas.DataFrame
        A string with a German formatted date, or an array
        of these strings, e.g. list, ndarray, df.

    Returns
    -------
    y : str, list, tuple, numpy.ndarray, pandas.DataFrame
        A datetime object or array of datetime objects.

    Example
    -------
        The function aims to convert a string as follows
            '23.09.2012' => datetime(2012, 9, 23, 0, 0)

    Code Example
    ------------
        print(clean_german_date('23.09.2012'))

    Behavior
    --------
        - If it is not a string with date format 'DD.MM.YYYY'
          then None is returned
    """
    import numpy as np
    import pandas as pd
    from datetime import datetime

    def proc_elem(e):
        try:
            return datetime.strptime(e, '%d.%m.%Y')
        except Exception as e:
            print(e)
            return None

    def proc_list(x):
        return [proc_elem(e) for e in x]

    def proc_ndarray(x):
        tmp = proc_list(list(x.reshape((x.size,))))
        return np.array(tmp).reshape(x.shape)

    # transform string, list/tuple, numpy array, pandas dataframe
    if isinstance(x, str):
        return proc_elem(x)
    elif isinstance(x, (list, tuple)):
        return proc_list(x)
    elif isinstance(x, np.ndarray):
        return proc_ndarray(x)
    elif isinstance(x, pd.DataFrame):
        return pd.DataFrame(proc_ndarray(x.values),
                            columns=x.columns, index=x.index)
    else:
        return None
[ "def", "clean_german_date", "(", "x", ")", ":", "import", "numpy", "as", "np", "import", "pandas", "as", "pd", "from", "datetime", "import", "datetime", "def", "proc_elem", "(", "e", ")", ":", "try", ":", "return", "datetime", ".", "strptime", "(", "e", ",", "'%d.%m.%Y'", ")", "except", "Exception", "as", "e", ":", "print", "(", "e", ")", "return", "None", "def", "proc_list", "(", "x", ")", ":", "return", "[", "proc_elem", "(", "e", ")", "for", "e", "in", "x", "]", "def", "proc_ndarray", "(", "x", ")", ":", "tmp", "=", "proc_list", "(", "list", "(", "x", ".", "reshape", "(", "(", "x", ".", "size", ",", ")", ")", ")", ")", "return", "np", ".", "array", "(", "tmp", ")", ".", "reshape", "(", "x", ".", "shape", ")", "# transform string, list/tuple, numpy array, pandas dataframe", "if", "isinstance", "(", "x", ",", "str", ")", ":", "return", "proc_elem", "(", "x", ")", "elif", "isinstance", "(", "x", ",", "(", "list", ",", "tuple", ")", ")", ":", "return", "proc_list", "(", "x", ")", "elif", "isinstance", "(", "x", ",", "np", ".", "ndarray", ")", ":", "return", "proc_ndarray", "(", "x", ")", "elif", "isinstance", "(", "x", ",", "pd", ".", "DataFrame", ")", ":", "return", "pd", ".", "DataFrame", "(", "proc_ndarray", "(", "x", ".", "values", ")", ",", "columns", "=", "x", ".", "columns", ",", "index", "=", "x", ".", "index", ")", "else", ":", "return", "None" ]
Convert a string with a German date 'DD.MM.YYYY' to Datetime objects

    Parameters
    ----------
    x : str, list, tuple, numpy.ndarray, pandas.DataFrame
        A string with a German formatted date, or an array
        of these strings, e.g. list, ndarray, df.

    Returns
    -------
    y : str, list, tuple, numpy.ndarray, pandas.DataFrame
        A datetime object or array of datetime objects.

    Example
    -------
        The function aims to convert a string as follows
            '23.09.2012' => datetime(2012, 9, 23, 0, 0)

    Code Example
    ------------
        print(clean_german_date('23.09.2012'))

    Behavior
    --------
        - If it is not a string with date format 'DD.MM.YYYY'
          then None is returned
[ "Convert", "a", "string", "with", "a", "German", "date", "DD", ".", "MM", ".", "YYYY", "to", "Datetime", "objects" ]
python
train
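A short usage sketch, assuming the function is importable as below (inferred from the package path in this record); the None fallback for unparseable strings follows from the except branch in proc_elem:

from oxyba.clean_german_date import clean_german_date

print(clean_german_date('23.09.2012'))                # datetime.datetime(2012, 9, 23, 0, 0)
print(clean_german_date(['23.09.2012', 'nonsense']))  # [datetime.datetime(2012, 9, 23, 0, 0), None]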
twisted/mantissa
xmantissa/signup.py
https://github.com/twisted/mantissa/blob/53e5502aba23ce99be78b27f923a276593033fe8/xmantissa/signup.py#L213-L220
def attemptByKey(self, key): """ Locate the L{_PasswordResetAttempt} that corresponds to C{key} """ return self.store.findUnique(_PasswordResetAttempt, _PasswordResetAttempt.key == key, default=None)
[ "def", "attemptByKey", "(", "self", ",", "key", ")", ":", "return", "self", ".", "store", ".", "findUnique", "(", "_PasswordResetAttempt", ",", "_PasswordResetAttempt", ".", "key", "==", "key", ",", "default", "=", "None", ")" ]
Locate the L{_PasswordResetAttempt} that corresponds to C{key}
[ "Locate", "the", "L", "{", "_PasswordResetAttempt", "}", "that", "corresponds", "to", "C", "{", "key", "}" ]
python
train
bcbio/bcbio-nextgen
bcbio/ngsalign/alignprep.py
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/ngsalign/alignprep.py#L281-L292
def total_reads_from_grabix(in_file): """Retrieve total reads in a fastq file from grabix index. """ gbi_file = _get_grabix_index(in_file) if gbi_file: with open(gbi_file) as in_handle: next(in_handle) # throw away num_lines = int(next(in_handle).strip()) assert num_lines % 4 == 0, "Expected lines to be multiple of 4" return num_lines // 4 else: return 0
[ "def", "total_reads_from_grabix", "(", "in_file", ")", ":", "gbi_file", "=", "_get_grabix_index", "(", "in_file", ")", "if", "gbi_file", ":", "with", "open", "(", "gbi_file", ")", "as", "in_handle", ":", "next", "(", "in_handle", ")", "# throw away", "num_lines", "=", "int", "(", "next", "(", "in_handle", ")", ".", "strip", "(", ")", ")", "assert", "num_lines", "%", "4", "==", "0", ",", "\"Expected lines to be multiple of 4\"", "return", "num_lines", "//", "4", "else", ":", "return", "0" ]
Retrieve total reads in a fastq file from grabix index.
[ "Retrieve", "total", "reads", "in", "a", "fastq", "file", "from", "grabix", "index", "." ]
python
train
peterbrittain/asciimatics
asciimatics/scene.py
https://github.com/peterbrittain/asciimatics/blob/f471427d7786ce2d5f1eeb2dae0e67d19e46e085/asciimatics/scene.py#L39-L60
def reset(self, old_scene=None, screen=None): """ Reset the scene ready for playing. :param old_scene: The previous version of this Scene that was running before the application reset - e.g. due to a screen resize. :param screen: New screen to use if old_scene is not None. """ # Always reset all the effects. for effect in self._effects: effect.reset() # If we have an old Scene to recreate, get the data out of that and # apply it where possible by cloning objects where appropriate. if old_scene: for old_effect in old_scene.effects: # Using the "easier to ask forgiveness..." mantra, just try # cloning everything and ignore any AttributeErrors. try: old_effect.clone(screen, self) except AttributeError: pass
[ "def", "reset", "(", "self", ",", "old_scene", "=", "None", ",", "screen", "=", "None", ")", ":", "# Always reset all the effects.", "for", "effect", "in", "self", ".", "_effects", ":", "effect", ".", "reset", "(", ")", "# If we have an old Scene to recreate, get the data out of that and", "# apply it where possible by cloning objects where appropriate.", "if", "old_scene", ":", "for", "old_effect", "in", "old_scene", ".", "effects", ":", "# Using the \"easier to ask forgiveness...\" mantra, just try", "# cloning everything and ignore any AttributeErrors.", "try", ":", "old_effect", ".", "clone", "(", "screen", ",", "self", ")", "except", "AttributeError", ":", "pass" ]
Reset the scene ready for playing. :param old_scene: The previous version of this Scene that was running before the application reset - e.g. due to a screen resize. :param screen: New screen to use if old_scene is not None.
[ "Reset", "the", "scene", "ready", "for", "playing", "." ]
python
train
shoebot/shoebot
lib/sbaudio/__init__.py
https://github.com/shoebot/shoebot/blob/d554c1765c1899fa25727c9fc6805d221585562b/lib/sbaudio/__init__.py#L14-L34
def fft_bandpassfilter(data, fs, lowcut, highcut):
    """
    http://www.swharden.com/blog/2009-01-21-signal-filtering-with-python/#comment-16801
    """
    fft = np.fft.fft(data)
    # n = len(data)
    # timestep = 1.0 / fs
    # freq = np.fft.fftfreq(n, d=timestep)
    bp = fft.copy()

    # Zero out fft coefficients
    # bp[10:-10] = 0

    # Normalise
    # bp *= real(fft.dot(fft))/real(bp.dot(bp))
    bp *= fft.dot(fft) / bp.dot(bp)

    # must multiply by 2 to get the correct amplitude
    ibp = 12 * np.fft.ifft(bp)
    return ibp
[ "def", "fft_bandpassfilter", "(", "data", ",", "fs", ",", "lowcut", ",", "highcut", ")", ":", "fft", "=", "np", ".", "fft", ".", "fft", "(", "data", ")", "# n = len(data)", "# timestep = 1.0 / fs", "# freq = np.fft.fftfreq(n, d=timestep)", "bp", "=", "fft", ".", "copy", "(", ")", "# Zero out fft coefficients", "# bp[10:-10] = 0", "# Normalise", "# bp *= real(fft.dot(fft))/real(bp.dot(bp))", "bp", "*=", "fft", ".", "dot", "(", "fft", ")", "/", "bp", ".", "dot", "(", "bp", ")", "# must multiply by 2 to get the correct amplitude", "ibp", "=", "12", "*", "np", ".", "fft", ".", "ifft", "(", "bp", ")", "return", "ibp" ]
http://www.swharden.com/blog/2009-01-21-signal-filtering-with-python/#comment-16801
[ "http", ":", "//", "www", ".", "swharden", ".", "com", "/", "blog", "/", "2009", "-", "01", "-", "21", "-", "signal", "-", "filtering", "-", "with", "-", "python", "/", "#comment", "-", "16801" ]
python
valid
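A hedged calling sketch (the import path is an assumption inferred from lib/sbaudio/__init__.py); worth noting that in this record fs, lowcut and highcut are accepted but never used in the body, so the output is not actually band-limited by those arguments:

import numpy as np
from sbaudio import fft_bandpassfilter  # assumed import path

t = np.arange(1024) / 256.0
sig = np.sin(2 * np.pi * 5 * t)                           # 5 Hz test tone
out = fft_bandpassfilter(sig, fs=256, lowcut=1, highcut=10)
print(out.shape, out.dtype)                               # complex output from np.fft.ifft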
materialsproject/pymatgen
pymatgen/io/abinit/qadapters.py
https://github.com/materialsproject/pymatgen/blob/4ca558cf72f8d5f8a1f21dfdfc0181a971c186da/pymatgen/io/abinit/qadapters.py#L750-L774
def validate(self): """Validate the parameters of the run. Raises self.Error if invalid parameters.""" errors = [] app = errors.append if not self.hint_cores >= self.mpi_procs * self.omp_threads >= self.min_cores: app("self.hint_cores >= mpi_procs * omp_threads >= self.min_cores not satisfied") if self.omp_threads > self.hw.cores_per_node: app("omp_threads > hw.cores_per_node") if self.mem_per_proc > self.hw.mem_per_node: app("mem_mb >= self.hw.mem_per_node") if not self.max_mem_per_proc >= self.mem_per_proc >= self.min_mem_per_proc: app("self.max_mem_per_proc >= mem_mb >= self.min_mem_per_proc not satisfied") if self.priority <= 0: app("priority must be > 0") if not (1 <= self.min_cores <= self.hw.num_cores >= self.hint_cores): app("1 <= min_cores <= hardware num_cores >= hint_cores not satisfied") if errors: raise self.Error(str(self) + "\n".join(errors))
[ "def", "validate", "(", "self", ")", ":", "errors", "=", "[", "]", "app", "=", "errors", ".", "append", "if", "not", "self", ".", "hint_cores", ">=", "self", ".", "mpi_procs", "*", "self", ".", "omp_threads", ">=", "self", ".", "min_cores", ":", "app", "(", "\"self.hint_cores >= mpi_procs * omp_threads >= self.min_cores not satisfied\"", ")", "if", "self", ".", "omp_threads", ">", "self", ".", "hw", ".", "cores_per_node", ":", "app", "(", "\"omp_threads > hw.cores_per_node\"", ")", "if", "self", ".", "mem_per_proc", ">", "self", ".", "hw", ".", "mem_per_node", ":", "app", "(", "\"mem_mb >= self.hw.mem_per_node\"", ")", "if", "not", "self", ".", "max_mem_per_proc", ">=", "self", ".", "mem_per_proc", ">=", "self", ".", "min_mem_per_proc", ":", "app", "(", "\"self.max_mem_per_proc >= mem_mb >= self.min_mem_per_proc not satisfied\"", ")", "if", "self", ".", "priority", "<=", "0", ":", "app", "(", "\"priority must be > 0\"", ")", "if", "not", "(", "1", "<=", "self", ".", "min_cores", "<=", "self", ".", "hw", ".", "num_cores", ">=", "self", ".", "hint_cores", ")", ":", "app", "(", "\"1 <= min_cores <= hardware num_cores >= hint_cores not satisfied\"", ")", "if", "errors", ":", "raise", "self", ".", "Error", "(", "str", "(", "self", ")", "+", "\"\\n\"", ".", "join", "(", "errors", ")", ")" ]
Validate the parameters of the run. Raises self.Error if invalid parameters.
[ "Validate", "the", "parameters", "of", "the", "run", ".", "Raises", "self", ".", "Error", "if", "invalid", "parameters", "." ]
python
train
jldantas/libmft
libmft/attribute.py
https://github.com/jldantas/libmft/blob/65a988605fe7663b788bd81dcb52c0a4eaad1549/libmft/attribute.py#L1025-L1031
def _from_binary_volname(cls, binary_stream): """See base class.""" name = binary_stream.tobytes().decode("utf_16_le") _MOD_LOGGER.debug("Attempted to unpack VOLUME_NAME Entry from \"%s\"\nResult: %s", binary_stream.tobytes(), name) return cls(name)
[ "def", "_from_binary_volname", "(", "cls", ",", "binary_stream", ")", ":", "name", "=", "binary_stream", ".", "tobytes", "(", ")", ".", "decode", "(", "\"utf_16_le\"", ")", "_MOD_LOGGER", ".", "debug", "(", "\"Attempted to unpack VOLUME_NAME Entry from \\\"%s\\\"\\nResult: %s\"", ",", "binary_stream", ".", "tobytes", "(", ")", ",", "name", ")", "return", "cls", "(", "name", ")" ]
See base class.
[ "See", "base", "class", "." ]
python
train
theislab/scanpy
scanpy/tools/_sim.py
https://github.com/theislab/scanpy/blob/9e4e5ee02e04cf618872d9b098e24f0542e8b227/scanpy/tools/_sim.py#L798-L803
def process_rule(self,rule,pa,tuple): ''' Process a string that denotes a boolean rule. ''' for i,v in enumerate(tuple): rule = rule.replace(pa[i],str(v)) return eval(rule)
[ "def", "process_rule", "(", "self", ",", "rule", ",", "pa", ",", "tuple", ")", ":", "for", "i", ",", "v", "in", "enumerate", "(", "tuple", ")", ":", "rule", "=", "rule", ".", "replace", "(", "pa", "[", "i", "]", ",", "str", "(", "v", ")", ")", "return", "eval", "(", "rule", ")" ]
Process a string that denotes a boolean rule.
[ "Process", "a", "string", "that", "denotes", "a", "boolean", "rule", "." ]
python
train
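A standalone sketch of the substitute-then-eval idea the method above relies on (the names here are illustrative, not from the record); note that plain str.replace can misfire when one parent name is a substring of another, e.g. 'Gene1' inside 'Gene12':

rule = 'A and not B'   # boolean rule over parent nodes
pa = ['A', 'B']        # parent names appearing in the rule
state = (1, 0)         # current boolean values of the parents

# substitute each parent's value into the rule string
for name, value in zip(pa, state):
    rule = rule.replace(name, str(value))

print(rule)        # '1 and not 0'
print(eval(rule))  # True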
gouthambs/Flask-Blogging
flask_blogging/sqlastorage.py
https://github.com/gouthambs/Flask-Blogging/blob/6636b8941175e9910f116a329521f96b8b05a9ac/flask_blogging/sqlastorage.py#L233-L266
def get_post_by_id(self, post_id): """ Fetch the blog post given by ``post_id`` :param post_id: The post identifier for the blog post :type post_id: str :return: If the ``post_id`` is valid, the post data is retrieved, else returns ``None``. """ r = None post_id = _as_int(post_id) with self._engine.begin() as conn: try: post_statement = sqla.select([self._post_table]) \ .where(self._post_table.c.id == post_id) \ .alias('post') joined_statement = post_statement.join(self._tag_posts_table) \ .join(self._tag_table) \ .join(self._user_posts_table) \ .alias('join') # Note this will retrieve one row per tag all_rows = conn.execute( sqla.select([joined_statement]) ).fetchall() r = self._serialise_posts_and_tags_from_joined_rows( all_rows )[0] except Exception as e: self._logger.exception(str(e)) r = None return r
[ "def", "get_post_by_id", "(", "self", ",", "post_id", ")", ":", "r", "=", "None", "post_id", "=", "_as_int", "(", "post_id", ")", "with", "self", ".", "_engine", ".", "begin", "(", ")", "as", "conn", ":", "try", ":", "post_statement", "=", "sqla", ".", "select", "(", "[", "self", ".", "_post_table", "]", ")", ".", "where", "(", "self", ".", "_post_table", ".", "c", ".", "id", "==", "post_id", ")", ".", "alias", "(", "'post'", ")", "joined_statement", "=", "post_statement", ".", "join", "(", "self", ".", "_tag_posts_table", ")", ".", "join", "(", "self", ".", "_tag_table", ")", ".", "join", "(", "self", ".", "_user_posts_table", ")", ".", "alias", "(", "'join'", ")", "# Note this will retrieve one row per tag", "all_rows", "=", "conn", ".", "execute", "(", "sqla", ".", "select", "(", "[", "joined_statement", "]", ")", ")", ".", "fetchall", "(", ")", "r", "=", "self", ".", "_serialise_posts_and_tags_from_joined_rows", "(", "all_rows", ")", "[", "0", "]", "except", "Exception", "as", "e", ":", "self", ".", "_logger", ".", "exception", "(", "str", "(", "e", ")", ")", "r", "=", "None", "return", "r" ]
Fetch the blog post given by ``post_id`` :param post_id: The post identifier for the blog post :type post_id: str :return: If the ``post_id`` is valid, the post data is retrieved, else returns ``None``.
[ "Fetch", "the", "blog", "post", "given", "by", "post_id" ]
python
train
python-openxml/python-docx
docx/image/tiff.py
https://github.com/python-openxml/python-docx/blob/6756f6cd145511d3eb6d1d188beea391b1ddfd53/docx/image/tiff.py#L318-L326
def _parse_value(cls, stream_rdr, offset, value_count, value_offset): """ Return the long int value contained in the *value_offset* field of this entry. Only supports single values at present. """ if value_count == 1: return stream_rdr.read_long(offset, 8) else: # pragma: no cover return 'Multi-value long integer NOT IMPLEMENTED'
[ "def", "_parse_value", "(", "cls", ",", "stream_rdr", ",", "offset", ",", "value_count", ",", "value_offset", ")", ":", "if", "value_count", "==", "1", ":", "return", "stream_rdr", ".", "read_long", "(", "offset", ",", "8", ")", "else", ":", "# pragma: no cover", "return", "'Multi-value long integer NOT IMPLEMENTED'" ]
Return the long int value contained in the *value_offset* field of this entry. Only supports single values at present.
[ "Return", "the", "long", "int", "value", "contained", "in", "the", "*", "value_offset", "*", "field", "of", "this", "entry", ".", "Only", "supports", "single", "values", "at", "present", "." ]
python
train
apache/incubator-superset
superset/connectors/base/models.py
https://github.com/apache/incubator-superset/blob/ca2996c78f679260eb79c6008e276733df5fb653/superset/connectors/base/models.py#L148-L159
def short_data(self): """Data representation of the datasource sent to the frontend""" return { 'edit_url': self.url, 'id': self.id, 'uid': self.uid, 'schema': self.schema, 'name': self.name, 'type': self.type, 'connection': self.connection, 'creator': str(self.created_by), }
[ "def", "short_data", "(", "self", ")", ":", "return", "{", "'edit_url'", ":", "self", ".", "url", ",", "'id'", ":", "self", ".", "id", ",", "'uid'", ":", "self", ".", "uid", ",", "'schema'", ":", "self", ".", "schema", ",", "'name'", ":", "self", ".", "name", ",", "'type'", ":", "self", ".", "type", ",", "'connection'", ":", "self", ".", "connection", ",", "'creator'", ":", "str", "(", "self", ".", "created_by", ")", ",", "}" ]
Data representation of the datasource sent to the frontend
[ "Data", "representation", "of", "the", "datasource", "sent", "to", "the", "frontend" ]
python
train
KnowledgeLinks/rdfframework
rdfframework/rml/processor.py
https://github.com/KnowledgeLinks/rdfframework/blob/9ec32dcc4bed51650a4b392cc5c15100fef7923a/rdfframework/rml/processor.py#L1010-L1128
def execute(self, triple_map, output, **kwargs): """Execute """ subjects = [] if NS_MGR.ql.JSON.rdflib in \ triple_map.logicalSource.reference_formulations: output_format = "json" else: output_format = "xml" if 'limit' not in kwargs: kwargs['limit'] = self.limit if 'offset' not in kwargs: kwargs['offset'] = self.offset # log.debug("triple_map.logicalSource: \n%s", # pprint.pformat(triple_map.logicalSource.__dict__)) iterator = str(triple_map.logicalSource.iterator) start = datetime.datetime.now() key, json_query = None, None # pdb.set_trace() if hasattr(triple_map.logicalSource, 'json_query') \ and self.use_json_qry: key = kwargs.get(str(triple_map.logicalSource.json_key)) if not key: key =[val for val in kwargs.values() \ if isinstance(val, rdflib.URIRef)][0] json_query = triple_map.logicalSource.json_query bindings = kwargs['dataset'].json_qry(json_query, {'$': key}) else: sparql = PREFIX + triple_map.logicalSource.query.format(**kwargs) bindings = self.__get_bindings__(sparql, output_format) for binding in bindings: if key: try: entity_raw = binding.subject.rdflib except AttributeError: entity_raw = binding else: entity_raw = binding.get(iterator) if isinstance(entity_raw, (rdflib.URIRef, rdflib.BNode, BaseRdfDataType)): entity = entity_raw else: raw_value = entity_raw.get('value') if entity_raw.get('type').startswith('bnode'): entity = BlankNode(raw_value) else: entity = Uri(raw_value) if triple_map.subjectMap.class_ is not None: sub = entity if isinstance(entity, BaseRdfDataType): sub = entity.rdflib output.add((sub, NS_MGR.rdf.type.rdflib, triple_map.subjectMap.class_)) # pdb.set_trace() for pred_obj_map in triple_map.predicateObjectMap: predicate = pred_obj_map.predicate kwargs[iterator] = entity if pred_obj_map.parentTriplesMap is not None: self.__handle_parents__( output=output, parent_map=pred_obj_map.parentTriplesMap, subject=entity, predicate=predicate, **kwargs) continue if pred_obj_map.reference is not None: ref_key = str(pred_obj_map.reference) if pred_obj_map.json_query: # if pred_obj_map.json_query =="$.schema_logo": # pdb.set_trace() if ref_key in binding: for item in binding[ref_key]: output.add((entity, predicate, item.rdflib)) continue else: if ref_key in binding: object_ = __get_object__( binding[ref_key]) output.add((entity, predicate, object_)) continue if pred_obj_map.constant is not None: if isinstance(entity, BaseRdfDataType): entity = entity.rdflib output.add( (entity, predicate, pred_obj_map.constant)) continue json_query = None if pred_obj_map.json_query and self.use_json_qry: json_query = pred_obj_map.json_query start = datetime.datetime.now() # pdb.set_trace() # if str(pred_obj_map.predicate) == "http://purl.org/dc/terms/creator": # pdb.set_trace() pre_obj_bindings = kwargs['dataset'].json_qry(json_query, {'$': entity}) else: sparql_query = PREFIX + pred_obj_map.query.format(**kwargs) pre_obj_bindings = self.__get_bindings__(sparql_query, output_format) for row in pre_obj_bindings: if json_query and self.use_json_qry: if isinstance(entity, BaseRdfDataType): entity = entity.rdflib output.add((entity, predicate, row.rdflib)) else: object_ = __get_object__(row) if object_ is None: continue if isinstance(entity, BaseRdfDataType): entity = entity.rdflib output.add((entity, predicate, object_)) subjects.append(entity) return subjects
[ "def", "execute", "(", "self", ",", "triple_map", ",", "output", ",", "*", "*", "kwargs", ")", ":", "subjects", "=", "[", "]", "if", "NS_MGR", ".", "ql", ".", "JSON", ".", "rdflib", "in", "triple_map", ".", "logicalSource", ".", "reference_formulations", ":", "output_format", "=", "\"json\"", "else", ":", "output_format", "=", "\"xml\"", "if", "'limit'", "not", "in", "kwargs", ":", "kwargs", "[", "'limit'", "]", "=", "self", ".", "limit", "if", "'offset'", "not", "in", "kwargs", ":", "kwargs", "[", "'offset'", "]", "=", "self", ".", "offset", "# log.debug(\"triple_map.logicalSource: \\n%s\",", "# pprint.pformat(triple_map.logicalSource.__dict__))", "iterator", "=", "str", "(", "triple_map", ".", "logicalSource", ".", "iterator", ")", "start", "=", "datetime", ".", "datetime", ".", "now", "(", ")", "key", ",", "json_query", "=", "None", ",", "None", "# pdb.set_trace()", "if", "hasattr", "(", "triple_map", ".", "logicalSource", ",", "'json_query'", ")", "and", "self", ".", "use_json_qry", ":", "key", "=", "kwargs", ".", "get", "(", "str", "(", "triple_map", ".", "logicalSource", ".", "json_key", ")", ")", "if", "not", "key", ":", "key", "=", "[", "val", "for", "val", "in", "kwargs", ".", "values", "(", ")", "if", "isinstance", "(", "val", ",", "rdflib", ".", "URIRef", ")", "]", "[", "0", "]", "json_query", "=", "triple_map", ".", "logicalSource", ".", "json_query", "bindings", "=", "kwargs", "[", "'dataset'", "]", ".", "json_qry", "(", "json_query", ",", "{", "'$'", ":", "key", "}", ")", "else", ":", "sparql", "=", "PREFIX", "+", "triple_map", ".", "logicalSource", ".", "query", ".", "format", "(", "*", "*", "kwargs", ")", "bindings", "=", "self", ".", "__get_bindings__", "(", "sparql", ",", "output_format", ")", "for", "binding", "in", "bindings", ":", "if", "key", ":", "try", ":", "entity_raw", "=", "binding", ".", "subject", ".", "rdflib", "except", "AttributeError", ":", "entity_raw", "=", "binding", "else", ":", "entity_raw", "=", "binding", ".", "get", "(", "iterator", ")", "if", "isinstance", "(", "entity_raw", ",", "(", "rdflib", ".", "URIRef", ",", "rdflib", ".", "BNode", ",", "BaseRdfDataType", ")", ")", ":", "entity", "=", "entity_raw", "else", ":", "raw_value", "=", "entity_raw", ".", "get", "(", "'value'", ")", "if", "entity_raw", ".", "get", "(", "'type'", ")", ".", "startswith", "(", "'bnode'", ")", ":", "entity", "=", "BlankNode", "(", "raw_value", ")", "else", ":", "entity", "=", "Uri", "(", "raw_value", ")", "if", "triple_map", ".", "subjectMap", ".", "class_", "is", "not", "None", ":", "sub", "=", "entity", "if", "isinstance", "(", "entity", ",", "BaseRdfDataType", ")", ":", "sub", "=", "entity", ".", "rdflib", "output", ".", "add", "(", "(", "sub", ",", "NS_MGR", ".", "rdf", ".", "type", ".", "rdflib", ",", "triple_map", ".", "subjectMap", ".", "class_", ")", ")", "# pdb.set_trace()", "for", "pred_obj_map", "in", "triple_map", ".", "predicateObjectMap", ":", "predicate", "=", "pred_obj_map", ".", "predicate", "kwargs", "[", "iterator", "]", "=", "entity", "if", "pred_obj_map", ".", "parentTriplesMap", "is", "not", "None", ":", "self", ".", "__handle_parents__", "(", "output", "=", "output", ",", "parent_map", "=", "pred_obj_map", ".", "parentTriplesMap", ",", "subject", "=", "entity", ",", "predicate", "=", "predicate", ",", "*", "*", "kwargs", ")", "continue", "if", "pred_obj_map", ".", "reference", "is", "not", "None", ":", "ref_key", "=", "str", "(", "pred_obj_map", ".", "reference", ")", "if", "pred_obj_map", ".", "json_query", ":", "# if pred_obj_map.json_query 
==\"$.schema_logo\":", "# pdb.set_trace()", "if", "ref_key", "in", "binding", ":", "for", "item", "in", "binding", "[", "ref_key", "]", ":", "output", ".", "add", "(", "(", "entity", ",", "predicate", ",", "item", ".", "rdflib", ")", ")", "continue", "else", ":", "if", "ref_key", "in", "binding", ":", "object_", "=", "__get_object__", "(", "binding", "[", "ref_key", "]", ")", "output", ".", "add", "(", "(", "entity", ",", "predicate", ",", "object_", ")", ")", "continue", "if", "pred_obj_map", ".", "constant", "is", "not", "None", ":", "if", "isinstance", "(", "entity", ",", "BaseRdfDataType", ")", ":", "entity", "=", "entity", ".", "rdflib", "output", ".", "add", "(", "(", "entity", ",", "predicate", ",", "pred_obj_map", ".", "constant", ")", ")", "continue", "json_query", "=", "None", "if", "pred_obj_map", ".", "json_query", "and", "self", ".", "use_json_qry", ":", "json_query", "=", "pred_obj_map", ".", "json_query", "start", "=", "datetime", ".", "datetime", ".", "now", "(", ")", "# pdb.set_trace()", "# if str(pred_obj_map.predicate) == \"http://purl.org/dc/terms/creator\":", "# pdb.set_trace()", "pre_obj_bindings", "=", "kwargs", "[", "'dataset'", "]", ".", "json_qry", "(", "json_query", ",", "{", "'$'", ":", "entity", "}", ")", "else", ":", "sparql_query", "=", "PREFIX", "+", "pred_obj_map", ".", "query", ".", "format", "(", "*", "*", "kwargs", ")", "pre_obj_bindings", "=", "self", ".", "__get_bindings__", "(", "sparql_query", ",", "output_format", ")", "for", "row", "in", "pre_obj_bindings", ":", "if", "json_query", "and", "self", ".", "use_json_qry", ":", "if", "isinstance", "(", "entity", ",", "BaseRdfDataType", ")", ":", "entity", "=", "entity", ".", "rdflib", "output", ".", "add", "(", "(", "entity", ",", "predicate", ",", "row", ".", "rdflib", ")", ")", "else", ":", "object_", "=", "__get_object__", "(", "row", ")", "if", "object_", "is", "None", ":", "continue", "if", "isinstance", "(", "entity", ",", "BaseRdfDataType", ")", ":", "entity", "=", "entity", ".", "rdflib", "output", ".", "add", "(", "(", "entity", ",", "predicate", ",", "object_", ")", ")", "subjects", ".", "append", "(", "entity", ")", "return", "subjects" ]
Execute
[ "Execute" ]
python
train
google/grr
grr/server/grr_response_server/aff4_objects/filestore.py
https://github.com/google/grr/blob/5cef4e8e2f0d5df43ea4877e9c798e0bf60bfe74/grr/server/grr_response_server/aff4_objects/filestore.py#L636-L660
def Run(self): """Create FileStore and HashFileStore namespaces.""" if not data_store.AFF4Enabled(): return try: filestore = aff4.FACTORY.Create( FileStore.PATH, FileStore, mode="rw", token=aff4.FACTORY.root_token) filestore.Close() hash_filestore = aff4.FACTORY.Create( HashFileStore.PATH, HashFileStore, mode="rw", token=aff4.FACTORY.root_token) hash_filestore.Close() nsrl_filestore = aff4.FACTORY.Create( NSRLFileStore.PATH, NSRLFileStore, mode="rw", token=aff4.FACTORY.root_token) nsrl_filestore.Close() except access_control.UnauthorizedAccess: # The aff4:/files area is ACL protected, this might not work on components # that have ACL enforcement. pass
[ "def", "Run", "(", "self", ")", ":", "if", "not", "data_store", ".", "AFF4Enabled", "(", ")", ":", "return", "try", ":", "filestore", "=", "aff4", ".", "FACTORY", ".", "Create", "(", "FileStore", ".", "PATH", ",", "FileStore", ",", "mode", "=", "\"rw\"", ",", "token", "=", "aff4", ".", "FACTORY", ".", "root_token", ")", "filestore", ".", "Close", "(", ")", "hash_filestore", "=", "aff4", ".", "FACTORY", ".", "Create", "(", "HashFileStore", ".", "PATH", ",", "HashFileStore", ",", "mode", "=", "\"rw\"", ",", "token", "=", "aff4", ".", "FACTORY", ".", "root_token", ")", "hash_filestore", ".", "Close", "(", ")", "nsrl_filestore", "=", "aff4", ".", "FACTORY", ".", "Create", "(", "NSRLFileStore", ".", "PATH", ",", "NSRLFileStore", ",", "mode", "=", "\"rw\"", ",", "token", "=", "aff4", ".", "FACTORY", ".", "root_token", ")", "nsrl_filestore", ".", "Close", "(", ")", "except", "access_control", ".", "UnauthorizedAccess", ":", "# The aff4:/files area is ACL protected, this might not work on components", "# that have ACL enforcement.", "pass" ]
Create FileStore and HashFileStore namespaces.
[ "Create", "FileStore", "and", "HashFileStore", "namespaces", "." ]
python
train
rbw/pysnow
pysnow/query_builder.py
https://github.com/rbw/pysnow/blob/87c8ce0d3a089c2f59247f30efbd545fcdb8e985/pysnow/query_builder.py#L87-L100
def equals(self, data): """Adds new `IN` or `=` condition depending on if a list or string was provided :param data: string or list of values :raise: - QueryTypeError: if `data` is of an unexpected type """ if isinstance(data, six.string_types): return self._add_condition('=', data, types=[int, str]) elif isinstance(data, list): return self._add_condition('IN', ",".join(map(str, data)), types=[str]) raise QueryTypeError('Expected value of type `str` or `list`, not %s' % type(data))
[ "def", "equals", "(", "self", ",", "data", ")", ":", "if", "isinstance", "(", "data", ",", "six", ".", "string_types", ")", ":", "return", "self", ".", "_add_condition", "(", "'='", ",", "data", ",", "types", "=", "[", "int", ",", "str", "]", ")", "elif", "isinstance", "(", "data", ",", "list", ")", ":", "return", "self", ".", "_add_condition", "(", "'IN'", ",", "\",\"", ".", "join", "(", "map", "(", "str", ",", "data", ")", ")", ",", "types", "=", "[", "str", "]", ")", "raise", "QueryTypeError", "(", "'Expected value of type `str` or `list`, not %s'", "%", "type", "(", "data", ")", ")" ]
Adds new `IN` or `=` condition depending on if a list or string was provided :param data: string or list of values :raise: - QueryTypeError: if `data` is of an unexpected type
[ "Adds", "new", "IN", "or", "=", "condition", "depending", "on", "if", "a", "list", "or", "string", "was", "provided" ]
python
train
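A hedged illustration of the two branches above -- `=` for a single value, `IN` for a list (QueryBuilder and its field method are part of pysnow's public API, though the exact query strings shown are inferred from this record):

import pysnow

qb = pysnow.QueryBuilder().field('state').equals('2')
# single value -> sysparm query fragment 'state=2'

qb = pysnow.QueryBuilder().field('state').equals(['1', '2'])
# list input joins the values and uses the IN operator -> 'stateIN1,2'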
althonos/InstaLooter
instalooter/batch.py
https://github.com/althonos/InstaLooter/blob/e894d8da368dd57423dd0fda4ac479ea2ea0c3c1/instalooter/batch.py#L120-L128
def run_all(self): # type: () -> None """Run all the jobs specified in the configuration file. """ logger.debug("Creating batch session") session = Session() for section_id in self.parser.sections(): self.run_job(section_id, session=session)
[ "def", "run_all", "(", "self", ")", ":", "# type: () -> None", "logger", ".", "debug", "(", "\"Creating batch session\"", ")", "session", "=", "Session", "(", ")", "for", "section_id", "in", "self", ".", "parser", ".", "sections", "(", ")", ":", "self", ".", "run_job", "(", "section_id", ",", "session", "=", "session", ")" ]
Run all the jobs specified in the configuration file.
[ "Run", "all", "the", "jobs", "specified", "in", "the", "configuration", "file", "." ]
python
train
Jajcus/pyxmpp2
pyxmpp2/stanzaprocessor.py
https://github.com/Jajcus/pyxmpp2/blob/14a40a3950910a9cd008b55f0d8905aa0186ce18/pyxmpp2/stanzaprocessor.py#L277-L300
def process_message(self, stanza):
    """Process message stanza.

    Pass it to a handler of the stanza's type and payload namespace.
    If no handler for the actual stanza type succeeds then handlers
    for type "normal" are used.

    :Parameters:
        - `stanza`: message stanza to be handled
    """
    stanza_type = stanza.stanza_type
    if stanza_type is None:
        stanza_type = "normal"
    if self.__try_handlers(self._message_handlers, stanza,
                                            stanza_type = stanza_type):
        return True
    if stanza_type not in ("error", "normal"):
        # try 'normal' handler additionally to the regular handler
        return self.__try_handlers(self._message_handlers, stanza,
                                            stanza_type = "normal")
    return False
[ "def", "process_message", "(", "self", ",", "stanza", ")", ":", "stanza_type", "=", "stanza", ".", "stanza_type", "if", "stanza_type", "is", "None", ":", "stanza_type", "=", "\"normal\"", "if", "self", ".", "__try_handlers", "(", "self", ".", "_message_handlers", ",", "stanza", ",", "stanza_type", "=", "stanza_type", ")", ":", "return", "True", "if", "stanza_type", "not", "in", "(", "\"error\"", ",", "\"normal\"", ")", ":", "# try 'normal' handler additionally to the regular handler", "return", "self", ".", "__try_handlers", "(", "self", ".", "_message_handlers", ",", "stanza", ",", "stanza_type", "=", "\"normal\"", ")", "return", "False" ]
Process message stanza.

        Pass it to a handler of the stanza's type and payload namespace.
        If no handler for the actual stanza type succeeds then handlers
        for type "normal" are used.

        :Parameters:
            - `stanza`: message stanza to be handled
[ "Process", "message", "stanza", "." ]
python
valid
smarie/python-parsyfiles
parsyfiles/parsing_registries.py
https://github.com/smarie/python-parsyfiles/blob/344b37e1151e8d4e7c2ee49ae09d6568715ae64e/parsyfiles/parsing_registries.py#L922-L930
def get_all_conversion_chains_to_type(self, to_type: Type[Any])\ -> Tuple[List[Converter], List[Converter], List[Converter]]: """ Utility method to find all converters to a given type :param to_type: :return: """ return self.get_all_conversion_chains(to_type=to_type)
[ "def", "get_all_conversion_chains_to_type", "(", "self", ",", "to_type", ":", "Type", "[", "Any", "]", ")", "->", "Tuple", "[", "List", "[", "Converter", "]", ",", "List", "[", "Converter", "]", ",", "List", "[", "Converter", "]", "]", ":", "return", "self", ".", "get_all_conversion_chains", "(", "to_type", "=", "to_type", ")" ]
Utility method to find all converters to a given type :param to_type: :return:
[ "Utility", "method", "to", "find", "all", "converters", "to", "a", "given", "type" ]
python
train
spacetelescope/drizzlepac
drizzlepac/alignimages.py
https://github.com/spacetelescope/drizzlepac/blob/15bec3c929a6a869d9e71b9398ced43ede0620f1/drizzlepac/alignimages.py#L868-L897
def generate_astrometric_catalog(imglist, **pars):
    """Generates a catalog of all sources from an existing astrometric catalog that
       are in or near the FOVs of the images in the input list.

    Parameters
    ----------
    imglist : list
        List of one or more calibrated fits images that will be used for catalog generation.

    Returns
    =======
    ref_table : object
        Astropy Table object of the catalog

    """
    # generate catalog
    temp_pars = pars.copy()
    if pars['output'] == True:
        pars['output'] = 'ref_cat.ecsv'
    else:
        pars['output'] = None
    out_catalog = amutils.create_astrometric_catalog(imglist,**pars)
    pars = temp_pars.copy()

    #if the catalog has contents, write the catalog to ascii text file
    if len(out_catalog) > 0 and pars['output']:
        catalog_filename = "refcatalog.cat"
        out_catalog.write(catalog_filename, format="ascii.fast_commented_header")
        log.info("Wrote reference catalog {}.".format(catalog_filename))

    return(out_catalog)
[ "def", "generate_astrometric_catalog", "(", "imglist", ",", "*", "*", "pars", ")", ":", "# generate catalog", "temp_pars", "=", "pars", ".", "copy", "(", ")", "if", "pars", "[", "'output'", "]", "==", "True", ":", "pars", "[", "'output'", "]", "=", "'ref_cat.ecsv'", "else", ":", "pars", "[", "'output'", "]", "=", "None", "out_catalog", "=", "amutils", ".", "create_astrometric_catalog", "(", "imglist", ",", "*", "*", "pars", ")", "pars", "=", "temp_pars", ".", "copy", "(", ")", "#if the catalog has contents, write the catalog to ascii text file", "if", "len", "(", "out_catalog", ")", ">", "0", "and", "pars", "[", "'output'", "]", ":", "catalog_filename", "=", "\"refcatalog.cat\"", "out_catalog", ".", "write", "(", "catalog_filename", ",", "format", "=", "\"ascii.fast_commented_header\"", ")", "log", ".", "info", "(", "\"Wrote reference catalog {}.\"", ".", "format", "(", "catalog_filename", ")", ")", "return", "(", "out_catalog", ")" ]
Generates a catalog of all sources from an existing astrometric catalog that are in or near the FOVs of the images in the input list.

    Parameters
    ----------
    imglist : list
        List of one or more calibrated fits images that will be used for catalog generation.

    Returns
    =======
    ref_table : object
        Astropy Table object of the catalog
[ "Generates", "a", "catalog", "of", "all", "sources", "from", "an", "existing", "astrometric", "catalog", "that", "are", "in", "or", "near", "the", "FOVs", "of", "the", "images", "in", "the", "input", "list", "." ]
python
train
flo-compbio/genometools
genometools/gdc/tcga.py
https://github.com/flo-compbio/genometools/blob/dd962bb26d60a0f14ca14d8c9a4dd75768962c7d/genometools/gdc/tcga.py#L174-L221
def get_masked_cnv_manifest(tcga_id): """Get manifest for masked TCGA copy-number variation data. Params ------ tcga_id : str The TCGA project ID. download_file : str The path of the download file. Returns ------- `pandas.DataFrame` The manifest. """ payload = { "filters": json.dumps({ "op": "and", "content" : [ { "op":"in", "content":{ "field":"cases.project.program.name", "value":["TCGA"]}}, { "op":"in", "content":{ "field":"cases.project.project_id", "value":[tcga_id]}}, { "op":"in", "content":{ "field":"files.data_category", "value":["Copy Number Variation"]}}, { "op":"in", "content":{ "field":"files.data_type", "value":["Masked Copy Number Segment"]}}] }), "return_type":"manifest", "size":10000, } r = requests.get('https://gdc-api.nci.nih.gov/files', params=payload) df = pd.read_csv(io.StringIO(r.text), sep='\t', header=0) logger.info('Obtained manifest with %d files.', df.shape[0]) return df
[ "def", "get_masked_cnv_manifest", "(", "tcga_id", ")", ":", "payload", "=", "{", "\"filters\"", ":", "json", ".", "dumps", "(", "{", "\"op\"", ":", "\"and\"", ",", "\"content\"", ":", "[", "{", "\"op\"", ":", "\"in\"", ",", "\"content\"", ":", "{", "\"field\"", ":", "\"cases.project.program.name\"", ",", "\"value\"", ":", "[", "\"TCGA\"", "]", "}", "}", ",", "{", "\"op\"", ":", "\"in\"", ",", "\"content\"", ":", "{", "\"field\"", ":", "\"cases.project.project_id\"", ",", "\"value\"", ":", "[", "tcga_id", "]", "}", "}", ",", "{", "\"op\"", ":", "\"in\"", ",", "\"content\"", ":", "{", "\"field\"", ":", "\"files.data_category\"", ",", "\"value\"", ":", "[", "\"Copy Number Variation\"", "]", "}", "}", ",", "{", "\"op\"", ":", "\"in\"", ",", "\"content\"", ":", "{", "\"field\"", ":", "\"files.data_type\"", ",", "\"value\"", ":", "[", "\"Masked Copy Number Segment\"", "]", "}", "}", "]", "}", ")", ",", "\"return_type\"", ":", "\"manifest\"", ",", "\"size\"", ":", "10000", ",", "}", "r", "=", "requests", ".", "get", "(", "'https://gdc-api.nci.nih.gov/files'", ",", "params", "=", "payload", ")", "df", "=", "pd", ".", "read_csv", "(", "io", ".", "StringIO", "(", "r", ".", "text", ")", ",", "sep", "=", "'\\t'", ",", "header", "=", "0", ")", "logger", ".", "info", "(", "'Obtained manifest with %d files.'", ",", "df", ".", "shape", "[", "0", "]", ")", "return", "df" ]
Get manifest for masked TCGA copy-number variation data.

Parameters
----------
tcga_id : str
    The TCGA project ID.

Returns
-------
`pandas.DataFrame`
    The manifest.
[ "Get", "manifest", "for", "masked", "TCGA", "copy", "-", "number", "variation", "data", ".", "Params", "------", "tcga_id", ":", "str", "The", "TCGA", "project", "ID", ".", "download_file", ":", "str", "The", "path", "of", "the", "download", "file", ".", "Returns", "-------", "pandas", ".", "DataFrame", "The", "manifest", "." ]
python
train
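A usage sketch for the genometools record, assuming the function is importable from genometools.gdc.tcga as the path field suggests and that the legacy GDC endpoint is still reachable. The project ID is an example, not part of the record.

# Hypothetical usage; 'TCGA-BRCA' is an example project ID.
from genometools.gdc.tcga import get_masked_cnv_manifest

manifest = get_masked_cnv_manifest('TCGA-BRCA')
print(manifest.shape)  # one row per masked copy-number segment file
manifest.to_csv('gdc_manifest.tsv', sep='\t', index=False)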
mmp2/megaman
megaman/geometry/rmetric.py
https://github.com/mmp2/megaman/blob/faccaf267aad0a8b18ec8a705735fd9dd838ca1e/megaman/geometry/rmetric.py#L270-L281
def get_rmetric(self, mode_inv='svd', return_svd=False):
    """ Compute the Riemannian metric """
    if self.H is None:
        self.H, self.G, self.Hvv, self.Hsvals = riemann_metric(self.Y, self.L, self.mdimG,
                                                               invert_h=True,
                                                               mode_inv=mode_inv)
    if self.G is None:
        self.G, self.Hvv, self.Hsvals, self.Gsvals = compute_G_from_H(self.H, mode_inv=self.mode_inv)
    if mode_inv == 'svd' and return_svd:
        return self.G, self.Hvv, self.Hsvals, self.Gsvals
    else:
        return self.G
[ "def", "get_rmetric", "(", "self", ",", "mode_inv", "=", "'svd'", ",", "return_svd", "=", "False", ")", ":", "if", "self", ".", "H", "is", "None", ":", "self", ".", "H", ",", "self", ".", "G", ",", "self", ".", "Hvv", ",", "self", ".", "Hsval", "=", "riemann_metric", "(", "self", ".", "Y", ",", "self", ".", "L", ",", "self", ".", "mdimG", ",", "invert_h", "=", "True", ",", "mode_inv", "=", "mode_inv", ")", "if", "self", ".", "G", "is", "None", ":", "self", ".", "G", ",", "self", ".", "Hvv", ",", "self", ".", "Hsvals", ",", "self", ".", "Gsvals", "=", "compute_G_from_H", "(", "self", ".", "H", ",", "mode_inv", "=", "self", ".", "mode_inv", ")", "if", "mode_inv", "is", "'svd'", "and", "return_svd", ":", "return", "self", ".", "G", ",", "self", ".", "Hvv", ",", "self", ".", "Hsvals", ",", "self", ".", "Gsvals", "else", ":", "return", "self", ".", "G" ]
Compute the Riemannian metric
[ "Compute", "the", "Reimannian", "Metric" ]
python
train
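A sketch of calling get_rmetric. The record shows only the method, so the enclosing class name (RiemannMetric) and its constructor signature are assumptions inferred from the module path megaman/geometry/rmetric.py, not confirmed by the record.

# Hypothetical sketch; class name and constructor arguments are assumed.
import numpy as np
from megaman.geometry.rmetric import RiemannMetric

Y = np.random.rand(200, 2)  # a 2-D embedding of 200 points
L = np.eye(200)             # stand-in for a graph Laplacian
rm = RiemannMetric(Y, L)
G = rm.get_rmetric()        # dual Riemannian metric at each point
G, Hvv, Hsvals, Gsvals = rm.get_rmetric(mode_inv='svd', return_svd=True)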
mdickinson/bigfloat
bigfloat/core.py
https://github.com/mdickinson/bigfloat/blob/e5fdd1048615191ed32a2b7460e14b3b3ff24662/bigfloat/core.py#L1359-L1378
def cmpabs(op1, op2):
    """
    Compare the absolute values of op1 and op2.

    Return a positive value if abs(op1) > abs(op2), zero if
    abs(op1) == abs(op2), and a negative value if abs(op1) < abs(op2).

    Both op1 and op2 are considered at their own full precision, which may
    differ. If one of the operands is NaN, raise ValueError.

    Note: This function may be useful to distinguish the three possible
    cases. If you need to distinguish two cases only, it is recommended to
    use the predicate functions like 'greaterequal'; they behave like the
    IEEE 754 comparisons, in particular when one or both arguments are NaN.

    """
    op1 = BigFloat._implicit_convert(op1)
    op2 = BigFloat._implicit_convert(op2)
    if is_nan(op1) or is_nan(op2):
        raise ValueError("Cannot perform comparison with NaN.")
    return mpfr.mpfr_cmpabs(op1, op2)
[ "def", "cmpabs", "(", "op1", ",", "op2", ")", ":", "op1", "=", "BigFloat", ".", "_implicit_convert", "(", "op1", ")", "op2", "=", "BigFloat", ".", "_implicit_convert", "(", "op2", ")", "if", "is_nan", "(", "op1", ")", "or", "is_nan", "(", "op2", ")", ":", "raise", "ValueError", "(", "\"Cannot perform comparison with NaN.\"", ")", "return", "mpfr", ".", "mpfr_cmpabs", "(", "op1", ",", "op2", ")" ]
Compare the absolute values of op1 and op2.

Return a positive value if abs(op1) > abs(op2), zero if
abs(op1) == abs(op2), and a negative value if abs(op1) < abs(op2).

Both op1 and op2 are considered at their own full precision, which may
differ. If one of the operands is NaN, raise ValueError.

Note: This function may be useful to distinguish the three possible cases.
If you need to distinguish two cases only, it is recommended to use the
predicate functions like 'greaterequal'; they behave like the IEEE 754
comparisons, in particular when one or both arguments are NaN.
[ "Compare", "the", "absolute", "values", "of", "op1", "and", "op2", "." ]
python
train
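A usage sketch for cmpabs, assuming it is exported at the bigfloat package level alongside BigFloat. The comparison is on absolute values, and NaN operands raise ValueError.

# Assumes cmpabs and BigFloat are importable from the bigfloat package.
from bigfloat import BigFloat, cmpabs

assert cmpabs(BigFloat(-3), 2) > 0   # |-3| > |2|
assert cmpabs(BigFloat(-2), 2) == 0  # |-2| == |2|
assert cmpabs(BigFloat(1), -2) < 0   # |1| < |-2|
try:
    cmpabs(float('nan'), 1)
except ValueError as exc:
    print(exc)  # Cannot perform comparison with NaN.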