Dataset columns (name, type, observed range of values):

  column            type       range / values
  ----------------  ---------  ---------------------
  repo              string     lengths 7 - 55
  path              string     lengths 4 - 223
  url               string     lengths 87 - 315
  code              string     lengths 75 - 104k
  code_tokens       sequence
  docstring         string     lengths 1 - 46.9k
  docstring_tokens  sequence
  language          string     1 distinct class
  partition         string     3 distinct classes
  avg_line_len      float64    7.91 - 980

repo: sdispater/orator
path: orator/orm/collection.py
url:  https://github.com/sdispater/orator/blob/bd90bf198ee897751848f9a92e49d18e60a74136/orator/orm/collection.py#L7-L16
code:

def load(self, *relations):
    """
    Load a set of relationships onto the collection.
    """
    if len(self.items) > 0:
        query = self.first().new_query().with_(*relations)
        self._set_items(query.eager_load_relations(self.items))

    return self

language: python | partition: train | avg_line_len: 28.4

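As a usage note: a minimal sketch of calling this eager loader, assuming a hypothetical Orator `User` model with a `posts` relationship (neither name is in the source row).

users = User.where('active', True).get()  # hypothetical query; returns a Collection
users.load('posts')  # eager-loads `posts` onto every item in one extra query, then returns self
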
repo: arne-cl/discoursegraphs
path: src/discoursegraphs/discoursegraph.py
url:  https://github.com/arne-cl/discoursegraphs/blob/842f0068a3190be2c75905754521b176b25a54fb/src/discoursegraphs/discoursegraph.py#L686-L723
code:

def merge_graphs(self, other_docgraph, verbose=False):
    """
    Merges another document graph into the current one, thereby adding all
    the necessary nodes and edges (with attributes, layers etc.).

    NOTE: This will only work if both graphs have exactly the same
    tokenization.
    """
    # keep track of all merged/old root nodes in case we need to
    # delete them or their attributes (e.g. 'metadata')
    if hasattr(self, 'merged_rootnodes'):
        self.merged_rootnodes.append(other_docgraph.root)
    else:
        self.merged_rootnodes = [other_docgraph.root]

    # renaming the tokens of the other graph to match this one
    rename_tokens(other_docgraph, self, verbose=verbose)
    self.add_nodes_from(other_docgraph.nodes(data=True))

    # copy token node attributes to the current namespace
    for node_id, node_attrs in other_docgraph.nodes(data=True):
        if istoken(other_docgraph, node_id) and \
                self.ns + ':token' not in self.node[node_id]:
            self.node[node_id].update(
                {self.ns + ':token': other_docgraph.get_token(node_id)})
    self.add_edges_from(other_docgraph.edges(data=True))

    # workaround for issues #89 and #96
    # copy the token node IDs / sentence node IDs from the other graph,
    # if this graph doesn't have such lists, yet
    if other_docgraph.name and not self.name:
        self.name = other_docgraph.name
    if other_docgraph.tokens and not self.tokens:
        self.tokens = other_docgraph.tokens
    if other_docgraph.sentences and not self.sentences:
        self.sentences = other_docgraph.sentences

    # there should be no dangling, unused root nodes in a merged graph
    self.merge_rootnodes(other_docgraph)

language: python | partition: train | avg_line_len: 47.236842

repo: jilljenn/tryalgo
path: tryalgo/pq_tree.py
url:  https://github.com/jilljenn/tryalgo/blob/89a4dd9655e7b6b0a176f72b4c60d0196420dfe1/tryalgo/pq_tree.py#L247-L271
code:

def consecutive_ones_property(sets, universe=None):
    """ Check the consecutive ones property.

    :param list sets: is a list of subsets of the ground set.
    :param groundset: is the set of all elements,
                      by default it is the union of the given sets
    :returns: returns a list of the ordered ground set where every given
              set is consecutive, or None if there is no solution.
    :complexity: O(len(groundset) * len(sets))
    :disclaimer: an optimal implementation would have complexity
                 O(len(groundset) + len(sets) + sum(map(len, sets))),
                 and there are more recent easier algorithms for this problem.
    """
    if universe is None:
        universe = set()
        for S in sets:
            universe |= set(S)
    tree = PQ_tree(universe)
    try:
        for S in sets:
            tree.reduce(S)
        return tree.border()
    except IsNotC1P:
        return None

language: python | partition: train | avg_line_len: 37.2

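A minimal call sketch for this function; the example set systems are illustrative, not from the source.

from tryalgo.pq_tree import consecutive_ones_property

# [1, 2, 3] orders the ground set so both {1, 2} and {2, 3} are consecutive
print(consecutive_ones_property([{1, 2}, {2, 3}]))

# no ordering of {1, 2, 3} keeps all three pairs consecutive, so this is None
print(consecutive_ones_property([{1, 2}, {2, 3}, {1, 3}]))
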
repo: bretth/woven
path: woven/virtualenv.py
url:  https://github.com/bretth/woven/blob/ec1da7b401a335f43129e7115fe7a4d145649f1e/woven/virtualenv.py#L184-L207
code:

def mkvirtualenv():
    """
    Create the virtualenv project environment
    """
    root = '/'.join([deployment_root(), 'env'])
    path = '/'.join([root, env.project_fullname])
    dirs_created = []
    if env.verbosity:
        print env.host, 'CREATING VIRTUALENV', path
    if not exists(root):
        dirs_created += mkdirs(root)
    with cd(root):
        run(' '.join(["virtualenv", env.project_fullname]))
    with cd(path):
        dirs_created += mkdirs('egg_cache')
        sudo('chown -R %s:www-data egg_cache' % env.user)
        sudo('chmod -R g+w egg_cache')
        run(''.join(["echo 'cd ", path, '/', 'project', '/',
                     env.project_package_name, '/sitesettings',
                     "' > bin/postactivate"]))
        sudo('chmod ugo+rwx bin/postactivate')

    # Create a state
    out = State(' '.join([env.host, 'virtualenv', path, 'created']))
    out.object = dirs_created + ['bin', 'lib', 'include']
    out.failed = False
    return out

language: python | partition: train | avg_line_len: 37.125

repo: suds-community/suds
path: suds/sudsobject.py
url:  https://github.com/suds-community/suds/blob/6fb0a829337b5037a66c20aae6f89b41acd77e40/suds/sudsobject.py#L326-L350
code:

def print_dictionary(self, d, h, n, nl=False):
    """Print complex using the specified indent (n) and newline (nl)."""
    if d in h:
        return "{}..."
    h.append(d)
    s = []
    if nl:
        s.append("\n")
        s.append(self.indent(n))
    s.append("{")
    for item in d.items():
        s.append("\n")
        s.append(self.indent(n + 1))
        if isinstance(item[1], (list, tuple)):
            s.append(tostr(item[0]))
            s.append("[]")
        else:
            s.append(tostr(item[0]))
        s.append(" = ")
        s.append(self.process(item[1], h, n, True))
    s.append("\n")
    s.append(self.indent(n))
    s.append("}")
    h.pop()
    return "".join(s)

language: python | partition: train | avg_line_len: 30.52

repo: googleapis/google-cloud-python
path: storage/google/cloud/storage/acl.py
url:  https://github.com/googleapis/google-cloud-python/blob/85e80125a59cb10f8cb105f25ecc099e4b940b50/storage/google/cloud/storage/acl.py#L506-L524
code:

def save_predefined(self, predefined, client=None):
    """Save this ACL for the current bucket using a predefined ACL.

    If :attr:`user_project` is set, bills the API request to that project.

    :type predefined: str
    :param predefined: An identifier for a predefined ACL.  Must be one of
                       the keys in :attr:`PREDEFINED_JSON_ACLS` or
                       :attr:`PREDEFINED_XML_ACLS` (which will be aliased
                       to the corresponding JSON name).  If passed, `acl`
                       must be None.

    :type client: :class:`~google.cloud.storage.client.Client` or
                  ``NoneType``
    :param client: Optional. The client to use.  If not passed, falls back
                   to the ``client`` stored on the ACL's parent.
    """
    predefined = self.validate_predefined(predefined)
    self._save(None, predefined, client)

language: python | partition: train | avg_line_len: 49

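A hedged usage sketch: the bucket name below is hypothetical, and "publicRead" is one of the JSON predefined ACL identifiers the docstring refers to.

from google.cloud import storage

client = storage.Client()
bucket = client.bucket("my-bucket")  # hypothetical bucket name
# applies the predefined ACL server-side rather than sending individual entries
bucket.acl.save_predefined("publicRead")
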
repo: JohnVinyard/zounds
path: zounds/spectral/frequencyscale.py
url:  https://github.com/JohnVinyard/zounds/blob/337b3f98753d09eaab1c72dcd37bb852a3fa5ac6/zounds/spectral/frequencyscale.py#L387-L398
code:

def from_sample_rate(sample_rate, n_bands, always_even=False):
    """
    Return a :class:`~zounds.spectral.LinearScale` instance whose upper
    frequency bound is informed by the nyquist frequency of the sample rate.

    Args:
        sample_rate (SamplingRate): the sample rate whose nyquist frequency
            will serve as the upper frequency bound of this scale
        n_bands (int): the number of evenly-spaced frequency bands
    """
    fb = FrequencyBand(0, sample_rate.nyquist)
    return LinearScale(fb, n_bands, always_even=always_even)

language: python | partition: train | avg_line_len: 48.666667

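A brief sketch of the intended call pattern, assuming zounds exposes a 44.1 kHz sample-rate object (SR44100) at the package level; treat the names as assumptions.

import zounds

# the scale's upper bound becomes the nyquist frequency, 22050 Hz (assumed API)
scale = zounds.LinearScale.from_sample_rate(zounds.SR44100(), n_bands=128)
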
repo: grantmcconnaughey/django-field-history
path: field_history/json_nested_serializer.py
url:  https://github.com/grantmcconnaughey/django-field-history/blob/b61885d8bddf7d1f53addf3bea098f67fcf9a618/field_history/json_nested_serializer.py#L26-L65
code:

def serialize(self, queryset, **options):
    """
    Serialize a queryset.
    """
    self.options = options

    self.stream = options.pop("stream", six.StringIO())
    self.selected_fields = options.pop("fields", None)
    self.use_natural_keys = options.pop("use_natural_keys", False)
    if self.use_natural_keys and RemovedInDjango19Warning is not None:
        warnings.warn("``use_natural_keys`` is deprecated; use ``use_natural_foreign_keys`` instead.",
                      RemovedInDjango19Warning)
    self.use_natural_foreign_keys = options.pop('use_natural_foreign_keys', False) or self.use_natural_keys
    self.use_natural_primary_keys = options.pop('use_natural_primary_keys', False)

    self.start_serialization()
    self.first = True
    for obj in queryset:
        self.start_object(obj)
        # Use the concrete parent class' _meta instead of the object's _meta
        # This is to avoid local_fields problems for proxy models. Refs #17717.
        concrete_model = obj._meta.concrete_model
        # only one change local_fields -> fields for supporting nested models
        for field in concrete_model._meta.fields:
            if field.serialize:
                if field.remote_field is None:
                    if self.selected_fields is None or field.attname in self.selected_fields:
                        self.handle_field(obj, field)
                else:
                    if self.selected_fields is None or field.attname[:-3] in self.selected_fields:
                        self.handle_fk_field(obj, field)
        for field in concrete_model._meta.many_to_many:
            if field.serialize:
                if self.selected_fields is None or field.attname in self.selected_fields:
                    self.handle_m2m_field(obj, field)
        self.end_object(obj)
        if self.first:
            self.first = False
    self.end_serialization()
    return self.getvalue()

language: python | partition: train | avg_line_len: 50.8

repo: benoitguigal/python-epson-printer
path: epson_printer/epsonprinter.py
url:  https://github.com/benoitguigal/python-epson-printer/blob/7d89b2f21bc76d2cc4d5ad548e19a356ca92fbc5/epson_printer/epsonprinter.py#L103-L153
code:

def from_image(cls, image):
    """
    Create a PrintableImage from a PIL Image
    :param image: a PIL Image
    :return:
    """
    (w, h) = image.size

    # Thermal paper is 512 pixels wide
    if w > 512:
        ratio = 512. / w
        h = int(h * ratio)
        image = image.resize((512, h), Image.ANTIALIAS)
    if image.mode != '1':
        image = image.convert('1')

    pixels = np.array(list(image.getdata())).reshape(h, w)

    # Add white pixels so that image fits into bytes
    extra_rows = int(math.ceil(h / 24)) * 24 - h
    extra_pixels = np.ones((extra_rows, w), dtype=bool)
    pixels = np.vstack((pixels, extra_pixels))
    h += extra_rows

    nb_stripes = h / 24
    pixels = pixels.reshape(nb_stripes, 24, w).swapaxes(1, 2).reshape(-1, 8)

    nh = int(w / 256)
    nl = w % 256

    data = []
    pixels = np.invert(np.packbits(pixels))
    stripes = np.split(pixels, nb_stripes)
    for stripe in stripes:
        data.extend([
            ESC,
            42,  # *
            33,  # double density mode
            nl,
            nh])
        data.extend(stripe)
        data.extend([
            27,  # ESC
            74,  # J
            48])
    # account for double density mode
    height = h * 2
    return cls(data, height)

language: python | partition: train | avg_line_len: 27.156863

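A short usage sketch for this constructor; "logo.png" is a hypothetical file. The 512-pixel paper-width resize and 1-bit conversion happen inside the method.

from PIL import Image

# images wider than 512 px are resized to the thermal paper width first
printable = PrintableImage.from_image(Image.open("logo.png"))
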
repo: pandas-dev/pandas
path: pandas/core/arrays/datetimes.py
url:  https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/arrays/datetimes.py#L2114-L2140
code:

def _maybe_localize_point(ts, is_none, is_not_none, freq, tz):
    """
    Localize a start or end Timestamp to the timezone of the corresponding
    start or end Timestamp

    Parameters
    ----------
    ts : start or end Timestamp to potentially localize
    is_none : argument that should be None
    is_not_none : argument that should not be None
    freq : Tick, DateOffset, or None
    tz : str, timezone object or None

    Returns
    -------
    ts : Timestamp
    """
    # Make sure start and end are timezone localized if:
    # 1) freq = a Timedelta-like frequency (Tick)
    # 2) freq = None i.e. generating a linspaced range
    if isinstance(freq, Tick) or freq is None:
        localize_args = {'tz': tz, 'ambiguous': False}
    else:
        localize_args = {'tz': None}
    if is_none is None and is_not_none is not None:
        ts = ts.tz_localize(**localize_args)
    return ts

language: python | partition: train | avg_line_len: 32.444444

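This helper is private, but the localization it performs is the same call you would make on a naive Timestamp yourself; a minimal sketch with an illustrative timezone:

import pandas as pd

ts = pd.Timestamp("2019-01-01 01:30:00")
# for Tick (or None) frequencies the helper localizes with ambiguous=False
ts = ts.tz_localize(tz="US/Eastern", ambiguous=False)
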
repo: rstoneback/pysat
path: pysat/instruments/sw_kp.py
url:  https://github.com/rstoneback/pysat/blob/4ae1afd80e15e4449397d39dce8c3e969c32c422/pysat/instruments/sw_kp.py#L183-L234
code:

def download(date_array, tag, sat_id, data_path, user=None, password=None):
    """Routine to download Kp index data

    Parameters
    -----------
    tag : (string or NoneType)
        Denotes type of file to load.  Accepted types are '1min' and '5min'.
        (default=None)
    sat_id : (string or NoneType)
        Specifies the satellite ID for a constellation.  Not used.
        (default=None)
    data_path : (string or NoneType)
        Path to data directory.  If None is specified, the value previously
        set in Instrument.files.data_path is used.  (default=None)

    Returns
    --------
    Void : (NoneType)
        data downloaded to disk, if available.

    Notes
    -----
    Called by pysat. Not intended for direct use by user.

    """
    import ftplib
    from ftplib import FTP
    import sys

    ftp = FTP('ftp.gfz-potsdam.de')  # connect to host, default port
    ftp.login()  # user anonymous, passwd anonymous@
    ftp.cwd('/pub/home/obs/kp-ap/tab')

    for date in date_array:
        fname = 'kp{year:02d}{month:02d}.tab'
        fname = fname.format(year=(date.year - date.year//100*100),
                             month=date.month)
        local_fname = fname
        saved_fname = os.path.join(data_path, local_fname)
        try:
            print('Downloading file for ' + date.strftime('%D'))
            sys.stdout.flush()
            ftp.retrbinary('RETR ' + fname, open(saved_fname, 'wb').write)
        except ftplib.error_perm as exception:
            # if exception[0][0:3] != '550':
            if str(exception.args[0]).split(" ", 1)[0] != '550':
                raise
            else:
                os.remove(saved_fname)
                print('File not available for ' + date.strftime('%D'))

    ftp.close()
    return

language: python | partition: train | avg_line_len: 33.057692

repo: tensorflow/tensor2tensor
path: tensor2tensor/utils/registry.py
url:  https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/utils/registry.py#L174-L177
code:

def on_set(self, key, value):
    """Callback called on successful set. Uses function from __init__."""
    if self._on_set is not None:
        self._on_set(key, value)

language: python | partition: train | avg_line_len: 41

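The hook is easiest to see in a stripped-down registry; the names below are illustrative, not tensor2tensor's API.

class Registry(dict):
    """Dict that fires an optional callback whenever a key is set."""

    def __init__(self, on_set=None):
        super().__init__()
        self._on_set = on_set

    def __setitem__(self, key, value):
        super().__setitem__(key, value)
        if self._on_set is not None:  # same guard as in the source
            self._on_set(key, value)

registry = Registry(on_set=lambda k, v: print("registered:", k))
registry["transformer"] = object()  # prints: registered: transformer
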
repo: EUDAT-B2SAFE/B2HANDLE
path: b2handle/handleclient.py
url:  https://github.com/EUDAT-B2SAFE/B2HANDLE/blob/a6d216d459644e01fbdfd5b318a535950bc5cdbb/b2handle/handleclient.py#L1068-L1082
code:

def __send_handle_get_request(self, handle, indices=None):
    '''
    Send a HTTP GET request to the handle server to read either an
    entire handle or to some specified values from a handle record,
    using the requests module.

    :param handle: The handle.
    :param indices: Optional. A list of indices to delete. Defaults to
        None (i.e. the entire handle is deleted.). The list can contain
        integers or strings.
    :return: The server's response.
    '''
    resp = self.__handlesystemconnector.send_handle_get_request(handle, indices)
    return resp

language: python | partition: train | avg_line_len: 41.333333

repo: dswah/pyGAM
path: pygam/utils.py
url:  https://github.com/dswah/pyGAM/blob/b3e5c3cd580f0a3ad69f9372861624f67760c325/pygam/utils.py#L568-L707
code:

def b_spline_basis(x, edge_knots, n_splines=20, spline_order=3,
                   sparse=True, periodic=True, verbose=True):
    """
    tool to generate b-spline basis using vectorized De Boor recursion
    the basis functions extrapolate linearly past the end-knots.

    Parameters
    ----------
    x : array-like, with ndims == 1.
    edge_knots : array-like contaning locations of the 2 edge knots.
    n_splines : int. number of splines to generate. must be >= spline_order+1
                default: 20
    spline_order : int. order of spline basis to create
                   default: 3
    sparse : boolean. whether to return a sparse basis matrix or not.
             default: True
    verbose : bool, default: True
        whether to print warnings

    Returns
    -------
    basis : sparse csc matrix or array containing b-spline basis functions
            with shape (len(x), n_splines)
    """
    if np.ravel(x).ndim != 1:
        raise ValueError('Data must be 1-D, but found {}'
                         .format(np.ravel(x).ndim))

    if (n_splines < 1) or not isinstance(n_splines, numbers.Integral):
        raise ValueError('n_splines must be int >= 1')

    if (spline_order < 0) or not isinstance(spline_order, numbers.Integral):
        raise ValueError('spline_order must be int >= 1')

    if n_splines < spline_order + 1:
        raise ValueError('n_splines must be >= spline_order + 1. '
                         'found: n_splines = {} and spline_order = {}'
                         .format(n_splines, spline_order))

    if n_splines == 0 and verbose:
        warnings.warn('Requested 1 spline. This is equivalent to '
                      'fitting an intercept', stacklevel=2)

    n_splines += spline_order * periodic

    # rescale edge_knots to [0,1], and generate boundary knots
    edge_knots = np.sort(deepcopy(edge_knots))
    offset = edge_knots[0]
    scale = edge_knots[-1] - edge_knots[0]
    if scale == 0:
        scale = 1
    boundary_knots = np.linspace(0, 1, 1 + n_splines - spline_order)
    diff = np.diff(boundary_knots[:2])[0]

    # rescale x as well
    x = (np.ravel(deepcopy(x)) - offset) / scale

    # wrap periodic values
    if periodic:
        x = x % (1 + 1e-9)

    # append 0 and 1 in order to get derivatives for extrapolation
    x = np.r_[x, 0., 1.]

    # determine extrapolation indices
    x_extrapolte_l = (x < 0)
    x_extrapolte_r = (x > 1)
    x_interpolate = ~(x_extrapolte_r + x_extrapolte_l)

    # formatting
    x = np.atleast_2d(x).T
    n = len(x)

    # augment knots
    aug = np.arange(1, spline_order + 1) * diff
    aug_knots = np.r_[-aug[::-1], boundary_knots, 1 + aug]
    aug_knots[-1] += 1e-9  # want last knot inclusive

    # prepare Haar Basis
    bases = (x >= aug_knots[:-1]).astype(np.int) * \
            (x < aug_knots[1:]).astype(np.int)
    bases[-1] = bases[-2][::-1]  # force symmetric bases at 0 and 1

    # do recursion from Hastie et al. vectorized
    maxi = len(aug_knots) - 1
    for m in range(2, spline_order + 2):
        maxi -= 1

        # left sub-basis
        num = (x - aug_knots[:maxi])
        num *= bases[:, :maxi]
        denom = aug_knots[m-1: maxi+m-1] - aug_knots[:maxi]
        left = num / denom

        # right sub-basis
        num = (aug_knots[m: maxi+m] - x) * bases[:, 1: maxi+1]
        denom = aug_knots[m: maxi+m] - aug_knots[1: maxi+1]
        right = num / denom

        # track previous bases and update
        prev_bases = bases[-2:]
        bases = left + right

    if periodic and spline_order > 0:
        # make spline domain periodic
        bases[:, :spline_order] = np.max([bases[:, :spline_order],
                                          bases[:, -spline_order:]],
                                         axis=0)
        # remove extra splines used only for ensuring correct domain
        bases = bases[:, :-spline_order]

    # extrapolate
    # since we have repeated end-knots, only the last 2 basis functions are
    # non-zero at the end-knots, and they have equal and opposite gradient.
    if (any(x_extrapolte_r) or any(x_extrapolte_l)) and spline_order > 0:
        bases[~x_interpolate] = 0.

        denom = (aug_knots[spline_order:-1] - aug_knots[: -spline_order - 1])
        left = prev_bases[:, :-1] / denom

        denom = (aug_knots[spline_order+1:] - aug_knots[1: -spline_order])
        right = prev_bases[:, 1:] / denom

        grads = (spline_order) * (left - right)

        if any(x_extrapolte_l):
            val = grads[0] * x[x_extrapolte_l] + bases[-2]
            bases[x_extrapolte_l] = val
        if any(x_extrapolte_r):
            val = grads[1] * (x[x_extrapolte_r] - 1) + bases[-1]
            bases[x_extrapolte_r] = val

    # get rid of the added values at 0, and 1
    bases = bases[:-2]

    if sparse:
        return sp.sparse.csc_matrix(bases)

    return bases

language: python | partition: train | avg_line_len: 34.085714

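A minimal call sketch grounded in the docstring's parameters; the input values are illustrative.

import numpy as np
from pygam.utils import b_spline_basis

x = np.linspace(0, 10, 100)
# 20 cubic splines spanning the data range; result has shape (100, 20)
basis = b_spline_basis(x, edge_knots=np.array([0.0, 10.0]),
                       n_splines=20, spline_order=3,
                       sparse=False, periodic=False)
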
repo: jeffh/pyconstraints
path: pyconstraints/solvers.py
url:  https://github.com/jeffh/pyconstraints/blob/923abce2f9ba484d1964165616a253bbccd1a630/pyconstraints/solvers.py#L277-L293
code:

def _next(self, possible_solution):
    """Where the magic happens. Produces a generator that returns all
    solutions given a base solution to start searching.
    """
    # bail out if we have seen it already. See __iter__ to where seen is initially set.
    # A complete solution has all its variables set to a particular value.
    is_complete = (len(possible_solution) == len(self._vars))
    if is_complete:
        self._solutions_seen += 1
        if self.satisfies_constraints(possible_solution):
            yield dict(possible_solution)
    else:
        if self.is_feasible(possible_solution):
            for s in self.derived_solutions(possible_solution):
                for solution in self._next(s):
                    yield solution

language: python | partition: train | avg_line_len: 46.882353

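The recursive-generator backtracking pattern in _next can be shown standalone; the names below are illustrative, and a single feasibility check stands in for both satisfies_constraints and is_feasible.

def backtrack(assignment, variables, domains, is_feasible):
    """Yield every complete assignment that passes is_feasible."""
    if len(assignment) == len(variables):  # complete solution
        if is_feasible(assignment):
            yield dict(assignment)
        return
    var = variables[len(assignment)]
    for value in domains[var]:
        assignment[var] = value
        if is_feasible(assignment):  # prune infeasible branches early
            yield from backtrack(assignment, variables, domains, is_feasible)
        del assignment[var]
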
repo: pallets/werkzeug
path: src/werkzeug/wrappers/common_descriptors.py
url:  https://github.com/pallets/werkzeug/blob/a220671d66755a94630a212378754bb432811158/src/werkzeug/wrappers/common_descriptors.py#L147-L159
code:

def mimetype_params(self):
    """The mimetype parameters as dict. For example if the content
    type is ``text/html; charset=utf-8`` the params would be
    ``{'charset': 'utf-8'}``.

    .. versionadded:: 0.5
    """

    def on_update(d):
        self.headers["Content-Type"] = dump_options_header(self.mimetype, d)

    d = parse_options_header(self.headers.get("content-type", ""))[1]
    return CallbackDict(d, on_update)

language: python | partition: train | avg_line_len: 34.769231

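A usage sketch for the property, which is exposed on werkzeug response objects; the response body is illustrative.

from werkzeug.wrappers import Response

resp = Response("<h1>hi</h1>", content_type="text/html; charset=utf-8")
print(resp.mimetype_params)  # {'charset': 'utf-8'}
resp.mimetype_params["charset"] = "latin1"  # on_update rewrites the Content-Type header
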
repo: saltstack/salt
path: salt/modules/solr.py
url:  https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/solr.py#L902-L945
code:

def set_is_polling(polling, host=None, core_name=None):
    '''
    SLAVE CALL
    Prevent the slaves from polling the master for updates.

    polling : boolean
        True will enable polling. False will disable it.
    host : str (None)
        The solr host to query. __opts__['host'] is default.
    core_name : str (None)
        The name of the solr core if using cores. Leave this blank if you
        are not using cores or if you want to check all cores.

    Return : dict<str,obj>::

        {'success':boolean, 'data':dict, 'errors':list, 'warnings':list}

    CLI Example:

    .. code-block:: bash

        salt '*' solr.set_is_polling False
    '''
    ret = _get_return_dict()
    # since only slaves can call this let's check the config:
    if _is_master() and _get_none_or_value(host) is None:
        err = ['solr.set_is_polling can only be called by "slave" minions']
        return ret.update({'success': False, 'errors': err})

    cmd = "enablepoll" if polling else "disapblepoll"
    if _get_none_or_value(core_name) is None and _check_for_cores():
        success = True
        for name in __opts__['solr.cores']:
            resp = set_is_polling(cmd, host=host, core_name=name)
            if not resp['success']:
                success = False
            data = {name: {'data': resp['data']}}
            ret = _update_return_dict(ret, success, data,
                                      resp['errors'], resp['warnings'])
        return ret
    else:
        resp = _replication_request(cmd, host=host, core_name=core_name)
        return resp

language: python | partition: train | avg_line_len: 34.954545

repo: proycon/clam
path: clam/common/data.py
url:  https://github.com/proycon/clam/blob/09d15cfc26d7cbe0f5976cdd5424dc446d10dbf3/clam/common/data.py#L1881-L1887
code:

def getparent(self, profile):
    """Resolve a parent ID"""
    assert self.parent
    for inputtemplate in profile.input:
        if inputtemplate == self.parent:
            return inputtemplate
    raise Exception("Parent InputTemplate '" + self.parent + "' not found!")

language: python | partition: train | avg_line_len: 41

repo: ml4ai/delphi
path: delphi/GrFN/networks.py
url:  https://github.com/ml4ai/delphi/blob/6d03d8aafeab99610387c51b89c99738ff2abbe3/delphi/GrFN/networks.py#L641-L651
code:

def to_call_agraph(self):
    """Build a PyGraphviz AGraph object corresponding to a call
    graph of functions.
    """
    A = nx.nx_agraph.to_agraph(self.call_graph)
    A.graph_attr.update({"dpi": 227, "fontsize": 20, "fontname": "Menlo"})
    A.node_attr.update(
        {"shape": "rectangle", "color": "#650021", "style": "rounded"}
    )
    A.edge_attr.update({"color": "#650021", "arrowsize": 0.5})
    return A

language: python | partition: train | avg_line_len: 40.454545

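The returned AGraph can be laid out and rendered with standard pygraphviz calls; `grfn` below is a hypothetical network instance.

A = grfn.to_call_agraph()
A.draw("call_graph.png", prog="dot")  # standard pygraphviz layout + render
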
repo: Yubico/python-pyhsm
path: pyhsm/soft_hsm.py
url:  https://github.com/Yubico/python-pyhsm/blob/b6e2744d1ea15c352a0fc1d6ebc5950026b71311/pyhsm/soft_hsm.py#L95-L129
code:

def aesCCM(key, key_handle, nonce, data, decrypt=False):
    """
    Function implementing YubiHSM AEAD encrypt/decrypt in software.
    """
    if decrypt:
        (data, saved_mac) = _split_data(data, len(data) - pyhsm.defines.YSM_AEAD_MAC_SIZE)

    nonce = pyhsm.util.input_validate_nonce(nonce, pad=True)
    mac = _cbc_mac(key, key_handle, nonce, len(data))

    counter = _ctr_counter(key_handle, nonce, value=0)
    ctr_aes = AES.new(key, AES.MODE_CTR, counter=counter.next)

    out = []
    while data:
        (thisblock, data) = _split_data(data, pyhsm.defines.YSM_BLOCK_SIZE)
        # encrypt/decrypt and CBC MAC
        if decrypt:
            aes_out = ctr_aes.decrypt(thisblock)
            mac.update(aes_out)
        else:
            mac.update(thisblock)
            aes_out = ctr_aes.encrypt(thisblock)
        out.append(aes_out)
    # Finalize MAC
    counter.value = 0
    mac.finalize(counter.pack())
    if decrypt:
        if mac.get() != saved_mac:
            raise pyhsm.exception.YHSM_Error('AEAD integrity check failed')
    else:
        out.append(mac.get())
    return ''.join(out)

language: python | partition: train | avg_line_len: 31.171429

repo: UCL-INGI/INGInious
path: inginious/frontend/pages/course_admin/danger_zone.py
url:  https://github.com/UCL-INGI/INGInious/blob/cbda9a9c7f2b8e8eb1e6d7d51f0d18092086300c/inginious/frontend/pages/course_admin/danger_zone.py#L67-L91
code:

def restore_course(self, courseid, backup):
    """ Restores a course of given courseid to a date specified in backup
    (format : YYYYMMDD.HHMMSS) """
    self.wipe_course(courseid)
    filepath = os.path.join(self.backup_dir, courseid, backup + ".zip")
    with zipfile.ZipFile(filepath, "r") as zipf:
        aggregations = bson.json_util.loads(zipf.read("aggregations.json").decode("utf-8"))
        if len(aggregations) > 0:
            self.database.aggregations.insert(aggregations)

        user_tasks = bson.json_util.loads(zipf.read("user_tasks.json").decode("utf-8"))
        if len(user_tasks) > 0:
            self.database.user_tasks.insert(user_tasks)

        submissions = bson.json_util.loads(zipf.read("submissions.json").decode("utf-8"))
        for submission in submissions:
            for key in ["input", "archive"]:
                if key in submission and type(submission[key]) == bson.objectid.ObjectId:
                    submission[key] = self.submission_manager.get_gridfs().put(
                        zipf.read(key + "/" + str(submission[key]) + ".data"))
        if len(submissions) > 0:
            self.database.submissions.insert(submissions)

    self._logger.info("Course %s restored from backup directory.", courseid)

python
train
51.28
inveniosoftware/invenio-oaiserver
invenio_oaiserver/percolator.py
https://github.com/inveniosoftware/invenio-oaiserver/blob/eae765e32bd816ddc5612d4b281caf205518b512/invenio_oaiserver/percolator.py#L78-L91
def _new_percolator(spec, search_pattern):
    """Create new percolator associated with the new set."""
    if spec and search_pattern:
        query = query_string_parser(search_pattern=search_pattern).to_dict()
        for index in current_search.mappings.keys():
            # Create the percolator doc_type in the existing index for >= ES5
            # TODO: Consider doing this only once in app initialization
            percolator_doc_type = _get_percolator_doc_type(index)
            _create_percolator_mapping(index, percolator_doc_type)
            current_search_client.index(
                index=index, doc_type=percolator_doc_type,
                id='oaiset-{}'.format(spec), body={'query': query}
            )
[ "def", "_new_percolator", "(", "spec", ",", "search_pattern", ")", ":", "if", "spec", "and", "search_pattern", ":", "query", "=", "query_string_parser", "(", "search_pattern", "=", "search_pattern", ")", ".", "to_dict", "(", ")", "for", "index", "in", "current_search", ".", "mappings", ".", "keys", "(", ")", ":", "# Create the percolator doc_type in the existing index for >= ES5", "# TODO: Consider doing this only once in app initialization", "percolator_doc_type", "=", "_get_percolator_doc_type", "(", "index", ")", "_create_percolator_mapping", "(", "index", ",", "percolator_doc_type", ")", "current_search_client", ".", "index", "(", "index", "=", "index", ",", "doc_type", "=", "percolator_doc_type", ",", "id", "=", "'oaiset-{}'", ".", "format", "(", "spec", ")", ",", "body", "=", "{", "'query'", ":", "query", "}", ")" ]
Create new percolator associated with the new set.
[ "Create", "new", "percolator", "associated", "with", "the", "new", "set", "." ]
python
train
52.285714
google/grr
api_client/python/grr_api_client/hunt.py
https://github.com/google/grr/blob/5cef4e8e2f0d5df43ea4877e9c798e0bf60bfe74/api_client/python/grr_api_client/hunt.py#L342-L347
def ListHunts(context=None):
  """List all GRR hunts."""

  items = context.SendIteratorRequest("ListHunts", hunt_pb2.ApiListHuntsArgs())
  return utils.MapItemsIterator(lambda data: Hunt(data=data, context=context),
                                items)
[ "def", "ListHunts", "(", "context", "=", "None", ")", ":", "items", "=", "context", ".", "SendIteratorRequest", "(", "\"ListHunts\"", ",", "hunt_pb2", ".", "ApiListHuntsArgs", "(", ")", ")", "return", "utils", ".", "MapItemsIterator", "(", "lambda", "data", ":", "Hunt", "(", "data", "=", "data", ",", "context", "=", "context", ")", ",", "items", ")" ]
List all GRR hunts.
[ "List", "all", "GRR", "hunts", "." ]
python
train
41.666667
moderngl/moderngl
moderngl/context.py
https://github.com/moderngl/moderngl/blob/a8f5dce8dc72ae84a2f9523887fb5f6b620049b9/moderngl/context.py#L955-L979
def depth_renderbuffer(self, size, *, samples=0) -> 'Renderbuffer':
        '''
            :py:class:`Renderbuffer` objects are OpenGL objects that contain images.
            They are created and used specifically with :py:class:`Framebuffer` objects.

            Args:
                size (tuple): The width and height of the renderbuffer.

            Keyword Args:
                samples (int): The number of samples. Value 0 means no multisample format.

            Returns:
                :py:class:`Renderbuffer` object
        '''

        res = Renderbuffer.__new__(Renderbuffer)
        res.mglo, res._glo = self.mglo.depth_renderbuffer(size, samples)
        res._size = size
        res._components = 1
        res._samples = samples
        res._dtype = 'f4'
        res._depth = True
        res.ctx = self
        res.extra = None
        return res
[ "def", "depth_renderbuffer", "(", "self", ",", "size", ",", "*", ",", "samples", "=", "0", ")", "->", "'Renderbuffer'", ":", "res", "=", "Renderbuffer", ".", "__new__", "(", "Renderbuffer", ")", "res", ".", "mglo", ",", "res", ".", "_glo", "=", "self", ".", "mglo", ".", "depth_renderbuffer", "(", "size", ",", "samples", ")", "res", ".", "_size", "=", "size", "res", ".", "_components", "=", "1", "res", ".", "_samples", "=", "samples", "res", ".", "_dtype", "=", "'f4'", "res", ".", "_depth", "=", "True", "res", ".", "ctx", "=", "self", "res", ".", "extra", "=", "None", "return", "res" ]
:py:class:`Renderbuffer` objects are OpenGL objects that contain images.
They are created and used specifically with :py:class:`Framebuffer` objects.

Args:
    size (tuple): The width and height of the renderbuffer.

Keyword Args:
    samples (int): The number of samples. Value 0 means no multisample format.

Returns:
    :py:class:`Renderbuffer` object
[ ":", "py", ":", "class", ":", "Renderbuffer", "objects", "are", "OpenGL", "objects", "that", "contain", "images", ".", "They", "are", "created", "and", "used", "specifically", "with", ":", "py", ":", "class", ":", "Framebuffer", "objects", "." ]
python
train
33.84
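For context, a minimal sketch of how the method above is typically wired into a framebuffer; it assumes an environment where moderngl can create a standalone (headless) OpenGL context:

    import moderngl

    ctx = moderngl.create_standalone_context()
    color = ctx.renderbuffer((512, 512))            # color attachment
    depth = ctx.depth_renderbuffer((512, 512))      # the method documented above
    fbo = ctx.framebuffer(color_attachments=[color], depth_attachment=depth)
    fbo.use()                                       # render target with depth testing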
Esri/ArcREST
src/arcrest/ags/_uploads.py
https://github.com/Esri/ArcREST/blob/ab240fde2b0200f61d4a5f6df033516e53f2f416/src/arcrest/ags/_uploads.py#L109-L129
def download(self, itemID, savePath):
        """
        downloads an item to local disk
        Inputs:
           itemID - unique id of item to download
           savePath - folder to save the file in
        """
        if os.path.isdir(savePath) == False:
            os.makedirs(savePath)
        url = self._url + "/%s/download" % itemID
        params = {
        }
        if len(params.keys()):
            url = url + "?%s" % urlencode(params)
        return self._get(url=url,
                         param_dict=params,
                         out_folder=savePath,
                         securityHandler=self._securityHandler,
                         proxy_url=self._proxy_url,
                         proxy_port=self._proxy_port)
[ "def", "download", "(", "self", ",", "itemID", ",", "savePath", ")", ":", "if", "os", ".", "path", ".", "isdir", "(", "savePath", ")", "==", "False", ":", "os", ".", "makedirs", "(", "savePath", ")", "url", "=", "self", ".", "_url", "+", "\"/%s/download\"", "%", "itemID", "params", "=", "{", "}", "if", "len", "(", "params", ".", "keys", "(", ")", ")", ":", "url", "=", "url", "+", "\"?%s\"", "%", "urlencode", "(", "params", ")", "return", "self", ".", "_get", "(", "url", "=", "url", ",", "param_dict", "=", "params", ",", "out_folder", "=", "savePath", ",", "securityHandler", "=", "self", ".", "_securityHandler", ",", "proxy_url", "=", "self", ".", "_proxy_url", ",", "proxy_port", "=", "self", ".", "_proxy_port", ")" ]
downloads an item to local disk
Inputs:
   itemID - unique id of item to download
   savePath - folder to save the file in
[ "downloads", "an", "item", "to", "local", "disk" ]
python
train
34.809524
GNS3/gns3-server
gns3server/compute/vmware/vmware_vm.py
https://github.com/GNS3/gns3-server/blob/a221678448fb5d24e977ef562f81d56aacc89ab1/gns3server/compute/vmware/vmware_vm.py#L313-L320
def _get_vnet(self, adapter_number):
        """
        Return the vnet will use in ubridge
        """
        vnet = "ethernet{}.vnet".format(adapter_number)
        if vnet not in self._vmx_pairs:
            raise VMwareError("vnet {} not in VMX file".format(vnet))
        return vnet
[ "def", "_get_vnet", "(", "self", ",", "adapter_number", ")", ":", "vnet", "=", "\"ethernet{}.vnet\"", ".", "format", "(", "adapter_number", ")", "if", "vnet", "not", "in", "self", ".", "_vmx_pairs", ":", "raise", "VMwareError", "(", "\"vnet {} not in VMX file\"", ".", "format", "(", "vnet", ")", ")", "return", "vnet" ]
Return the vnet will use in ubridge
[ "Return", "the", "vnet", "will", "use", "in", "ubridge" ]
python
train
35.375
srittau/python-asserts
asserts/__init__.py
https://github.com/srittau/python-asserts/blob/1d5c797031c68ee27552d1c94e7f918c3d3d0453/asserts/__init__.py#L99-L115
def assert_boolean_false(expr, msg_fmt="{msg}"):
    """Fail the test unless the expression is the constant False.

    >>> assert_boolean_false(False)
    >>> assert_boolean_false(0)
    Traceback (most recent call last):
        ...
    AssertionError: 0 is not False

    The following msg_fmt arguments are supported:
    * msg - the default error message
    * expr - tested expression
    """
    if expr is not False:
        msg = "{!r} is not False".format(expr)
        fail(msg_fmt.format(msg=msg, expr=expr))
[ "def", "assert_boolean_false", "(", "expr", ",", "msg_fmt", "=", "\"{msg}\"", ")", ":", "if", "expr", "is", "not", "False", ":", "msg", "=", "\"{!r} is not False\"", ".", "format", "(", "expr", ")", "fail", "(", "msg_fmt", ".", "format", "(", "msg", "=", "msg", ",", "expr", "=", "expr", ")", ")" ]
Fail the test unless the expression is the constant False.

>>> assert_boolean_false(False)
>>> assert_boolean_false(0)
Traceback (most recent call last):
    ...
AssertionError: 0 is not False

The following msg_fmt arguments are supported:
* msg - the default error message
* expr - tested expression
[ "Fail", "the", "test", "unless", "the", "expression", "is", "the", "constant", "False", "." ]
python
train
29.705882
mardix/Juice
juice/decorators.py
https://github.com/mardix/Juice/blob/7afa8d4238868235dfcdae82272bd77958dd416a/juice/decorators.py#L127-L190
def template(page=None, layout=None, **kwargs):
    """
    Decorator to change the view template and layout.

    It works on both View class and view methods

    on class
        only $layout is applied, everything else will be passed to the kwargs

    Using as first argument, it will be the layout.

        :first arg or $layout: The layout to use for that view
        :param layout: The layout to use for that view
        :param kwargs: get pass to the TEMPLATE_CONTEXT

    ** on method that return a dict
        page or layout are optional

        :param page: The html page
        :param layout: The layout to use for that view
        :param kwargs: get pass to the view as k/V

    ** on other methods that return other type, it doesn't apply

    :return:
    """
    pkey = "_template_extends__"

    def decorator(f):
        if inspect.isclass(f):
            layout_ = layout or page
            extends = kwargs.pop("extends", None)
            if extends and hasattr(extends, pkey):
                items = getattr(extends, pkey).items()
                if "layout" in items:
                    layout_ = items.pop("layout")
                for k, v in items:
                    kwargs.setdefault(k, v)
            if not layout_:
                layout_ = "layout.html"

            kwargs.setdefault("brand_name", "")
            kwargs["layout"] = layout_

            setattr(f, pkey, kwargs)
            setattr(f, "base_layout", kwargs.get("layout"))
            f.g(TEMPLATE_CONTEXT=kwargs)
            return f
        else:
            @functools.wraps(f)
            def wrap(*args2, **kwargs2):
                response = f(*args2, **kwargs2)
                if isinstance(response, dict) or response is None:
                    response = response or {}
                    if page:
                        response.setdefault("template_", page)
                    if layout:
                        response.setdefault("layout_", layout)
                    for k, v in kwargs.items():
                        response.setdefault(k, v)
                return response
            return wrap
    return decorator
[ "def", "template", "(", "page", "=", "None", ",", "layout", "=", "None", ",", "*", "*", "kwargs", ")", ":", "pkey", "=", "\"_template_extends__\"", "def", "decorator", "(", "f", ")", ":", "if", "inspect", ".", "isclass", "(", "f", ")", ":", "layout_", "=", "layout", "or", "page", "extends", "=", "kwargs", ".", "pop", "(", "\"extends\"", ",", "None", ")", "if", "extends", "and", "hasattr", "(", "extends", ",", "pkey", ")", ":", "items", "=", "getattr", "(", "extends", ",", "pkey", ")", ".", "items", "(", ")", "if", "\"layout\"", "in", "items", ":", "layout_", "=", "items", ".", "pop", "(", "\"layout\"", ")", "for", "k", ",", "v", "in", "items", ":", "kwargs", ".", "setdefault", "(", "k", ",", "v", ")", "if", "not", "layout_", ":", "layout_", "=", "\"layout.html\"", "kwargs", ".", "setdefault", "(", "\"brand_name\"", ",", "\"\"", ")", "kwargs", "[", "\"layout\"", "]", "=", "layout_", "setattr", "(", "f", ",", "pkey", ",", "kwargs", ")", "setattr", "(", "f", ",", "\"base_layout\"", ",", "kwargs", ".", "get", "(", "\"layout\"", ")", ")", "f", ".", "g", "(", "TEMPLATE_CONTEXT", "=", "kwargs", ")", "return", "f", "else", ":", "@", "functools", ".", "wraps", "(", "f", ")", "def", "wrap", "(", "*", "args2", ",", "*", "*", "kwargs2", ")", ":", "response", "=", "f", "(", "*", "args2", ",", "*", "*", "kwargs2", ")", "if", "isinstance", "(", "response", ",", "dict", ")", "or", "response", "is", "None", ":", "response", "=", "response", "or", "{", "}", "if", "page", ":", "response", ".", "setdefault", "(", "\"template_\"", ",", "page", ")", "if", "layout", ":", "response", ".", "setdefault", "(", "\"layout_\"", ",", "layout", ")", "for", "k", ",", "v", "in", "kwargs", ".", "items", "(", ")", ":", "response", ".", "setdefault", "(", "k", ",", "v", ")", "return", "response", "return", "wrap", "return", "decorator" ]
Decorator to change the view template and layout.

It works on both View class and view methods

on class
    only $layout is applied, everything else will be passed to the kwargs

Using as first argument, it will be the layout.

    :first arg or $layout: The layout to use for that view
    :param layout: The layout to use for that view
    :param kwargs: get pass to the TEMPLATE_CONTEXT

** on method that return a dict
    page or layout are optional

    :param page: The html page
    :param layout: The layout to use for that view
    :param kwargs: get pass to the view as k/V

** on other methods that return other type, it doesn't apply

:return:
[ "Decorator", "to", "change", "the", "view", "template", "and", "layout", "." ]
python
train
33.09375
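An illustrative sketch of the decorator above in use; the View base class import, template paths, and view names are assumptions for the example, not part of the record:

    from juice import View                 # assumed export of the framework
    from juice.decorators import template

    @template(layout="layouts/base.html", brand_name="Acme")
    class Index(View):

        @template(page="index/home.html", title="Home")
        def home(self):
            # the returned dict is merged with the decorator's kwargs
            return {"items": [1, 2, 3]}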
RudolfCardinal/pythonlib
cardinal_pythonlib/rnc_web.py
https://github.com/RudolfCardinal/pythonlib/blob/0b84cb35f38bd7d8723958dae51b480a829b7227/cardinal_pythonlib/rnc_web.py#L637-L668
def make_urls_hyperlinks(text: str) -> str:
    """
    Adds hyperlinks to text that appears to contain URLs.

    See

    - http://stackoverflow.com/questions/1071191

      - ... except that double-replaces everything; e.g. try with
        ``text = "[email protected] [email protected]"``

    - http://stackp.online.fr/?p=19
    """
    find_url = r'''
        (?x)(              # verbose identify URLs within text
        (http|ftp|gopher)  # make sure we find a resource type
        ://                # ...needs to be followed by colon-slash-slash
        (\w+[:.]?){2,}     # at least two domain groups, e.g. (gnosis.)(cx)
        (/?|               # could be just the domain name (maybe w/ slash)
        [^ \n\r"]+         # or stuff then space, newline, tab, quote
        [\w/])             # resource name ends in alphanumeric or slash
        (?=[\s\.,>)'"\]])  # assert: followed by white or clause ending
        )                  # end of match group
    '''
    replace_url = r'<a href="\1">\1</a>'
    find_email = re.compile(r'([.\w\-]+@(\w[\w\-]+\.)+[\w\-]+)')
    # '.' doesn't need escaping inside square brackets
    # https://stackoverflow.com/questions/10397968/escape-dot-in-a-regex-range
    replace_email = r'<a href="mailto:\1">\1</a>'
    text = re.sub(find_url, replace_url, text)
    text = re.sub(find_email, replace_email, text)
    return text
[ "def", "make_urls_hyperlinks", "(", "text", ":", "str", ")", "->", "str", ":", "find_url", "=", "r'''\n (?x)( # verbose identify URLs within text\n (http|ftp|gopher) # make sure we find a resource type\n :// # ...needs to be followed by colon-slash-slash\n (\\w+[:.]?){2,} # at least two domain groups, e.g. (gnosis.)(cx)\n (/?| # could be just the domain name (maybe w/ slash)\n [^ \\n\\r\"]+ # or stuff then space, newline, tab, quote\n [\\w/]) # resource name ends in alphanumeric or slash\n (?=[\\s\\.,>)'\"\\]]) # assert: followed by white or clause ending\n ) # end of match group\n '''", "replace_url", "=", "r'<a href=\"\\1\">\\1</a>'", "find_email", "=", "re", ".", "compile", "(", "r'([.\\w\\-]+@(\\w[\\w\\-]+\\.)+[\\w\\-]+)'", ")", "# '.' doesn't need escaping inside square brackets", "# https://stackoverflow.com/questions/10397968/escape-dot-in-a-regex-range", "replace_email", "=", "r'<a href=\"mailto:\\1\">\\1</a>'", "text", "=", "re", ".", "sub", "(", "find_url", ",", "replace_url", ",", "text", ")", "text", "=", "re", ".", "sub", "(", "find_email", ",", "replace_email", ",", "text", ")", "return", "text" ]
Adds hyperlinks to text that appears to contain URLs.

See

- http://stackoverflow.com/questions/1071191

  - ... except that double-replaces everything; e.g. try with
    ``text = "[email protected] [email protected]"``

- http://stackp.online.fr/?p=19
[ "Adds", "hyperlinks", "to", "text", "that", "appears", "to", "contain", "URLs", "." ]
python
train
42.21875
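A quick check of the helper above; the URL and address are illustrative inputs:

    text = "See http://stackp.online.fr/?p=19 for details, or mail [email protected] today."
    print(make_urls_hyperlinks(text))
    # the URL becomes <a href="...">...</a> and the address becomes a mailto: link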
creare-com/pydem
pydem/processing_manager.py
https://github.com/creare-com/pydem/blob/c2fc8d84cfb411df84f71a6dec9edc4b544f710a/pydem/processing_manager.py#L487-L492
def update_edge_todo(self, elev_fn, dem_proc):
        """
        Can figure out how to update the todo based on the elev filename
        """
        for key in self.edges[elev_fn].keys():
            self.edges[elev_fn][key].set_data('todo', data=dem_proc.edge_todo)
[ "def", "update_edge_todo", "(", "self", ",", "elev_fn", ",", "dem_proc", ")", ":", "for", "key", "in", "self", ".", "edges", "[", "elev_fn", "]", ".", "keys", "(", ")", ":", "self", ".", "edges", "[", "elev_fn", "]", "[", "key", "]", ".", "set_data", "(", "'todo'", ",", "data", "=", "dem_proc", ".", "edge_todo", ")" ]
Can figure out how to update the todo based on the elev filename
[ "Can", "figure", "out", "how", "to", "update", "the", "todo", "based", "on", "the", "elev", "filename" ]
python
train
44
pytroll/satpy
satpy/readers/goes_imager_nc.py
https://github.com/pytroll/satpy/blob/1f21d20ac686b745fb0da9b4030d139893e066dd/satpy/readers/goes_imager_nc.py#L620-L634
def _get_sector(self, channel, nlines, ncols):
        """Determine which sector was scanned"""
        if self._is_vis(channel):
            margin = 100
            sectors_ref = self.vis_sectors
        else:
            margin = 50
            sectors_ref = self.ir_sectors

        for (nlines_ref, ncols_ref), sector in sectors_ref.items():
            if np.fabs(ncols - ncols_ref) < margin and \
                    np.fabs(nlines - nlines_ref) < margin:
                return sector

        return UNKNOWN_SECTOR
[ "def", "_get_sector", "(", "self", ",", "channel", ",", "nlines", ",", "ncols", ")", ":", "if", "self", ".", "_is_vis", "(", "channel", ")", ":", "margin", "=", "100", "sectors_ref", "=", "self", ".", "vis_sectors", "else", ":", "margin", "=", "50", "sectors_ref", "=", "self", ".", "ir_sectors", "for", "(", "nlines_ref", ",", "ncols_ref", ")", ",", "sector", "in", "sectors_ref", ".", "items", "(", ")", ":", "if", "np", ".", "fabs", "(", "ncols", "-", "ncols_ref", ")", "<", "margin", "and", "np", ".", "fabs", "(", "nlines", "-", "nlines_ref", ")", "<", "margin", ":", "return", "sector", "return", "UNKNOWN_SECTOR" ]
Determine which sector was scanned
[ "Determine", "which", "sector", "was", "scanned" ]
python
train
33.933333
ployground/bsdploy
bsdploy/bootstrap_utils.py
https://github.com/ployground/bsdploy/blob/096d63b316264931627bed1f8ca8abf7eb517352/bsdploy/bootstrap_utils.py#L158-L266
def bootstrap_files(self):
        """ we need some files to bootstrap the FreeBSD installation.
        Some...

        - need to be provided by the user (i.e. authorized_keys)
        - others have some (sensible) defaults (i.e. rc.conf)
        - some can be downloaded via URL (i.e.) http://pkg.freebsd.org/freebsd:10:x86:64/latest/Latest/pkg.txz

        For those which can be downloaded we check the downloads directory. if the file exists there
        (and if the checksum matches TODO!) we will upload it to the host. If not, we will fetch the
        file from the given URL from the host.

        For files that cannot be downloaded (authorized_keys, rc.conf etc.) we allow the user to
        provide their own version in a ``bootstrap-files`` folder. The location of this folder can
        either be explicitly provided via the ``bootstrap-files`` key in the host definition of the
        config file or it defaults to ``deployment/bootstrap-files``.

        User provided files can be rendered as Jinja2 templates, by providing ``use_jinja: True`` in
        the YAML file. They will be rendered with the instance configuration dictionary as context.

        If the file is not found there, we revert to the default files that are part of bsdploy. If
        the file cannot be found there either we either error out or for authorized_keys we look in
        ``~/.ssh/identity.pub``.
        """
        bootstrap_file_yamls = [
            abspath(join(self.default_template_path, self.bootstrap_files_yaml)),
            abspath(join(self.custom_template_path, self.bootstrap_files_yaml))]
        bootstrap_files = dict()
        if self.upload_authorized_keys:
            bootstrap_files['authorized_keys'] = BootstrapFile(self, 'authorized_keys', **{
                'directory': '/mnt/root/.ssh',
                'directory_mode': '0600',
                'remote': '/mnt/root/.ssh/authorized_keys',
                'fallback': [
                    '~/.ssh/identity.pub',
                    '~/.ssh/id_rsa.pub',
                    '~/.ssh/id_dsa.pub',
                    '~/.ssh/id_ecdsa.pub']})
        for bootstrap_file_yaml in bootstrap_file_yamls:
            if not exists(bootstrap_file_yaml):
                continue
            with open(bootstrap_file_yaml) as f:
                info = yaml.load(f, Loader=SafeLoader)
            if info is None:
                continue
            for k, v in info.items():
                bootstrap_files[k] = BootstrapFile(self, k, **v)

        for bf in bootstrap_files.values():
            if not exists(bf.local) and bf.raw_fallback:
                if not bf.existing_fallback:
                    print("Found no public key in %s, you have to create '%s' manually" % (expanduser('~/.ssh'), bf.local))
                    sys.exit(1)
                print("The '%s' file is missing." % bf.local)
                for path in bf.existing_fallback:
                    yes = env.instance.config.get('bootstrap-yes', False)
                    if yes or yesno("Should we generate it using the key in '%s'?" % path):
                        if not exists(bf.expected_path):
                            os.mkdir(bf.expected_path)
                        with open(bf.local, 'wb') as out:
                            with open(path, 'rb') as f:
                                out.write(f.read())
                        break
                else:
                    # answered no to all options
                    sys.exit(1)
            if not bf.check():
                print('Cannot find %s' % bf.local)
                sys.exit(1)
        packages_path = join(self.download_path, 'packages')
        if exists(packages_path):
            for dirpath, dirnames, filenames in os.walk(packages_path):
                path = dirpath.split(packages_path)[1][1:]
                for filename in filenames:
                    if not filename.endswith('.txz'):
                        continue
                    bootstrap_files[join(path, filename)] = BootstrapFile(
                        self, join(path, filename), **dict(
                            local=join(packages_path, join(path, filename)),
                            remote=join('/mnt/var/cache/pkg/All', filename),
                            encrypted=False))
        if self.ssh_keys is not None:
            for ssh_key_name, ssh_key_options in list(self.ssh_keys):
                ssh_key = join(self.custom_template_path, ssh_key_name)
                if exists(ssh_key):
                    pub_key_name = '%s.pub' % ssh_key_name
                    pub_key = '%s.pub' % ssh_key
                    if not exists(pub_key):
                        print("Public key '%s' for '%s' missing." % (pub_key, ssh_key))
                        sys.exit(1)
                    bootstrap_files[ssh_key_name] = BootstrapFile(
                        self, ssh_key_name, **dict(
                            local=ssh_key,
                            remote='/mnt/etc/ssh/%s' % ssh_key_name,
                            mode=0600))
                    bootstrap_files[pub_key_name] = BootstrapFile(
                        self, pub_key_name, **dict(
                            local=pub_key,
                            remote='/mnt/etc/ssh/%s' % pub_key_name,
                            mode=0644))
        if hasattr(env.instance, 'get_vault_lib'):
            vaultlib = env.instance.get_vault_lib()
            for bf in bootstrap_files.values():
                if bf.encrypted is None and exists(bf.local):
                    with open(bf.local) as f:
                        data = f.read()
                    bf.info['encrypted'] = vaultlib.is_encrypted(data)
        return bootstrap_files
[ "def", "bootstrap_files", "(", "self", ")", ":", "bootstrap_file_yamls", "=", "[", "abspath", "(", "join", "(", "self", ".", "default_template_path", ",", "self", ".", "bootstrap_files_yaml", ")", ")", ",", "abspath", "(", "join", "(", "self", ".", "custom_template_path", ",", "self", ".", "bootstrap_files_yaml", ")", ")", "]", "bootstrap_files", "=", "dict", "(", ")", "if", "self", ".", "upload_authorized_keys", ":", "bootstrap_files", "[", "'authorized_keys'", "]", "=", "BootstrapFile", "(", "self", ",", "'authorized_keys'", ",", "*", "*", "{", "'directory'", ":", "'/mnt/root/.ssh'", ",", "'directory_mode'", ":", "'0600'", ",", "'remote'", ":", "'/mnt/root/.ssh/authorized_keys'", ",", "'fallback'", ":", "[", "'~/.ssh/identity.pub'", ",", "'~/.ssh/id_rsa.pub'", ",", "'~/.ssh/id_dsa.pub'", ",", "'~/.ssh/id_ecdsa.pub'", "]", "}", ")", "for", "bootstrap_file_yaml", "in", "bootstrap_file_yamls", ":", "if", "not", "exists", "(", "bootstrap_file_yaml", ")", ":", "continue", "with", "open", "(", "bootstrap_file_yaml", ")", "as", "f", ":", "info", "=", "yaml", ".", "load", "(", "f", ",", "Loader", "=", "SafeLoader", ")", "if", "info", "is", "None", ":", "continue", "for", "k", ",", "v", "in", "info", ".", "items", "(", ")", ":", "bootstrap_files", "[", "k", "]", "=", "BootstrapFile", "(", "self", ",", "k", ",", "*", "*", "v", ")", "for", "bf", "in", "bootstrap_files", ".", "values", "(", ")", ":", "if", "not", "exists", "(", "bf", ".", "local", ")", "and", "bf", ".", "raw_fallback", ":", "if", "not", "bf", ".", "existing_fallback", ":", "print", "(", "\"Found no public key in %s, you have to create '%s' manually\"", "%", "(", "expanduser", "(", "'~/.ssh'", ")", ",", "bf", ".", "local", ")", ")", "sys", ".", "exit", "(", "1", ")", "print", "(", "\"The '%s' file is missing.\"", "%", "bf", ".", "local", ")", "for", "path", "in", "bf", ".", "existing_fallback", ":", "yes", "=", "env", ".", "instance", ".", "config", ".", "get", "(", "'bootstrap-yes'", ",", "False", ")", "if", "yes", "or", "yesno", "(", "\"Should we generate it using the key in '%s'?\"", "%", "path", ")", ":", "if", "not", "exists", "(", "bf", ".", "expected_path", ")", ":", "os", ".", "mkdir", "(", "bf", ".", "expected_path", ")", "with", "open", "(", "bf", ".", "local", ",", "'wb'", ")", "as", "out", ":", "with", "open", "(", "path", ",", "'rb'", ")", "as", "f", ":", "out", ".", "write", "(", "f", ".", "read", "(", ")", ")", "break", "else", ":", "# answered no to all options", "sys", ".", "exit", "(", "1", ")", "if", "not", "bf", ".", "check", "(", ")", ":", "print", "(", "'Cannot find %s'", "%", "bf", ".", "local", ")", "sys", ".", "exit", "(", "1", ")", "packages_path", "=", "join", "(", "self", ".", "download_path", ",", "'packages'", ")", "if", "exists", "(", "packages_path", ")", ":", "for", "dirpath", ",", "dirnames", ",", "filenames", "in", "os", ".", "walk", "(", "packages_path", ")", ":", "path", "=", "dirpath", ".", "split", "(", "packages_path", ")", "[", "1", "]", "[", "1", ":", "]", "for", "filename", "in", "filenames", ":", "if", "not", "filename", ".", "endswith", "(", "'.txz'", ")", ":", "continue", "bootstrap_files", "[", "join", "(", "path", ",", "filename", ")", "]", "=", "BootstrapFile", "(", "self", ",", "join", "(", "path", ",", "filename", ")", ",", "*", "*", "dict", "(", "local", "=", "join", "(", "packages_path", ",", "join", "(", "path", ",", "filename", ")", ")", ",", "remote", "=", "join", "(", "'/mnt/var/cache/pkg/All'", ",", "filename", ")", ",", "encrypted", "=", "False", ")", ")", "if", "self", 
".", "ssh_keys", "is", "not", "None", ":", "for", "ssh_key_name", ",", "ssh_key_options", "in", "list", "(", "self", ".", "ssh_keys", ")", ":", "ssh_key", "=", "join", "(", "self", ".", "custom_template_path", ",", "ssh_key_name", ")", "if", "exists", "(", "ssh_key", ")", ":", "pub_key_name", "=", "'%s.pub'", "%", "ssh_key_name", "pub_key", "=", "'%s.pub'", "%", "ssh_key", "if", "not", "exists", "(", "pub_key", ")", ":", "print", "(", "\"Public key '%s' for '%s' missing.\"", "%", "(", "pub_key", ",", "ssh_key", ")", ")", "sys", ".", "exit", "(", "1", ")", "bootstrap_files", "[", "ssh_key_name", "]", "=", "BootstrapFile", "(", "self", ",", "ssh_key_name", ",", "*", "*", "dict", "(", "local", "=", "ssh_key", ",", "remote", "=", "'/mnt/etc/ssh/%s'", "%", "ssh_key_name", ",", "mode", "=", "0600", ")", ")", "bootstrap_files", "[", "pub_key_name", "]", "=", "BootstrapFile", "(", "self", ",", "pub_key_name", ",", "*", "*", "dict", "(", "local", "=", "pub_key", ",", "remote", "=", "'/mnt/etc/ssh/%s'", "%", "pub_key_name", ",", "mode", "=", "0644", ")", ")", "if", "hasattr", "(", "env", ".", "instance", ",", "'get_vault_lib'", ")", ":", "vaultlib", "=", "env", ".", "instance", ".", "get_vault_lib", "(", ")", "for", "bf", "in", "bootstrap_files", ".", "values", "(", ")", ":", "if", "bf", ".", "encrypted", "is", "None", "and", "exists", "(", "bf", ".", "local", ")", ":", "with", "open", "(", "bf", ".", "local", ")", "as", "f", ":", "data", "=", "f", ".", "read", "(", ")", "bf", ".", "info", "[", "'encrypted'", "]", "=", "vaultlib", ".", "is_encrypted", "(", "data", ")", "return", "bootstrap_files" ]
we need some files to bootstrap the FreeBSD installation.
Some...

- need to be provided by the user (i.e. authorized_keys)
- others have some (sensible) defaults (i.e. rc.conf)
- some can be downloaded via URL (i.e.) http://pkg.freebsd.org/freebsd:10:x86:64/latest/Latest/pkg.txz

For those which can be downloaded we check the downloads directory. if the file exists there (and if
the checksum matches TODO!) we will upload it to the host. If not, we will fetch the file from the
given URL from the host.

For files that cannot be downloaded (authorized_keys, rc.conf etc.) we allow the user to provide
their own version in a ``bootstrap-files`` folder. The location of this folder can either be
explicitly provided via the ``bootstrap-files`` key in the host definition of the config file or it
defaults to ``deployment/bootstrap-files``.

User provided files can be rendered as Jinja2 templates, by providing ``use_jinja: True`` in the
YAML file. They will be rendered with the instance configuration dictionary as context.

If the file is not found there, we revert to the default files that are part of bsdploy. If the file
cannot be found there either we either error out or for authorized_keys we look in
``~/.ssh/identity.pub``.
[ "we", "need", "some", "files", "to", "bootstrap", "the", "FreeBSD", "installation", ".", "Some", "...", "-", "need", "to", "be", "provided", "by", "the", "user", "(", "i", ".", "e", ".", "authorized_keys", ")", "-", "others", "have", "some", "(", "sensible", ")", "defaults", "(", "i", ".", "e", ".", "rc", ".", "conf", ")", "-", "some", "can", "be", "downloaded", "via", "URL", "(", "i", ".", "e", ".", ")", "http", ":", "//", "pkg", ".", "freebsd", ".", "org", "/", "freebsd", ":", "10", ":", "x86", ":", "64", "/", "latest", "/", "Latest", "/", "pkg", ".", "txz" ]
python
train
51.770642
awslabs/sockeye
sockeye/encoder.py
https://github.com/awslabs/sockeye/blob/5d64a1ee1ef3cbba17c6d1d94bc061020c43f6ab/sockeye/encoder.py#L937-L941
def get_rnn_cells(self) -> List[mx.rnn.BaseRNNCell]:
        """
        Returns a list of RNNCells used by this encoder.
        """
        return self.forward_rnn.get_rnn_cells() + self.reverse_rnn.get_rnn_cells()
[ "def", "get_rnn_cells", "(", "self", ")", "->", "List", "[", "mx", ".", "rnn", ".", "BaseRNNCell", "]", ":", "return", "self", ".", "forward_rnn", ".", "get_rnn_cells", "(", ")", "+", "self", ".", "reverse_rnn", ".", "get_rnn_cells", "(", ")" ]
Returns a list of RNNCells used by this encoder.
[ "Returns", "a", "list", "of", "RNNCells", "used", "by", "this", "encoder", "." ]
python
train
42.4
python-cmd2/cmd2
cmd2/history.py
https://github.com/python-cmd2/cmd2/blob/b22c0bd891ed08c8b09df56df9d91f48166a5e2a/cmd2/history.py#L76-L81
def _zero_based_index(self, onebased: Union[int, str]) -> int:
        """Convert a one-based index to a zero-based index."""
        result = int(onebased)
        if result > 0:
            result -= 1
        return result
[ "def", "_zero_based_index", "(", "self", ",", "onebased", ":", "Union", "[", "int", ",", "str", "]", ")", "->", "int", ":", "result", "=", "int", "(", "onebased", ")", "if", "result", ">", "0", ":", "result", "-=", "1", "return", "result" ]
Convert a one-based index to a zero-based index.
[ "Convert", "a", "one", "-", "based", "index", "to", "a", "zero", "-", "based", "index", "." ]
python
train
36.666667
guaix-ucm/numina
numina/store/gtc/load.py
https://github.com/guaix-ucm/numina/blob/6c829495df8937f77c2de9383c1038ffb3e713e3/numina/store/gtc/load.py#L62-L76
def build_result(data):
    """Create a dictionary with the contents of result.json"""
    more = {}
    for key, value in data.items():
        if key != 'elements':
            newnode = value
        else:
            newnode = {}
            for el in value:
                nkey, nvalue = process_node(el)
                newnode[nkey] = nvalue
        more[key] = newnode

    return more
[ "def", "build_result", "(", "data", ")", ":", "more", "=", "{", "}", "for", "key", ",", "value", "in", "data", ".", "items", "(", ")", ":", "if", "key", "!=", "'elements'", ":", "newnode", "=", "value", "else", ":", "newnode", "=", "{", "}", "for", "el", "in", "value", ":", "nkey", ",", "nvalue", "=", "process_node", "(", "el", ")", "newnode", "[", "nkey", "]", "=", "nvalue", "more", "[", "key", "]", "=", "newnode", "return", "more" ]
Create a dictionary with the contents of result.json
[ "Create", "a", "dictionary", "with", "the", "contents", "of", "result", ".", "json" ]
python
train
25.466667
yuce/pyswip
pyswip/core.py
https://github.com/yuce/pyswip/blob/f7c1f1e8c3a13b90bd775861d374788a8b5677d8/pyswip/core.py#L393-L432
def _findSwipl():
    """
    This function makes a big effort to find the path to the SWI-Prolog shared
    library. Since this is both OS dependent and installation dependent, we may
    not aways succeed. If we do, we return a name/path that can be used by
    CDLL(). Otherwise we raise an exception.

    :return: Tuple. Fist element is the name or path to the library that can
             be used by CDLL. Second element is the path were SWI-Prolog
             resource file may be found (this is needed in some Linuxes)
    :rtype: Tuple of strings
    :raises ImportError: If we cannot guess the name of the library
    """
    # Now begins the guesswork
    platform = sys.platform[:3]
    if platform == "win":
        # In Windows, we have the default installer
        # path and the registry to look
        (path, swiHome) = _findSwiplWin()
    elif platform in ("lin", "cyg"):
        (path, swiHome) = _findSwiplLin()
    elif platform == "dar":
        # Help with MacOS is welcome!!
        (path, swiHome) = _findSwiplDar()
        if path is None:
            (path, swiHome) = _findSwiplMacOSHome()
    else:
        # This should work for other UNIX
        (path, swiHome) = _findSwiplLin()

    # This is a catch all raise
    if path is None:
        raise ImportError('Could not find the SWI-Prolog library in this '
                          'platform. If you are sure it is installed, please '
                          'open an issue.')
    else:
        return (path, swiHome)
[ "def", "_findSwipl", "(", ")", ":", "# Now begins the guesswork", "platform", "=", "sys", ".", "platform", "[", ":", "3", "]", "if", "platform", "==", "\"win\"", ":", "# In Windows, we have the default installer", "# path and the registry to look", "(", "path", ",", "swiHome", ")", "=", "_findSwiplWin", "(", ")", "elif", "platform", "in", "(", "\"lin\"", ",", "\"cyg\"", ")", ":", "(", "path", ",", "swiHome", ")", "=", "_findSwiplLin", "(", ")", "elif", "platform", "==", "\"dar\"", ":", "# Help with MacOS is welcome!!", "(", "path", ",", "swiHome", ")", "=", "_findSwiplDar", "(", ")", "if", "path", "is", "None", ":", "(", "path", ",", "swiHome", ")", "=", "_findSwiplMacOSHome", "(", ")", "else", ":", "# This should work for other UNIX", "(", "path", ",", "swiHome", ")", "=", "_findSwiplLin", "(", ")", "# This is a catch all raise", "if", "path", "is", "None", ":", "raise", "ImportError", "(", "'Could not find the SWI-Prolog library in this '", "'platform. If you are sure it is installed, please '", "'open an issue.'", ")", "else", ":", "return", "(", "path", ",", "swiHome", ")" ]
This function makes a big effort to find the path to the SWI-Prolog shared
library. Since this is both OS dependent and installation dependent, we may
not aways succeed. If we do, we return a name/path that can be used by
CDLL(). Otherwise we raise an exception.

:return: Tuple. Fist element is the name or path to the library that can
         be used by CDLL. Second element is the path were SWI-Prolog
         resource file may be found (this is needed in some Linuxes)
:rtype: Tuple of strings
:raises ImportError: If we cannot guess the name of the library
[ "This", "function", "makes", "a", "big", "effort", "to", "find", "the", "path", "to", "the", "SWI", "-", "Prolog", "shared", "library", ".", "Since", "this", "is", "both", "OS", "dependent", "and", "installation", "dependent", "we", "may", "not", "aways", "succeed", ".", "If", "we", "do", "we", "return", "a", "name", "/", "path", "that", "can", "be", "used", "by", "CDLL", "()", ".", "Otherwise", "we", "raise", "an", "exception", "." ]
python
train
37.35
globocom/GloboNetworkAPI-client-python
networkapiclient/ApiVipRequest.py
https://github.com/globocom/GloboNetworkAPI-client-python/blob/cf34f913da48d9abbf750114f5d2ac4b2dde137d/networkapiclient/ApiVipRequest.py#L243-L252
def deploy(self, ids):
        """
        Method to deploy vip's

        :param vips: List containing vip's desired to be deployed on equipment

        :return: None
        """
        url = build_uri_with_ids('api/v3/vip-request/deploy/%s/', ids)
        return super(ApiVipRequest, self).post(url)
[ "def", "deploy", "(", "self", ",", "ids", ")", ":", "url", "=", "build_uri_with_ids", "(", "'api/v3/vip-request/deploy/%s/'", ",", "ids", ")", "return", "super", "(", "ApiVipRequest", ",", "self", ")", ".", "post", "(", "url", ")" ]
Method to deploy vip's

:param vips: List containing vip's desired to be deployed on equipment

:return: None
[ "Method", "to", "deploy", "vip", "s" ]
python
train
29.4
arne-cl/discoursegraphs
src/discoursegraphs/discoursegraph.py
https://github.com/arne-cl/discoursegraphs/blob/842f0068a3190be2c75905754521b176b25a54fb/src/discoursegraphs/discoursegraph.py#L614-L640
def add_layer(self, element, layer):
        """
        add a layer to an existing node or edge

        Parameters
        ----------
        element : str, int, (str/int, str/int)
            the ID of a node or edge (source node ID, target node ID)
        layer : str
            the layer that the element shall be added to
        """
        assert isinstance(layer, str), "Layers must be strings!"
        if isinstance(element, tuple):  # edge repr. by (source, target)
            assert len(element) == 2
            assert all(isinstance(node, (str, int)) for node in element)
            source_id, target_id = element
            # this class is based on a multi-digraph, so we'll have to iterate
            # over all edges between the two nodes (even if there's just one)
            edges = self.edge[source_id][target_id]
            for edge in edges:
                existing_layers = edges[edge]['layers']
                existing_layers.add(layer)
                edges[edge]['layers'] = existing_layers
        if isinstance(element, (str, int)):  # node
            existing_layers = self.node[element]['layers']
            existing_layers.add(layer)
            self.node[element]['layers'] = existing_layers
[ "def", "add_layer", "(", "self", ",", "element", ",", "layer", ")", ":", "assert", "isinstance", "(", "layer", ",", "str", ")", ",", "\"Layers must be strings!\"", "if", "isinstance", "(", "element", ",", "tuple", ")", ":", "# edge repr. by (source, target)", "assert", "len", "(", "element", ")", "==", "2", "assert", "all", "(", "isinstance", "(", "node", ",", "(", "str", ",", "int", ")", ")", "for", "node", "in", "element", ")", "source_id", ",", "target_id", "=", "element", "# this class is based on a multi-digraph, so we'll have to iterate", "# over all edges between the two nodes (even if there's just one)", "edges", "=", "self", ".", "edge", "[", "source_id", "]", "[", "target_id", "]", "for", "edge", "in", "edges", ":", "existing_layers", "=", "edges", "[", "edge", "]", "[", "'layers'", "]", "existing_layers", ".", "add", "(", "layer", ")", "edges", "[", "edge", "]", "[", "'layers'", "]", "=", "existing_layers", "if", "isinstance", "(", "element", ",", "(", "str", ",", "int", ")", ")", ":", "# node", "existing_layers", "=", "self", ".", "node", "[", "element", "]", "[", "'layers'", "]", "existing_layers", ".", "add", "(", "layer", ")", "self", ".", "node", "[", "element", "]", "[", "'layers'", "]", "=", "existing_layers" ]
add a layer to an existing node or edge

Parameters
----------
element : str, int, (str/int, str/int)
    the ID of a node or edge (source node ID, target node ID)
layer : str
    the layer that the element shall be added to
[ "add", "a", "layer", "to", "an", "existing", "node", "or", "edge" ]
python
train
44.740741
asyrjasalo/RESTinstance
src/REST/keywords.py
https://github.com/asyrjasalo/RESTinstance/blob/9b003ffc6a89ec4b8b6f05eeb6cc8e56aad4be4e/src/REST/keywords.py#L81-L94
def set_headers(self, headers):
        """*Sets new request headers or updates the existing.*

        ``headers``: The headers to add or update as a JSON object or a
        dictionary.

        *Examples*

        | `Set Headers` | { "authorization": "Basic QWxhZGRpbjpPcGVuU2VzYW1"} |
        | `Set Headers` | { "Accept-Encoding": "identity"} |
        | `Set Headers` | ${auth_dict} |
        """
        self.request["headers"].update(self._input_object(headers))
        return self.request["headers"]
[ "def", "set_headers", "(", "self", ",", "headers", ")", ":", "self", ".", "request", "[", "\"headers\"", "]", ".", "update", "(", "self", ".", "_input_object", "(", "headers", ")", ")", "return", "self", ".", "request", "[", "\"headers\"", "]" ]
*Sets new request headers or updates the existing.*

``headers``: The headers to add or update as a JSON object or a
dictionary.

*Examples*

| `Set Headers` | { "authorization": "Basic QWxhZGRpbjpPcGVuU2VzYW1"} |
| `Set Headers` | { "Accept-Encoding": "identity"} |
| `Set Headers` | ${auth_dict} |
[ "*", "Sets", "new", "request", "headers", "or", "updates", "the", "existing", ".", "*" ]
python
train
35.428571
openego/eDisGo
edisgo/grid/network.py
https://github.com/openego/eDisGo/blob/e6245bdaf236f9c49dbda5a18c1c458290f41e2b/edisgo/grid/network.py#L1444-L1471
def _worst_case_generation(self, worst_case_scale_factors, modes):
        """
        Define worst case generation time series for fluctuating and
        dispatchable generators.

        Parameters
        ----------
        worst_case_scale_factors : dict
            Scale factors defined in config file 'config_timeseries.cfg'.
            Scale factors describe actual power to nominal power ratio of in
            worst-case scenarios.
        modes : list
            List with worst-cases to generate time series for. Can be
            'feedin_case', 'load_case' or both.

        """
        self.timeseries.generation_fluctuating = pd.DataFrame(
            {'solar': [worst_case_scale_factors[
                           '{}_feedin_pv'.format(mode)] for mode in modes],
             'wind': [worst_case_scale_factors[
                          '{}_feedin_other'.format(mode)] for mode in modes]},
            index=self.timeseries.timeindex)

        self.timeseries.generation_dispatchable = pd.DataFrame(
            {'other': [worst_case_scale_factors[
                           '{}_feedin_other'.format(mode)] for mode in modes]},
            index=self.timeseries.timeindex)
[ "def", "_worst_case_generation", "(", "self", ",", "worst_case_scale_factors", ",", "modes", ")", ":", "self", ".", "timeseries", ".", "generation_fluctuating", "=", "pd", ".", "DataFrame", "(", "{", "'solar'", ":", "[", "worst_case_scale_factors", "[", "'{}_feedin_pv'", ".", "format", "(", "mode", ")", "]", "for", "mode", "in", "modes", "]", ",", "'wind'", ":", "[", "worst_case_scale_factors", "[", "'{}_feedin_other'", ".", "format", "(", "mode", ")", "]", "for", "mode", "in", "modes", "]", "}", ",", "index", "=", "self", ".", "timeseries", ".", "timeindex", ")", "self", ".", "timeseries", ".", "generation_dispatchable", "=", "pd", ".", "DataFrame", "(", "{", "'other'", ":", "[", "worst_case_scale_factors", "[", "'{}_feedin_other'", ".", "format", "(", "mode", ")", "]", "for", "mode", "in", "modes", "]", "}", ",", "index", "=", "self", ".", "timeseries", ".", "timeindex", ")" ]
Define worst case generation time series for fluctuating and
dispatchable generators.

Parameters
----------
worst_case_scale_factors : dict
    Scale factors defined in config file 'config_timeseries.cfg'.
    Scale factors describe actual power to nominal power ratio of in
    worst-case scenarios.
modes : list
    List with worst-cases to generate time series for. Can be
    'feedin_case', 'load_case' or both.
[ "Define", "worst", "case", "generation", "time", "series", "for", "fluctuating", "and", "dispatchable", "generators", "." ]
python
train
41.75
bunq/sdk_python
bunq/sdk/model/generated/endpoint.py
https://github.com/bunq/sdk_python/blob/da6c9b83e6d83ee8062617f53c6eb7293c0d863d/bunq/sdk/model/generated/endpoint.py#L1550-L1567
def is_all_field_none(self):
        """
        :rtype: bool
        """

        if self._uuid is not None:
            return False

        if self._created is not None:
            return False

        if self._updated is not None:
            return False

        if self._attachment is not None:
            return False

        return True
[ "def", "is_all_field_none", "(", "self", ")", ":", "if", "self", ".", "_uuid", "is", "not", "None", ":", "return", "False", "if", "self", ".", "_created", "is", "not", "None", ":", "return", "False", "if", "self", ".", "_updated", "is", "not", "None", ":", "return", "False", "if", "self", ".", "_attachment", "is", "not", "None", ":", "return", "False", "return", "True" ]
:rtype: bool
[ ":", "rtype", ":", "bool" ]
python
train
18.5
cloudant/python-cloudant
src/cloudant/client.py
https://github.com/cloudant/python-cloudant/blob/e0ba190f6ba07fe3522a668747128214ad573c7e/src/cloudant/client.py#L421-L443
def get(self, key, default=None, remote=False):
        """
        Overrides dictionary get behavior to retrieve database objects with
        support for returning a default.  If remote=True then a remote request
        is made to retrieve the database from the remote server, otherwise the
        client's locally cached database object is returned.

        :param str key: Database name used to retrieve the database object.
        :param str default: Default database name.  Defaults to None.
        :param bool remote: Dictates whether the locally cached database is
            returned or a remote request is made to retrieve the database
            from the server.  Defaults to False.

        :returns: Database object
        """
        if not remote:
            return super(CouchDB, self).get(key, default)
        db = self._DATABASE_CLASS(self, key)
        if db.exists():
            super(CouchDB, self).__setitem__(key, db)
            return db
        return default
[ "def", "get", "(", "self", ",", "key", ",", "default", "=", "None", ",", "remote", "=", "False", ")", ":", "if", "not", "remote", ":", "return", "super", "(", "CouchDB", ",", "self", ")", ".", "get", "(", "key", ",", "default", ")", "db", "=", "self", ".", "_DATABASE_CLASS", "(", "self", ",", "key", ")", "if", "db", ".", "exists", "(", ")", ":", "super", "(", "CouchDB", ",", "self", ")", ".", "__setitem__", "(", "key", ",", "db", ")", "return", "db", "return", "default" ]
Overrides dictionary get behavior to retrieve database objects with
support for returning a default.  If remote=True then a remote request
is made to retrieve the database from the remote server, otherwise the
client's locally cached database object is returned.

:param str key: Database name used to retrieve the database object.
:param str default: Default database name.  Defaults to None.
:param bool remote: Dictates whether the locally cached database is
    returned or a remote request is made to retrieve the database
    from the server.  Defaults to False.

:returns: Database object
[ "Overrides", "dictionary", "get", "behavior", "to", "retrieve", "database", "objects", "with", "support", "for", "returning", "a", "default", ".", "If", "remote", "=", "True", "then", "a", "remote", "request", "is", "made", "to", "retrieve", "the", "database", "from", "the", "remote", "server", "otherwise", "the", "client", "s", "locally", "cached", "database", "object", "is", "returned", "." ]
python
train
42.391304
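A minimal sketch of the override above from the caller's side; the credentials, server URL and database name are placeholders:

    from cloudant.client import CouchDB

    client = CouchDB('user', 'passw0rd', url='http://localhost:5984', connect=True)
    db = client.get('mydb', remote=True)   # fetch from the server and cache it
    cached = client.get('mydb')            # now answered from the local cache
    client.disconnect()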
TaurusOlson/incisive
incisive/core.py
https://github.com/TaurusOlson/incisive/blob/25bb9f53495985c1416c82e26f54158df4050cb0/incisive/core.py#L55-L88
def read_csv(filename, delimiter=",", skip=0, guess_type=True, has_header=True, use_types={}):
    """Read a CSV file
    Usage
    -----
    >>> data = read_csv(filename, delimiter=delimiter, skip=skip,
            guess_type=guess_type, has_header=True, use_types={})

    # Use specific types
    >>> types = {"sepal.length": int, "petal.width": float}
    >>> data = read_csv(filename, guess_type=guess_type, use_types=types)

    keywords
    :has_header:
        Determine whether the file has a header or not
    """
    with open(filename, 'r') as f:
        # Skip the n first lines
        if has_header:
            header = f.readline().strip().split(delimiter)
        else:
            header = None
        for i in range(skip):
            f.readline()

        for line in csv.DictReader(f, delimiter=delimiter, fieldnames=header):
            if use_types:
                yield apply_types(use_types, guess_type, line)
            elif guess_type:
                yield dmap(determine_type, line)
            else:
                yield line
[ "def", "read_csv", "(", "filename", ",", "delimiter", "=", "\",\"", ",", "skip", "=", "0", ",", "guess_type", "=", "True", ",", "has_header", "=", "True", ",", "use_types", "=", "{", "}", ")", ":", "with", "open", "(", "filename", ",", "'r'", ")", "as", "f", ":", "# Skip the n first lines", "if", "has_header", ":", "header", "=", "f", ".", "readline", "(", ")", ".", "strip", "(", ")", ".", "split", "(", "delimiter", ")", "else", ":", "header", "=", "None", "for", "i", "in", "range", "(", "skip", ")", ":", "f", ".", "readline", "(", ")", "for", "line", "in", "csv", ".", "DictReader", "(", "f", ",", "delimiter", "=", "delimiter", ",", "fieldnames", "=", "header", ")", ":", "if", "use_types", ":", "yield", "apply_types", "(", "use_types", ",", "guess_type", ",", "line", ")", "elif", "guess_type", ":", "yield", "dmap", "(", "determine_type", ",", "line", ")", "else", ":", "yield", "line" ]
Read a CSV file
Usage
-----
>>> data = read_csv(filename, delimiter=delimiter, skip=skip,
        guess_type=guess_type, has_header=True, use_types={})

# Use specific types
>>> types = {"sepal.length": int, "petal.width": float}
>>> data = read_csv(filename, guess_type=guess_type, use_types=types)

keywords
:has_header:
    Determine whether the file has a header or not
[ "Read", "a", "CSV", "file", "Usage", "-----", ">>>", "data", "=", "read_csv", "(", "filename", "delimiter", "=", "delimiter", "skip", "=", "skip", "guess_type", "=", "guess_type", "has_header", "=", "True", "use_types", "=", "{}", ")" ]
python
valid
30.470588
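A short usage sketch for the generator above; the file name and column names are made up:

    types = {"sepal.length": float, "petal.width": float}
    for row in read_csv("iris.csv", delimiter=",", use_types=types):
        print(row["sepal.length"], row["petal.width"])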
robotools/extractor
Lib/extractor/formats/opentype.py
https://github.com/robotools/extractor/blob/da3c2c92bfd3da863dd5de29bd8bc94cbbf433df/Lib/extractor/formats/opentype.py#L453-L501
def _gatherLookupIndexes(gpos):
    """
    Gather a mapping of script to lookup indexes
    referenced by the kern feature for each script.
    Returns a dictionary of this structure:

        {
            "latn" : [0],
            "DFLT" : [0]
        }
    """
    # gather the indexes of the kern features
    kernFeatureIndexes = [index for index, featureRecord in enumerate(gpos.FeatureList.FeatureRecord)
                          if featureRecord.FeatureTag == "kern"]
    # find scripts and languages that have kern features
    scriptKernFeatureIndexes = {}
    for scriptRecord in gpos.ScriptList.ScriptRecord:
        script = scriptRecord.ScriptTag
        thisScriptKernFeatureIndexes = []
        defaultLangSysRecord = scriptRecord.Script.DefaultLangSys
        if defaultLangSysRecord is not None:
            f = []
            for featureIndex in defaultLangSysRecord.FeatureIndex:
                if featureIndex not in kernFeatureIndexes:
                    continue
                f.append(featureIndex)
            if f:
                thisScriptKernFeatureIndexes.append((None, f))
        if scriptRecord.Script.LangSysRecord is not None:
            for langSysRecord in scriptRecord.Script.LangSysRecord:
                langSys = langSysRecord.LangSysTag
                f = []
                for featureIndex in langSysRecord.LangSys.FeatureIndex:
                    if featureIndex not in kernFeatureIndexes:
                        continue
                    f.append(featureIndex)
                if f:
                    thisScriptKernFeatureIndexes.append((langSys, f))
        scriptKernFeatureIndexes[script] = thisScriptKernFeatureIndexes
    # convert the feature indexes to lookup indexes
    scriptLookupIndexes = {}
    for script, featureDefinitions in scriptKernFeatureIndexes.items():
        lookupIndexes = scriptLookupIndexes[script] = []
        for language, featureIndexes in featureDefinitions:
            for featureIndex in featureIndexes:
                featureRecord = gpos.FeatureList.FeatureRecord[featureIndex]
                for lookupIndex in featureRecord.Feature.LookupListIndex:
                    if lookupIndex not in lookupIndexes:
                        lookupIndexes.append(lookupIndex)
    # done
    return scriptLookupIndexes
[ "def", "_gatherLookupIndexes", "(", "gpos", ")", ":", "# gather the indexes of the kern features", "kernFeatureIndexes", "=", "[", "index", "for", "index", ",", "featureRecord", "in", "enumerate", "(", "gpos", ".", "FeatureList", ".", "FeatureRecord", ")", "if", "featureRecord", ".", "FeatureTag", "==", "\"kern\"", "]", "# find scripts and languages that have kern features", "scriptKernFeatureIndexes", "=", "{", "}", "for", "scriptRecord", "in", "gpos", ".", "ScriptList", ".", "ScriptRecord", ":", "script", "=", "scriptRecord", ".", "ScriptTag", "thisScriptKernFeatureIndexes", "=", "[", "]", "defaultLangSysRecord", "=", "scriptRecord", ".", "Script", ".", "DefaultLangSys", "if", "defaultLangSysRecord", "is", "not", "None", ":", "f", "=", "[", "]", "for", "featureIndex", "in", "defaultLangSysRecord", ".", "FeatureIndex", ":", "if", "featureIndex", "not", "in", "kernFeatureIndexes", ":", "continue", "f", ".", "append", "(", "featureIndex", ")", "if", "f", ":", "thisScriptKernFeatureIndexes", ".", "append", "(", "(", "None", ",", "f", ")", ")", "if", "scriptRecord", ".", "Script", ".", "LangSysRecord", "is", "not", "None", ":", "for", "langSysRecord", "in", "scriptRecord", ".", "Script", ".", "LangSysRecord", ":", "langSys", "=", "langSysRecord", ".", "LangSysTag", "f", "=", "[", "]", "for", "featureIndex", "in", "langSysRecord", ".", "LangSys", ".", "FeatureIndex", ":", "if", "featureIndex", "not", "in", "kernFeatureIndexes", ":", "continue", "f", ".", "append", "(", "featureIndex", ")", "if", "f", ":", "thisScriptKernFeatureIndexes", ".", "append", "(", "(", "langSys", ",", "f", ")", ")", "scriptKernFeatureIndexes", "[", "script", "]", "=", "thisScriptKernFeatureIndexes", "# convert the feature indexes to lookup indexes", "scriptLookupIndexes", "=", "{", "}", "for", "script", ",", "featureDefinitions", "in", "scriptKernFeatureIndexes", ".", "items", "(", ")", ":", "lookupIndexes", "=", "scriptLookupIndexes", "[", "script", "]", "=", "[", "]", "for", "language", ",", "featureIndexes", "in", "featureDefinitions", ":", "for", "featureIndex", "in", "featureIndexes", ":", "featureRecord", "=", "gpos", ".", "FeatureList", ".", "FeatureRecord", "[", "featureIndex", "]", "for", "lookupIndex", "in", "featureRecord", ".", "Feature", ".", "LookupListIndex", ":", "if", "lookupIndex", "not", "in", "lookupIndexes", ":", "lookupIndexes", ".", "append", "(", "lookupIndex", ")", "# done", "return", "scriptLookupIndexes" ]
Gather a mapping of script to lookup indexes
referenced by the kern feature for each script.
Returns a dictionary of this structure:

    {
        "latn" : [0],
        "DFLT" : [0]
    }
[ "Gather", "a", "mapping", "of", "script", "to", "lookup", "indexes", "referenced", "by", "the", "kern", "feature", "for", "each", "script", ".", "Returns", "a", "dictionary", "of", "this", "structure", ":", "{", "latn", ":", "[", "0", "]", "DFLT", ":", "[", "0", "]", "}" ]
python
train
45.612245
mdsol/rwslib
rwslib/builders/metadata.py
https://github.com/mdsol/rwslib/blob/1a86bc072d408c009ed1de8bf6e98a1769f54d18/rwslib/builders/metadata.py#L1687-L1697
def build(self, builder):
        """Build XML by appending to builder"""
        params = dict(
            Namespace=self.namespace,
            Name=self.name,
            Value=self.value,
            TransactionType=self.transaction_type,
        )
        builder.start("mdsol:Attribute", params)
        builder.end("mdsol:Attribute")
[ "def", "build", "(", "self", ",", "builder", ")", ":", "params", "=", "dict", "(", "Namespace", "=", "self", ".", "namespace", ",", "Name", "=", "self", ".", "name", ",", "Value", "=", "self", ".", "value", ",", "TransactionType", "=", "self", ".", "transaction_type", ",", ")", "builder", ".", "start", "(", "\"mdsol:Attribute\"", ",", "params", ")", "builder", ".", "end", "(", "\"mdsol:Attribute\"", ")" ]
Build XML by appending to builder
[ "Build", "XML", "by", "appending", "to", "builder" ]
python
train
30.181818
brocade/pynos
pynos/versions/ver_7/ver_7_1_0/yang/brocade_vswitch.py
https://github.com/brocade/pynos/blob/bd8a34e98f322de3fc06750827d8bbc3a0c00380/pynos/versions/ver_7/ver_7_1_0/yang/brocade_vswitch.py#L671-L682
def get_vnetwork_vswitches_input_last_rcvd_instance(self, **kwargs):
        """Auto Generated Code
        """
        config = ET.Element("config")
        get_vnetwork_vswitches = ET.Element("get_vnetwork_vswitches")
        config = get_vnetwork_vswitches
        input = ET.SubElement(get_vnetwork_vswitches, "input")
        last_rcvd_instance = ET.SubElement(input, "last-rcvd-instance")
        last_rcvd_instance.text = kwargs.pop('last_rcvd_instance')

        callback = kwargs.pop('callback', self._callback)
        return callback(config)
[ "def", "get_vnetwork_vswitches_input_last_rcvd_instance", "(", "self", ",", "*", "*", "kwargs", ")", ":", "config", "=", "ET", ".", "Element", "(", "\"config\"", ")", "get_vnetwork_vswitches", "=", "ET", ".", "Element", "(", "\"get_vnetwork_vswitches\"", ")", "config", "=", "get_vnetwork_vswitches", "input", "=", "ET", ".", "SubElement", "(", "get_vnetwork_vswitches", ",", "\"input\"", ")", "last_rcvd_instance", "=", "ET", ".", "SubElement", "(", "input", ",", "\"last-rcvd-instance\"", ")", "last_rcvd_instance", ".", "text", "=", "kwargs", ".", "pop", "(", "'last_rcvd_instance'", ")", "callback", "=", "kwargs", ".", "pop", "(", "'callback'", ",", "self", ".", "_callback", ")", "return", "callback", "(", "config", ")" ]
Auto Generated Code
[ "Auto", "Generated", "Code" ]
python
train
45.083333
gwastro/pycbc
pycbc/workflow/pegasus_workflow.py
https://github.com/gwastro/pycbc/blob/7a64cdd104d263f1b6ea0b01e6841837d05a4cb3/pycbc/workflow/pegasus_workflow.py#L182-L186
def add_output_opt(self, opt, out):
        """ Add an option that determines an output
        """
        self.add_opt(opt, out._dax_repr())
        self._add_output(out)
[ "def", "add_output_opt", "(", "self", ",", "opt", ",", "out", ")", ":", "self", ".", "add_opt", "(", "opt", ",", "out", ".", "_dax_repr", "(", ")", ")", "self", ".", "_add_output", "(", "out", ")" ]
Add an option that determines an output
[ "Add", "an", "option", "that", "determines", "an", "output" ]
python
train
33.6
DigitalGlobe/gbdxtools
gbdxtools/images/mixins/geo.py
https://github.com/DigitalGlobe/gbdxtools/blob/def62f8f2d77b168aa2bd115290aaa0f9a08a4bb/gbdxtools/images/mixins/geo.py#L61-L79
def histogram_equalize(self, use_bands, **kwargs):
        ''' Equalize and the histogram and normalize value range
            Equalization is on all three bands, not per-band'''
        data = self._read(self[use_bands,...], **kwargs)
        data = np.rollaxis(data.astype(np.float32), 0, 3)
        flattened = data.flatten()
        if 0 in data:
            masked = np.ma.masked_values(data, 0).compressed()
            image_histogram, bin_edges = np.histogram(masked, 256)
        else:
            image_histogram, bin_edges = np.histogram(flattened, 256)
        bins = (bin_edges[:-1] + bin_edges[1:]) / 2.0
        cdf = image_histogram.cumsum()
        cdf = cdf / float(cdf[-1])
        image_equalized = np.interp(flattened, bins, cdf).reshape(data.shape)
        if 'stretch' in kwargs or 'gamma' in kwargs:
            return self._histogram_stretch(image_equalized, **kwargs)
        else:
            return image_equalized
[ "def", "histogram_equalize", "(", "self", ",", "use_bands", ",", "*", "*", "kwargs", ")", ":", "data", "=", "self", ".", "_read", "(", "self", "[", "use_bands", ",", "...", "]", ",", "*", "*", "kwargs", ")", "data", "=", "np", ".", "rollaxis", "(", "data", ".", "astype", "(", "np", ".", "float32", ")", ",", "0", ",", "3", ")", "flattened", "=", "data", ".", "flatten", "(", ")", "if", "0", "in", "data", ":", "masked", "=", "np", ".", "ma", ".", "masked_values", "(", "data", ",", "0", ")", ".", "compressed", "(", ")", "image_histogram", ",", "bin_edges", "=", "np", ".", "histogram", "(", "masked", ",", "256", ")", "else", ":", "image_histogram", ",", "bin_edges", "=", "np", ".", "histogram", "(", "flattened", ",", "256", ")", "bins", "=", "(", "bin_edges", "[", ":", "-", "1", "]", "+", "bin_edges", "[", "1", ":", "]", ")", "/", "2.0", "cdf", "=", "image_histogram", ".", "cumsum", "(", ")", "cdf", "=", "cdf", "/", "float", "(", "cdf", "[", "-", "1", "]", ")", "image_equalized", "=", "np", ".", "interp", "(", "flattened", ",", "bins", ",", "cdf", ")", ".", "reshape", "(", "data", ".", "shape", ")", "if", "'stretch'", "in", "kwargs", "or", "'gamma'", "in", "kwargs", ":", "return", "self", ".", "_histogram_stretch", "(", "image_equalized", ",", "*", "*", "kwargs", ")", "else", ":", "return", "image_equalized" ]
Equalize the histogram and normalize the value range. Equalization is on all three bands, not per-band
[ "Equalize", "and", "the", "histogram", "and", "normalize", "value", "range", "Equalization", "is", "on", "all", "three", "bands", "not", "per", "-", "band" ]
python
valid
48.736842
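The core trick is classic histogram equalization: map each pixel through the normalized cumulative distribution of intensities. A self-contained numpy rendition on a synthetic band:

import numpy as np

band = np.random.randint(0, 256, size=(64, 64)).astype(np.float32)
hist, bin_edges = np.histogram(band.flatten(), 256)
bins = (bin_edges[:-1] + bin_edges[1:]) / 2.0    # bin centers
cdf = hist.cumsum() / float(hist.sum())          # normalized CDF in [0, 1]
equalized = np.interp(band.flatten(), bins, cdf).reshape(band.shape)
assert 0.0 <= equalized.min() and equalized.max() <= 1.0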
stefankoegl/kdtree
kdtree.py
https://github.com/stefankoegl/kdtree/blob/587edc7056d7735177ad56a84ad5abccdea91693/kdtree.py#L187-L199
def require_axis(f):
    """ Check if the object of the function has axis and sel_axis members """

    @wraps(f)
    def _wrapper(self, *args, **kwargs):
        if None in (self.axis, self.sel_axis):
            raise ValueError('%(func_name)s requires the node %(node)s '
                    'to have an axis and a sel_axis function' %
                    dict(func_name=f.__name__, node=repr(self)))

        return f(self, *args, **kwargs)

    return _wrapper
[ "def", "require_axis", "(", "f", ")", ":", "@", "wraps", "(", "f", ")", "def", "_wrapper", "(", "self", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "if", "None", "in", "(", "self", ".", "axis", ",", "self", ".", "sel_axis", ")", ":", "raise", "ValueError", "(", "'%(func_name) requires the node %(node)s '", "'to have an axis and a sel_axis function'", "%", "dict", "(", "func_name", "=", "f", ".", "__name__", ",", "node", "=", "repr", "(", "self", ")", ")", ")", "return", "f", "(", "self", ",", "*", "args", ",", "*", "*", "kwargs", ")", "return", "_wrapper" ]
Check if the object of the function has axis and sel_axis members
[ "Check", "if", "the", "object", "of", "the", "function", "has", "axis", "and", "sel_axis", "members" ]
python
train
34.769231
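Usage sketch, assuming require_axis from the record above is in scope; Node here is a hypothetical minimal class carrying the two attributes the guard inspects.

from functools import wraps

class Node(object):
    def __init__(self, axis=None, sel_axis=None):
        self.axis, self.sel_axis = axis, sel_axis

    @require_axis
    def split_axis(self):
        return self.axis

print(Node(axis=0, sel_axis=lambda a: a).split_axis())  # 0
# Node().split_axis() raises ValueError because axis/sel_axis are None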
fukuball/fuku-ml
FukuML/MLBase.py
https://github.com/fukuball/fuku-ml/blob/0da15ad7af76adf344b5a6b3f3dbabbbab3446b0/FukuML/MLBase.py#L34-L55
def set_feature_transform(self, mode='polynomial', degree=1):
    '''
    Transform data features into a higher-order feature space
    '''

    if self.status != 'load_train_data':
        print("Please load train data first.")
        return self.train_X

    self.feature_transform_mode = mode
    self.feature_transform_degree = degree

    self.train_X = self.train_X[:, 1:]

    self.train_X = utility.DatasetLoader.feature_transform(
        self.train_X,
        self.feature_transform_mode,
        self.feature_transform_degree
    )

    return self.train_X
[ "def", "set_feature_transform", "(", "self", ",", "mode", "=", "'polynomial'", ",", "degree", "=", "1", ")", ":", "if", "self", ".", "status", "!=", "'load_train_data'", ":", "print", "(", "\"Please load train data first.\"", ")", "return", "self", ".", "train_X", "self", ".", "feature_transform_mode", "=", "mode", "self", ".", "feature_transform_degree", "=", "degree", "self", ".", "train_X", "=", "self", ".", "train_X", "[", ":", ",", "1", ":", "]", "self", ".", "train_X", "=", "utility", ".", "DatasetLoader", ".", "feature_transform", "(", "self", ".", "train_X", ",", "self", ".", "feature_transform_mode", ",", "self", ".", "feature_transform_degree", ")", "return", "self", ".", "train_X" ]
Transform data features into a higher-order feature space
[ "Transform", "data", "feature", "to", "high", "level" ]
python
test
26.681818
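A sketch of what a degree-2 polynomial transform does to the feature matrix; FukuML's utility.DatasetLoader.feature_transform may order or combine terms differently, so treat this as the shape of the idea, not its actual implementation.

import numpy as np

def poly_transform(X, degree=2):
    cols = [np.ones(len(X))]          # bias column, stripped before the call above
    for d in range(1, degree + 1):
        cols.append(X ** d)
    return np.column_stack(cols)

X = np.array([[1.0, 2.0], [3.0, 4.0]])
print(poly_transform(X))
# [[ 1.  1.  2.  1.  4.]
#  [ 1.  3.  4.  9. 16.]]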
msztolcman/versionner
versionner/config.py
https://github.com/msztolcman/versionner/blob/78fca02859e3e3eb71c9eb7ea230758944177c54/versionner/config.py#L106-L118
def _parse_config_file(self, cfg_files):
    """Parse config file (ini) and set properties

    :return:
    """
    cfg_handler = configparser.ConfigParser(interpolation=None)
    if not cfg_handler.read(map(str, cfg_files)):
        return

    self._parse_global_section(cfg_handler)
    self._parse_vcs_section(cfg_handler)
    self._parse_file_section(cfg_handler)
[ "def", "_parse_config_file", "(", "self", ",", "cfg_files", ")", ":", "cfg_handler", "=", "configparser", ".", "ConfigParser", "(", "interpolation", "=", "None", ")", "if", "not", "cfg_handler", ".", "read", "(", "map", "(", "str", ",", "cfg_files", ")", ")", ":", "return", "self", ".", "_parse_global_section", "(", "cfg_handler", ")", "self", ".", "_parse_vcs_section", "(", "cfg_handler", ")", "self", ".", "_parse_file_section", "(", "cfg_handler", ")" ]
Parse config file (ini) and set properties :return:
[ "Parse", "config", "file", "(", "ini", ")", "and", "set", "properties" ]
python
train
30.307692
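configparser.read() returns the list of files it managed to parse, which is why an empty result short-circuits above. A small sketch; the section and option names below are illustrative, not versionner's actual schema:

import configparser

cfg = configparser.ConfigParser(interpolation=None)
cfg.read_string("[project]\nfile = VERSION\n")
print(cfg.get('project', 'file'))        # VERSION
print(cfg.read(['/nonexistent/.rc']))    # [] -> caller would bail out early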
modin-project/modin
modin/backends/pandas/query_compiler.py
https://github.com/modin-project/modin/blob/5b77d242596560c646b8405340c9ce64acb183cb/modin/backends/pandas/query_compiler.py#L1637-L1663
def query(self, expr, **kwargs):
    """Query columns of the DataManager with a boolean expression.

    Args:
        expr: Boolean expression to query the columns with.

    Returns:
        DataManager containing the rows where the boolean expression is satisfied.
    """
    columns = self.columns

    def query_builder(df, **kwargs):
        # This is required because of an Arrow limitation
        # TODO revisit for Arrow error
        df = df.copy()
        df.index = pandas.RangeIndex(len(df))
        df.columns = columns
        df.query(expr, inplace=True, **kwargs)
        df.columns = pandas.RangeIndex(len(df.columns))
        return df

    func = self._prepare_method(query_builder, **kwargs)
    new_data = self._map_across_full_axis(1, func)
    # Query removes rows, so we need to update the index
    new_index = self.compute_index(0, new_data, True)
    return self.__constructor__(new_data, new_index, self.columns, self.dtypes)
[ "def", "query", "(", "self", ",", "expr", ",", "*", "*", "kwargs", ")", ":", "columns", "=", "self", ".", "columns", "def", "query_builder", "(", "df", ",", "*", "*", "kwargs", ")", ":", "# This is required because of an Arrow limitation", "# TODO revisit for Arrow error", "df", "=", "df", ".", "copy", "(", ")", "df", ".", "index", "=", "pandas", ".", "RangeIndex", "(", "len", "(", "df", ")", ")", "df", ".", "columns", "=", "columns", "df", ".", "query", "(", "expr", ",", "inplace", "=", "True", ",", "*", "*", "kwargs", ")", "df", ".", "columns", "=", "pandas", ".", "RangeIndex", "(", "len", "(", "df", ".", "columns", ")", ")", "return", "df", "func", "=", "self", ".", "_prepare_method", "(", "query_builder", ",", "*", "*", "kwargs", ")", "new_data", "=", "self", ".", "_map_across_full_axis", "(", "1", ",", "func", ")", "# Query removes rows, so we need to update the index", "new_index", "=", "self", ".", "compute_index", "(", "0", ",", "new_data", ",", "True", ")", "return", "self", ".", "__constructor__", "(", "new_data", ",", "new_index", ",", "self", ".", "columns", ",", "self", ".", "dtypes", ")" ]
Query columns of the DataManager with a boolean expression.

Args:
    expr: Boolean expression to query the columns with.

Returns:
    DataManager containing the rows where the boolean expression is satisfied.
[ "Query", "columns", "of", "the", "DataManager", "with", "a", "boolean", "expression", "." ]
python
train
37.592593
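What query_builder does to one partition, in isolation: partitions carry positional labels, so the real column names are swapped in just long enough for the expression to resolve, then swapped back.

import pandas

part = pandas.DataFrame([[1, 5], [2, 6], [3, 7]])   # partition: columns 0, 1
columns = ['a', 'b']
df = part.copy()
df.index = pandas.RangeIndex(len(df))
df.columns = columns
df.query('a > 1', inplace=True)
df.columns = pandas.RangeIndex(len(df.columns))
print(df)
#    0  1
# 1  2  6
# 2  3  7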
dhermes/bezier
docs/make_images.py
https://github.com/dhermes/bezier/blob/4f941f82637a8e70a5b159a9203132192e23406b/docs/make_images.py#L1080-L1108
def newton_refine_curve(curve, point, s, new_s):
    """Image for :func:`._curve_helpers.newton_refine` docstring."""
    if NO_IMAGES:
        return

    ax = curve.plot(256)
    ax.plot(point[0, :], point[1, :], marker="H")
    wrong_points = curve.evaluate_multi(np.asfortranarray([s, new_s]))
    ax.plot(
        wrong_points[0, [0]],
        wrong_points[1, [0]],
        color="black",
        linestyle="None",
        marker="o",
    )
    ax.plot(
        wrong_points[0, [1]],
        wrong_points[1, [1]],
        color="black",
        linestyle="None",
        marker="o",
        markeredgewidth=1,
        markerfacecolor="None",
    )
    # Set the axis bounds / scaling.
    ax.axis("scaled")
    ax.set_xlim(-0.125, 3.125)
    ax.set_ylim(-0.125, 1.375)
    save_image(ax.figure, "newton_refine_curve.png")
[ "def", "newton_refine_curve", "(", "curve", ",", "point", ",", "s", ",", "new_s", ")", ":", "if", "NO_IMAGES", ":", "return", "ax", "=", "curve", ".", "plot", "(", "256", ")", "ax", ".", "plot", "(", "point", "[", "0", ",", ":", "]", ",", "point", "[", "1", ",", ":", "]", ",", "marker", "=", "\"H\"", ")", "wrong_points", "=", "curve", ".", "evaluate_multi", "(", "np", ".", "asfortranarray", "(", "[", "s", ",", "new_s", "]", ")", ")", "ax", ".", "plot", "(", "wrong_points", "[", "0", ",", "[", "0", "]", "]", ",", "wrong_points", "[", "1", ",", "[", "0", "]", "]", ",", "color", "=", "\"black\"", ",", "linestyle", "=", "\"None\"", ",", "marker", "=", "\"o\"", ",", ")", "ax", ".", "plot", "(", "wrong_points", "[", "0", ",", "[", "1", "]", "]", ",", "wrong_points", "[", "1", ",", "[", "1", "]", "]", ",", "color", "=", "\"black\"", ",", "linestyle", "=", "\"None\"", ",", "marker", "=", "\"o\"", ",", "markeredgewidth", "=", "1", ",", "markerfacecolor", "=", "\"None\"", ",", ")", "# Set the axis bounds / scaling.", "ax", ".", "axis", "(", "\"scaled\"", ")", "ax", ".", "set_xlim", "(", "-", "0.125", ",", "3.125", ")", "ax", ".", "set_ylim", "(", "-", "0.125", ",", "1.375", ")", "save_image", "(", "ax", ".", "figure", ",", "\"newton_refine_curve.png\"", ")" ]
Image for :func:`._curve_helpers.newton_refine` docstring.
[ "Image", "for", ":", "func", ":", ".", "_curve_helpers", ".", "newton_refine", "docstring", "." ]
python
train
27.517241
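The picture contrasts B(s) with B(new_s) after one refinement step. A sketch of the Gauss-Newton update behind that step for a quadratic curve; this illustrates the idea the image documents, not bezier's exact internals:

import numpy as np

nodes = np.asfortranarray([[0.0, 1.5, 3.0], [0.0, 1.5, 0.0]])  # quadratic, 2 x 3

def b(s):        # Bernstein evaluation
    return (1 - s) ** 2 * nodes[:, 0] + 2 * (1 - s) * s * nodes[:, 1] + s ** 2 * nodes[:, 2]

def b_prime(s):  # hodograph (first derivative)
    return 2 * ((1 - s) * (nodes[:, 1] - nodes[:, 0]) + s * (nodes[:, 2] - nodes[:, 1]))

point = np.array([1.0, 1.0])
s = 0.25
residual = b(s) - point
d = b_prime(s)
new_s = s - residual.dot(d) / d.dot(d)   # one Gauss-Newton step toward the point
print(new_s, np.linalg.norm(b(new_s) - point) < np.linalg.norm(residual))  # 0.375 True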
pudo/apikit
apikit/args.py
https://github.com/pudo/apikit/blob/638f83fc3f727d56541dd76ceb5fde04993a2bc6/apikit/args.py#L8-L13
def arg_bool(name, default=False):
    """ Fetch a query argument, as a boolean. """
    v = request.args.get(name, '')
    if not len(v):
        return default
    return v in BOOL_TRUISH
[ "def", "arg_bool", "(", "name", ",", "default", "=", "False", ")", ":", "v", "=", "request", ".", "args", ".", "get", "(", "name", ",", "''", ")", "if", "not", "len", "(", "v", ")", ":", "return", "default", "return", "v", "in", "BOOL_TRUISH" ]
Fetch a query argument, as a boolean.
[ "Fetch", "a", "query", "argument", "as", "a", "boolean", "." ]
python
train
30.666667
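A self-contained rendition for experimentation; BOOL_TRUISH's actual contents live elsewhere in apikit, so the truth set below is an assumption.

BOOL_TRUISH = ('true', '1', 'yes', 'on', 'y')   # assumed set, for illustration

def arg_bool_plain(args, name, default=False):
    v = args.get(name, '')
    if not len(v):
        return default
    return v in BOOL_TRUISH

print(arg_bool_plain({'raw': 'true'}, 'raw'))       # True
print(arg_bool_plain({}, 'raw', default=True))      # True (missing -> default)
print(arg_bool_plain({'raw': 'nope'}, 'raw'))       # False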
ssalentin/plip
plip/modules/supplemental.py
https://github.com/ssalentin/plip/blob/906c8d36463689779b403f6c2c9ed06174acaf9a/plip/modules/supplemental.py#L410-L442
def readmol(path, as_string=False):
    """Reads the given molecule file and returns the corresponding Pybel molecule as well as the input file type.
    In contrast to the standard Pybel implementation, the file is closed properly."""
    supported_formats = ['pdb']
    # Fix for Windows-generated files: Remove carriage return characters
    if "\r" in path and as_string:
        path = path.replace('\r', '')

    for sformat in supported_formats:
        obc = pybel.ob.OBConversion()
        obc.SetInFormat(sformat)
        write_message("Detected {} as format. Trying to read file with OpenBabel...\n".format(sformat), mtype='debug')

        # Read molecules with single bond information
        if as_string:
            try:
                mymol = pybel.readstring(sformat, path)
            except IOError:
                sysexit(4, 'No valid file format provided.')
        else:
            read_file = pybel.readfile(format=sformat, filename=path, opt={"s": None})
            try:
                mymol = next(read_file)
            except StopIteration:
                sysexit(4, 'File contains no valid molecules.\n')

        write_message("Molecule successfully read.\n", mtype='debug')

        # Assign multiple bonds
        mymol.OBMol.PerceiveBondOrders()
        return mymol, sformat

    sysexit(4, 'No valid file format provided.')
[ "def", "readmol", "(", "path", ",", "as_string", "=", "False", ")", ":", "supported_formats", "=", "[", "'pdb'", "]", "# Fix for Windows-generated files: Remove carriage return characters", "if", "\"\\r\"", "in", "path", "and", "as_string", ":", "path", "=", "path", ".", "replace", "(", "'\\r'", ",", "''", ")", "for", "sformat", "in", "supported_formats", ":", "obc", "=", "pybel", ".", "ob", ".", "OBConversion", "(", ")", "obc", ".", "SetInFormat", "(", "sformat", ")", "write_message", "(", "\"Detected {} as format. Trying to read file with OpenBabel...\\n\"", ".", "format", "(", "sformat", ")", ",", "mtype", "=", "'debug'", ")", "# Read molecules with single bond information", "if", "as_string", ":", "try", ":", "mymol", "=", "pybel", ".", "readstring", "(", "sformat", ",", "path", ")", "except", "IOError", ":", "sysexit", "(", "4", ",", "'No valid file format provided.'", ")", "else", ":", "read_file", "=", "pybel", ".", "readfile", "(", "format", "=", "sformat", ",", "filename", "=", "path", ",", "opt", "=", "{", "\"s\"", ":", "None", "}", ")", "try", ":", "mymol", "=", "next", "(", "read_file", ")", "except", "StopIteration", ":", "sysexit", "(", "4", ",", "'File contains no valid molecules.\\n'", ")", "write_message", "(", "\"Molecule successfully read.\\n\"", ",", "mtype", "=", "'debug'", ")", "# Assign multiple bonds", "mymol", ".", "OBMol", ".", "PerceiveBondOrders", "(", ")", "return", "mymol", ",", "sformat", "sysexit", "(", "4", ",", "'No valid file format provided.'", ")" ]
Reads the given molecule file and returns the corresponding Pybel molecule as well as the input file type. In contrast to the standard Pybel implementation, the file is closed properly.
[ "Reads", "the", "given", "molecule", "file", "and", "returns", "the", "corresponding", "Pybel", "molecule", "as", "well", "as", "the", "input", "file", "type", ".", "In", "contrast", "to", "the", "standard", "Pybel", "implementation", "the", "file", "is", "closed", "properly", "." ]
python
train
40.363636
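The as_string path reduces to a plain pybel call plus bond-order perception. A hedged sketch: it requires the Open Babel Python bindings, and the file path is illustrative.

import pybel  # Open Babel bindings

pdb_block = open('structure.pdb').read()     # illustrative path
mol = pybel.readstring('pdb', pdb_block)     # what readmol does for as_string=True
mol.OBMol.PerceiveBondOrders()               # upgrade the single-bond-only read
print(len(mol.atoms), 'atoms')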
edx/edx-enterprise
integrated_channels/degreed/client.py
https://github.com/edx/edx-enterprise/blob/aea91379ab0a87cd3bc798961fce28b60ee49a80/integrated_channels/degreed/client.py#L127-L158
def _sync_content_metadata(self, serialized_data, http_method):
    """
    Synchronize content metadata using the Degreed course content API.

    Args:
        serialized_data: JSON-encoded object containing content metadata.
        http_method: The HTTP method to use for the API request.

    Raises:
        ClientError: If Degreed API request fails.
    """
    try:
        status_code, response_body = getattr(self, '_' + http_method)(
            urljoin(self.enterprise_configuration.degreed_base_url,
                    self.global_degreed_config.course_api_path),
            serialized_data,
            self.CONTENT_PROVIDER_SCOPE
        )
    except requests.exceptions.RequestException as exc:
        raise ClientError(
            'DegreedAPIClient request failed: {error} {message}'.format(
                error=exc.__class__.__name__,
                message=str(exc)
            )
        )

    if status_code >= 400:
        raise ClientError(
            'DegreedAPIClient request failed with status {status_code}: {message}'.format(
                status_code=status_code,
                message=response_body
            )
        )
[ "def", "_sync_content_metadata", "(", "self", ",", "serialized_data", ",", "http_method", ")", ":", "try", ":", "status_code", ",", "response_body", "=", "getattr", "(", "self", ",", "'_'", "+", "http_method", ")", "(", "urljoin", "(", "self", ".", "enterprise_configuration", ".", "degreed_base_url", ",", "self", ".", "global_degreed_config", ".", "course_api_path", ")", ",", "serialized_data", ",", "self", ".", "CONTENT_PROVIDER_SCOPE", ")", "except", "requests", ".", "exceptions", ".", "RequestException", "as", "exc", ":", "raise", "ClientError", "(", "'DegreedAPIClient request failed: {error} {message}'", ".", "format", "(", "error", "=", "exc", ".", "__class__", ".", "__name__", ",", "message", "=", "str", "(", "exc", ")", ")", ")", "if", "status_code", ">=", "400", ":", "raise", "ClientError", "(", "'DegreedAPIClient request failed with status {status_code}: {message}'", ".", "format", "(", "status_code", "=", "status_code", ",", "message", "=", "response_body", ")", ")" ]
Synchronize content metadata using the Degreed course content API.

Args:
    serialized_data: JSON-encoded object containing content metadata.
    http_method: The HTTP method to use for the API request.

Raises:
    ClientError: If Degreed API request fails.
[ "Synchronize", "content", "metadata", "using", "the", "Degreed", "course", "content", "API", "." ]
python
valid
38.28125
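The shape of the method is dispatch-by-name plus error normalization: any transport failure or HTTP error surfaces as one exception type. A reduced sketch with stand-in names (ClientError, the URL, and _post here are illustrative, not edx-enterprise's actual classes):

import requests

class ClientError(Exception):
    pass

class MiniClient(object):
    def _post(self, url, data):
        resp = requests.post(url, data=data)
        return resp.status_code, resp.text

    def sync(self, data, http_method='post'):
        try:
            status, body = getattr(self, '_' + http_method)(
                'https://api.example.invalid/courses', data)
        except requests.exceptions.RequestException as exc:
            raise ClientError('request failed: {}: {}'.format(
                exc.__class__.__name__, exc))
        if status >= 400:
            raise ClientError('request failed with status {}: {}'.format(
                status, body))
        return body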
evandempsey/porter2-stemmer
porter2stemmer/porter2stemmer.py
https://github.com/evandempsey/porter2-stemmer/blob/949824b7767c25efb014ef738e682442fa70c10b/porter2stemmer/porter2stemmer.py#L295-L313
def delete_suffixes(self, word):
    """
    Delete some very common suffixes.
    """
    length = len(word)
    suffixes = ['al', 'ance', 'ence', 'er', 'ic', 'able', 'ible', 'ant',
                'ement', 'ment', 'ent', 'ism', 'ate', 'iti', 'ous',
                'ive', 'ize']
    for suffix in suffixes:
        if word.endswith(suffix) and self.r2 <= (length - len(suffix)):
            word = word[:-len(suffix)]
            return word

    if word.endswith('ion') and self.r2 <= (length - 3):
        if word[length - 4] in 'st':
            word = word[:-3]

    return word
[ "def", "delete_suffixes", "(", "self", ",", "word", ")", ":", "length", "=", "len", "(", "word", ")", "suffixes", "=", "[", "'al'", ",", "'ance'", ",", "'ence'", ",", "'er'", ",", "'ic'", ",", "'able'", ",", "'ible'", ",", "'ant'", ",", "'ement'", ",", "'ment'", ",", "'ent'", ",", "'ism'", ",", "'ate'", ",", "'iti'", ",", "'ous'", ",", "'ive'", ",", "'ize'", "]", "for", "suffix", "in", "suffixes", ":", "if", "word", ".", "endswith", "(", "suffix", ")", "and", "self", ".", "r2", "<=", "(", "length", "-", "len", "(", "suffix", ")", ")", ":", "word", "=", "word", "[", ":", "-", "len", "(", "suffix", ")", "]", "return", "word", "if", "word", ".", "endswith", "(", "'ion'", ")", "and", "self", ".", "r2", "<=", "(", "length", "-", "3", ")", ":", "if", "word", "[", "length", "-", "4", "]", "in", "'st'", ":", "word", "=", "word", "[", ":", "-", "3", "]", "return", "word" ]
Delete some very common suffixes.
[ "Delete", "some", "very", "common", "suffixes", "." ]
python
train
33
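Tracing the R2 gate by hand: for 'national' (length 8), R2 begins at offset 6, so the 'al' suffix clears the 8 - len('al') >= r2 check and is removed.

word, r2 = 'national', 6   # r2 as computed by the stemmer's region analysis
for suffix in ['al', 'ance', 'ence', 'er', 'ic']:
    if word.endswith(suffix) and r2 <= len(word) - len(suffix):
        print(word[:-len(suffix)])   # nation
        break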
bukun/TorCMS
ext_script/autocrud/func_gen_html.py
https://github.com/bukun/TorCMS/blob/6567c7fe2604a1d646d4570c017840958630ed2b/ext_script/autocrud/func_gen_html.py#L135-L149
def gen_checkbox_list(sig_dic):
    '''
    Generate the list-view HTML snippet for a CHECKBOX signature, one entry per item.
    '''
    view_zuoxiang = '''<span class="iga_pd_val">'''
    dic_tmp = sig_dic['dic']
    for key in dic_tmp.keys():
        tmp_str = '''{{% if "{0}" in postinfo.extinfo["{1}"] %}} {2} {{% end %}}
        '''.format(key, sig_dic['en'], dic_tmp[key])
        view_zuoxiang += tmp_str
    view_zuoxiang += '''</span>'''
    return view_zuoxiang
[ "def", "gen_checkbox_list", "(", "sig_dic", ")", ":", "view_zuoxiang", "=", "'''<span class=\"iga_pd_val\">'''", "dic_tmp", "=", "sig_dic", "[", "'dic'", "]", "for", "key", "in", "dic_tmp", ".", "keys", "(", ")", ":", "tmp_str", "=", "'''{{% if \"{0}\" in postinfo.extinfo[\"{1}\"] %}} {2} {{% end %}}\n '''", ".", "format", "(", "key", ",", "sig_dic", "[", "'en'", "]", ",", "dic_tmp", "[", "key", "]", ")", "view_zuoxiang", "+=", "tmp_str", "view_zuoxiang", "+=", "'''</span>'''", "return", "view_zuoxiang" ]
Generate the list-view HTML snippet for a CHECKBOX signature, one entry per item.
[ "For", "generating", "List", "view", "HTML", "file", "for", "CHECKBOX", ".", "for", "each", "item", "." ]
python
train
29.8
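For a two-option field the generator emits one Tornado-template conditional per key; 'tag_conditions' below is an illustrative field name, and this assumes gen_checkbox_list from the record above is in scope.

sig_dic = {'en': 'tag_conditions', 'dic': {'0': 'New', '1': 'Used'}}
print(gen_checkbox_list(sig_dic))
# <span class="iga_pd_val">{% if "0" in postinfo.extinfo["tag_conditions"] %} New {% end %}
#         {% if "1" in postinfo.extinfo["tag_conditions"] %} Used {% end %}
#         </span>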
danilobellini/audiolazy
audiolazy/lazy_synth.py
https://github.com/danilobellini/audiolazy/blob/dba0a278937909980ed40b976d866b8e97c35dee/audiolazy/lazy_synth.py#L325-L345
def zeros(dur=None):
    """
    Zeros/zeroes stream generator.
    You may sum your endless stream by this to enforce an end to it.

    Parameters
    ----------
    dur :
        Duration, in number of samples; endless if not given.

    Returns
    -------
    Stream that repeats "0.0" during a given time duration (if any) or
    endlessly.

    """
    if dur is None or (isinf(dur) and dur > 0):
        while True:
            yield 0.0
    for x in xrange(int(.5 + dur)):
        yield 0.0
[ "def", "zeros", "(", "dur", "=", "None", ")", ":", "if", "dur", "is", "None", "or", "(", "isinf", "(", "dur", ")", "and", "dur", ">", "0", ")", ":", "while", "True", ":", "yield", "0.0", "for", "x", "in", "xrange", "(", "int", "(", ".5", "+", "dur", ")", ")", ":", "yield", "0.0" ]
Zeros/zeroes stream generator.
You may sum your endless stream by this to enforce an end to it.

Parameters
----------
dur :
    Duration, in number of samples; endless if not given.

Returns
-------
Stream that repeats "0.0" during a given time duration (if any) or endlessly.
[ "Zeros", "/", "zeroes", "stream", "generator", ".", "You", "may", "sum", "your", "endless", "stream", "by", "this", "to", "enforce", "an", "end", "to", "it", "." ]
python
train
20.714286
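A plain-generator rendition for experimentation (AudioLazy wraps the real one in its Stream type and uses a Python 2/3 xrange shim):

from math import isinf
from itertools import islice

def zeros_plain(dur=None):
    if dur is None or (isinf(dur) and dur > 0):
        while True:
            yield 0.0
    else:
        for _ in range(int(.5 + dur)):
            yield 0.0

print(list(zeros_plain(3)))             # [0.0, 0.0, 0.0]
print(list(islice(zeros_plain(), 4)))   # first 4 samples of the endless stream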
slundberg/shap
shap/datasets.py
https://github.com/slundberg/shap/blob/b280cb81d498b9d98565cad8dd16fc88ae52649f/shap/datasets.py#L200-L225
def independentlinear60(display=False):
    """ A simulated dataset with tight correlations among distinct groups of features.
    """
    # save the generator state and set a constant seed
    old_state = np.random.get_state()
    np.random.seed(0)

    # generate dataset with known correlation
    N = 1000
    M = 60

    # set one coefficient from each group of 3 to 1
    beta = np.zeros(M)
    beta[0:30:3] = 1
    f = lambda X: np.matmul(X, beta)

    # Make sure the sample correlation is a perfect match
    X_start = np.random.randn(N, M)
    X = X_start - X_start.mean(0)
    y = f(X) + np.random.randn(N) * 1e-2

    # restore the previous numpy random state
    np.random.set_state(old_state)

    return pd.DataFrame(X), y
[ "def", "independentlinear60", "(", "display", "=", "False", ")", ":", "# set a constant seed", "old_seed", "=", "np", ".", "random", ".", "seed", "(", ")", "np", ".", "random", ".", "seed", "(", "0", ")", "# generate dataset with known correlation", "N", "=", "1000", "M", "=", "60", "# set one coefficent from each group of 3 to 1", "beta", "=", "np", ".", "zeros", "(", "M", ")", "beta", "[", "0", ":", "30", ":", "3", "]", "=", "1", "f", "=", "lambda", "X", ":", "np", ".", "matmul", "(", "X", ",", "beta", ")", "# Make sure the sample correlation is a perfect match", "X_start", "=", "np", ".", "random", ".", "randn", "(", "N", ",", "M", ")", "X", "=", "X_start", "-", "X_start", ".", "mean", "(", "0", ")", "y", "=", "f", "(", "X", ")", "+", "np", ".", "random", ".", "randn", "(", "N", ")", "*", "1e-2", "# restore the previous numpy random seed", "np", ".", "random", ".", "seed", "(", "old_seed", ")", "return", "pd", ".", "DataFrame", "(", "X", ")", ",", "y" ]
A simulated dataset with tight correlations among distinct groups of features.
[ "A", "simulated", "dataset", "with", "tight", "correlations", "among", "distinct", "groups", "of", "features", "." ]
python
train
25.769231
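Checking the advertised structure directly, assuming the function above is importable: the response is, within the injected 1e-2 noise, a linear function of every third column in the first half.

import numpy as np

X, y = independentlinear60()
print(X.shape, y.shape)                            # (1000, 60) (1000,)
beta = np.zeros(60)
beta[0:30:3] = 1
print(np.allclose(y, X.values @ beta, atol=0.1))   # True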
AustralianSynchrotron/lightflow
lightflow/models/utils.py
https://github.com/AustralianSynchrotron/lightflow/blob/dc53dbc1d961e20fb144273baca258060705c03e/lightflow/models/utils.py#L2-L19
def find_indices(lst, element):
    """ Returns the indices for all occurrences of 'element' in 'lst'.

    Args:
        lst (list): List to search.
        element: Element to find.

    Returns:
        list: List of indices.
    """
    result = []
    offset = -1
    while True:
        try:
            offset = lst.index(element, offset+1)
        except ValueError:
            return result
        result.append(offset)
[ "def", "find_indices", "(", "lst", ",", "element", ")", ":", "result", "=", "[", "]", "offset", "=", "-", "1", "while", "True", ":", "try", ":", "offset", "=", "lst", ".", "index", "(", "element", ",", "offset", "+", "1", ")", "except", "ValueError", ":", "return", "result", "result", ".", "append", "(", "offset", ")" ]
Returns the indices for all occurrences of 'element' in 'lst'.

Args:
    lst (list): List to search.
    element: Element to find.

Returns:
    list: List of indices.
[ "Returns", "the", "indices", "for", "all", "occurrences", "of", "element", "in", "lst", "." ]
python
train
23.5
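list.index with a moving start offset visits each match exactly once, and the terminating ValueError doubles as the exit condition; assuming find_indices from the record above is in scope:

print(find_indices([1, 2, 1, 3, 1], 1))   # [0, 2, 4]
print(find_indices(['a', 'b'], 'z'))      # [] (ValueError ends the scan)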