Columns (name, type, value range):

  repo              stringlengths 7-54
  path              stringlengths 4-192
  url               stringlengths 87-284
  code              stringlengths 78-104k
  code_tokens       sequence
  docstring         stringlengths 1-46.9k
  docstring_tokens  sequence
  language          stringclasses, 1 value
  partition         stringclasses, 3 values
gofed/gofedlib
gofedlib/distribution/packagenamegenerator.py
https://github.com/gofed/gofedlib/blob/0674c248fe3d8706f98f912996b65af469f96b10/gofedlib/distribution/packagenamegenerator.py#L9-L59
def generate(self, project):
    """Package name construction is based on provider, not on prefix.
    Prefix does not have to equal provider_prefix.
    """
    for assignment in self.s2n_mapping:
        if assignment["ipprefix"] == project:
            self._name = assignment["package"]
            return self

    #
    # github.com          -> github
    # code.google.com/p/  -> googlecode
    # golang.org/x/       -> golangorg
    # gopkg.in/check.v1   -> gopkg-check
    # camlistore.org
    #
    name = project
    if name.startswith("github.com"):
        name = re.sub(r"^github\.com", "github", name)

    if name.startswith("gopkg.in"):
        name = re.sub(r"gopkg\.in", "gopkg", name)
        # any version marks?
        name = re.sub(r"\.v\d", "", name)
        name = re.sub(r"/v\d/", "/", name)

    if name.startswith("code.google.com/p"):
        name = re.sub(r"^code\.google\.com/p", "googlecode", name)

    if name.startswith("golang.org/x"):
        name = re.sub(r"^golang\.org/x", "golangorg", name)

    if name.startswith("google.golang.org"):
        name = re.sub(r"^google\.golang\.org", "googlegolangorg", name)

    if name.startswith("bitbucket.org"):
        name = re.sub(r"^bitbucket\.org", "bitbucket", name)

    if name.startswith("k8s.io"):
        name = re.sub(r"^k8s\.io", "k8s", name)

    if name.endswith(".org"):
        name = re.sub(r"\.org$", "", name)

    name = name.replace("/", "-")
    self._name = "golang-%s" % name
    return self
Package name construction is based on provider, not on prefix. Prefix does not have to equal provider_prefix.
python
train
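For orientation, the fallback branch above rewrites an import-path prefix into a Fedora-style golang package name. Below is a condensed, self-contained sketch of just that branch (illustration only; the real method first consults s2n_mapping and handles more prefixes):

import re

def golang_package_name(project):
    # Condensed sketch of the fallback branch in generate() above;
    # only a few of the prefix rules are reproduced here.
    name = project
    name = re.sub(r"^github\.com", "github", name)
    if name.startswith("gopkg.in"):
        name = re.sub(r"gopkg\.in", "gopkg", name)
        name = re.sub(r"\.v\d", "", name)   # strip version marks
    name = re.sub(r"^golang\.org/x", "golangorg", name)
    return "golang-%s" % name.replace("/", "-")

print(golang_package_name("github.com/stretchr/testify"))  # golang-github-stretchr-testify
print(golang_package_name("gopkg.in/check.v1"))            # golang-gopkg-check
print(golang_package_name("golang.org/x/net"))             # golang-golangorg-net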
openthread/openthread
tools/harness-thci/OpenThread.py
https://github.com/openthread/openthread/blob/0208d10563aa21c518092985c78ecf9cd223ab74/tools/harness-thci/OpenThread.py#L2567-L2580
def setUdpJoinerPort(self, portNumber):
    """set Joiner UDP Port

    Args:
        portNumber: Joiner UDP Port number

    Returns:
        True: successful to set Joiner UDP Port
        False: fail to set Joiner UDP Port
    """
    print '%s call setUdpJoinerPort' % self.port
    cmd = 'joinerport %d' % portNumber
    print cmd
    return self.__sendCommand(cmd)[0] == 'Done'
set Joiner UDP Port Args: portNumber: Joiner UDP Port number Returns: True: successful to set Joiner UDP Port False: fail to set Joiner UDP Port
python
train
brentpayne/phrase
phrase/noun_phrase_dictionary.py
https://github.com/brentpayne/phrase/blob/2c25e202eff0f284cb724a36cec1b22a1169e7a2/phrase/noun_phrase_dictionary.py#L31-L54
def convert_noun_phrases(self, id_run, pos_run):
    """
    Converts any identified phrases in the run into phrase_ids. The dictionary provides all acceptable phrases
    :param id_run: a run of token ids
    :param dictionary: a dictionary of acceptable phrases described as their component token ids
    :return: a run of token and phrase ids.
    """
    i = 0
    rv = []
    while i < len(id_run):
        phrase_id, offset = PhraseDictionary.return_max_phrase(id_run, i, self)
        if phrase_id:
            if pos_run[i] in ('JJ', 'JJR', 'JJS', 'NN', 'NNS', 'NNP', 'NNPS', 'SYM', 'CD', 'VBG', 'FW', 'NP'):
                print "MERGED", pos_run[i], self.get_phrase(phrase_id)
                rv.append((phrase_id, 'NP'))
                i = offset
            else:
                print "SKIPPED", pos_run[i], self.get_phrase(phrase_id)
                rv.append((id_run[i], pos_run[i]))
                i += 1
        else:
            rv.append((id_run[i], pos_run[i]))
            i += 1
    return rv
Converts any identified phrases in the run into phrase_ids. The dictionary provides all acceptable phrases :param id_run: a run of token ids :param dictionary: a dictionary of acceptable phrases described as their component token ids :return: a run of token and phrase ids.
python
train
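PhraseDictionary.return_max_phrase is not shown in this row. The sketch below stands in for it with a plain dict of token-id tuples (hypothetical data) to illustrate the longest-match lookup the loop above relies on:

# Hypothetical stand-in for PhraseDictionary.return_max_phrase: map
# token-id tuples to phrase ids and return the longest match at i.
phrases = {(1, 2): 100, (1, 2, 3): 101}

def return_max_phrase(ids, i):
    best = (None, i)
    for length in range(2, len(ids) - i + 1):
        pid = phrases.get(tuple(ids[i:i + length]))
        if pid is not None:
            best = (pid, i + length)  # offset just past the phrase
    return best

print(return_max_phrase([1, 2, 3, 4], 0))  # (101, 3): the longest phrase wins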
CellProfiler/centrosome
centrosome/lapjv.py
https://github.com/CellProfiler/centrosome/blob/7bd9350a2d4ae1b215b81eabcecfe560bbb1f32a/centrosome/lapjv.py#L237-L360
def slow_augment(n, ii, jj, idx, count, x, y, u, v, c):
    '''Perform the augmentation step to assign unassigned i and j

    n - the # of i and j, also the marker of unassigned x and y
    ii - the unassigned i
    jj - the ragged arrays of j for each i
    idx - the index of the first j for each i
    count - the number of j for each i
    x - the assignments of j for each i
    y - the assignments of i for each j
    u,v - the dual variables
    c - the costs
    '''
    ##################################################
    #
    # Augment procedure: from the Jonker paper.
    #
    # Note:
    #    cred[i,j] = c[i,j] - u[i] - v[j]
    #
    # procedure AUGMENT;
    # begin
    #   for all unassigned i* do
    #   begin
    #     for j := 1 ... n do
    #       begin d[j] := c[i*,j] - v[j]; pred[j] := i* end;
    #     READY := { }; SCAN := { }; TODO := { 1 ... n };
    #     repeat
    #       if SCAN = { } then
    #       begin
    #         u = min {d[j] for j in TODO};
    #         SCAN := {j | d[j] = u};
    #         TODO := TODO - SCAN;
    #         for j in SCAN do if y[j] == 0 then go to augment
    #       end;
    #       select any j* in SCAN;
    #       i := y[j*]; SCAN := SCAN - {j*}; READY := READY + {j*};
    #       for all j in TODO do if u + cred[i,j] < d[j] then
    #       begin
    #         d[j] := u + cred[i,j]; pred[j] := i;
    #         if d[j] = u then
    #           if y[j] is unassigned then go to augment else
    #           begin SCAN := SCAN + {j}; TODO := TODO - {j} end
    #       end
    #     until false; (* repeat always ends with go to augment *)
    # augment:
    #   (* price updating *)
    #   for k in READY do v[k] := v[k] + d[k] - u;
    #   (* augmentation *)
    #   repeat
    #     i := pred[j]; y[j] := i; k := j; j := x[i]; x[i] := k
    #   until i = i*
    #   end
    # end
    inf = np.sum(c) + 1
    d = np.zeros(n)
    cc = np.zeros((n, n))
    cc[:, :] = inf
    for i in range(n):
        cc[i, jj[idx[i]:(idx[i] + count[i])]] = c[idx[i]:(idx[i] + count[i])]
    c = cc
    for i in ii:
        print("Processing i=%d" % i)
        j = jj[idx[i]:(idx[i] + count[i])]
        d = c[i, :] - v
        pred = np.ones(n, int) * i
        on_deck = []
        ready = []
        scan = []
        to_do = list(range(n))
        try:
            while True:
                print("Evaluating i=%d, n_scan = %d" % (i, len(scan)))
                if len(scan) == 0:
                    ready += on_deck
                    on_deck = []
                    umin = np.min([d[jjj] for jjj in to_do])
                    print("umin = %f" % umin)
                    scan = [jjj for jjj in to_do if d[jjj] == umin]
                    to_do = [jjj for jjj in to_do if d[jjj] != umin]
                    for j1 in scan:
                        if y[j1] == n:
                            raise StopIteration()
                j1 = scan[0]
                iii = y[j1]
                print("Consider replacing i=%d, j=%d" % (iii, j1))
                scan = scan[1:]
                on_deck += [j1]
                u1 = c[iii, j1] - v[j1] - umin
                for j1 in list(to_do):
                    h = c[iii, j1] - v[j1] - u1
                    print("Consider j=%d as replacement, c[%d,%d]=%f,v[%d]=%f,h=%f, d[j]= %f" %
                          (j1, iii, j1, c[iii, j1], j1, v[j1], h, d[j1]))
                    if h < d[j1]:
                        print("Add to chain")
                        pred[j1] = iii
                        if h == umin:
                            if y[j1] == n:
                                raise StopIteration()
                            print("Add to scan")
                            scan += [j1]
                            to_do.remove(j1)
                        d[j1] = h
        except StopIteration:
            # Augment
            print("Augmenting %d" % j1)
            for k in ready:
                temp = v[k]
                v[k] = v[k] + d[k] - umin
                print("v[%d] %f -> %f" % (k, temp, v[k]))
            while True:
                iii = pred[j1]
                print("y[%d] %d -> %d" % (j1, y[j1], iii))
                y[j1] = iii
                j1, x[iii] = x[iii], j1
                if iii == i:
                    break
    #
    # Re-establish slackness since we didn't pay attention to u
    #
    for i in range(n):
        j = x[i]
        u[i] = c[i, j] - v[j]
Perform the augmentation step to assign unassigned i and j n - the # of i and j, also the marker of unassigned x and y ii - the unassigned i jj - the ragged arrays of j for each i idx - the index of the first j for each i count - the number of j for each i x - the assignments of j for each i y - the assignments of i for each j u,v - the dual variables c - the costs
[ "Perform", "the", "augmentation", "step", "to", "assign", "unassigned", "i", "and", "j", "n", "-", "the", "#", "of", "i", "and", "j", "also", "the", "marker", "of", "unassigned", "x", "and", "y", "ii", "-", "the", "unassigned", "i", "jj", "-", "the", "ragged", "arrays", "of", "j", "for", "each", "i", "idx", "-", "the", "index", "of", "the", "first", "j", "for", "each", "i", "count", "-", "the", "number", "of", "j", "for", "each", "i", "x", "-", "the", "assignments", "of", "j", "for", "each", "i", "y", "-", "the", "assignments", "of", "i", "for", "each", "j", "u", "v", "-", "the", "dual", "variables", "c", "-", "the", "costs" ]
python
train
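The routine above is a deliberately verbose reference implementation of the LAPJV augmentation step. For checking results on a small dense cost matrix, scipy solves the same assignment problem; this is not centrosome's API, just a convenient cross-check:

import numpy as np
from scipy.optimize import linear_sum_assignment

c = np.array([[4.0, 1.0, 3.0],
              [2.0, 0.0, 5.0],
              [3.0, 2.0, 2.0]])
row, col = linear_sum_assignment(c)   # col plays the role of x above
print(col, c[row, col].sum())         # [1 0 2] 5.0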
singnet/snet-cli
snet_cli/mpe_service_metadata.py
https://github.com/singnet/snet-cli/blob/1b5ac98cb9a64211c861ead9fcfe6208f2749032/snet_cli/mpe_service_metadata.py#L75-L83
def add_group(self, group_name, payment_address):
    """ Return new group_id in base64 """
    if (self.is_group_name_exists(group_name)):
        raise Exception("the group \"%s\" is already present" % str(group_name))
    group_id_base64 = base64.b64encode(secrets.token_bytes(32))
    self.m["groups"] += [{"group_name": group_name,
                          "group_id": group_id_base64.decode("ascii"),
                          "payment_address": payment_address}]
    return group_id_base64
Return new group_id in base64
python
train
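The group id above is 32 random bytes, base64-encoded; a quick standalone check of the encoding's shape:

import base64
import secrets

group_id = base64.b64encode(secrets.token_bytes(32))
print(len(group_id))              # 44 (base64 of 32 bytes, with padding)
print(group_id.decode("ascii"))   # e.g. 'mJ0K...=' -- random each run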
GPflow/GPflow
gpflow/models/sgpmc.py
https://github.com/GPflow/GPflow/blob/549394f0b1b0696c7b521a065e49bdae6e7acf27/gpflow/models/sgpmc.py#L80-L86
def _build_likelihood(self):
    """
    This function computes the optimal density for v, q*(v), up to a constant
    """
    # get the (marginals of) q(f): exactly predicting!
    fmean, fvar = self._build_predict(self.X, full_cov=False)
    return tf.reduce_sum(self.likelihood.variational_expectations(fmean, fvar, self.Y))
This function computes the optimal density for v, q*(v), up to a constant
python
train
klahnakoski/pyLibrary
mo_math/vendor/strangman/stats.py
https://github.com/klahnakoski/pyLibrary/blob/fa2dcbc48fda8d26999baef400e9a98149e0b982/mo_math/vendor/strangman/stats.py#L443-L460
def scoreatpercentile(inlist, percent):
    """
    Returns the score at a given percentile relative to the distribution
    given by inlist.

    Usage:   lscoreatpercentile(inlist,percent)
    """
    if percent > 1:
        print("\nDividing percent>1 by 100 in lscoreatpercentile().\n")
        percent = percent / 100.0
    targetcf = percent * len(inlist)
    h, lrl, binsize, extras = histogram(inlist)
    cumhist = cumsum(copy.deepcopy(h))
    for i in range(len(cumhist)):
        if cumhist[i] >= targetcf:
            break
    score = binsize * ((targetcf - cumhist[i - 1]) / float(h[i])) + (lrl + binsize * i)
    return score
Returns the score at a given percentile relative to the distribution given by inlist. Usage: lscoreatpercentile(inlist,percent)
python
train
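histogram and cumsum come from the same strangman module, so the snippet above is not standalone. For comparison, numpy computes a percentile by interpolating over the sorted values directly, so the two methods agree only approximately:

import numpy as np

data = [2, 3, 5, 7, 11, 13, 17, 19]
print(np.percentile(data, 25))   # 4.5 -- direct interpolation
# scoreatpercentile(data, 25) estimates the same quantity from a
# histogram CDF and will generally differ slightly.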
titusjan/argos
argos/repo/repotreemodel.py
https://github.com/titusjan/argos/blob/20d0a3cae26c36ea789a5d219c02ca7df21279dd/argos/repo/repotreemodel.py#L53-L113
def itemData(self, treeItem, column, role=Qt.DisplayRole):
    """ Returns the data stored under the given role for the item.
    """
    if role == Qt.DisplayRole:
        if column == self.COL_NODE_NAME:
            return treeItem.nodeName
        elif column == self.COL_NODE_PATH:
            return treeItem.nodePath
        elif column == self.COL_SHAPE:
            if treeItem.isSliceable:
                return " x ".join(str(elem) for elem in treeItem.arrayShape)
            else:
                return ""
        elif column == self.COL_IS_OPEN:
            # Only show for RTIs that actually open resources.
            # TODO: this must be clearer. Use CanFetchChildren? Set is Open to None by default?
            if treeItem.hasChildren():
                return str(treeItem.isOpen)
            else:
                return ""
        elif column == self.COL_ELEM_TYPE:
            return treeItem.elementTypeName
        elif column == self.COL_FILE_NAME:
            return treeItem.fileName if hasattr(treeItem, 'fileName') else ''
        elif column == self.COL_UNIT:
            return treeItem.unit
        elif column == self.COL_MISSING_DATA:
            return to_string(treeItem.missingDataValue, noneFormat='')  # empty str for Nones
        elif column == self.COL_RTI_TYPE:
            return type_name(treeItem)
        elif column == self.COL_EXCEPTION:
            return str(treeItem.exception) if treeItem.exception else ''
        else:
            raise ValueError("Invalid column: {}".format(column))

    elif role == Qt.ToolTipRole:
        if treeItem.exception:
            return str(treeItem.exception)
        if column == self.COL_NODE_NAME:
            return treeItem.nodePath  # Also path when hovering over the name
        elif column == self.COL_NODE_PATH:
            return treeItem.nodePath
        elif column == self.COL_SHAPE:
            if treeItem.isSliceable:
                return " x ".join(str(elem) for elem in treeItem.arrayShape)
            else:
                return ""
        elif column == self.COL_UNIT:
            return treeItem.unit
        elif column == self.COL_MISSING_DATA:
            return to_string(treeItem.missingDataValue, noneFormat='')  # empty str for Nones
        elif column == self.COL_RTI_TYPE:
            return type_name(treeItem)
        elif column == self.COL_ELEM_TYPE:
            return treeItem.elementTypeName
        elif column == self.COL_FILE_NAME:
            return treeItem.fileName if hasattr(treeItem, 'fileName') else ''
        else:
            return None
    else:
        return super(RepoTreeModel, self).itemData(treeItem, column, role=role)
Returns the data stored under the given role for the item.
python
train
SCIP-Interfaces/PySCIPOpt
examples/finished/prodmix_soco.py
https://github.com/SCIP-Interfaces/PySCIPOpt/blob/9c960b40d94a48b0304d73dbe28b467b9c065abe/examples/finished/prodmix_soco.py#L8-L35
def prodmix(I, K, a, p, epsilon, LB):
    """prodmix: robust production planning using soco
    Parameters:
        I - set of materials
        K - set of components
        a[i][k] - coef. matrix
        p[i] - price of material i
        LB[k] - amount needed for k
    Returns a model, ready to be solved.
    """
    model = Model("robust product mix")

    x, rhs = {}, {}
    for i in I:
        x[i] = model.addVar(vtype="C", name="x(%s)" % i)
    for k in K:
        rhs[k] = model.addVar(vtype="C", name="rhs(%s)" % k)

    model.addCons(quicksum(x[i] for i in I) == 1)
    for k in K:
        model.addCons(rhs[k] == -LB[k] + quicksum(a[i, k] * x[i] for i in I))
        model.addCons(quicksum(epsilon * epsilon * x[i] * x[i] for i in I) <= rhs[k] * rhs[k])

    model.setObjective(quicksum(p[i] * x[i] for i in I), "minimize")

    model.data = x, rhs
    return model
prodmix: robust production planning using soco Parameters: I - set of materials K - set of components a[i][k] - coef. matrix p[i] - price of material i LB[k] - amount needed for k Returns a model, ready to be solved.
python
train
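A hypothetical usage sketch: the data below is made up (the real example file ships its own), and prodmix needs Model and quicksum from pyscipopt in scope.

from pyscipopt import Model, quicksum  # required by prodmix above

# Hypothetical data: two materials, one component requirement.
I = [1, 2]
K = [1]
a = {(1, 1): 0.25, (2, 1): 0.45}   # content of component 1 in each material
p = {1: 5.0, 2: 6.0}               # material prices
LB = {1: 0.3}                      # required amount of component 1

model = prodmix(I, K, a, p, epsilon=0.01, LB=LB)
model.optimize()
x, rhs = model.data
print({i: model.getVal(x[i]) for i in I}, model.getObjVal())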
Fantomas42/django-blog-zinnia
zinnia/views/channels.py
https://github.com/Fantomas42/django-blog-zinnia/blob/b4949304b104a8e1a7a7a0773cbfd024313c3a15/zinnia/views/channels.py#L25-L31
def get_context_data(self, **kwargs):
    """
    Add query in context.
    """
    context = super(BaseEntryChannel, self).get_context_data(**kwargs)
    context.update({'query': self.query})
    return context
Add query in context.
python
train
mlperf/training
reinforcement/tensorflow/minigo/dual_net.py
https://github.com/mlperf/training/blob/1c6ae725a81d15437a2b2df05cac0673fde5c3a4/reinforcement/tensorflow/minigo/dual_net.py#L182-L188
def initialize_weights(self, save_file):
    """Initialize the weights from the given save_file.
    Assumes that the graph has been constructed, and the
    save_file contains weights that match the graph. Used
    to set the weights to a different version of the player
    without redefining the entire graph."""
    tf.train.Saver().restore(self.sess, save_file)
Initialize the weights from the given save_file. Assumes that the graph has been constructed, and the save_file contains weights that match the graph. Used to set the weights to a different version of the player without redefining the entire graph.
python
train
CellProfiler/centrosome
centrosome/filter.py
https://github.com/CellProfiler/centrosome/blob/7bd9350a2d4ae1b215b81eabcecfe560bbb1f32a/centrosome/filter.py#L1387-L1425
def dot_n(x, y):
    '''given two tensors N x I x K and N x K x J return N dot products

    If either x or y is 2-dimensional, broadcast it over all N.

    Dot products are size N x I x J.

    Example:
        x = np.array([[[1,2], [3,4], [5,6]], [[7,8], [9,10], [11,12]]])
        y = np.array([[[1,2,3], [4,5,6]], [[7,8,9], [10,11,12]]])
        print dot_n(x,y)
        array([[[  9,  12,  15],
                [ 19,  26,  33],
                [ 29,  40,  51]],
               [[129, 144, 159],
                [163, 182, 201],
                [197, 220, 243]]])
    '''
    if x.ndim == 2:
        if y.ndim == 2:
            return np.dot(x, y)
        x3 = False
        y3 = True
        nlen = y.shape[0]
    elif y.ndim == 2:
        nlen = x.shape[0]
        x3 = True
        y3 = False
    else:
        assert x.shape[0] == y.shape[0]
        nlen = x.shape[0]
        x3 = True
        y3 = True
    assert x.shape[1 + x3] == y.shape[0 + y3]
    n, i, j, k = np.mgrid[0:nlen, 0:x.shape[0 + x3], 0:y.shape[1 + y3], 0:y.shape[0 + y3]]
    return np.sum((x[n, i, k] if x3 else x[i, k]) *
                  (y[n, k, j] if y3 else y[k, j]), 3)
given two tensors N x I x K and N x K x J return N dot products If either x or y is 2-dimensional, broadcast it over all N. Dot products are size N x I x J. Example: x = np.array([[[1,2], [3,4], [5,6]],[[7,8], [9,10],[11,12]]]) y = np.array([[[1,2,3], [4,5,6]],[[7,8,9],[10,11,12]]]) print dot_n(x,y) array([[[ 9, 12, 15], [ 19, 26, 33], [ 29, 40, 51]], [[129, 144, 159], [163, 182, 201], [197, 220, 243]]])
python
train
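For the fully 3-D case, dot_n is just a batched matrix product; numpy's matmul/einsum reproduce the docstring's example (not centrosome's code, only an equivalence check):

import numpy as np

x = np.array([[[1, 2], [3, 4], [5, 6]], [[7, 8], [9, 10], [11, 12]]])
y = np.array([[[1, 2, 3], [4, 5, 6]], [[7, 8, 9], [10, 11, 12]]])
out = np.matmul(x, y)                 # same result as dot_n(x, y) here
print(np.array_equal(out, np.einsum('nik,nkj->nij', x, y)))  # True
print(out[0])   # [[ 9 12 15] [19 26 33] [29 40 51]], as in the docstring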
project-rig/rig
rig/machine_control/scp_connection.py
https://github.com/project-rig/rig/blob/3a3e053d3214899b6d68758685835de0afd5542b/rig/machine_control/scp_connection.py#L372-L419
def write(self, buffer_size, window_size, x, y, p, address, data):
    """Write a bytestring to an address in memory.

    ..note::
        This method is included here to maintain API compatibility with an
        `alternative implementation of SCP
        <https://github.com/project-rig/rig-scp>`_.

    Parameters
    ----------
    buffer_size : int
        Number of bytes held in an SCP buffer by SARK, determines how many
        bytes will be expected in a socket and how many bytes will be
        written in each packet.
    window_size : int
    x : int
    y : int
    p : int
    address : int
        The address at which to start writing the data. Addresses are given
        within the address space of a SpiNNaker core. See the SpiNNaker
        datasheet for more information.
    data : :py:class:`bytes`
        Data to write into memory. Writes are automatically broken into a
        sequence of SCP write commands.
    """
    # While there is still data perform a write: get the block to write
    # this time around, determine the data type, perform the write and
    # increment the address
    def packets(address, data):
        end = len(data)
        pos = 0
        while pos < end:
            block = data[pos:pos + buffer_size]
            block_size = len(block)

            dtype = consts.address_length_dtype[(address % 4, block_size % 4)]

            yield scpcall(x, y, p, consts.SCPCommands.write,
                          address, block_size, dtype, block)

            address += block_size
            pos += block_size

    # Run the event loop and then return the retrieved data
    self.send_scp_burst(buffer_size, window_size, list(packets(address, data)))
Write a bytestring to an address in memory. ..note:: This method is included here to maintain API compatibility with an `alternative implementation of SCP <https://github.com/project-rig/rig-scp>`_. Parameters ---------- buffer_size : int Number of bytes held in an SCP buffer by SARK, determines how many bytes will be expected in a socket and how many bytes will be written in each packet. window_size : int x : int y : int p : int address : int The address at which to start writing the data. Addresses are given within the address space of a SpiNNaker core. See the SpiNNaker datasheet for more information. data : :py:class:`bytes` Data to write into memory. Writes are automatically broken into a sequence of SCP write commands.
python
train
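The heart of the method is the chunking generator; below is a standalone version of just that bookkeeping, with a hypothetical address and payload:

# Standalone sketch of the chunking done by packets() above.
def chunks(address, data, buffer_size):
    pos = 0
    while pos < len(data):
        block = data[pos:pos + buffer_size]
        yield address, block
        address += len(block)
        pos += len(block)

print(list(chunks(0x60000000, b'abcdefghij', 4)))
# [(1610612736, b'abcd'), (1610612740, b'efgh'), (1610612744, b'ij')]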
ynop/audiomate
audiomate/corpus/corpus.py
https://github.com/ynop/audiomate/blob/61727920b23a708293c3d526fa3000d4de9c6c21/audiomate/corpus/corpus.py#L168-L196
def import_tracks(self, import_tracks):
    """
    Add the given tracks/track to the corpus.
    If any of the given track-ids already exists, a suffix is appended so it is unique.

    Args:
        import_tracks (list): Either a list of or a single :py:class:`audiomate.tracks.Track`.

    Returns:
        dict: A dictionary containing track-idx mappings (old-track-idx/track-instance).
              If a track is imported, whose idx already exists this mapping can be used to check the new id.
    """
    if isinstance(import_tracks, tracks.Track):
        import_tracks = [import_tracks]

    idx_mapping = {}

    for track in import_tracks:
        idx_mapping[track.idx] = track

        # Add index to idx if already existing
        if track.idx in self._tracks.keys():
            track.idx = naming.index_name_if_in_list(track.idx, self._tracks.keys())

        self._tracks[track.idx] = track

    return idx_mapping
Add the given tracks/track to the corpus. If any of the given track-ids already exists, a suffix is appended so it is unique. Args: import_tracks (list): Either a list of or a single :py:class:`audiomate.tracks.Track`. Returns: dict: A dictionary containing track-idx mappings (old-track-idx/track-instance). If a track is imported, whose idx already exists this mapping can be used to check the new id.
python
train
Opentrons/opentrons
api/src/opentrons/legacy_api/robot/robot.py
https://github.com/Opentrons/opentrons/blob/a7c15cc2636ecb64ab56c7edc1d8a57163aaeadf/api/src/opentrons/legacy_api/robot/robot.py#L162-L174
def clear_tips(self):
    """
    If reset is called with a tip attached, the tip must be removed
    before the poses and _instruments members are cleared. If the tip is
    not removed, the effective length of the pipette remains increased
    by the length of the tip, and subsequent `_add_tip` calls will
    increase the length in addition to this. This should be fixed by
    changing pose tracking so that it tracks the tip as a separate node
    rather than adding and subtracting the tip length to the pipette
    length.
    """
    for instrument in self._instruments.values():
        if instrument.tip_attached:
            instrument._remove_tip(instrument._tip_length)
If reset is called with a tip attached, the tip must be removed before the poses and _instruments members are cleared. If the tip is not removed, the effective length of the pipette remains increased by the length of the tip, and subsequent `_add_tip` calls will increase the length in addition to this. This should be fixed by changing pose tracking so that it tracks the tip as a separate node rather than adding and subtracting the tip length to the pipette length.
python
train
pywbem/pywbem
pywbem/tupleparse.py
https://github.com/pywbem/pywbem/blob/e54ecb82c2211e289a268567443d60fdd489f1e4/pywbem/tupleparse.py#L1772-L1781
def parse_multirsp(self, tup_tree):   # pylint: disable=unused-argument
    """
    This function is not implemented, because this request is generally
    not implemented. It will probably never be implemented.
    """
    raise CIMXMLParseError(
        _format("Internal Error: Parsing support for element {0!A} is not "
                "implemented", name(tup_tree)),
        conn_id=self.conn_id)
This function is not implemented, because this request is generally not implemented. It will probably never be implemented.
python
train
QualiSystems/vCenterShell
package/cloudshell/cp/vcenter/common/vcenter/vmomi_service.py
https://github.com/QualiSystems/vCenterShell/blob/e2e24cd938a92a68f4a8e6a860810d3ef72aae6d/package/cloudshell/cp/vcenter/common/vcenter/vmomi_service.py#L245-L304
def get_folder(self, si, path, root=None):
    """
    Finds folder in the vCenter or returns "None"

    :param si: pyvmomi 'ServiceInstance'
    :param path: the path to find the object ('dc' or 'dc/folder' or 'dc/folder/folder/etc...')
    """
    search_index = si.content.searchIndex
    sub_folder = root if root else si.content.rootFolder
    if not path:
        return sub_folder
    paths = [p for p in path.split("/") if p]
    child = None
    try:
        new_root = search_index.FindChild(sub_folder, paths[0])
        if new_root:
            child = self.get_folder(si, '/'.join(paths[1:]), new_root)
    except:
        child = None

    if child is None and hasattr(sub_folder, self.ChildEntity):
        new_root = search_index.FindChild(sub_folder, paths[0])
        if new_root:
            child = self.get_folder(si, '/'.join(paths[1:]), new_root)

    if child is None and hasattr(sub_folder, self.VM):
        new_root = search_index.FindChild(sub_folder.vmFolder, paths[0])
        if new_root:
            child = self.get_folder(si, '/'.join(paths[1:]), new_root)

    if child is None and hasattr(sub_folder, self.Datastore):
        new_root = search_index.FindChild(sub_folder.datastoreFolder, paths[0])
        if new_root:
            child = self.get_folder(si, '/'.join(paths[1:]), new_root)

    if child is None and hasattr(sub_folder, self.Network):
        new_root = search_index.FindChild(sub_folder.networkFolder, paths[0])
        if new_root:
            child = self.get_folder(si, '/'.join(paths[1:]), new_root)

    if child is None and hasattr(sub_folder, self.Host):
        new_root = search_index.FindChild(sub_folder.hostFolder, paths[0])
        if new_root:
            child = self.get_folder(si, '/'.join(paths[1:]), new_root)

    if child is None and hasattr(sub_folder, self.Datacenter):
        new_root = search_index.FindChild(sub_folder.datacenterFolder, paths[0])
        if new_root:
            child = self.get_folder(si, '/'.join(paths[1:]), new_root)

    if child is None and hasattr(sub_folder, 'resourcePool'):
        new_root = search_index.FindChild(sub_folder.resourcePool, paths[0])
        if new_root:
            child = self.get_folder(si, '/'.join(paths[1:]), new_root)

    return child
Finds folder in the vCenter or returns "None" :param si: pyvmomi 'ServiceInstance' :param path: the path to find the object ('dc' or 'dc/folder' or 'dc/folder/folder/etc...')
python
train
rsgalloway/grit
grit/repo/version.py
https://github.com/rsgalloway/grit/blob/e6434ad8a1f4ac5d0903ebad630c81f8a5164d78/grit/repo/version.py#L224-L231
def versions(self, rev=None, index=None):
    """:return: List of Versions for this Item"""
    raise NotImplementedError
    # NOTE: the code below is unreachable because of the raise above.
    _revisions = [line.split()[0] for line in self.log.split('\n') if line]
    _versions = [Version(self.obj.repo.commit(r)) for r in _revisions
                 if rev is None or r.startswith(rev)]
    if index is not None and len(_versions) > index:
        _versions = _versions[index]
    return _versions
:return: List of Versions for this Item
python
train
saltstack/salt
salt/modules/napalm_mod.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/napalm_mod.py#L1533-L1571
def config_merge_tree(source='running',
                      merge_config=None,
                      merge_path=None,
                      saltenv='base'):
    '''
    .. versionadded:: 2019.2.0

    Return the merge tree of the ``initial_config`` with the ``merge_config``,
    as a Python dictionary.

    source: ``running``
        The configuration type to retrieve from the network device. Default:
        ``running``. Available options: ``running``, ``startup``, ``candidate``.

    merge_config
        The config to be merged into the initial config, sent as text. This
        argument is ignored when ``merge_path`` is set.

    merge_path
        Absolute or remote path from where to load the merge configuration
        text. This argument allows any URI supported by
        :py:func:`cp.get_url <salt.modules.cp.get_url>`), e.g., ``salt://``,
        ``https://``, ``s3://``, ``ftp:/``, etc.

    saltenv: ``base``
        Salt fileserver environment from which to retrieve the file.
        Ignored if ``merge_path`` is not a ``salt://`` URL.

    CLI Example:

    .. code-block:: bash

        salt '*' napalm.config_merge_tree merge_path=salt://path/to/merge.cfg
    '''
    config_txt = __salt__['net.config'](source=source)['out'][source]
    return __salt__['iosconfig.merge_tree'](initial_config=config_txt,
                                            merge_config=merge_config,
                                            merge_path=merge_path,
                                            saltenv=saltenv)
.. versionadded:: 2019.2.0 Return the merge tree of the ``initial_config`` with the ``merge_config``, as a Python dictionary. source: ``running`` The configuration type to retrieve from the network device. Default: ``running``. Available options: ``running``, ``startup``, ``candidate``. merge_config The config to be merged into the initial config, sent as text. This argument is ignored when ``merge_path`` is set. merge_path Absolute or remote path from where to load the merge configuration text. This argument allows any URI supported by :py:func:`cp.get_url <salt.modules.cp.get_url>`), e.g., ``salt://``, ``https://``, ``s3://``, ``ftp:/``, etc. saltenv: ``base`` Salt fileserver environment from which to retrieve the file. Ignored if ``merge_path`` is not a ``salt://`` URL. CLI Example: .. code-block:: bash salt '*' napalm.config_merge_tree merge_path=salt://path/to/merge.cfg
python
train
uw-it-aca/uw-restclients
restclients/trumba/calendar.py
https://github.com/uw-it-aca/uw-restclients/blob/e12dcd32bf5296b6ebdf71798031594afb7852cb/restclients/trumba/calendar.py#L249-L265
def _process_get_perm_resp(url, post_response, campus, calendarid):
    """
    :return: a list of trumba.Permission objects
             sorted by descending level and ascending uwnetid
             None if error, [] if not exists
    If the response is successful, process the response data
    and load into the return objects
    otherwise raise DataFailureException
    """
    request_id = "%s %s CalendarID:%s" % (campus, url, calendarid)
    data = _load_json(request_id, post_response)
    permission_list = []
    if data['d']['Users'] is not None and len(data['d']['Users']) > 0:
        _load_permissions(campus, calendarid, data['d']['Users'], permission_list)

    return sorted(permission_list)
:return: a list of trumba.Permission objects sorted by descending level and ascending uwnetid None if error, [] if not exists If the response is successful, process the response data and load into the return objects otherwise raise DataFailureException
python
train
KelSolaar/Umbra
umbra/ui/delegates.py
https://github.com/KelSolaar/Umbra/blob/66f45f08d9d723787f1191989f8b0dda84b412ce/umbra/ui/delegates.py#L156-L175
def paint(self, painter, option, index):
    """
    Reimplements the :meth:`QStyledItemDelegate.paint` method.
    """
    if option.state & QStyle.State_MouseOver:
        styleSheet = self.__style.hover
    elif option.state & QStyle.State_Selected:
        styleSheet = self.__style.highlight
    else:
        styleSheet = self.__style.default

    self.__label.setStyleSheet(styleSheet)
    data = index.model().data(index, Qt.DisplayRole)
    self.__label.setText(umbra.ui.common.QVariant_to_string(data))
    self.__label.setFixedSize(option.rect.size())
    painter.save()
    painter.translate(option.rect.topLeft())
    self.__label.render(painter)
    painter.restore()
[ "def", "paint", "(", "self", ",", "painter", ",", "option", ",", "index", ")", ":", "if", "option", ".", "state", "&", "QStyle", ".", "State_MouseOver", ":", "styleSheet", "=", "self", ".", "__style", ".", "hover", "elif", "option", ".", "state", "&", "QStyle", ".", "State_Selected", ":", "styleSheet", "=", "self", ".", "__style", ".", "highlight", "else", ":", "styleSheet", "=", "self", ".", "__style", ".", "default", "self", ".", "__label", ".", "setStyleSheet", "(", "styleSheet", ")", "data", "=", "index", ".", "model", "(", ")", ".", "data", "(", "index", ",", "Qt", ".", "DisplayRole", ")", "self", ".", "__label", ".", "setText", "(", "umbra", ".", "ui", ".", "common", ".", "QVariant_to_string", "(", "data", ")", ")", "self", ".", "__label", ".", "setFixedSize", "(", "option", ".", "rect", ".", "size", "(", ")", ")", "painter", ".", "save", "(", ")", "painter", ".", "translate", "(", "option", ".", "rect", ".", "topLeft", "(", ")", ")", "self", ".", "__label", ".", "render", "(", "painter", ")", "painter", ".", "restore", "(", ")" ]
Reimplements the :meth:`QStyledItemDelegate.paint` method.
[ "Reimplements", "the", ":", "meth", ":", "QStyledItemDelegate", ".", "paint", "method", "." ]
python
train
Alignak-monitoring/alignak
alignak/external_command.py
https://github.com/Alignak-monitoring/alignak/blob/f3c145207e83159b799d3714e4241399c7740a64/alignak/external_command.py#L2789-L2803
def enable_host_notifications(self, host): """Enable notifications for a host Format of the line that triggers function call:: ENABLE_HOST_NOTIFICATIONS;<host_name> :param host: host to edit :type host: alignak.objects.host.Host :return: None """ if not host.notifications_enabled: host.modified_attributes |= \ DICT_MODATTR["MODATTR_NOTIFICATIONS_ENABLED"].value host.notifications_enabled = True self.send_an_element(host.get_update_status_brok())
[ "def", "enable_host_notifications", "(", "self", ",", "host", ")", ":", "if", "not", "host", ".", "notifications_enabled", ":", "host", ".", "modified_attributes", "|=", "DICT_MODATTR", "[", "\"MODATTR_NOTIFICATIONS_ENABLED\"", "]", ".", "value", "host", ".", "notifications_enabled", "=", "True", "self", ".", "send_an_element", "(", "host", ".", "get_update_status_brok", "(", ")", ")" ]
Enable notifications for a host Format of the line that triggers function call:: ENABLE_HOST_NOTIFICATIONS;<host_name> :param host: host to edit :type host: alignak.objects.host.Host :return: None
[ "Enable", "notifications", "for", "a", "host", "Format", "of", "the", "line", "that", "triggers", "function", "call", "::" ]
python
train
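For context, this handler is triggered by a Nagios-style external command line; a sketch of how such a line is built (the timestamp prefix and host name are assumptions, not taken from this record):

import time

host_name = 'server01'  # assumption: an existing monitored host
line = '[%d] ENABLE_HOST_NOTIFICATIONS;%s' % (int(time.time()), host_name)
print(line)  # e.g. [1552684800] ENABLE_HOST_NOTIFICATIONS;server01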
vcatalano/py-authorize
authorize/apis/authorize_api.py
https://github.com/vcatalano/py-authorize/blob/4d000b5a1ff2d8e7e955b83dab9d6c6a495c2851/authorize/apis/authorize_api.py#L46-L51
def _base_request(self, method): """Factory method for generating the base XML requests.""" request = E.Element(method) request.set('xmlns', 'AnetApi/xml/v1/schema/AnetApiSchema.xsd') request.append(self.client_auth) return request
[ "def", "_base_request", "(", "self", ",", "method", ")", ":", "request", "=", "E", ".", "Element", "(", "method", ")", "request", ".", "set", "(", "'xmlns'", ",", "'AnetApi/xml/v1/schema/AnetApiSchema.xsd'", ")", "request", ".", "append", "(", "self", ".", "client_auth", ")", "return", "request" ]
Factory method for generating the base XML requests.
[ "Factory", "method", "for", "generating", "the", "base", "XML", "requests", "." ]
python
train
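A minimal sketch of what this factory produces, assuming ``E`` is an ElementTree-style module and substituting a plain element for ``self.client_auth`` (the method name is also an assumption):

import xml.etree.ElementTree as E  # assumption: E is an ElementTree-like alias

request = E.Element('createTransactionRequest')  # example method name
request.set('xmlns', 'AnetApi/xml/v1/schema/AnetApiSchema.xsd')
E.SubElement(request, 'merchantAuthentication')  # stands in for client_auth
print(E.tostring(request).decode())
# <createTransactionRequest xmlns="AnetApi/xml/v1/schema/AnetApiSchema.xsd"><merchantAuthentication /></createTransactionRequest>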
priestc/moneywagon
moneywagon/__init__.py
https://github.com/priestc/moneywagon/blob/00518f1f557dcca8b3031f46d3564c2baa0227a3/moneywagon/__init__.py#L414-L438
def guess_currency_from_address(address): """ Given a crypto address, find which currency it likely belongs to. Raises an exception if it can't find a match. Raises exception if address is invalid. """ if is_py2: fixer = lambda x: int(x.encode('hex'), 16) else: fixer = lambda x: x # does nothing first_byte = fixer(b58decode_check(address)[0]) double_first_byte = fixer(b58decode_check(address)[:2]) hits = [] for currency, data in crypto_data.items(): if hasattr(data, 'get'): # skip incomplete data listings version = data.get('address_version_byte', None) if version is not None and version in [double_first_byte, first_byte]: hits.append([currency, data['name']]) if hits: return hits raise ValueError("Unknown Currency with first byte: %s" % first_byte)
[ "def", "guess_currency_from_address", "(", "address", ")", ":", "if", "is_py2", ":", "fixer", "=", "lambda", "x", ":", "int", "(", "x", ".", "encode", "(", "'hex'", ")", ",", "16", ")", "else", ":", "fixer", "=", "lambda", "x", ":", "x", "# does nothing", "first_byte", "=", "fixer", "(", "b58decode_check", "(", "address", ")", "[", "0", "]", ")", "double_first_byte", "=", "fixer", "(", "b58decode_check", "(", "address", ")", "[", ":", "2", "]", ")", "hits", "=", "[", "]", "for", "currency", ",", "data", "in", "crypto_data", ".", "items", "(", ")", ":", "if", "hasattr", "(", "data", ",", "'get'", ")", ":", "# skip incomplete data listings", "version", "=", "data", ".", "get", "(", "'address_version_byte'", ",", "None", ")", "if", "version", "is", "not", "None", "and", "version", "in", "[", "double_first_byte", ",", "first_byte", "]", ":", "hits", ".", "append", "(", "[", "currency", ",", "data", "[", "'name'", "]", "]", ")", "if", "hits", ":", "return", "hits", "raise", "ValueError", "(", "\"Unknown Currency with first byte: %s\"", "%", "first_byte", ")" ]
Given a crypto address, find which currency it likely belongs to. Raises an exception if it can't find a match. Raises exception if address is invalid.
[ "Given", "a", "crypto", "address", "find", "which", "currency", "it", "likely", "belongs", "to", ".", "Raises", "an", "exception", "if", "it", "can", "t", "find", "a", "match", ".", "Raises", "exception", "if", "address", "is", "invalid", "." ]
python
train
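A usage sketch (the address is the well-known Bitcoin genesis address; the exact hit list depends on the installed ``crypto_data``):

from moneywagon import guess_currency_from_address

hits = guess_currency_from_address('1A1zP1eP5QGefi2DMPTfTL5SLmv7DivfNa')
print(hits)  # e.g. [['btc', 'Bitcoin']] -- a list of [code, name] pairs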
broadinstitute/fiss
firecloud/api.py
https://github.com/broadinstitute/fiss/blob/dddf91547479506dbbafb69ec84d44dcc4a94ab4/firecloud/api.py#L1235-L1269
def clone_workspace(from_namespace, from_workspace, to_namespace, to_workspace, authorizationDomain=""): """Clone a FireCloud workspace. A clone is a shallow copy of a FireCloud workspace, enabling easy sharing of data, such as TCGA data, without duplication. Args: from_namespace (str): project (namespace) to which source workspace belongs from_workspace (str): Source workspace's name to_namespace (str): project to which target workspace belongs to_workspace (str): Target workspace's name authorizationDomain: (str) required authorization domains Swagger: https://api.firecloud.org/#!/Workspaces/cloneWorkspace """ if authorizationDomain: if isinstance(authorizationDomain, string_types): authDomain = [{"membersGroupName": authorizationDomain}] else: authDomain = [{"membersGroupName": authDomain} for authDomain in authorizationDomain] else: authDomain = [] body = { "namespace": to_namespace, "name": to_workspace, "attributes": dict(), "authorizationDomain": authDomain, } uri = "workspaces/{0}/{1}/clone".format(from_namespace, from_workspace) return __post(uri, json=body)
[ "def", "clone_workspace", "(", "from_namespace", ",", "from_workspace", ",", "to_namespace", ",", "to_workspace", ",", "authorizationDomain", "=", "\"\"", ")", ":", "if", "authorizationDomain", ":", "if", "isinstance", "(", "authorizationDomain", ",", "string_types", ")", ":", "authDomain", "=", "[", "{", "\"membersGroupName\"", ":", "authorizationDomain", "}", "]", "else", ":", "authDomain", "=", "[", "{", "\"membersGroupName\"", ":", "authDomain", "}", "for", "authDomain", "in", "authorizationDomain", "]", "else", ":", "authDomain", "=", "[", "]", "body", "=", "{", "\"namespace\"", ":", "to_namespace", ",", "\"name\"", ":", "to_workspace", ",", "\"attributes\"", ":", "dict", "(", ")", ",", "\"authorizationDomain\"", ":", "authDomain", ",", "}", "uri", "=", "\"workspaces/{0}/{1}/clone\"", ".", "format", "(", "from_namespace", ",", "from_workspace", ")", "return", "__post", "(", "uri", ",", "json", "=", "body", ")" ]
Clone a FireCloud workspace. A clone is a shallow copy of a FireCloud workspace, enabling easy sharing of data, such as TCGA data, without duplication. Args: from_namespace (str): project (namespace) to which source workspace belongs from_workspace (str): Source workspace's name to_namespace (str): project to which target workspace belongs to_workspace (str): Target workspace's name authorizationDomain: (str) required authorization domains Swagger: https://api.firecloud.org/#!/Workspaces/cloneWorkspace
[ "Clone", "a", "FireCloud", "workspace", "." ]
python
train
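A usage sketch; the project and workspace names are assumptions, and the call needs FireCloud credentials configured:

from firecloud import api as fapi

resp = fapi.clone_workspace('broad-project', 'tcga-workspace',
                            'my-project', 'tcga-workspace-copy')
print(resp.status_code)  # typically 201 on success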
nosegae/NoseGAE
examples/modules_example/printenv.py
https://github.com/nosegae/NoseGAE/blob/fca9fab22b480bb9721ecaa0967a636107648d92/examples/modules_example/printenv.py#L33-L43
def html_for_env_var(key): """Returns an HTML snippet for an environment variable. Args: key: A string representing an environment variable name. Returns: String HTML representing the value and variable. """ value = os.getenv(key) return KEY_VALUE_TEMPLATE.format(key, value)
[ "def", "html_for_env_var", "(", "key", ")", ":", "value", "=", "os", ".", "getenv", "(", "key", ")", "return", "KEY_VALUE_TEMPLATE", ".", "format", "(", "key", ",", "value", ")" ]
Returns an HTML snippet for an environment variable. Args: key: A string representing an environment variable name. Returns: String HTML representing the value and variable.
[ "Returns", "an", "HTML", "snippet", "for", "an", "environment", "variable", "." ]
python
train
wandb/client
wandb/vendor/prompt_toolkit/interface.py
https://github.com/wandb/client/blob/7d08954ed5674fee223cd85ed0d8518fe47266b2/wandb/vendor/prompt_toolkit/interface.py#L350-L361
def _redraw(self): """ Render the command line again. (Not thread safe!) (From other threads, or if unsure, use :meth:`.CommandLineInterface.invalidate`.) """ # Only draw when no sub application was started. if self._is_running and self._sub_cli is None: self.render_counter += 1 self.renderer.render(self, self.layout, is_done=self.is_done) # Fire render event. self.on_render.fire()
[ "def", "_redraw", "(", "self", ")", ":", "# Only draw when no sub application was started.", "if", "self", ".", "_is_running", "and", "self", ".", "_sub_cli", "is", "None", ":", "self", ".", "render_counter", "+=", "1", "self", ".", "renderer", ".", "render", "(", "self", ",", "self", ".", "layout", ",", "is_done", "=", "self", ".", "is_done", ")", "# Fire render event.", "self", ".", "on_render", ".", "fire", "(", ")" ]
Render the command line again. (Not thread safe!) (From other threads, or if unsure, use :meth:`.CommandLineInterface.invalidate`.)
[ "Render", "the", "command", "line", "again", ".", "(", "Not", "thread", "safe!", ")", "(", "From", "other", "threads", "or", "if", "unsure", "use", ":", "meth", ":", ".", "CommandLineInterface", ".", "invalidate", ".", ")" ]
python
train
bkg/django-spillway
spillway/query.py
https://github.com/bkg/django-spillway/blob/c488a62642430b005f1e0d4a19e160d8d5964b67/spillway/query.py#L265-L284
def zipfiles(self, path=None, arcdirname='data'): """Returns a .zip archive of selected rasters.""" if path: fp = open(path, 'w+b') else: prefix = '%s-' % arcdirname fp = tempfile.NamedTemporaryFile(prefix=prefix, suffix='.zip') with zipfile.ZipFile(fp, mode='w') as zf: for obj in self: img = obj.image arcname = os.path.join(arcdirname, os.path.basename(img.name)) try: zf.write(img.path, arcname=arcname) except OSError: img.seek(0) zf.writestr(arcname, img.read()) img.close() fp.seek(0) zobj = self.model(image=fp) return [zobj]
[ "def", "zipfiles", "(", "self", ",", "path", "=", "None", ",", "arcdirname", "=", "'data'", ")", ":", "if", "path", ":", "fp", "=", "open", "(", "path", ",", "'w+b'", ")", "else", ":", "prefix", "=", "'%s-'", "%", "arcdirname", "fp", "=", "tempfile", ".", "NamedTemporaryFile", "(", "prefix", "=", "prefix", ",", "suffix", "=", "'.zip'", ")", "with", "zipfile", ".", "ZipFile", "(", "fp", ",", "mode", "=", "'w'", ")", "as", "zf", ":", "for", "obj", "in", "self", ":", "img", "=", "obj", ".", "image", "arcname", "=", "os", ".", "path", ".", "join", "(", "arcdirname", ",", "os", ".", "path", ".", "basename", "(", "img", ".", "name", ")", ")", "try", ":", "zf", ".", "write", "(", "img", ".", "path", ",", "arcname", "=", "arcname", ")", "except", "OSError", ":", "img", ".", "seek", "(", "0", ")", "zf", ".", "writestr", "(", "arcname", ",", "img", ".", "read", "(", ")", ")", "img", ".", "close", "(", ")", "fp", ".", "seek", "(", "0", ")", "zobj", "=", "self", ".", "model", "(", "image", "=", "fp", ")", "return", "[", "zobj", "]" ]
Returns a .zip archive of selected rasters.
[ "Returns", "a", ".", "zip", "archive", "of", "selected", "rasters", "." ]
python
train
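A hypothetical usage sketch; ``RasterStore`` stands in for whatever Django model is backed by this spillway queryset:

from myapp.models import RasterStore  # hypothetical spillway-backed model

zobjs = RasterStore.objects.all().zipfiles(path='/tmp/rasters.zip')
archive = zobjs[0].image  # unsaved model instance wrapping the .zip file object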
iotile/coretools
transport_plugins/native_ble/iotile_transport_native_ble/device_adapter.py
https://github.com/iotile/coretools/blob/2d794f5f1346b841b0dcd16c9d284e9bf2f3c6ec/transport_plugins/native_ble/iotile_transport_native_ble/device_adapter.py#L649-L675
def _on_interface_opened(self, success, result, failure_reason, context, next_characteristic=None): """Callback function called when the notification related to an interface has been enabled. It is executed in the baBLE working thread: should not be blocking. Args: success (bool): A bool indicating that the operation is successful or not result (dict): Information (if successful) failure_reason (any): An object indicating the reason why the operation is not successful (else None) context (dict): The connection context next_characteristic (bable_interface.Characteristic): If not None, indicate another characteristic to enable notification. """ if not success: self.connections.finish_operation(context['connection_id'], False, failure_reason) return if next_characteristic is not None: self.bable.set_notification( enabled=True, connection_handle=context['connection_handle'], characteristic=next_characteristic, on_notification_set=[self._on_interface_opened, context], on_notification_received=self._on_notification_received, sync=False ) else: self.connections.finish_operation(context['connection_id'], True, None)
[ "def", "_on_interface_opened", "(", "self", ",", "success", ",", "result", ",", "failure_reason", ",", "context", ",", "next_characteristic", "=", "None", ")", ":", "if", "not", "success", ":", "self", ".", "connections", ".", "finish_operation", "(", "context", "[", "'connection_id'", "]", ",", "False", ",", "failure_reason", ")", "return", "if", "next_characteristic", "is", "not", "None", ":", "self", ".", "bable", ".", "set_notification", "(", "enabled", "=", "True", ",", "connection_handle", "=", "context", "[", "'connection_handle'", "]", ",", "characteristic", "=", "next_characteristic", ",", "on_notification_set", "=", "[", "self", ".", "_on_interface_opened", ",", "context", "]", ",", "on_notification_received", "=", "self", ".", "_on_notification_received", ",", "sync", "=", "False", ")", "else", ":", "self", ".", "connections", ".", "finish_operation", "(", "context", "[", "'connection_id'", "]", ",", "True", ",", "None", ")" ]
Callback function called when the notification related to an interface has been enabled. It is executed in the baBLE working thread: should not be blocking. Args: success (bool): A bool indicating that the operation is successful or not result (dict): Information (if successful) failure_reason (any): An object indicating the reason why the operation is not successful (else None) context (dict): The connection context next_characteristic (bable_interface.Characteristic): If not None, indicate another characteristic to enable notification.
[ "Callback", "function", "called", "when", "the", "notification", "related", "to", "an", "interface", "has", "been", "enabled", ".", "It", "is", "executed", "in", "the", "baBLE", "working", "thread", ":", "should", "not", "be", "blocking", "." ]
python
train
sdispater/orator
orator/query/grammars/mysql_grammar.py
https://github.com/sdispater/orator/blob/bd90bf198ee897751848f9a92e49d18e60a74136/orator/query/grammars/mysql_grammar.py#L25-L40
def compile_select(self, query): """ Compile a select query into SQL :param query: A QueryBuilder instance :type query: QueryBuilder :return: The compiled sql :rtype: str """ sql = super(MySQLQueryGrammar, self).compile_select(query) if query.unions: sql = "(%s) %s" % (sql, self._compile_unions(query)) return sql
[ "def", "compile_select", "(", "self", ",", "query", ")", ":", "sql", "=", "super", "(", "MySQLQueryGrammar", ",", "self", ")", ".", "compile_select", "(", "query", ")", "if", "query", ".", "unions", ":", "sql", "=", "\"(%s) %s\"", "%", "(", "sql", ",", "self", ".", "_compile_unions", "(", "query", ")", ")", "return", "sql" ]
Compile a select query into SQL :param query: A QueryBuilder instance :type query: QueryBuilder :return: The compiled sql :rtype: str
[ "Compile", "a", "select", "query", "into", "SQL" ]
python
train
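The union handling reduces to a single string-wrapping step; an illustrative stand-alone re-expression (the SQL strings are made up):

sql = "select * from `users`"
unions = "union (select * from `admins`)"
print("(%s) %s" % (sql, unions))
# (select * from `users`) union (select * from `admins`)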
inasafe/inasafe
safe/gui/widgets/dock.py
https://github.com/inasafe/inasafe/blob/831d60abba919f6d481dc94a8d988cc205130724/safe/gui/widgets/dock.py#L1265-L1282
def _search_inasafe_layer(self): """Search for an inasafe layer in an active group. :returns: A valid layer. :rtype: QgsMapLayer .. versionadded:: 4.3 """ selected_nodes = self.iface.layerTreeView().selectedNodes() for selected_node in selected_nodes: tree_layers = [ child for child in selected_node.children() if ( isinstance(child, QgsLayerTreeLayer))] for tree_layer in tree_layers: layer = tree_layer.layer() keywords = self.keyword_io.read_keywords(layer) if keywords.get('inasafe_fields'): return layer
[ "def", "_search_inasafe_layer", "(", "self", ")", ":", "selected_nodes", "=", "self", ".", "iface", ".", "layerTreeView", "(", ")", ".", "selectedNodes", "(", ")", "for", "selected_node", "in", "selected_nodes", ":", "tree_layers", "=", "[", "child", "for", "child", "in", "selected_node", ".", "children", "(", ")", "if", "(", "isinstance", "(", "child", ",", "QgsLayerTreeLayer", ")", ")", "]", "for", "tree_layer", "in", "tree_layers", ":", "layer", "=", "tree_layer", ".", "layer", "(", ")", "keywords", "=", "self", ".", "keyword_io", ".", "read_keywords", "(", "layer", ")", "if", "keywords", ".", "get", "(", "'inasafe_fields'", ")", ":", "return", "layer" ]
Search for an inasafe layer in an active group. :returns: A valid layer. :rtype: QgsMapLayer .. versionadded:: 4.3
[ "Search", "for", "an", "inasafe", "layer", "in", "an", "active", "group", "." ]
python
train
pklaus/brother_ql
brother_ql/cli.py
https://github.com/pklaus/brother_ql/blob/b551b1fc944873f3a2ead7032d144dfd81011e79/brother_ql/cli.py#L134-L147
def print_cmd(ctx, *args, **kwargs): """ Print a label of the provided IMAGE. """ backend = ctx.meta.get('BACKEND', 'pyusb') model = ctx.meta.get('MODEL') printer = ctx.meta.get('PRINTER') from brother_ql.conversion import convert from brother_ql.backends.helpers import send from brother_ql.raster import BrotherQLRaster qlr = BrotherQLRaster(model) qlr.exception_on_warning = True kwargs['cut'] = not kwargs['no_cut'] del kwargs['no_cut'] instructions = convert(qlr=qlr, **kwargs) send(instructions=instructions, printer_identifier=printer, backend_identifier=backend, blocking=True)
[ "def", "print_cmd", "(", "ctx", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "backend", "=", "ctx", ".", "meta", ".", "get", "(", "'BACKEND'", ",", "'pyusb'", ")", "model", "=", "ctx", ".", "meta", ".", "get", "(", "'MODEL'", ")", "printer", "=", "ctx", ".", "meta", ".", "get", "(", "'PRINTER'", ")", "from", "brother_ql", ".", "conversion", "import", "convert", "from", "brother_ql", ".", "backends", ".", "helpers", "import", "send", "from", "brother_ql", ".", "raster", "import", "BrotherQLRaster", "qlr", "=", "BrotherQLRaster", "(", "model", ")", "qlr", ".", "exception_on_warning", "=", "True", "kwargs", "[", "'cut'", "]", "=", "not", "kwargs", "[", "'no_cut'", "]", "del", "kwargs", "[", "'no_cut'", "]", "instructions", "=", "convert", "(", "qlr", "=", "qlr", ",", "*", "*", "kwargs", ")", "send", "(", "instructions", "=", "instructions", ",", "printer_identifier", "=", "printer", ",", "backend_identifier", "=", "backend", ",", "blocking", "=", "True", ")" ]
Print a label of the provided IMAGE.
[ "Print", "a", "label", "of", "the", "provided", "IMAGE", "." ]
python
train
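A sketch of the same pipeline without the Click wrapper; the model, label size, and printer identifier are assumptions:

from brother_ql.raster import BrotherQLRaster
from brother_ql.conversion import convert
from brother_ql.backends.helpers import send

qlr = BrotherQLRaster('QL-700')  # assumption: printer model
qlr.exception_on_warning = True
instructions = convert(qlr=qlr, images=['label.png'], label='62', cut=True)
send(instructions=instructions, printer_identifier='usb://0x04f9:0x2042',
     backend_identifier='pyusb', blocking=True)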
ContinuumIO/flask-ldap-login
flask_ldap_login/__init__.py
https://github.com/ContinuumIO/flask-ldap-login/blob/09a08be45f861823cb08f95883ee1e092a618c37/flask_ldap_login/__init__.py#L236-L262
def connect(self): 'initialize ldap connection and set options' log.debug("Connecting to ldap server %s" % self.config['URI']) self.conn = ldap.initialize(self.config['URI']) # There are some settings that can't be changed at runtime without a context restart. # It's possible to refresh the context and apply the settings by setting OPT_X_TLS_NEWCTX # to 0, but this needs to be the last option set, and since the config dictionary is not # sorted, this is not necessarily true. Sort the list of options so that if OPT_X_TLS_NEWCTX # is present, it is applied last. options = self.config.get('OPTIONS', {}).items() options.sort(key=lambda x: x[0] == 'OPT_X_TLS_NEWCTX') for opt, value in options: if isinstance(opt, str): opt = getattr(ldap, opt) try: if isinstance(value, str): value = getattr(ldap, value) except AttributeError: pass self.conn.set_option(opt, value) if self.config.get('START_TLS'): log.debug("Starting TLS") self.conn.start_tls_s()
[ "def", "connect", "(", "self", ")", ":", "log", ".", "debug", "(", "\"Connecting to ldap server %s\"", "%", "self", ".", "config", "[", "'URI'", "]", ")", "self", ".", "conn", "=", "ldap", ".", "initialize", "(", "self", ".", "config", "[", "'URI'", "]", ")", "# There are some settings that can't be changed at runtime without a context restart.", "# It's possible to refresh the context and apply the settings by setting OPT_X_TLS_NEWCTX", "# to 0, but this needs to be the last option set, and since the config dictionary is not", "# sorted, this is not necessarily true. Sort the list of options so that if OPT_X_TLS_NEWCTX", "# is present, it is applied last.", "options", "=", "self", ".", "config", ".", "get", "(", "'OPTIONS'", ",", "{", "}", ")", ".", "items", "(", ")", "options", ".", "sort", "(", "key", "=", "lambda", "x", ":", "x", "[", "0", "]", "==", "'OPT_X_TLS_NEWCTX'", ")", "for", "opt", ",", "value", "in", "options", ":", "if", "isinstance", "(", "opt", ",", "str", ")", ":", "opt", "=", "getattr", "(", "ldap", ",", "opt", ")", "try", ":", "if", "isinstance", "(", "value", ",", "str", ")", ":", "value", "=", "getattr", "(", "ldap", ",", "value", ")", "except", "AttributeError", ":", "pass", "self", ".", "conn", ".", "set_option", "(", "opt", ",", "value", ")", "if", "self", ".", "config", ".", "get", "(", "'START_TLS'", ")", ":", "log", ".", "debug", "(", "\"Starting TLS\"", ")", "self", ".", "conn", ".", "start_tls_s", "(", ")" ]
initialize ldap connection and set options
[ "initialize", "ldap", "connection", "and", "set", "options" ]
python
train
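Note that on Python 3 ``dict.items()`` returns a view without a ``.sort()`` method, so the ordering step above only runs on Python 2; a Python-3-safe sketch of the same ordering:

config = {'OPTIONS': {'OPT_X_TLS_NEWCTX': 0,
                      'OPT_X_TLS_REQUIRE_CERT': 'OPT_X_TLS_NEVER'}}
options = sorted(config['OPTIONS'].items(),
                 key=lambda item: item[0] == 'OPT_X_TLS_NEWCTX')
# OPT_X_TLS_NEWCTX sorts last because False < True in the sort key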
richardkiss/pycoin
pycoin/encoding/b58.py
https://github.com/richardkiss/pycoin/blob/1e8d0d9fe20ce0347b97847bb529cd1bd84c7442/pycoin/encoding/b58.py#L22-L25
def a2b_base58(s): """Convert base58 to binary using BASE58_ALPHABET.""" v, prefix = to_long(BASE58_BASE, lambda c: BASE58_LOOKUP[c], s.encode("utf8")) return from_long(v, prefix, 256, lambda x: x)
[ "def", "a2b_base58", "(", "s", ")", ":", "v", ",", "prefix", "=", "to_long", "(", "BASE58_BASE", ",", "lambda", "c", ":", "BASE58_LOOKUP", "[", "c", "]", ",", "s", ".", "encode", "(", "\"utf8\"", ")", ")", "return", "from_long", "(", "v", ",", "prefix", ",", "256", ",", "lambda", "x", ":", "x", ")" ]
Convert base58 to binary using BASE58_ALPHABET.
[ "Convert", "base58", "to", "binary", "using", "BASE58_ALPHABET", "." ]
python
train
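A usage sketch with the classic base58 test vector:

from pycoin.encoding.b58 import a2b_base58

print(a2b_base58('StV1DL6CwTryKyV'))  # b'hello world'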
protream/iquery
iquery/hospitals.py
https://github.com/protream/iquery/blob/7272e68af610f1dd63cf695209cfa44b75adc0e6/iquery/hospitals.py#L67-L99
def query(params): """`params` is a city name or a city name + hospital name. CLI: 1. query all Putian hospitals in a city: $ iquery -p 南京 +------+ | 南京 | +------+ |... | +------+ |... | +------+ ... 2. query whether a hospital in the city belongs to the Putian series; you can input just the hospital's short name: $ iquery -p 南京 曙光 +------------+ |南京曙光医院| +------------+ | True | +------------+ """ r = requests_get(QUERY_URL, verify=True) return HospitalCollection(r.json(), params)
[ "def", "query", "(", "params", ")", ":", "r", "=", "requests_get", "(", "QUERY_URL", ",", "verify", "=", "True", ")", "return", "HospitalCollection", "(", "r", ".", "json", "(", ")", ",", "params", ")" ]
`params` is a city name or a city name + hospital name. CLI: 1. query all Putian hospitals in a city: $ iquery -p 南京 +------+ | 南京 | +------+ |... | +------+ |... | +------+ ... 2. query whether a hospital in the city belongs to the Putian series; you can input just the hospital's short name: $ iquery -p 南京 曙光 +------------+ |南京曙光医院| +------------+ | True | +------------+
[ "params", "is", "a", "city", "name", "or", "a", "city", "name", "+", "hospital", "name", "." ]
python
train
angr/angr
angr/analyses/bindiff.py
https://github.com/angr/angr/blob/4e2f97d56af5419ee73bdb30482c8dd8ff5f3e40/angr/analyses/bindiff.py#L104-L128
def _normalized_levenshtein_distance(s1, s2, acceptable_differences): """ This function calculates the levenshtein distance but allows for elements in the lists to be different by any number in the set acceptable_differences. :param s1: A list. :param s2: Another list. :param acceptable_differences: A set of numbers. If (s2[i]-s1[i]) is in the set then they are considered equal. :returns: """ if len(s1) > len(s2): s1, s2 = s2, s1 acceptable_differences = set(-i for i in acceptable_differences) # negated only when swapped distances = range(len(s1) + 1) for index2, num2 in enumerate(s2): new_distances = [index2 + 1] for index1, num1 in enumerate(s1): if num2 - num1 in acceptable_differences: new_distances.append(distances[index1]) else: new_distances.append(1 + min((distances[index1], distances[index1+1], new_distances[-1]))) distances = new_distances return distances[-1]
[ "def", "_normalized_levenshtein_distance", "(", "s1", ",", "s2", ",", "acceptable_differences", ")", ":", "if", "len", "(", "s1", ")", ">", "len", "(", "s2", ")", ":", "s1", ",", "s2", "=", "s2", ",", "s1", "acceptable_differences", "=", "set", "(", "-", "i", "for", "i", "in", "acceptable_differences", ")", "distances", "=", "range", "(", "len", "(", "s1", ")", "+", "1", ")", "for", "index2", ",", "num2", "in", "enumerate", "(", "s2", ")", ":", "new_distances", "=", "[", "index2", "+", "1", "]", "for", "index1", ",", "num1", "in", "enumerate", "(", "s1", ")", ":", "if", "num2", "-", "num1", "in", "acceptable_differences", ":", "new_distances", ".", "append", "(", "distances", "[", "index1", "]", ")", "else", ":", "new_distances", ".", "append", "(", "1", "+", "min", "(", "(", "distances", "[", "index1", "]", ",", "distances", "[", "index1", "+", "1", "]", ",", "new_distances", "[", "-", "1", "]", ")", ")", ")", "distances", "=", "new_distances", "return", "distances", "[", "-", "1", "]" ]
This function calculates the levenshtein distance but allows for elements in the lists to be different by any number in the set acceptable_differences. :param s1: A list. :param s2: Another list. :param acceptable_differences: A set of numbers. If (s2[i]-s1[i]) is in the set then they are considered equal. :returns:
[ "This", "function", "calculates", "the", "levenshtein", "distance", "but", "allows", "for", "elements", "in", "the", "lists", "to", "be", "different", "by", "any", "number", "in", "the", "set", "acceptable_differences", "." ]
python
train
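A worked example of the tolerance semantics (calling the private helper directly; requires angr installed, and the integer lists are made up):

from angr.analyses.bindiff import _normalized_levenshtein_distance

s1 = [10, 20, 30]
s2 = [14, 20, 34]
print(_normalized_levenshtein_distance(s1, s2, {0}))     # 2: two substitutions
print(_normalized_levenshtein_distance(s1, s2, {0, 4}))  # 0: a difference of 4 counts as equal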
rosenbrockc/acorn
acorn/logging/diff.py
https://github.com/rosenbrockc/acorn/blob/9a44d1a1ad8bfc2c54a6b56d9efe54433a797820/acorn/logging/diff.py#L5-L30
def cascade(sequence, full=False): """Restores a sequence of string definitions using the first entry as the original and then applying a series of :func:`~acorn.logging.diff.restore` calls. Args: sequence (list): of results returned by :func:`~acorn.logging.diff.compress`, except that the first entry should be a list of string entries for the very first instance. full (bool): when True, return all the intermediate entries as well; otherwise they are not stored in memory and only the final entry in the list is returned. """ if len(sequence) == 1: return sequence[0] left = sequence[0] if full: intermed = [] for cdiff in sequence[1:]: right = restore(cdiff, left) if full: intermed.append(right) left = right return left
[ "def", "cascade", "(", "sequence", ",", "full", "=", "False", ")", ":", "if", "len", "(", "sequence", ")", "==", "1", ":", "return", "sequence", "[", "0", "]", "left", "=", "sequence", "[", "0", "]", "if", "full", ":", "intermed", "=", "[", "]", "for", "cdiff", "in", "sequence", "[", "1", ":", "]", ":", "right", "=", "restore", "(", "cdiff", ",", "left", ")", "if", "full", ":", "intermed", ".", "append", "(", "right", ")", "left", "=", "right", "return", "left" ]
Restores a sequence of string definitions using the first entry as the original and then applying a series of :func:`~acorn.logging.diff.restore` calls. Args: sequence (list): of results returned by :func:`~acorn.logging.diff.compress`, except that the first entry should be a list of string entries for the very first instance. full (bool): when True, return all the intermediate entries as well; otherwise they are not stored in memory and only the final entry in the list is returned.
[ "Restores", "a", "sequence", "of", "string", "definitions", "using", "the", "first", "entry", "as", "the", "original", "and", "then", "applying", "a", "series", "of", ":", "func", ":", "~acorn", ".", "logging", ".", "diff", ".", "restore", "calls", "." ]
python
train
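The control flow is a left fold of ``restore`` over the diffs; a generic, self-contained re-expression of that design (``restore`` is passed in rather than imported):

from functools import reduce

def cascade_fold(sequence, restore):
    # state_n = restore(diff_n, ... restore(diff_1, state_0) ...)
    return reduce(lambda left, cdiff: restore(cdiff, left),
                  sequence[1:], sequence[0])

# Toy restore where each "diff" is simply the replacement state:
assert cascade_fold(['v0', 'v1', 'v2'], lambda diff, _old: diff) == 'v2'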
ActivisionGameScience/assertpy
assertpy/assertpy.py
https://github.com/ActivisionGameScience/assertpy/blob/08d799cdb01f9a25d3e20672efac991c7bc26d79/assertpy/assertpy.py#L451-L457
def is_not_inf(self): """Asserts that val is a real number and not Inf (infinity).""" self._validate_number() self._validate_real() if math.isinf(self.val): self._err('Expected not <Inf>, but was.') return self
[ "def", "is_not_inf", "(", "self", ")", ":", "self", ".", "_validate_number", "(", ")", "self", ".", "_validate_real", "(", ")", "if", "math", ".", "isinf", "(", "self", ".", "val", ")", ":", "self", ".", "_err", "(", "'Expected not <Inf>, but was.'", ")", "return", "self" ]
Asserts that val is a real number and not Inf (infinity).
[ "Asserts", "that", "val", "is", "real", "number", "and", "not", "Inf", "(", "infinity", ")", "." ]
python
valid
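A usage sketch of the fluent assertion:

from assertpy import assert_that

assert_that(1.5).is_not_inf()            # passes and returns self for chaining
assert_that(float('inf')).is_not_inf()   # raises AssertionError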
neithere/monk
monk/validators.py
https://github.com/neithere/monk/blob/4b2ee5152b081ac288ce8568422a027b5e7d2b1c/monk/validators.py#L699-L753
def translate(value): """ Translates given schema from "pythonic" syntax to a validator. Usage:: >>> translate(str) IsA(str) >>> translate('hello') IsA(str, default='hello') """ if isinstance(value, BaseValidator): return value if value is None: return Anything() if isinstance(value, type): return IsA(value) if type(value) in compat.func_types: real_value = value() return IsA(type(real_value), default=real_value) if isinstance(value, list): if value == []: # no inner spec, just an empty list as the default value return IsA(list) elif len(value) == 1: # the only item as spec for each item of the collection return ListOf(translate(value[0])) else: raise StructureSpecificationError( 'Expected a list containing exactly 1 item; ' 'got {cnt}: {spec}'.format(cnt=len(value), spec=value)) if isinstance(value, dict): if not value: return IsA(dict) items = [] for k, v in value.items(): if isinstance(k, BaseValidator): k_validator = k else: k_validator = translate(k) default = k_validator.get_default_for(None) if default is not None: k_validator = Equals(default) v_validator = translate(v) items.append((k_validator, v_validator)) return DictOf(items) return IsA(type(value), default=value)
[ "def", "translate", "(", "value", ")", ":", "if", "isinstance", "(", "value", ",", "BaseValidator", ")", ":", "return", "value", "if", "value", "is", "None", ":", "return", "Anything", "(", ")", "if", "isinstance", "(", "value", ",", "type", ")", ":", "return", "IsA", "(", "value", ")", "if", "type", "(", "value", ")", "in", "compat", ".", "func_types", ":", "real_value", "=", "value", "(", ")", "return", "IsA", "(", "type", "(", "real_value", ")", ",", "default", "=", "real_value", ")", "if", "isinstance", "(", "value", ",", "list", ")", ":", "if", "value", "==", "[", "]", ":", "# no inner spec, just an empty list as the default value", "return", "IsA", "(", "list", ")", "elif", "len", "(", "value", ")", "==", "1", ":", "# the only item as spec for each item of the collection", "return", "ListOf", "(", "translate", "(", "value", "[", "0", "]", ")", ")", "else", ":", "raise", "StructureSpecificationError", "(", "'Expected a list containing exactly 1 item; '", "'got {cnt}: {spec}'", ".", "format", "(", "cnt", "=", "len", "(", "value", ")", ",", "spec", "=", "value", ")", ")", "if", "isinstance", "(", "value", ",", "dict", ")", ":", "if", "not", "value", ":", "return", "IsA", "(", "dict", ")", "items", "=", "[", "]", "for", "k", ",", "v", "in", "value", ".", "items", "(", ")", ":", "if", "isinstance", "(", "k", ",", "BaseValidator", ")", ":", "k_validator", "=", "k", "else", ":", "k_validator", "=", "translate", "(", "k", ")", "default", "=", "k_validator", ".", "get_default_for", "(", "None", ")", "if", "default", "is", "not", "None", ":", "k_validator", "=", "Equals", "(", "default", ")", "v_validator", "=", "translate", "(", "v", ")", "items", ".", "append", "(", "(", "k_validator", ",", "v_validator", ")", ")", "return", "DictOf", "(", "items", ")", "return", "IsA", "(", "type", "(", "value", ")", ",", "default", "=", "value", ")" ]
Translates given schema from "pythonic" syntax to a validator. Usage:: >>> translate(str) IsA(str) >>> translate('hello') IsA(str, default='hello')
[ "Translates", "given", "schema", "from", "pythonic", "syntax", "to", "a", "validator", "." ]
python
train
mezz64/pyEmby
pyemby/helpers.py
https://github.com/mezz64/pyEmby/blob/6bb621e4e25bf1b9b0aba2c38b588e68f8816226/pyemby/helpers.py#L10-L22
def deprecated_name(name): """Allow old method names for backwards compatibility. """ def decorator(func): """Decorator function.""" def func_wrapper(self): """Wrapper for original function.""" if hasattr(self, name): # Return the old property return getattr(self, name) else: return func(self) return func_wrapper return decorator
[ "def", "deprecated_name", "(", "name", ")", ":", "def", "decorator", "(", "func", ")", ":", "\"\"\"Decorator function.\"\"\"", "def", "func_wrapper", "(", "self", ")", ":", "\"\"\"Wrapper for original function.\"\"\"", "if", "hasattr", "(", "self", ",", "name", ")", ":", "# Return the old property", "return", "getattr", "(", "self", ",", "name", ")", "else", ":", "return", "func", "(", "self", ")", "return", "func_wrapper", "return", "decorator" ]
Allow old method names for backwards compatibility.
[ "Allow", "old", "method", "names", "for", "backwards", "compatability", "." ]
python
train
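A usage sketch: the decorator prefers a legacy attribute when the instance still carries one (the class and attribute names are made up):

from pyemby.helpers import deprecated_name

class Session(object):
    _title = 'New name'
    media_title = 'Legacy name'  # old attribute still present

    @property
    @deprecated_name('media_title')
    def title(self):
        return self._title

print(Session().title)  # 'Legacy name' -- the old attribute wins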
tensorflow/cleverhans
cleverhans/utils_tf.py
https://github.com/tensorflow/cleverhans/blob/97488e215760547b81afc53f5e5de8ba7da5bd98/cleverhans/utils_tf.py#L358-L392
def clip_eta(eta, ord, eps): """ Helper function to clip the perturbation to epsilon norm ball. :param eta: A tensor with the current perturbation. :param ord: Order of the norm (mimics Numpy). Possible values: np.inf, 1 or 2. :param eps: Epsilon, bound of the perturbation. """ # Clipping perturbation eta to self.ord norm ball if ord not in [np.inf, 1, 2]: raise ValueError('ord must be np.inf, 1, or 2.') reduc_ind = list(xrange(1, len(eta.get_shape()))) avoid_zero_div = 1e-12 if ord == np.inf: eta = clip_by_value(eta, -eps, eps) else: if ord == 1: raise NotImplementedError("The expression below is not the correct way" " to project onto the L1 norm ball.") norm = tf.maximum(avoid_zero_div, reduce_sum(tf.abs(eta), reduc_ind, keepdims=True)) elif ord == 2: # avoid_zero_div must go inside sqrt to avoid a divide by zero # in the gradient through this operation norm = tf.sqrt(tf.maximum(avoid_zero_div, reduce_sum(tf.square(eta), reduc_ind, keepdims=True))) # We must *clip* to within the norm ball, not *normalize* onto the # surface of the ball factor = tf.minimum(1., div(eps, norm)) eta = eta * factor return eta
[ "def", "clip_eta", "(", "eta", ",", "ord", ",", "eps", ")", ":", "# Clipping perturbation eta to self.ord norm ball", "if", "ord", "not", "in", "[", "np", ".", "inf", ",", "1", ",", "2", "]", ":", "raise", "ValueError", "(", "'ord must be np.inf, 1, or 2.'", ")", "reduc_ind", "=", "list", "(", "xrange", "(", "1", ",", "len", "(", "eta", ".", "get_shape", "(", ")", ")", ")", ")", "avoid_zero_div", "=", "1e-12", "if", "ord", "==", "np", ".", "inf", ":", "eta", "=", "clip_by_value", "(", "eta", ",", "-", "eps", ",", "eps", ")", "else", ":", "if", "ord", "==", "1", ":", "raise", "NotImplementedError", "(", "\"The expression below is not the correct way\"", "\" to project onto the L1 norm ball.\"", ")", "norm", "=", "tf", ".", "maximum", "(", "avoid_zero_div", ",", "reduce_sum", "(", "tf", ".", "abs", "(", "eta", ")", ",", "reduc_ind", ",", "keepdims", "=", "True", ")", ")", "elif", "ord", "==", "2", ":", "# avoid_zero_div must go inside sqrt to avoid a divide by zero", "# in the gradient through this operation", "norm", "=", "tf", ".", "sqrt", "(", "tf", ".", "maximum", "(", "avoid_zero_div", ",", "reduce_sum", "(", "tf", ".", "square", "(", "eta", ")", ",", "reduc_ind", ",", "keepdims", "=", "True", ")", ")", ")", "# We must *clip* to within the norm ball, not *normalize* onto the", "# surface of the ball", "factor", "=", "tf", ".", "minimum", "(", "1.", ",", "div", "(", "eps", ",", "norm", ")", ")", "eta", "=", "eta", "*", "factor", "return", "eta" ]
Helper function to clip the perturbation to epsilon norm ball. :param eta: A tensor with the current perturbation. :param ord: Order of the norm (mimics Numpy). Possible values: np.inf, 1 or 2. :param eps: Epsilon, bound of the perturbation.
[ "Helper", "function", "to", "clip", "the", "perturbation", "to", "epsilon", "norm", "ball", ".", ":", "param", "eta", ":", "A", "tensor", "with", "the", "current", "perturbation", ".", ":", "param", "ord", ":", "Order", "of", "the", "norm", "(", "mimics", "Numpy", ")", ".", "Possible", "values", ":", "np", ".", "inf", "1", "or", "2", ".", ":", "param", "eps", ":", "Epsilon", "bound", "of", "the", "perturbation", "." ]
python
train
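For ord=2 this is the projection eta <- eta * min(1, eps / ||eta||_2), computed per example; a NumPy re-expression of the same logic (a sketch, not the cleverhans API):

import numpy as np

def clip_eta_np(eta, ord_, eps, avoid_zero_div=1e-12):
    # eta: np.ndarray with the batch on axis 0
    if ord_ == np.inf:
        return np.clip(eta, -eps, eps)
    if ord_ == 2:
        axes = tuple(range(1, eta.ndim))
        norm = np.sqrt(np.maximum(avoid_zero_div,
                                  np.sum(eta ** 2, axis=axes, keepdims=True)))
        return eta * np.minimum(1., eps / norm)
    raise NotImplementedError('only np.inf and 2 in this sketch')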
pypa/pipenv
pipenv/vendor/distlib/util.py
https://github.com/pypa/pipenv/blob/cae8d76c210b9777e90aab76e9c4b0e53bb19cde/pipenv/vendor/distlib/util.py#L145-L263
def parse_requirement(req): """ Parse a requirement passed in as a string. Return a Container whose attributes contain the various parts of the requirement. """ remaining = req.strip() if not remaining or remaining.startswith('#'): return None m = IDENTIFIER.match(remaining) if not m: raise SyntaxError('name expected: %s' % remaining) distname = m.groups()[0] remaining = remaining[m.end():] extras = mark_expr = versions = uri = None if remaining and remaining[0] == '[': i = remaining.find(']', 1) if i < 0: raise SyntaxError('unterminated extra: %s' % remaining) s = remaining[1:i] remaining = remaining[i + 1:].lstrip() extras = [] while s: m = IDENTIFIER.match(s) if not m: raise SyntaxError('malformed extra: %s' % s) extras.append(m.groups()[0]) s = s[m.end():] if not s: break if s[0] != ',': raise SyntaxError('comma expected in extras: %s' % s) s = s[1:].lstrip() if not extras: extras = None if remaining: if remaining[0] == '@': # it's a URI remaining = remaining[1:].lstrip() m = NON_SPACE.match(remaining) if not m: raise SyntaxError('invalid URI: %s' % remaining) uri = m.groups()[0] t = urlparse(uri) # there are issues with Python and URL parsing, so this test # is a bit crude. See bpo-20271, bpo-23505. Python doesn't # always parse invalid URLs correctly - it should raise # exceptions for malformed URLs if not (t.scheme and t.netloc): raise SyntaxError('Invalid URL: %s' % uri) remaining = remaining[m.end():].lstrip() else: def get_versions(ver_remaining): """ Return a list of operator, version tuples if any are specified, else None. """ m = COMPARE_OP.match(ver_remaining) versions = None if m: versions = [] while True: op = m.groups()[0] ver_remaining = ver_remaining[m.end():] m = VERSION_IDENTIFIER.match(ver_remaining) if not m: raise SyntaxError('invalid version: %s' % ver_remaining) v = m.groups()[0] versions.append((op, v)) ver_remaining = ver_remaining[m.end():] if not ver_remaining or ver_remaining[0] != ',': break ver_remaining = ver_remaining[1:].lstrip() m = COMPARE_OP.match(ver_remaining) if not m: raise SyntaxError('invalid constraint: %s' % ver_remaining) if not versions: versions = None return versions, ver_remaining if remaining[0] != '(': versions, remaining = get_versions(remaining) else: i = remaining.find(')', 1) if i < 0: raise SyntaxError('unterminated parenthesis: %s' % remaining) s = remaining[1:i] remaining = remaining[i + 1:].lstrip() # As a special diversion from PEP 508, allow a version number # a.b.c in parentheses as a synonym for ~= a.b.c (because this # is allowed in earlier PEPs) if COMPARE_OP.match(s): versions, _ = get_versions(s) else: m = VERSION_IDENTIFIER.match(s) if not m: raise SyntaxError('invalid constraint: %s' % s) v = m.groups()[0] s = s[m.end():].lstrip() if s: raise SyntaxError('invalid constraint: %s' % s) versions = [('~=', v)] if remaining: if remaining[0] != ';': raise SyntaxError('invalid requirement: %s' % remaining) remaining = remaining[1:].lstrip() mark_expr, remaining = parse_marker(remaining) if remaining and remaining[0] != '#': raise SyntaxError('unexpected trailing data: %s' % remaining) if not versions: rs = distname else: rs = '%s %s' % (distname, ', '.join(['%s %s' % con for con in versions])) return Container(name=distname, extras=extras, constraints=versions, marker=mark_expr, url=uri, requirement=rs)
[ "def", "parse_requirement", "(", "req", ")", ":", "remaining", "=", "req", ".", "strip", "(", ")", "if", "not", "remaining", "or", "remaining", ".", "startswith", "(", "'#'", ")", ":", "return", "None", "m", "=", "IDENTIFIER", ".", "match", "(", "remaining", ")", "if", "not", "m", ":", "raise", "SyntaxError", "(", "'name expected: %s'", "%", "remaining", ")", "distname", "=", "m", ".", "groups", "(", ")", "[", "0", "]", "remaining", "=", "remaining", "[", "m", ".", "end", "(", ")", ":", "]", "extras", "=", "mark_expr", "=", "versions", "=", "uri", "=", "None", "if", "remaining", "and", "remaining", "[", "0", "]", "==", "'['", ":", "i", "=", "remaining", ".", "find", "(", "']'", ",", "1", ")", "if", "i", "<", "0", ":", "raise", "SyntaxError", "(", "'unterminated extra: %s'", "%", "remaining", ")", "s", "=", "remaining", "[", "1", ":", "i", "]", "remaining", "=", "remaining", "[", "i", "+", "1", ":", "]", ".", "lstrip", "(", ")", "extras", "=", "[", "]", "while", "s", ":", "m", "=", "IDENTIFIER", ".", "match", "(", "s", ")", "if", "not", "m", ":", "raise", "SyntaxError", "(", "'malformed extra: %s'", "%", "s", ")", "extras", ".", "append", "(", "m", ".", "groups", "(", ")", "[", "0", "]", ")", "s", "=", "s", "[", "m", ".", "end", "(", ")", ":", "]", "if", "not", "s", ":", "break", "if", "s", "[", "0", "]", "!=", "','", ":", "raise", "SyntaxError", "(", "'comma expected in extras: %s'", "%", "s", ")", "s", "=", "s", "[", "1", ":", "]", ".", "lstrip", "(", ")", "if", "not", "extras", ":", "extras", "=", "None", "if", "remaining", ":", "if", "remaining", "[", "0", "]", "==", "'@'", ":", "# it's a URI", "remaining", "=", "remaining", "[", "1", ":", "]", ".", "lstrip", "(", ")", "m", "=", "NON_SPACE", ".", "match", "(", "remaining", ")", "if", "not", "m", ":", "raise", "SyntaxError", "(", "'invalid URI: %s'", "%", "remaining", ")", "uri", "=", "m", ".", "groups", "(", ")", "[", "0", "]", "t", "=", "urlparse", "(", "uri", ")", "# there are issues with Python and URL parsing, so this test", "# is a bit crude. See bpo-20271, bpo-23505. 
Python doesn't", "# always parse invalid URLs correctly - it should raise", "# exceptions for malformed URLs", "if", "not", "(", "t", ".", "scheme", "and", "t", ".", "netloc", ")", ":", "raise", "SyntaxError", "(", "'Invalid URL: %s'", "%", "uri", ")", "remaining", "=", "remaining", "[", "m", ".", "end", "(", ")", ":", "]", ".", "lstrip", "(", ")", "else", ":", "def", "get_versions", "(", "ver_remaining", ")", ":", "\"\"\"\n Return a list of operator, version tuples if any are\n specified, else None.\n \"\"\"", "m", "=", "COMPARE_OP", ".", "match", "(", "ver_remaining", ")", "versions", "=", "None", "if", "m", ":", "versions", "=", "[", "]", "while", "True", ":", "op", "=", "m", ".", "groups", "(", ")", "[", "0", "]", "ver_remaining", "=", "ver_remaining", "[", "m", ".", "end", "(", ")", ":", "]", "m", "=", "VERSION_IDENTIFIER", ".", "match", "(", "ver_remaining", ")", "if", "not", "m", ":", "raise", "SyntaxError", "(", "'invalid version: %s'", "%", "ver_remaining", ")", "v", "=", "m", ".", "groups", "(", ")", "[", "0", "]", "versions", ".", "append", "(", "(", "op", ",", "v", ")", ")", "ver_remaining", "=", "ver_remaining", "[", "m", ".", "end", "(", ")", ":", "]", "if", "not", "ver_remaining", "or", "ver_remaining", "[", "0", "]", "!=", "','", ":", "break", "ver_remaining", "=", "ver_remaining", "[", "1", ":", "]", ".", "lstrip", "(", ")", "m", "=", "COMPARE_OP", ".", "match", "(", "ver_remaining", ")", "if", "not", "m", ":", "raise", "SyntaxError", "(", "'invalid constraint: %s'", "%", "ver_remaining", ")", "if", "not", "versions", ":", "versions", "=", "None", "return", "versions", ",", "ver_remaining", "if", "remaining", "[", "0", "]", "!=", "'('", ":", "versions", ",", "remaining", "=", "get_versions", "(", "remaining", ")", "else", ":", "i", "=", "remaining", ".", "find", "(", "')'", ",", "1", ")", "if", "i", "<", "0", ":", "raise", "SyntaxError", "(", "'unterminated parenthesis: %s'", "%", "remaining", ")", "s", "=", "remaining", "[", "1", ":", "i", "]", "remaining", "=", "remaining", "[", "i", "+", "1", ":", "]", ".", "lstrip", "(", ")", "# As a special diversion from PEP 508, allow a version number", "# a.b.c in parentheses as a synonym for ~= a.b.c (because this", "# is allowed in earlier PEPs)", "if", "COMPARE_OP", ".", "match", "(", "s", ")", ":", "versions", ",", "_", "=", "get_versions", "(", "s", ")", "else", ":", "m", "=", "VERSION_IDENTIFIER", ".", "match", "(", "s", ")", "if", "not", "m", ":", "raise", "SyntaxError", "(", "'invalid constraint: %s'", "%", "s", ")", "v", "=", "m", ".", "groups", "(", ")", "[", "0", "]", "s", "=", "s", "[", "m", ".", "end", "(", ")", ":", "]", ".", "lstrip", "(", ")", "if", "s", ":", "raise", "SyntaxError", "(", "'invalid constraint: %s'", "%", "s", ")", "versions", "=", "[", "(", "'~='", ",", "v", ")", "]", "if", "remaining", ":", "if", "remaining", "[", "0", "]", "!=", "';'", ":", "raise", "SyntaxError", "(", "'invalid requirement: %s'", "%", "remaining", ")", "remaining", "=", "remaining", "[", "1", ":", "]", ".", "lstrip", "(", ")", "mark_expr", ",", "remaining", "=", "parse_marker", "(", "remaining", ")", "if", "remaining", "and", "remaining", "[", "0", "]", "!=", "'#'", ":", "raise", "SyntaxError", "(", "'unexpected trailing data: %s'", "%", "remaining", ")", "if", "not", "versions", ":", "rs", "=", "distname", "else", ":", "rs", "=", "'%s %s'", "%", "(", "distname", ",", "', '", ".", "join", "(", "[", "'%s %s'", "%", "con", "for", "con", "in", "versions", "]", ")", ")", "return", "Container", "(", "name", "=", "distname", ",", 
"extras", "=", "extras", ",", "constraints", "=", "versions", ",", "marker", "=", "mark_expr", ",", "url", "=", "uri", ",", "requirement", "=", "rs", ")" ]
Parse a requirement passed in as a string. Return a Container whose attributes contain the various parts of the requirement.
[ "Parse", "a", "requirement", "passed", "in", "as", "a", "string", ".", "Return", "a", "Container", "whose", "attributes", "contain", "the", "various", "parts", "of", "the", "requirement", "." ]
python
train
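A usage sketch (distlib is importable on its own; the requirement string is made up):

from distlib.util import parse_requirement

r = parse_requirement('requests[security] (>= 2.8.1); python_version < "3.6"')
print(r.name)         # requests
print(r.extras)       # ['security']
print(r.constraints)  # [('>=', '2.8.1')]
print(r.requirement)  # requests >= 2.8.1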
tensorpack/tensorpack
tensorpack/models/registry.py
https://github.com/tensorpack/tensorpack/blob/d7a13cb74c9066bc791d7aafc3b744b60ee79a9f/tensorpack/models/registry.py#L39-L49
def get_registered_layer(name): """ Args: name (str): the name of the layer, e.g. 'Conv2D' Returns: the wrapped layer function, or None if not registered. """ ret = _LAYER_REGISTRY.get(name, None) if ret == _NameConflict: raise KeyError("Layer named '{}' is registered with `@layer_register` more than once!".format(name)) return ret
[ "def", "get_registered_layer", "(", "name", ")", ":", "ret", "=", "_LAYER_REGISTRY", ".", "get", "(", "name", ",", "None", ")", "if", "ret", "==", "_NameConflict", ":", "raise", "KeyError", "(", "\"Layer named '{}' is registered with `@layer_register` more than once!\"", ".", "format", "(", "name", ")", ")", "return", "ret" ]
Args: name (str): the name of the layer, e.g. 'Conv2D' Returns: the wrapped layer function, or None if not registered.
[ "Args", ":", "name", "(", "str", ")", ":", "the", "name", "of", "the", "layer", "e", ".", "g", ".", "Conv2D", "Returns", ":", "the", "wrapped", "layer", "function", "or", "None", "if", "not", "registered", "." ]
python
train
raymondEhlers/pachyderm
pachyderm/projectors.py
https://github.com/raymondEhlers/pachyderm/blob/aaa1d8374fd871246290ce76f1796f2f7582b01d/pachyderm/projectors.py#L380-L420
def _project_TH3(self, hist: Hist) -> Any: """ Perform the actual TH3 -> TH1 projection. This projection could be to 1D or 2D. Args: hist (ROOT.TH3): Histogram from which the projections should be performed. Returns: ROOT.TH1: The projected histogram. """ # Axis length validation if len(self.projection_axes) < 1 or len(self.projection_axes) > 2: raise ValueError(len(self.projection_axes), "Invalid number of axes") # Need to concatenate the names of the axes together projection_axis_name = "" for axis in self.projection_axes: # Determine the axis name based on the name of the axis type. # [:1] returns just the first letter. For example, we could get "xy" if the first axis as # x_axis and the second was y_axis. # NOTE: Careful. This depends on the name of the enumerated values!!! Since this isn't terribly # safe, we then perform additional validation on the same to ensure that it is one of the # expected axis names. proj_axis_name = axis.axis_type.name[:1] if proj_axis_name not in ["x", "y", "z"]: raise ValueError(f"Projection axis name {proj_axis_name} is not 'x', 'y', or 'z'. Please check your configuration.") projection_axis_name += proj_axis_name # Handle ROOT Project3D quirk... # 2D projection are called as (y, x, options), so we should reverse the order so it performs # as expected. # NOTE: This isn't well documented in TH3. It is instead described in THnBase.Projection(...) if len(self.projection_axes) == 2: # Reverse the axes projection_axis_name = projection_axis_name[::-1] # Do the actual projection logger.info(f"Projecting onto axes \"{projection_axis_name}\" from hist {hist.GetName()}") projected_hist = hist.Project3D(projection_axis_name) return projected_hist
[ "def", "_project_TH3", "(", "self", ",", "hist", ":", "Hist", ")", "->", "Any", ":", "# Axis length validation", "if", "len", "(", "self", ".", "projection_axes", ")", "<", "1", "or", "len", "(", "self", ".", "projection_axes", ")", ">", "2", ":", "raise", "ValueError", "(", "len", "(", "self", ".", "projection_axes", ")", ",", "\"Invalid number of axes\"", ")", "# Need to concatenate the names of the axes together", "projection_axis_name", "=", "\"\"", "for", "axis", "in", "self", ".", "projection_axes", ":", "# Determine the axis name based on the name of the axis type.", "# [:1] returns just the first letter. For example, we could get \"xy\" if the first axis as", "# x_axis and the second was y_axis.", "# NOTE: Careful. This depends on the name of the enumerated values!!! Since this isn't terribly", "# safe, we then perform additional validation on the same to ensure that it is one of the", "# expected axis names.", "proj_axis_name", "=", "axis", ".", "axis_type", ".", "name", "[", ":", "1", "]", "if", "proj_axis_name", "not", "in", "[", "\"x\"", ",", "\"y\"", ",", "\"z\"", "]", ":", "raise", "ValueError", "(", "f\"Projection axis name {proj_axis_name} is not 'x', 'y', or 'z'. Please check your configuration.\"", ")", "projection_axis_name", "+=", "proj_axis_name", "# Handle ROOT Project3D quirk...", "# 2D projection are called as (y, x, options), so we should reverse the order so it performs", "# as expected.", "# NOTE: This isn't well documented in TH3. It is instead described in THnBase.Projection(...)", "if", "len", "(", "self", ".", "projection_axes", ")", "==", "2", ":", "# Reverse the axes", "projection_axis_name", "=", "projection_axis_name", "[", ":", ":", "-", "1", "]", "# Do the actual projection", "logger", ".", "info", "(", "f\"Projecting onto axes \\\"{projection_axis_name}\\\" from hist {hist.GetName()}\"", ")", "projected_hist", "=", "hist", ".", "Project3D", "(", "projection_axis_name", ")", "return", "projected_hist" ]
Perform the actual TH3 -> TH1 projection. This projection could be to 1D or 2D. Args: hist (ROOT.TH3): Histogram from which the projections should be performed. Returns: ROOT.TH1: The projected histogram.
[ "Perform", "the", "actual", "TH3", "-", ">", "TH1", "projection", "." ]
python
train
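The Project3D name reversal in isolation (plain Python, no ROOT needed):

projection_axes = ['x', 'y']        # desired (x, y) projection
name = ''.join(projection_axes)
if len(projection_axes) == 2:
    name = name[::-1]               # ROOT's Project3D expects "yx" here
print(name)  # yx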
jobovy/galpy
galpy/util/bovy_coords.py
https://github.com/jobovy/galpy/blob/9c5b9fe65d58835624dffe432be282060918ee08/galpy/util/bovy_coords.py#L1039-L1062
def cyl_to_rect(R,phi,Z): """ NAME: cyl_to_rect PURPOSE: convert from cylindrical to rectangular coordinates INPUT: R, phi, Z - cylindrical coordinates OUTPUT: X,Y,Z HISTORY: 2011-02-23 - Written - Bovy (NYU) """ return (R*sc.cos(phi),R*sc.sin(phi),Z)
[ "def", "cyl_to_rect", "(", "R", ",", "phi", ",", "Z", ")", ":", "return", "(", "R", "*", "sc", ".", "cos", "(", "phi", ")", ",", "R", "*", "sc", ".", "sin", "(", "phi", ")", ",", "Z", ")" ]
NAME: cyl_to_rect PURPOSE: convert from cylindrical to rectangular coordinates INPUT: R, phi, Z - cylindrical coordinates OUTPUT: X,Y,Z HISTORY: 2011-02-23 - Written - Bovy (NYU)
[ "NAME", ":" ]
python
train
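A worked example (requires galpy; the numbers are arbitrary):

import numpy
from galpy.util import bovy_coords

X, Y, Z = bovy_coords.cyl_to_rect(2., numpy.pi / 2., 1.)
print(X, Y, Z)  # ~0.0 2.0 1.0, since X = R*cos(phi) and Y = R*sin(phi)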
PyPSA/PyPSA
pypsa/pf.py
https://github.com/PyPSA/PyPSA/blob/46954b1b3c21460550f7104681517065279a53b7/pypsa/pf.py#L579-L611
def find_bus_controls(sub_network): """Find slack and all PV and PQ buses for a sub_network. This function also fixes sub_network.buses_o, a DataFrame ordered by control type.""" network = sub_network.network find_slack_bus(sub_network) gens = sub_network.generators() buses_i = sub_network.buses_i() #default bus control is PQ network.buses.loc[buses_i, "control"] = "PQ" #find all buses with one or more gens with PV pvs = gens[gens.control == 'PV'].index.to_series() if len(pvs) > 0: pvs = pvs.groupby(gens.bus).first() network.buses.loc[pvs.index, "control"] = "PV" network.buses.loc[pvs.index, "generator"] = pvs network.buses.loc[sub_network.slack_bus, "control"] = "Slack" network.buses.loc[sub_network.slack_bus, "generator"] = sub_network.slack_generator buses_control = network.buses.loc[buses_i, "control"] sub_network.pvs = buses_control.index[buses_control == "PV"] sub_network.pqs = buses_control.index[buses_control == "PQ"] sub_network.pvpqs = sub_network.pvs.append(sub_network.pqs) # order buses sub_network.buses_o = sub_network.pvpqs.insert(0, sub_network.slack_bus)
[ "def", "find_bus_controls", "(", "sub_network", ")", ":", "network", "=", "sub_network", ".", "network", "find_slack_bus", "(", "sub_network", ")", "gens", "=", "sub_network", ".", "generators", "(", ")", "buses_i", "=", "sub_network", ".", "buses_i", "(", ")", "#default bus control is PQ", "network", ".", "buses", ".", "loc", "[", "buses_i", ",", "\"control\"", "]", "=", "\"PQ\"", "#find all buses with one or more gens with PV", "pvs", "=", "gens", "[", "gens", ".", "control", "==", "'PV'", "]", ".", "index", ".", "to_series", "(", ")", "if", "len", "(", "pvs", ")", ">", "0", ":", "pvs", "=", "pvs", ".", "groupby", "(", "gens", ".", "bus", ")", ".", "first", "(", ")", "network", ".", "buses", ".", "loc", "[", "pvs", ".", "index", ",", "\"control\"", "]", "=", "\"PV\"", "network", ".", "buses", ".", "loc", "[", "pvs", ".", "index", ",", "\"generator\"", "]", "=", "pvs", "network", ".", "buses", ".", "loc", "[", "sub_network", ".", "slack_bus", ",", "\"control\"", "]", "=", "\"Slack\"", "network", ".", "buses", ".", "loc", "[", "sub_network", ".", "slack_bus", ",", "\"generator\"", "]", "=", "sub_network", ".", "slack_generator", "buses_control", "=", "network", ".", "buses", ".", "loc", "[", "buses_i", ",", "\"control\"", "]", "sub_network", ".", "pvs", "=", "buses_control", ".", "index", "[", "buses_control", "==", "\"PV\"", "]", "sub_network", ".", "pqs", "=", "buses_control", ".", "index", "[", "buses_control", "==", "\"PQ\"", "]", "sub_network", ".", "pvpqs", "=", "sub_network", ".", "pvs", ".", "append", "(", "sub_network", ".", "pqs", ")", "# order buses", "sub_network", ".", "buses_o", "=", "sub_network", ".", "pvpqs", ".", "insert", "(", "0", ",", "sub_network", ".", "slack_bus", ")" ]
Find slack and all PV and PQ buses for a sub_network. This function also fixes sub_network.buses_o, a DataFrame ordered by control type.
[ "Find", "slack", "and", "all", "PV", "and", "PQ", "buses", "for", "a", "sub_network", ".", "This", "function", "also", "fixes", "sub_network", ".", "buses_o", "a", "DataFrame", "ordered", "by", "control", "type", "." ]
python
train
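The PV/PQ/slack bookkeeping in find_bus_controls reduces to a default-label-then-override pass over two DataFrames; a toy pandas sketch outside PyPSA (bus and generator names are invented for illustration):

import pandas as pd

buses = pd.DataFrame(index=['b0', 'b1', 'b2'])
gens = pd.DataFrame({'bus': ['b0', 'b1', 'b1'],
                     'control': ['Slack', 'PV', 'PV']})

buses['control'] = 'PQ'  # default bus control is PQ
pvs = gens[gens.control == 'PV'].index.to_series().groupby(gens.bus).first()
buses.loc[pvs.index, 'control'] = 'PV'  # first PV generator wins per bus
buses.loc['b0', 'control'] = 'Slack'    # the slack bus overrides everything
print(buses)  # b0 Slack, b1 PV, b2 PQ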
acorg/dark-matter
dark/reads.py
https://github.com/acorg/dark-matter/blob/c78a1bf262667fa5db3548fa7066c4ec14d0551d/dark/reads.py#L1221-L1235
def filterRead(self, read): """ Filter a read, according to our set of filters. @param read: A C{Read} instance or one of its subclasses. @return: C{False} if the read fails any of our filters, else the C{Read} instance returned by our list of filters. """ for filterFunc in self._filters: filteredRead = filterFunc(read) if filteredRead is False: return False else: read = filteredRead return read
[ "def", "filterRead", "(", "self", ",", "read", ")", ":", "for", "filterFunc", "in", "self", ".", "_filters", ":", "filteredRead", "=", "filterFunc", "(", "read", ")", "if", "filteredRead", "is", "False", ":", "return", "False", "else", ":", "read", "=", "filteredRead", "return", "read" ]
Filter a read, according to our set of filters. @param read: A C{Read} instance or one of its subclasses. @return: C{False} if the read fails any of our filters, else the C{Read} instance returned by our list of filters.
[ "Filter", "a", "read", "according", "to", "our", "set", "of", "filters", "." ]
python
train
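The short-circuiting filter chain in filterRead is a reusable pattern; a standalone sketch with made-up filter functions (nothing here is from the dark-matter API):

def apply_filters(filters, read):
    # Each filter may transform the read or veto it by returning False.
    for filter_func in filters:
        filtered = filter_func(read)
        if filtered is False:
            return False
        read = filtered
    return read

min_length = lambda read: read if len(read) >= 3 else False
upper_case = lambda read: read.upper()

print(apply_filters([min_length, upper_case], 'acgt'))  # -> 'ACGT'
print(apply_filters([min_length, upper_case], 'ac'))    # -> False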
sporsh/carnifex
carnifex/endpoint.py
https://github.com/sporsh/carnifex/blob/82dd3bd2bc134dfb69a78f43171e227f2127060b/carnifex/endpoint.py#L149-L156
def childDataReceived(self, childFD, data): """Relay data received on any file descriptor to the process """ protocol = getattr(self, 'protocol', None) if protocol: protocol.dataReceived(data) else: self.data.append((childFD, data))
[ "def", "childDataReceived", "(", "self", ",", "childFD", ",", "data", ")", ":", "protocol", "=", "getattr", "(", "self", ",", "'protocol'", ",", "None", ")", "if", "protocol", ":", "protocol", ".", "dataReceived", "(", "data", ")", "else", ":", "self", ".", "data", ".", "append", "(", "(", "childFD", ",", "data", ")", ")" ]
Relay data received on any file descriptor to the process
[ "Relay", "data", "received", "on", "any", "file", "descriptor", "to", "the", "process" ]
python
train
hvac/hvac
hvac/api/secrets_engines/identity.py
https://github.com/hvac/hvac/blob/cce5b86889193f622c2a72a4a1b7e1c9c8aff1ce/hvac/api/secrets_engines/identity.py#L887-L919
def update_group_alias(self, entity_id, name, mount_accessor="", canonical_id="", mount_point=DEFAULT_MOUNT_POINT): """Update an existing group alias. Supported methods: POST: /{mount_point}/group-alias/id/{id}. Produces: 200 application/json :param entity_id: ID of the group alias. :type entity_id: str | unicode :param name: Name of the group alias. :type name: str | unicode :param mount_accessor: Mount accessor to which this alias belongs. :type mount_accessor: str | unicode :param canonical_id: ID of the group to which this is an alias. :type canonical_id: str | unicode :param mount_point: The "path" the method/backend was mounted on. :type mount_point: str | unicode :return: The response of the request. :rtype: requests.Response """ params = { 'name': name, 'mount_accessor': mount_accessor, 'canonical_id': canonical_id, } api_path = '/v1/{mount_point}/group-alias/id/{id}'.format( mount_point=mount_point, id=entity_id, ) return self._adapter.post( url=api_path, json=params, )
[ "def", "update_group_alias", "(", "self", ",", "entity_id", ",", "name", ",", "mount_accessor", "=", "\"\"", ",", "canonical_id", "=", "\"\"", ",", "mount_point", "=", "DEFAULT_MOUNT_POINT", ")", ":", "params", "=", "{", "'name'", ":", "name", ",", "'mount_accessor'", ":", "mount_accessor", ",", "'canonical_id'", ":", "canonical_id", ",", "}", "api_path", "=", "'/v1/{mount_point}/group-alias/id/{id}'", ".", "format", "(", "mount_point", "=", "mount_point", ",", "id", "=", "entity_id", ",", ")", "return", "self", ".", "_adapter", ".", "post", "(", "url", "=", "api_path", ",", "json", "=", "params", ",", ")" ]
Update an existing group alias. Supported methods: POST: /{mount_point}/group-alias/id/{id}. Produces: 200 application/json :param entity_id: ID of the group alias. :type entity_id: str | unicode :param name: Name of the group alias. :type name: str | unicode :param mount_accessor: Mount accessor to which this alias belongs. :type mount_accessor: str | unicode :param canonical_id: ID of the group to which this is an alias. :type canonical_id: str | unicode :param mount_point: The "path" the method/backend was mounted on. :type mount_point: str | unicode :return: The response of the request. :rtype: requests.Response
[ "Update", "an", "existing", "group", "alias", "." ]
python
train
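A hedged usage sketch for update_group_alias. The URL, token, and IDs below are placeholders, and the call assumes the Identity class is exposed as client.secrets.identity, as in recent hvac releases:

import hvac

client = hvac.Client(url='https://vault.example.com:8200', token='<token>')
response = client.secrets.identity.update_group_alias(
    entity_id='<group-alias-id>',
    name='ops-team',
    mount_accessor='<auth-mount-accessor>',
    canonical_id='<group-id>',
)
print(response.status_code)  # the method returns a requests.Response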
fabioz/PyDev.Debugger
_pydevd_bundle/pydevd_vars.py
https://github.com/fabioz/PyDev.Debugger/blob/ed9c4307662a5593b8a7f1f3389ecd0e79b8c503/_pydevd_bundle/pydevd_vars.py#L243-L279
def evaluate_expression(dbg, frame, expression, is_exec): '''returns the result of the evaluated expression @param is_exec: determines if we should do an exec or an eval ''' if frame is None: return # Not using frame.f_globals because of https://sourceforge.net/tracker2/?func=detail&aid=2541355&group_id=85796&atid=577329 # (Names not resolved in generator expression in method) # See message: http://mail.python.org/pipermail/python-list/2009-January/526522.html updated_globals = {} updated_globals.update(frame.f_globals) updated_globals.update(frame.f_locals) # locals later because it has precedence over the actual globals try: expression = str(expression.replace('@LINE@', '\n')) if is_exec: try: # try to make it an eval (if it is an eval we can print it, otherwise we'll exec it and # it will have whatever the user actually did) compiled = compile(expression, '<string>', 'eval') except: Exec(expression, updated_globals, frame.f_locals) pydevd_save_locals.save_locals(frame) else: result = eval(compiled, updated_globals, frame.f_locals) if result is not None: # Only print if it's not None (as python does) sys.stdout.write('%s\n' % (result,)) return else: return eval_in_context(expression, updated_globals, frame.f_locals) finally: # Should not be kept alive if an exception happens and this frame is kept in the stack. del updated_globals del frame
[ "def", "evaluate_expression", "(", "dbg", ",", "frame", ",", "expression", ",", "is_exec", ")", ":", "if", "frame", "is", "None", ":", "return", "# Not using frame.f_globals because of https://sourceforge.net/tracker2/?func=detail&aid=2541355&group_id=85796&atid=577329", "# (Names not resolved in generator expression in method)", "# See message: http://mail.python.org/pipermail/python-list/2009-January/526522.html", "updated_globals", "=", "{", "}", "updated_globals", ".", "update", "(", "frame", ".", "f_globals", ")", "updated_globals", ".", "update", "(", "frame", ".", "f_locals", ")", "# locals later because it has precedence over the actual globals", "try", ":", "expression", "=", "str", "(", "expression", ".", "replace", "(", "'@LINE@'", ",", "'\\n'", ")", ")", "if", "is_exec", ":", "try", ":", "# try to make it an eval (if it is an eval we can print it, otherwise we'll exec it and", "# it will have whatever the user actually did)", "compiled", "=", "compile", "(", "expression", ",", "'<string>'", ",", "'eval'", ")", "except", ":", "Exec", "(", "expression", ",", "updated_globals", ",", "frame", ".", "f_locals", ")", "pydevd_save_locals", ".", "save_locals", "(", "frame", ")", "else", ":", "result", "=", "eval", "(", "compiled", ",", "updated_globals", ",", "frame", ".", "f_locals", ")", "if", "result", "is", "not", "None", ":", "# Only print if it's not None (as python does)", "sys", ".", "stdout", ".", "write", "(", "'%s\\n'", "%", "(", "result", ",", ")", ")", "return", "else", ":", "return", "eval_in_context", "(", "expression", ",", "updated_globals", ",", "frame", ".", "f_locals", ")", "finally", ":", "# Should not be kept alive if an exception happens and this frame is kept in the stack.", "del", "updated_globals", "del", "frame" ]
returns the result of the evaluated expression @param is_exec: determines if we should do an exec or an eval
[ "returns", "the", "result", "of", "the", "evaluated", "expression" ]
python
train
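The try-eval-then-exec fallback at the core of evaluate_expression can be shown without the debugger plumbing; a minimal sketch of the same control flow:

import sys

def run_snippet(expression, scope):
    # Compile as an expression first so the result can be printed;
    # statements (assignments, imports, ...) raise SyntaxError and are exec'd.
    try:
        compiled = compile(expression, '<string>', 'eval')
    except SyntaxError:
        exec(expression, scope)
        return
    result = eval(compiled, scope)
    if result is not None:  # only print non-None results, as the REPL does
        sys.stdout.write('%s\n' % (result,))

scope = {}
run_snippet('x = 6 * 7', scope)  # statement path, prints nothing
run_snippet('x + 1', scope)      # expression path, prints 43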
GeorgeArgyros/sfalearn
sfalearn/angluin_fst.py
https://github.com/GeorgeArgyros/sfalearn/blob/68a93f507e2fb7d89ca04bd8a8f0da2d6c680443/sfalearn/angluin_fst.py#L255-L282
def _process_counter_example(self, mma, w_string): """ Process a counterexample in the Rivest-Schapire way. Args: mma (DFA): The hypothesis automaton w_string (str): The examined string to be consumed Returns: None """ w_string = self._find_bad_transition(mma, w_string) diff = len(w_string) same = 0 while True: i = (same + diff) // 2 access_string = self._run_in_hypothesis(mma, w_string, i) is_diff = self._check_suffix(w_string, access_string, i) if is_diff: diff = i else: same = i if diff - same == 1: break exp = w_string[diff:] self.observation_table.em_vector.append(exp) for row in self.observation_table.sm_vector + self.observation_table.smi_vector: self._fill_table_entry(row, exp)
[ "def", "_process_counter_example", "(", "self", ",", "mma", ",", "w_string", ")", ":", "w_string", "=", "self", ".", "_find_bad_transition", "(", "mma", ",", "w_string", ")", "diff", "=", "len", "(", "w_string", ")", "same", "=", "0", "while", "True", ":", "i", "=", "(", "same", "+", "diff", ")", "/", "2", "access_string", "=", "self", ".", "_run_in_hypothesis", "(", "mma", ",", "w_string", ",", "i", ")", "is_diff", "=", "self", ".", "_check_suffix", "(", "w_string", ",", "access_string", ",", "i", ")", "if", "is_diff", ":", "diff", "=", "i", "else", ":", "same", "=", "i", "if", "diff", "-", "same", "==", "1", ":", "break", "exp", "=", "w_string", "[", "diff", ":", "]", "self", ".", "observation_table", ".", "em_vector", ".", "append", "(", "exp", ")", "for", "row", "in", "self", ".", "observation_table", ".", "sm_vector", "+", "self", ".", "observation_table", ".", "smi_vector", ":", "self", ".", "_fill_table_entry", "(", "row", ",", "exp", ")" ]
Process a counterexample in the Rivest-Schapire way. Args: mma (DFA): The hypothesis automaton w_string (str): The examined string to be consumed Returns: None
[ "Process", "a", "counterexample", "in", "the", "Rivest", "-", "Schapire", "way", ".", "Args", ":", "mma", "(", "DFA", ")", ":", "The", "hypothesis", "automaton", "w_string", "(", "str", ")", ":", "The", "examined", "string", "to", "be", "consumed", "Returns", ":", "None" ]
python
train
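The while loop above is a binary search for the first index at which the hypothesis and the target disagree; a standalone sketch of that bisection, with a toy oracle standing in for _check_suffix:

def find_split(n, disagrees_at):
    # Invariant: prefixes of length `same` still agree, prefixes of
    # length `diff` already disagree; halve the gap until adjacent.
    same, diff = 0, n
    while diff - same > 1:
        mid = (same + diff) // 2
        if disagrees_at(mid):
            diff = mid
        else:
            same = mid
    return diff

# Toy oracle: disagreement starts at index 6 of a length-10 counterexample.
print(find_split(10, lambda i: i >= 6))  # -> 6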
brocade/pynos
pynos/versions/ver_6/ver_6_0_1/yang/brocade_lag.py
https://github.com/brocade/pynos/blob/bd8a34e98f322de3fc06750827d8bbc3a0c00380/pynos/versions/ver_6/ver_6_0_1/yang/brocade_lag.py#L27-L40
def get_port_channel_detail_input_request_type_get_next_request_last_aggregator_id(self, **kwargs): """Auto Generated Code """ config = ET.Element("config") get_port_channel_detail = ET.Element("get_port_channel_detail") config = get_port_channel_detail input = ET.SubElement(get_port_channel_detail, "input") request_type = ET.SubElement(input, "request-type") get_next_request = ET.SubElement(request_type, "get-next-request") last_aggregator_id = ET.SubElement(get_next_request, "last-aggregator-id") last_aggregator_id.text = kwargs.pop('last_aggregator_id') callback = kwargs.pop('callback', self._callback) return callback(config)
[ "def", "get_port_channel_detail_input_request_type_get_next_request_last_aggregator_id", "(", "self", ",", "*", "*", "kwargs", ")", ":", "config", "=", "ET", ".", "Element", "(", "\"config\"", ")", "get_port_channel_detail", "=", "ET", ".", "Element", "(", "\"get_port_channel_detail\"", ")", "config", "=", "get_port_channel_detail", "input", "=", "ET", ".", "SubElement", "(", "get_port_channel_detail", ",", "\"input\"", ")", "request_type", "=", "ET", ".", "SubElement", "(", "input", ",", "\"request-type\"", ")", "get_next_request", "=", "ET", ".", "SubElement", "(", "request_type", ",", "\"get-next-request\"", ")", "last_aggregator_id", "=", "ET", ".", "SubElement", "(", "get_next_request", ",", "\"last-aggregator-id\"", ")", "last_aggregator_id", ".", "text", "=", "kwargs", ".", "pop", "(", "'last_aggregator_id'", ")", "callback", "=", "kwargs", ".", "pop", "(", "'callback'", ",", "self", ".", "_callback", ")", "return", "callback", "(", "config", ")" ]
Auto Generated Code
[ "Auto", "Generated", "Code" ]
python
train
jilljenn/tryalgo
tryalgo/eulerian_tour.py
https://github.com/jilljenn/tryalgo/blob/89a4dd9655e7b6b0a176f72b4c60d0196420dfe1/tryalgo/eulerian_tour.py#L104-L119
def is_eulerian_tour(graph, tour): """Eulerian tour on an undirected graph :param graph: graph in listlist format, cannot be listdict :param tour: vertex list :returns: test if tour is eulerian :complexity: `O(|V|*|E|)` under the assumption that set membership is in constant time """ m = len(tour)-1 arcs = set((tour[i], tour[i+1]) for i in range(m)) if len(arcs) != m: return False for (u,v) in arcs: if v not in graph[u]: return False return True
[ "def", "is_eulerian_tour", "(", "graph", ",", "tour", ")", ":", "m", "=", "len", "(", "tour", ")", "-", "1", "arcs", "=", "set", "(", "(", "tour", "[", "i", "]", ",", "tour", "[", "i", "+", "1", "]", ")", "for", "i", "in", "range", "(", "m", ")", ")", "if", "len", "(", "arcs", ")", "!=", "m", ":", "return", "False", "for", "(", "u", ",", "v", ")", "in", "arcs", ":", "if", "v", "not", "in", "graph", "[", "u", "]", ":", "return", "False", "return", "True" ]
Eulerian tour on an undirected graph :param graph: graph in listlist format, cannot be listdict :param tour: vertex list :returns: test if tour is eulerian :complexity: `O(|V|*|E|)` under the assumption that set membership is in constant time
[ "Eulerian", "tour", "on", "an", "undirected", "graph" ]
python
train
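A quick check of is_eulerian_tour on a triangle in listlist format, assuming the function from the record above is in scope:

graph = [[1, 2], [0, 2], [0, 1]]  # triangle on vertices 0, 1, 2
print(is_eulerian_tour(graph, [0, 1, 2, 0]))     # True: every arc used once
print(is_eulerian_tour(graph, [0, 1, 2, 0, 1]))  # False: arc (0, 1) repeated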
gmr/helper
helper/__init__.py
https://github.com/gmr/helper/blob/fe8e45fc8eabf619429b2940c682c252ee33c082/helper/__init__.py#L149-L171
def start(controller_class): """Start the Helper controller either in the foreground or as a daemon process. :param controller_class: The controller class handle to create and run :type controller_class: callable """ args = parser.parse() obj = controller_class(args, platform.operating_system()) if args.foreground: try: obj.start() except KeyboardInterrupt: obj.stop() else: try: with platform.Daemon(obj) as daemon: daemon.start() except (OSError, ValueError) as error: sys.stderr.write('\nError starting %s: %s\n\n' % (sys.argv[0], error)) sys.exit(1)
[ "def", "start", "(", "controller_class", ")", ":", "args", "=", "parser", ".", "parse", "(", ")", "obj", "=", "controller_class", "(", "args", ",", "platform", ".", "operating_system", "(", ")", ")", "if", "args", ".", "foreground", ":", "try", ":", "obj", ".", "start", "(", ")", "except", "KeyboardInterrupt", ":", "obj", ".", "stop", "(", ")", "else", ":", "try", ":", "with", "platform", ".", "Daemon", "(", "obj", ")", "as", "daemon", ":", "daemon", ".", "start", "(", ")", "except", "(", "OSError", ",", "ValueError", ")", "as", "error", ":", "sys", ".", "stderr", ".", "write", "(", "'\\nError starting %s: %s\\n\\n'", "%", "(", "sys", ".", "argv", "[", "0", "]", ",", "error", ")", ")", "sys", ".", "exit", "(", "1", ")" ]
Start the Helper controller either in the foreground or as a daemon process. :param controller_class: The controller class handle to create and run :type controller_class: callable
[ "Start", "the", "Helper", "controller", "either", "in", "the", "foreground", "or", "as", "a", "daemon", "process", "." ]
python
train
isogeo/isogeo-api-py-minsdk
isogeo_pysdk/checker.py
https://github.com/isogeo/isogeo-api-py-minsdk/blob/57a604be92c7767b26abd247012cc1a584b386a0/isogeo_pysdk/checker.py#L283-L306
def check_is_uuid(self, uuid_str: str): """Check if it's an Isogeo UUID handling specific form. :param str uuid_str: UUID string to check """ # check uuid type if not isinstance(uuid_str, str): raise TypeError("'uuid_str' expected a str value.") else: pass # handle Isogeo specific UUID in XML exports if "isogeo:metadata" in uuid_str: uuid_str = "urn:uuid:{}".format(uuid_str.split(":")[-1]) else: pass # test it try: uid = UUID(uuid_str) return uid.hex == uuid_str.replace("-", "").replace("urn:uuid:", "") except ValueError as e: logging.error( "uuid ValueError. {} ({}) -- {}".format(type(uuid_str), uuid_str, e) ) return False
[ "def", "check_is_uuid", "(", "self", ",", "uuid_str", ":", "str", ")", ":", "# check uuid type", "if", "not", "isinstance", "(", "uuid_str", ",", "str", ")", ":", "raise", "TypeError", "(", "\"'uuid_str' expected a str value.\"", ")", "else", ":", "pass", "# handle Isogeo specific UUID in XML exports", "if", "\"isogeo:metadata\"", "in", "uuid_str", ":", "uuid_str", "=", "\"urn:uuid:{}\"", ".", "format", "(", "uuid_str", ".", "split", "(", "\":\"", ")", "[", "-", "1", "]", ")", "else", ":", "pass", "# test it", "try", ":", "uid", "=", "UUID", "(", "uuid_str", ")", "return", "uid", ".", "hex", "==", "uuid_str", ".", "replace", "(", "\"-\"", ",", "\"\"", ")", ".", "replace", "(", "\"urn:uuid:\"", ",", "\"\"", ")", "except", "ValueError", "as", "e", ":", "logging", ".", "error", "(", "\"uuid ValueError. {} ({}) -- {}\"", ".", "format", "(", "type", "(", "uuid_str", ")", ",", "uuid_str", ",", "e", ")", ")", "return", "False" ]
Check if it's an Isogeo UUID handling specific form. :param str uuid_str: UUID string to check
[ "Check", "if", "it", "s", "an", "Isogeo", "UUID", "handling", "specific", "form", "." ]
python
train
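The essence of check_is_uuid, rewriting the Isogeo XML-export form to a urn:uuid and validating with the standard library, fits in a few lines; a standalone sketch:

from uuid import UUID

def looks_like_isogeo_uuid(uuid_str):
    # Rewrite the "isogeo:metadata:..." export form to a plain urn.
    if "isogeo:metadata" in uuid_str:
        uuid_str = "urn:uuid:{}".format(uuid_str.split(":")[-1])
    try:
        uid = UUID(uuid_str)
    except ValueError:
        return False
    return uid.hex == uuid_str.replace("-", "").replace("urn:uuid:", "")

print(looks_like_isogeo_uuid("urn:uuid:12345678-1234-5678-1234-567812345678"))  # True
print(looks_like_isogeo_uuid("not-a-uuid"))  # False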
openatx/facebook-wda
wda/__init__.py
https://github.com/openatx/facebook-wda/blob/aa644204620c6d5c7705a9c7452d8c0cc39330d5/wda/__init__.py#L707-L721
def _add_escape_character_for_quote_prime_character(self, text): """ Fix for https://github.com/openatx/facebook-wda/issues/33 Returns: string with properly formatted quotes, or the unchanged text """ if text is not None: if "'" in text: return text.replace("'","\\'") elif '"' in text: return text.replace('"','\\"') else: return text else: return text
[ "def", "_add_escape_character_for_quote_prime_character", "(", "self", ",", "text", ")", ":", "if", "text", "is", "not", "None", ":", "if", "\"'\"", "in", "text", ":", "return", "text", ".", "replace", "(", "\"'\"", ",", "\"\\\\'\"", ")", "elif", "'\"'", "in", "text", ":", "return", "text", ".", "replace", "(", "'\"'", ",", "'\\\\\"'", ")", "else", ":", "return", "text", "else", ":", "return", "text" ]
Fix for https://github.com/openatx/facebook-wda/issues/33 Returns: string with properly formatted quotes, or the unchanged text
[ "Fix", "for", "https", ":", "//", "github", ".", "com", "/", "openatx", "/", "facebook", "-", "wda", "/", "issues", "/", "33", "Returns", ":", "string", "with", "properly", "formatted", "quotes", "or", "the", "unchanged", "text" ]
python
train
pypa/pipenv
pipenv/patched/notpip/_internal/utils/misc.py
https://github.com/pypa/pipenv/blob/cae8d76c210b9777e90aab76e9c4b0e53bb19cde/pipenv/patched/notpip/_internal/utils/misc.py#L313-L323
def is_local(path): # type: (str) -> bool """ Return True if path is within sys.prefix, if we're running in a virtualenv. If we're not in a virtualenv, all paths are considered "local." """ if not running_under_virtualenv(): return True return normalize_path(path).startswith(normalize_path(sys.prefix))
[ "def", "is_local", "(", "path", ")", ":", "# type: (str) -> bool", "if", "not", "running_under_virtualenv", "(", ")", ":", "return", "True", "return", "normalize_path", "(", "path", ")", ".", "startswith", "(", "normalize_path", "(", "sys", ".", "prefix", ")", ")" ]
Return True if path is within sys.prefix, if we're running in a virtualenv. If we're not in a virtualenv, all paths are considered "local."
[ "Return", "True", "if", "path", "is", "within", "sys", ".", "prefix", "if", "we", "re", "running", "in", "a", "virtualenv", "." ]
python
train
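A rough standalone equivalent of the is_local prefix test, assuming pip's normalize_path boils down to realpath plus normcase:

import os
import sys

def normalize(path):
    # Approximation of pip's normalize_path: resolve symlinks, absolutize.
    return os.path.normcase(os.path.realpath(path))

def is_under_prefix(path, prefix=sys.prefix):
    return normalize(path).startswith(normalize(prefix))

print(is_under_prefix(os.path.join(sys.prefix, 'lib')))  # True
print(is_under_prefix(os.sep))  # False unless the prefix is the filesystem root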
kata198/AdvancedHTMLParser
AdvancedHTMLParser/Parser.py
https://github.com/kata198/AdvancedHTMLParser/blob/06aeea5d8e2ea86e155aae0fc237623d3e9b7f9d/AdvancedHTMLParser/Parser.py#L1183-L1202
def getElementsByTagName(self, tagName, root='root', useIndex=True): ''' getElementsByTagName - Searches and returns all elements with a specific tag name. @param tagName <lowercase str> - A lowercase string of the tag name. @param root <AdvancedTag/'root'> - Search starting at a specific node, if provided. if string 'root', the root of the parsed tree will be used. @param useIndex - If True [default] and tag names are set to be indexed [default, see constructor], only the index will be used. If False, all tags will be searched. ''' (root, isFromRoot) = self._handleRootArg(root) if useIndex is True and self.indexTagNames is True: elements = self._tagNameMap.get(tagName, []) # Use .get here as to not create a lot of extra indexes on the defaultdict for misses if isFromRoot is False: _hasTagInParentLine = self._hasTagInParentLine elements = [x for x in elements if _hasTagInParentLine(x, root)] return TagCollection(elements) return AdvancedHTMLParser.getElementsByTagName(self, tagName, root)
[ "def", "getElementsByTagName", "(", "self", ",", "tagName", ",", "root", "=", "'root'", ",", "useIndex", "=", "True", ")", ":", "(", "root", ",", "isFromRoot", ")", "=", "self", ".", "_handleRootArg", "(", "root", ")", "if", "useIndex", "is", "True", "and", "self", ".", "indexTagNames", "is", "True", ":", "elements", "=", "self", ".", "_tagNameMap", ".", "get", "(", "tagName", ",", "[", "]", ")", "# Use .get here as to not create a lot of extra indexes on the defaultdict for misses", "if", "isFromRoot", "is", "False", ":", "_hasTagInParentLine", "=", "self", ".", "_hasTagInParentLine", "elements", "=", "[", "x", "for", "x", "in", "elements", "if", "_hasTagInParentLine", "(", "x", ",", "root", ")", "]", "return", "TagCollection", "(", "elements", ")", "return", "AdvancedHTMLParser", ".", "getElementsByTagName", "(", "self", ",", "tagName", ",", "root", ")" ]
getElementsByTagName - Searches and returns all elements with a specific tag name. @param tagName <lowercase str> - A lowercase string of the tag name. @param root <AdvancedTag/'root'> - Search starting at a specific node, if provided. if string 'root', the root of the parsed tree will be used. @param useIndex - If True [default] and tag names are set to be indexed [default, see constructor], only the index will be used. If False, all tags will be searched.
[ "getElementsByTagName", "-", "Searches", "and", "returns", "all", "elements", "with", "a", "specific", "tag", "name", "." ]
python
train
infothrill/python-dyndnsc
dyndnsc/conf.py
https://github.com/infothrill/python-dyndnsc/blob/2196d48aa6098da9835a7611fbdb0b5f0fbf51e4/dyndnsc/conf.py#L48-L73
def _iraw_client_configs(cfg): """ Generate (client_name, client_cfg_dict) tuples from the configuration. Conflates the presets and removes traces of the preset configuration so that the returned dict can be used directly on a dyndnsc factory. :param cfg: ConfigParser """ client_names = cfg.get("dyndnsc", "configs").split(",") _preset_prefix = "preset:" _use_preset = "use_preset" for client_name in (x.strip() for x in client_names if x.strip()): client_cfg_dict = dict(cfg.items(client_name)) if cfg.has_option(client_name, _use_preset): prf = dict( cfg.items(_preset_prefix + cfg.get(client_name, _use_preset))) prf.update(client_cfg_dict) client_cfg_dict = prf else: # raw config with NO preset in use, so no updating of dict pass logging.debug("raw config for '%s': %r", client_name, client_cfg_dict) if _use_preset in client_cfg_dict: del client_cfg_dict[_use_preset] yield client_name, client_cfg_dict
[ "def", "_iraw_client_configs", "(", "cfg", ")", ":", "client_names", "=", "cfg", ".", "get", "(", "\"dyndnsc\"", ",", "\"configs\"", ")", ".", "split", "(", "\",\"", ")", "_preset_prefix", "=", "\"preset:\"", "_use_preset", "=", "\"use_preset\"", "for", "client_name", "in", "(", "x", ".", "strip", "(", ")", "for", "x", "in", "client_names", "if", "x", ".", "strip", "(", ")", ")", ":", "client_cfg_dict", "=", "dict", "(", "cfg", ".", "items", "(", "client_name", ")", ")", "if", "cfg", ".", "has_option", "(", "client_name", ",", "_use_preset", ")", ":", "prf", "=", "dict", "(", "cfg", ".", "items", "(", "_preset_prefix", "+", "cfg", ".", "get", "(", "client_name", ",", "_use_preset", ")", ")", ")", "prf", ".", "update", "(", "client_cfg_dict", ")", "client_cfg_dict", "=", "prf", "else", ":", "# raw config with NO preset in use, so no updating of dict", "pass", "logging", ".", "debug", "(", "\"raw config for '%s': %r\"", ",", "client_name", ",", "client_cfg_dict", ")", "if", "_use_preset", "in", "client_cfg_dict", ":", "del", "client_cfg_dict", "[", "_use_preset", "]", "yield", "client_name", ",", "client_cfg_dict" ]
Generate (client_name, client_cfg_dict) tuples from the configuration. Conflates the presets and removes traces of the preset configuration so that the returned dict can be used directly on a dyndnsc factory. :param cfg: ConfigParser
[ "Generate", "(", "client_name", "client_cfg_dict", ")", "tuples", "from", "the", "configuration", "." ]
python
train
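How a [preset:...] section is conflated by _iraw_client_configs, shown with a throwaway config; section and option names are invented for the sketch, and the function from the record above is assumed to be in scope:

from configparser import ConfigParser

cfg = ConfigParser()
cfg.read_string('''
[dyndnsc]
configs = myhost

[preset:dummy]
updater = dummy
interval = 300

[myhost]
use_preset = dummy
interval = 60
''')

# Client options override preset options, and use_preset is stripped.
for name, client_cfg in _iraw_client_configs(cfg):
    print(name, client_cfg)  # -> myhost {'updater': 'dummy', 'interval': '60'}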
hazelcast/hazelcast-python-client
hazelcast/proxy/multi_map.py
https://github.com/hazelcast/hazelcast-python-client/blob/3f6639443c23d6d036aa343f8e094f052250d2c1/hazelcast/proxy/multi_map.py#L271-L284
def value_count(self, key): """ Returns the number of values that match the given key in the multimap. **Warning: This method uses __hash__ and __eq__ methods of binary form of the key, not the actual implementations of __hash__ and __eq__ defined in key's class.** :param key: (object), the key whose values count is to be returned. :return: (int), the number of values that match the given key in the multimap. """ check_not_none(key, "key can't be None") key_data = self._to_data(key) return self._encode_invoke_on_key(multi_map_value_count_codec, key_data, key=key_data, thread_id=thread_id())
[ "def", "value_count", "(", "self", ",", "key", ")", ":", "check_not_none", "(", "key", ",", "\"key can't be None\"", ")", "key_data", "=", "self", ".", "_to_data", "(", "key", ")", "return", "self", ".", "_encode_invoke_on_key", "(", "multi_map_value_count_codec", ",", "key_data", ",", "key", "=", "key_data", ",", "thread_id", "=", "thread_id", "(", ")", ")" ]
Returns the number of values that match the given key in the multimap. **Warning: This method uses __hash__ and __eq__ methods of binary form of the key, not the actual implementations of __hash__ and __eq__ defined in key's class.** :param key: (object), the key whose values count is to be returned. :return: (int), the number of values that match the given key in the multimap.
[ "Returns", "the", "number", "of", "values", "that", "match", "the", "given", "key", "in", "the", "multimap", "." ]
python
train
SUNCAT-Center/CatHub
cathub/cathubsqlite.py
https://github.com/SUNCAT-Center/CatHub/blob/324625d1d8e740673f139658b2de4c9e1059739e/cathub/cathubsqlite.py#L353-L372
def get_last_id(self, cur, table='reaction'): """ Get the id of the last written row in table Parameters ---------- cur: database connection().cursor() object table: str 'reaction', 'publication', 'publication_system', 'reaction_system' Returns: id """ cur.execute("SELECT seq FROM sqlite_sequence WHERE name='{0}'" .format(table)) result = cur.fetchone() if result is not None: id = result[0] else: id = 0 return id
[ "def", "get_last_id", "(", "self", ",", "cur", ",", "table", "=", "'reaction'", ")", ":", "cur", ".", "execute", "(", "\"SELECT seq FROM sqlite_sequence WHERE name='{0}'\"", ".", "format", "(", "table", ")", ")", "result", "=", "cur", ".", "fetchone", "(", ")", "if", "result", "is", "not", "None", ":", "id", "=", "result", "[", "0", "]", "else", ":", "id", "=", "0", "return", "id" ]
Get the id of the last written row in table Parameters ---------- cur: database connection().cursor() object table: str 'reaction', 'publication', 'publication_system', 'reaction_system' Returns: id
[ "Get", "the", "id", "of", "the", "last", "written", "row", "in", "table" ]
python
train
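A self-contained illustration of the sqlite_sequence lookup used by get_last_id; SQLite only maintains that table for AUTOINCREMENT columns, which is assumed here:

import sqlite3

con = sqlite3.connect(':memory:')
cur = con.cursor()
cur.execute('CREATE TABLE reaction (id INTEGER PRIMARY KEY AUTOINCREMENT, name TEXT)')
cur.execute("INSERT INTO reaction (name) VALUES ('a')")
cur.execute("INSERT INTO reaction (name) VALUES ('b')")

cur.execute("SELECT seq FROM sqlite_sequence WHERE name='reaction'")
row = cur.fetchone()
print(row[0] if row is not None else 0)  # -> 2, the id of the last written row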
fishtown-analytics/dbt
core/dbt/adapters/cache.py
https://github.com/fishtown-analytics/dbt/blob/aa4f771df28b307af0cf9fe2fc24432f10a8236b/core/dbt/adapters/cache.py#L116-L135
def rename(self, new_relation): """Rename this cached relation to new_relation. Note that this will change the output of key(), all refs must be updated! :param _CachedRelation new_relation: The new name to apply to the relation """ # Relations store this stuff inside their `path` dict. But they # also store a table_name, and usually use it in their .render(), # so we need to update that as well. It doesn't appear that # table_name is ever anything but the identifier (via .create()) self.inner = self.inner.incorporate( path={ 'database': new_relation.inner.database, 'schema': new_relation.inner.schema, 'identifier': new_relation.inner.identifier }, table_name=new_relation.inner.identifier )
[ "def", "rename", "(", "self", ",", "new_relation", ")", ":", "# Relations store this stuff inside their `path` dict. But they", "# also store a table_name, and usually use it in their .render(),", "# so we need to update that as well. It doesn't appear that", "# table_name is ever anything but the identifier (via .create())", "self", ".", "inner", "=", "self", ".", "inner", ".", "incorporate", "(", "path", "=", "{", "'database'", ":", "new_relation", ".", "inner", ".", "database", ",", "'schema'", ":", "new_relation", ".", "inner", ".", "schema", ",", "'identifier'", ":", "new_relation", ".", "inner", ".", "identifier", "}", ",", "table_name", "=", "new_relation", ".", "inner", ".", "identifier", ")" ]
Rename this cached relation to new_relation. Note that this will change the output of key(), all refs must be updated! :param _CachedRelation new_relation: The new name to apply to the relation
[ "Rename", "this", "cached", "relation", "to", "new_relation", ".", "Note", "that", "this", "will", "change", "the", "output", "of", "key", "()", "all", "refs", "must", "be", "updated!" ]
python
train
dcaune/perseus-lib-python-common
exifread/utils.py
https://github.com/dcaune/perseus-lib-python-common/blob/ba48fe0fd9bb4a75b53e7d10c41ada36a72d4496/exifread/utils.py#L12-L27
def make_string(seq): """ Don't throw an exception when given an out of range character. """ string = '' for c in seq: # Screen out non-printing characters try: if 32 <= c < 256: string += chr(c) except TypeError: pass # If no printing chars if not string: return str(seq) return string
[ "def", "make_string", "(", "seq", ")", ":", "string", "=", "''", "for", "c", "in", "seq", ":", "# Screen out non-printing characters", "try", ":", "if", "32", "<=", "c", "and", "c", "<", "256", ":", "string", "+=", "chr", "(", "c", ")", "except", "TypeError", ":", "pass", "# If no printing chars", "if", "not", "string", ":", "return", "str", "(", "seq", ")", "return", "string" ]
Don't throw an exception when given an out of range character.
[ "Don", "t", "throw", "an", "exception", "when", "given", "an", "out", "of", "range", "character", "." ]
python
train
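Two quick checks for make_string, assuming the function from the record above is in scope:

print(make_string([72, 101, 108, 108, 111]))  # -> 'Hello'
print(make_string([7, 8, 9]))                 # no printable chars -> '[7, 8, 9]'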
openthread/openthread
tools/harness-thci/OpenThread_WpanCtl.py
https://github.com/openthread/openthread/blob/0208d10563aa21c518092985c78ecf9cd223ab74/tools/harness-thci/OpenThread_WpanCtl.py#L2238-L2269
def MGMT_ACTIVE_GET(self, Addr='', TLVs=[]): """send MGMT_ACTIVE_GET command Returns: True: successful to send MGMT_ACTIVE_GET False: fail to send MGMT_ACTIVE_GET """ print('%s call MGMT_ACTIVE_GET' % self.port) try: cmd = WPANCTL_CMD + 'dataset mgmt-get-active' if len(TLVs) != 0: tlvs = "".join(hex(tlv).lstrip("0x").zfill(2) for tlv in TLVs) setTLVCmd = WPANCTL_CMD + 'setprop Dataset:RawTlvs ' + tlvs if self.__sendCommand(setTLVCmd)[0] == 'Fail': return False else: if self.__sendCommand(WPANCTL_CMD + 'dataset erase')[0] == 'Fail': return False if Addr != '': setAddressCmd = WPANCTL_CMD + 'setprop Dataset:DestIpAddress ' + Addr if self.__sendCommand(setAddressCmd)[0] == 'Fail': return False print(cmd) return self.__sendCommand(cmd)[0] != 'Fail' except Exception as e: ModuleHelper.WriteIntoDebugLogger('MGMT_ACTIVE_GET() Error: ' + str(e))
[ "def", "MGMT_ACTIVE_GET", "(", "self", ",", "Addr", "=", "''", ",", "TLVs", "=", "[", "]", ")", ":", "print", "'%s call MGMT_ACTIVE_GET'", "%", "self", ".", "port", "try", ":", "cmd", "=", "WPANCTL_CMD", "+", "'dataset mgmt-get-active'", "if", "len", "(", "TLVs", ")", "!=", "0", ":", "tlvs", "=", "\"\"", ".", "join", "(", "hex", "(", "tlv", ")", ".", "lstrip", "(", "\"0x\"", ")", ".", "zfill", "(", "2", ")", "for", "tlv", "in", "TLVs", ")", "setTLVCmd", "=", "WPANCTL_CMD", "+", "'setprop Dataset:RawTlvs '", "+", "tlvs", "if", "self", ".", "__sendCommand", "(", "setTLVCmd", ")", "[", "0", "]", "==", "'Fail'", ":", "return", "False", "else", ":", "if", "self", ".", "__sendCommand", "(", "WPANCTL_CMD", "+", "'dataset erase'", ")", "[", "0", "]", "==", "'Fail'", ":", "return", "False", "if", "Addr", "!=", "''", ":", "setAddressCmd", "=", "WPANCTL_CMD", "+", "'setprop Dataset:DestIpAddress '", "+", "Addr", "if", "self", ".", "__sendCommand", "(", "setAddressCmd", ")", "[", "0", "]", "==", "'Fail'", ":", "return", "False", "print", "cmd", "return", "self", ".", "__sendCommand", "(", "cmd", ")", "[", "0", "]", "!=", "'Fail'", "except", "Exception", ",", "e", ":", "ModuleHelper", ".", "WriteIntoDebugLogger", "(", "'MGMT_ACTIVE_GET() Error: '", "+", "str", "(", "e", ")", ")" ]
send MGMT_ACTIVE_GET command Returns: True: successful to send MGMT_ACTIVE_GET False: fail to send MGMT_ACTIVE_GET
[ "send", "MGMT_ACTIVE_GET", "command" ]
python
train
saltstack/salt
salt/cloud/clouds/ec2.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/cloud/clouds/ec2.py#L4696-L4736
def copy_snapshot(kwargs=None, call=None): ''' Copy a snapshot ''' if call != 'function': log.error( 'The copy_snapshot function must be called with -f or --function.' ) return False if 'source_region' not in kwargs: log.error('A source_region must be specified to copy a snapshot.') return False if 'source_snapshot_id' not in kwargs: log.error('A source_snapshot_id must be specified to copy a snapshot.') return False if 'description' not in kwargs: kwargs['description'] = '' params = {'Action': 'CopySnapshot'} if 'source_region' in kwargs: params['SourceRegion'] = kwargs['source_region'] if 'source_snapshot_id' in kwargs: params['SourceSnapshotId'] = kwargs['source_snapshot_id'] if 'description' in kwargs: params['Description'] = kwargs['description'] log.debug(params) data = aws.query(params, return_url=True, location=get_location(), provider=get_provider(), opts=__opts__, sigver='4') return data
[ "def", "copy_snapshot", "(", "kwargs", "=", "None", ",", "call", "=", "None", ")", ":", "if", "call", "!=", "'function'", ":", "log", ".", "error", "(", "'The copy_snapshot function must be called with -f or --function.'", ")", "return", "False", "if", "'source_region'", "not", "in", "kwargs", ":", "log", ".", "error", "(", "'A source_region must be specified to copy a snapshot.'", ")", "return", "False", "if", "'source_snapshot_id'", "not", "in", "kwargs", ":", "log", ".", "error", "(", "'A source_snapshot_id must be specified to copy a snapshot.'", ")", "return", "False", "if", "'description'", "not", "in", "kwargs", ":", "kwargs", "[", "'description'", "]", "=", "''", "params", "=", "{", "'Action'", ":", "'CopySnapshot'", "}", "if", "'source_region'", "in", "kwargs", ":", "params", "[", "'SourceRegion'", "]", "=", "kwargs", "[", "'source_region'", "]", "if", "'source_snapshot_id'", "in", "kwargs", ":", "params", "[", "'SourceSnapshotId'", "]", "=", "kwargs", "[", "'source_snapshot_id'", "]", "if", "'description'", "in", "kwargs", ":", "params", "[", "'Description'", "]", "=", "kwargs", "[", "'description'", "]", "log", ".", "debug", "(", "params", ")", "data", "=", "aws", ".", "query", "(", "params", ",", "return_url", "=", "True", ",", "location", "=", "get_location", "(", ")", ",", "provider", "=", "get_provider", "(", ")", ",", "opts", "=", "__opts__", ",", "sigver", "=", "'4'", ")", "return", "data" ]
Copy a snapshot
[ "Copy", "a", "snapshot" ]
python
train
datastax/python-driver
cassandra/concurrent.py
https://github.com/datastax/python-driver/blob/30a80d0b798b1f45f8cb77163b1fa791f3e3ca29/cassandra/concurrent.py#L226-L238
def execute_concurrent_with_args(session, statement, parameters, *args, **kwargs): """ Like :meth:`~cassandra.concurrent.execute_concurrent()`, but takes a single statement and a sequence of parameters. Each item in ``parameters`` should be a sequence or :const:`None`. Example usage:: statement = session.prepare("INSERT INTO mytable (a, b) VALUES (1, ?)") parameters = [(x,) for x in range(1000)] execute_concurrent_with_args(session, statement, parameters, concurrency=50) """ return execute_concurrent(session, zip(cycle((statement,)), parameters), *args, **kwargs)
[ "def", "execute_concurrent_with_args", "(", "session", ",", "statement", ",", "parameters", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "return", "execute_concurrent", "(", "session", ",", "zip", "(", "cycle", "(", "(", "statement", ",", ")", ")", ",", "parameters", ")", ",", "*", "args", ",", "*", "*", "kwargs", ")" ]
Like :meth:`~cassandra.concurrent.execute_concurrent()`, but takes a single statement and a sequence of parameters. Each item in ``parameters`` should be a sequence or :const:`None`. Example usage:: statement = session.prepare("INSERT INTO mytable (a, b) VALUES (1, ?)") parameters = [(x,) for x in range(1000)] execute_concurrent_with_args(session, statement, parameters, concurrency=50)
[ "Like", ":", "meth", ":", "~cassandra", ".", "concurrent", ".", "execute_concurrent", "()", "but", "takes", "a", "single", "statement", "and", "a", "sequence", "of", "parameters", ".", "Each", "item", "in", "parameters", "should", "be", "a", "sequence", "or", ":", "const", ":", "None", "." ]
python
train
sods/ods
pods/datasets.py
https://github.com/sods/ods/blob/3995c659f25a0a640f6009ed7fcc2559ce659b1d/pods/datasets.py#L1122-L1155
def airline_delay(data_set='airline_delay', num_train=700000, num_test=100000, seed=default_seed): """Airline delay data used in Gaussian Processes for Big Data by Hensman, Fusi and Lawrence""" if not data_available(data_set): download_data(data_set) dir_path = os.path.join(data_path, data_set) filename = os.path.join(dir_path, 'filtered_data.pickle') # 1. Load the dataset import pandas as pd data = pd.read_pickle(filename) # WARNING: removing year data.pop('Year') # Get data matrices Yall = data.pop('ArrDelay').values[:,None] Xall = data.values # Subset the data (memory!!) all_data = num_train+num_test Xall = Xall[:all_data] Yall = Yall[:all_data] # Get testing points np.random.seed(seed=seed) N_shuffled = permute(Yall.shape[0]) train, test = N_shuffled[num_test:], N_shuffled[:num_test] X, Y = Xall[train], Yall[train] Xtest, Ytest = Xall[test], Yall[test] covariates = ['month', 'day of month', 'day of week', 'departure time', 'arrival time', 'air time', 'distance to travel', 'age of aircraft / years'] response = ['delay'] return data_details_return({'X': X, 'Y': Y, 'Xtest': Xtest, 'Ytest': Ytest, 'seed' : seed, 'info': "Airline delay data used for demonstrating Gaussian processes for big data.", 'covariates': covariates, 'response': response}, data_set)
[ "def", "airline_delay", "(", "data_set", "=", "'airline_delay'", ",", "num_train", "=", "700000", ",", "num_test", "=", "100000", ",", "seed", "=", "default_seed", ")", ":", "if", "not", "data_available", "(", "data_set", ")", ":", "download_data", "(", "data_set", ")", "dir_path", "=", "os", ".", "path", ".", "join", "(", "data_path", ",", "data_set", ")", "filename", "=", "os", ".", "path", ".", "join", "(", "dir_path", ",", "'filtered_data.pickle'", ")", "# 1. Load the dataset", "import", "pandas", "as", "pd", "data", "=", "pd", ".", "read_pickle", "(", "filename", ")", "# WARNING: removing year", "data", ".", "pop", "(", "'Year'", ")", "# Get data matrices", "Yall", "=", "data", ".", "pop", "(", "'ArrDelay'", ")", ".", "values", "[", ":", ",", "None", "]", "Xall", "=", "data", ".", "values", "# Subset the data (memory!!)", "all_data", "=", "num_train", "+", "num_test", "Xall", "=", "Xall", "[", ":", "all_data", "]", "Yall", "=", "Yall", "[", ":", "all_data", "]", "# Get testing points", "np", ".", "random", ".", "seed", "(", "seed", "=", "seed", ")", "N_shuffled", "=", "permute", "(", "Yall", ".", "shape", "[", "0", "]", ")", "train", ",", "test", "=", "N_shuffled", "[", "num_test", ":", "]", ",", "N_shuffled", "[", ":", "num_test", "]", "X", ",", "Y", "=", "Xall", "[", "train", "]", ",", "Yall", "[", "train", "]", "Xtest", ",", "Ytest", "=", "Xall", "[", "test", "]", ",", "Yall", "[", "test", "]", "covariates", "=", "[", "'month'", ",", "'day of month'", ",", "'day of week'", ",", "'departure time'", ",", "'arrival time'", ",", "'air time'", ",", "'distance to travel'", ",", "'age of aircraft / years'", "]", "response", "=", "[", "'delay'", "]", "return", "data_details_return", "(", "{", "'X'", ":", "X", ",", "'Y'", ":", "Y", ",", "'Xtest'", ":", "Xtest", ",", "'Ytest'", ":", "Ytest", ",", "'seed'", ":", "seed", ",", "'info'", ":", "\"Airline delay data used for demonstrating Gaussian processes for big data.\"", ",", "'covariates'", ":", "covariates", ",", "'response'", ":", "response", "}", ",", "data_set", ")" ]
Airline delay data used in Gaussian Processes for Big Data by Hensman, Fusi and Lawrence
[ "Airline", "delay", "data", "used", "in", "Gaussian", "Processes", "for", "Big", "Data", "by", "Hensman", "Fusi", "and", "Lawrence" ]
python
train
Opentrons/opentrons
api/src/opentrons/drivers/smoothie_drivers/driver_3_0.py
https://github.com/Opentrons/opentrons/blob/a7c15cc2636ecb64ab56c7edc1d8a57163aaeadf/api/src/opentrons/drivers/smoothie_drivers/driver_3_0.py#L501-L511
def position(self): """ Instead of sending M114.2 we are storing target values in self._position since movement and home commands are blocking and assumed to go the correct place. Cases where Smoothie would not be in the correct place (such as if a belt slips) would not be corrected by getting position with M114.2 because Smoothie would also not be aware of slippage. """ return {k.upper(): v for k, v in self._position.items()}
[ "def", "position", "(", "self", ")", ":", "return", "{", "k", ".", "upper", "(", ")", ":", "v", "for", "k", ",", "v", "in", "self", ".", "_position", ".", "items", "(", ")", "}" ]
Instead of sending M114.2 we are storing target values in self._position since movement and home commands are blocking and assumed to go the correct place. Cases where Smoothie would not be in the correct place (such as if a belt slips) would not be corrected by getting position with M114.2 because Smoothie would also not be aware of slippage.
[ "Instead", "of", "sending", "M114", ".", "2", "we", "are", "storing", "target", "values", "in", "self", ".", "_position", "since", "movement", "and", "home", "commands", "are", "blocking", "and", "assumed", "to", "go", "the", "correct", "place", "." ]
python
train
pyviz/holoviews
holoviews/plotting/bokeh/styles.py
https://github.com/pyviz/holoviews/blob/ae0dd2f3de448b0ca5e9065aabd6ef8d84c7e655/holoviews/plotting/bokeh/styles.py#L115-L140
def validate(style, value, scalar=False): """ Validates a style and associated value. Arguments --------- style: str The style to validate (e.g. 'color', 'size' or 'marker') value: The style value to validate scalar: bool Returns ------- valid: boolean or None If validation is supported returns boolean, otherwise None """ validator = get_validator(style) if validator is None: return None if isinstance(value, (np.ndarray, list)): if scalar: return False return all(validator(v) for v in value) return validator(value)
[ "def", "validate", "(", "style", ",", "value", ",", "scalar", "=", "False", ")", ":", "validator", "=", "get_validator", "(", "style", ")", "if", "validator", "is", "None", ":", "return", "None", "if", "isinstance", "(", "value", ",", "(", "np", ".", "ndarray", ",", "list", ")", ")", ":", "if", "scalar", ":", "return", "False", "return", "all", "(", "validator", "(", "v", ")", "for", "v", "in", "value", ")", "return", "validator", "(", "value", ")" ]
Validates a style and associated value. Arguments --------- style: str The style to validate (e.g. 'color', 'size' or 'marker') value: The style value to validate scalar: bool Returns ------- valid: boolean or None If validation is supported returns boolean, otherwise None
[ "Validates", "a", "style", "and", "associated", "value", "." ]
python
train
vertexproject/synapse
synapse/lib/msgpack.py
https://github.com/vertexproject/synapse/blob/22e67c5a8f6d7caddbcf34b39ab1bd2d6c4a6e0b/synapse/lib/msgpack.py#L168-L180
def dumpfile(item, path): ''' Dump an object to a file by path. Args: item (object): The object to serialize. path (str): The file path to save. Returns: None ''' with io.open(path, 'wb') as fd: fd.write(en(item))
[ "def", "dumpfile", "(", "item", ",", "path", ")", ":", "with", "io", ".", "open", "(", "path", ",", "'wb'", ")", "as", "fd", ":", "fd", ".", "write", "(", "en", "(", "item", ")", ")" ]
Dump an object to a file by path. Args: item (object): The object to serialize. path (str): The file path to save. Returns: None
[ "Dump", "an", "object", "to", "a", "file", "by", "path", "." ]
python
train
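A roundtrip sketch of the same dump-to-file idea using the msgpack package directly, since `en` is synapse's internal encoder (msgpack-python is assumed to be installed):

import io
import msgpack

def dumpfile(item, path):
    # Serialize with msgpack and write the bytes to disk.
    with io.open(path, 'wb') as fd:
        fd.write(msgpack.packb(item, use_bin_type=True))

dumpfile({'foo': 'bar'}, '/tmp/item.mpk')
with open('/tmp/item.mpk', 'rb') as fd:
    print(msgpack.unpackb(fd.read(), raw=False))  # -> {'foo': 'bar'}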
wbond/asn1crypto
asn1crypto/core.py
https://github.com/wbond/asn1crypto/blob/ecda20176f55d37021cbca1f6da9083a8e491197/asn1crypto/core.py#L4224-L4258
def _parse_children(self, recurse=False): """ Parses the contents and generates Asn1Value objects based on the definitions from _child_spec. :param recurse: If child objects that are Sequence or SequenceOf objects should be recursively parsed :raises: ValueError - when an error occurs parsing child objects """ try: self.children = [] if self._contents is None: return contents_length = len(self._contents) child_pointer = 0 while child_pointer < contents_length: parts, child_pointer = _parse(self._contents, contents_length, pointer=child_pointer) if self._child_spec: child = parts + (self._child_spec,) else: child = parts if recurse: child = _build(*child) if isinstance(child, (Sequence, SequenceOf)): child._parse_children(recurse=True) self.children.append(child) except (ValueError, TypeError) as e: self.children = None args = e.args[1:] e.args = (e.args[0] + '\n while parsing %s' % type_name(self),) + args raise e
[ "def", "_parse_children", "(", "self", ",", "recurse", "=", "False", ")", ":", "try", ":", "self", ".", "children", "=", "[", "]", "if", "self", ".", "_contents", "is", "None", ":", "return", "contents_length", "=", "len", "(", "self", ".", "_contents", ")", "child_pointer", "=", "0", "while", "child_pointer", "<", "contents_length", ":", "parts", ",", "child_pointer", "=", "_parse", "(", "self", ".", "_contents", ",", "contents_length", ",", "pointer", "=", "child_pointer", ")", "if", "self", ".", "_child_spec", ":", "child", "=", "parts", "+", "(", "self", ".", "_child_spec", ",", ")", "else", ":", "child", "=", "parts", "if", "recurse", ":", "child", "=", "_build", "(", "*", "child", ")", "if", "isinstance", "(", "child", ",", "(", "Sequence", ",", "SequenceOf", ")", ")", ":", "child", ".", "_parse_children", "(", "recurse", "=", "True", ")", "self", ".", "children", ".", "append", "(", "child", ")", "except", "(", "ValueError", ",", "TypeError", ")", "as", "e", ":", "self", ".", "children", "=", "None", "args", "=", "e", ".", "args", "[", "1", ":", "]", "e", ".", "args", "=", "(", "e", ".", "args", "[", "0", "]", "+", "'\\n while parsing %s'", "%", "type_name", "(", "self", ")", ",", ")", "+", "args", "raise", "e" ]
Parses the contents and generates Asn1Value objects based on the definitions from _child_spec. :param recurse: If child objects that are Sequence or SequenceOf objects should be recursively parsed :raises: ValueError - when an error occurs parsing child objects
[ "Parses", "the", "contents", "and", "generates", "Asn1Value", "objects", "based", "on", "the", "definitions", "from", "_child_spec", "." ]
python
train
openstack/hacking
hacking/checks/docstrings.py
https://github.com/openstack/hacking/blob/10e58f907181cac91d3b2af422c2458b04a1ec79/hacking/checks/docstrings.py#L103-L125
def hacking_docstring_summary(physical_line, previous_logical, tokens): r"""Check multi line docstring summary is separated with empty line. OpenStack HACKING guide recommendation for docstring: Docstring should start with a one-line summary, less than 80 characters. Okay: def foo():\n a = '''\nnot\na docstring\n''' Okay: '''foobar\n\nfoo\nbar\n''' H405: def foo():\n '''foobar\nfoo\nbar\n''' H405: def foo():\n r'''foobar\nfoo\nbar\n''' H405: def foo():\n '''foobar\n''' """ docstring = is_docstring(tokens, previous_logical) if docstring: if '\n' not in docstring: # not a multi line docstring return lines = docstring.split('\n') if len(lines) > 1 and len(lines[1].strip()) != 0: # docstrings get tokenized on the last line of the docstring, so # we don't know the exact position. return (0, "H405: multi line docstring " "summary not separated with an empty line")
[ "def", "hacking_docstring_summary", "(", "physical_line", ",", "previous_logical", ",", "tokens", ")", ":", "docstring", "=", "is_docstring", "(", "tokens", ",", "previous_logical", ")", "if", "docstring", ":", "if", "'\\n'", "not", "in", "docstring", ":", "# not a multi line docstring", "return", "lines", "=", "docstring", ".", "split", "(", "'\\n'", ")", "if", "len", "(", "lines", ")", ">", "1", "and", "len", "(", "lines", "[", "1", "]", ".", "strip", "(", ")", ")", "is", "not", "0", ":", "# docstrings get tokenized on the last line of the docstring, so", "# we don't know the exact position.", "return", "(", "0", ",", "\"H405: multi line docstring \"", "\"summary not separated with an empty line\"", ")" ]
r"""Check multi line docstring summary is separated with empty line. OpenStack HACKING guide recommendation for docstring: Docstring should start with a one-line summary, less than 80 characters. Okay: def foo():\n a = '''\nnot\na docstring\n''' Okay: '''foobar\n\nfoo\nbar\n''' H405: def foo():\n '''foobar\nfoo\nbar\n''' H405: def foo():\n r'''foobar\nfoo\nbar\n''' H405: def foo():\n '''foobar\n'''
[ "r", "Check", "multi", "line", "docstring", "summary", "is", "separated", "with", "empty", "line", "." ]
python
train
openstack/proliantutils
proliantutils/redfish/redfish.py
https://github.com/openstack/proliantutils/blob/86ef3b47b4eca97c221577e3570b0240d6a25f22/proliantutils/redfish/redfish.py#L1042-L1053
def get_host_post_state(self): """Get the current state of system POST. Retrieves current state of system POST. :returns: POST state of the server. The valid states are: null, Unknown, Reset, PowerOff, InPost, InPostDiscoveryComplete and FinishedPost. :raises: IloError, on an error from iLO """ sushy_system = self._get_sushy_system(PROLIANT_SYSTEM_ID) return GET_POST_STATE_MAP.get(sushy_system.post_state)
[ "def", "get_host_post_state", "(", "self", ")", ":", "sushy_system", "=", "self", ".", "_get_sushy_system", "(", "PROLIANT_SYSTEM_ID", ")", "return", "GET_POST_STATE_MAP", ".", "get", "(", "sushy_system", ".", "post_state", ")" ]
Get the current state of system POST. Retrieves current state of system POST. :returns: POST state of the server. The valid states are: null, Unknown, Reset, PowerOff, InPost, InPostDiscoveryComplete and FinishedPost. :raises: IloError, on an error from iLO
[ "Get", "the", "current", "state", "of", "system", "POST", "." ]
python
train
PmagPy/PmagPy
pmagpy/pmagplotlib.py
https://github.com/PmagPy/PmagPy/blob/c7984f8809bf40fe112e53dcc311a33293b62d0b/pmagpy/pmagplotlib.py#L3674-L3722
def plot_ts(ax, agemin, agemax, timescale='gts12', ylabel="Age (Ma)"): """ Make a time scale plot between specified ages. Parameters: ------------ ax : figure object agemin : Minimum age for timescale agemax : Maximum age for timescale timescale : Time Scale [ default is Gradstein et al., (2012)] for other options see pmag.get_ts() ylabel : if set, plot as ylabel """ ax.set_title(timescale.upper()) ax.axis([-.25, 1.5, agemax, agemin]) ax.axes.get_xaxis().set_visible(False) # get dates and chron names for timescale TS, Chrons = pmag.get_ts(timescale) X, Y, Y2 = [0, 1], [], [] cnt = 0 if agemin < TS[1]: # in the Brunhes Y = [agemin, agemin] # minimum age Y1 = [TS[1], TS[1]] # age of the B/M boundary ax.fill_between(X, Y, Y1, facecolor='black') # color in Brunhes, black for d in TS[1:]: pol = cnt % 2 cnt += 1 if d <= agemax and d >= agemin: ind = TS.index(d) Y = [TS[ind], TS[ind]] Y1 = [TS[ind+1], TS[ind+1]] if pol: # fill in every other time ax.fill_between(X, Y, Y1, facecolor='black') ax.plot([0, 1, 1, 0, 0], [agemin, agemin, agemax, agemax, agemin], 'k-') plt.yticks(np.arange(agemin, agemax+1, 1)) if ylabel != "": ax.set_ylabel(ylabel) ax2 = ax.twinx() ax2.axis('off') for k in range(len(Chrons)-1): c = Chrons[k] cnext = Chrons[k+1] d = cnext[1]-(cnext[1]-c[1])/3. if d >= agemin and d < agemax: # make the Chron boundary tick ax2.plot([1, 1.5], [c[1], c[1]], 'k-') ax2.text(1.05, d, c[0]) ax2.axis([-.25, 1.5, agemax, agemin])
[ "def", "plot_ts", "(", "ax", ",", "agemin", ",", "agemax", ",", "timescale", "=", "'gts12'", ",", "ylabel", "=", "\"Age (Ma)\"", ")", ":", "ax", ".", "set_title", "(", "timescale", ".", "upper", "(", ")", ")", "ax", ".", "axis", "(", "[", "-", ".25", ",", "1.5", ",", "agemax", ",", "agemin", "]", ")", "ax", ".", "axes", ".", "get_xaxis", "(", ")", ".", "set_visible", "(", "False", ")", "# get dates and chron names for timescale", "TS", ",", "Chrons", "=", "pmag", ".", "get_ts", "(", "timescale", ")", "X", ",", "Y", ",", "Y2", "=", "[", "0", ",", "1", "]", ",", "[", "]", ",", "[", "]", "cnt", "=", "0", "if", "agemin", "<", "TS", "[", "1", "]", ":", "# in the Brunhes", "Y", "=", "[", "agemin", ",", "agemin", "]", "# minimum age", "Y1", "=", "[", "TS", "[", "1", "]", ",", "TS", "[", "1", "]", "]", "# age of the B/M boundary", "ax", ".", "fill_between", "(", "X", ",", "Y", ",", "Y1", ",", "facecolor", "=", "'black'", ")", "# color in Brunhes, black", "for", "d", "in", "TS", "[", "1", ":", "]", ":", "pol", "=", "cnt", "%", "2", "cnt", "+=", "1", "if", "d", "<=", "agemax", "and", "d", ">=", "agemin", ":", "ind", "=", "TS", ".", "index", "(", "d", ")", "Y", "=", "[", "TS", "[", "ind", "]", ",", "TS", "[", "ind", "]", "]", "Y1", "=", "[", "TS", "[", "ind", "+", "1", "]", ",", "TS", "[", "ind", "+", "1", "]", "]", "if", "pol", ":", "# fill in every other time", "ax", ".", "fill_between", "(", "X", ",", "Y", ",", "Y1", ",", "facecolor", "=", "'black'", ")", "ax", ".", "plot", "(", "[", "0", ",", "1", ",", "1", ",", "0", ",", "0", "]", ",", "[", "agemin", ",", "agemin", ",", "agemax", ",", "agemax", ",", "agemin", "]", ",", "'k-'", ")", "plt", ".", "yticks", "(", "np", ".", "arange", "(", "agemin", ",", "agemax", "+", "1", ",", "1", ")", ")", "if", "ylabel", "!=", "\"\"", ":", "ax", ".", "set_ylabel", "(", "ylabel", ")", "ax2", "=", "ax", ".", "twinx", "(", ")", "ax2", ".", "axis", "(", "'off'", ")", "for", "k", "in", "range", "(", "len", "(", "Chrons", ")", "-", "1", ")", ":", "c", "=", "Chrons", "[", "k", "]", "cnext", "=", "Chrons", "[", "k", "+", "1", "]", "d", "=", "cnext", "[", "1", "]", "-", "(", "cnext", "[", "1", "]", "-", "c", "[", "1", "]", ")", "/", "3.", "if", "d", ">=", "agemin", "and", "d", "<", "agemax", ":", "# make the Chron boundary tick", "ax2", ".", "plot", "(", "[", "1", ",", "1.5", "]", ",", "[", "c", "[", "1", "]", ",", "c", "[", "1", "]", "]", ",", "'k-'", ")", "ax2", ".", "text", "(", "1.05", ",", "d", ",", "c", "[", "0", "]", ")", "ax2", ".", "axis", "(", "[", "-", ".25", ",", "1.5", ",", "agemax", ",", "agemin", "]", ")" ]
Make a time scale plot between specified ages. Parameters: ------------ ax : figure object agemin : Minimum age for timescale agemax : Maximum age for timescale timescale : Time Scale [ default is Gradstein et al., (2012)] for other options see pmag.get_ts() ylabel : if set, plot as ylabel
[ "Make", "a", "time", "scale", "plot", "between", "specified", "ages", "." ]
python
train
pgmpy/pgmpy
pgmpy/models/MarkovChain.py
https://github.com/pgmpy/pgmpy/blob/9381a66aba3c3871d3ccd00672b148d17d63239e/pgmpy/models/MarkovChain.py#L440-L469
def copy(self): """ Returns a copy of Markov Chain Model. Return Type: ------------ MarkovChain : Copy of MarkovChain. Examples: --------- >>> from pgmpy.models import MarkovChain >>> from pgmpy.factors.discrete import State >>> model = MarkovChain() >>> model.add_variables_from(['intel', 'diff'], [3, 2]) >>> intel_tm = {0: {0: 0.2, 1: 0.4, 2: 0.4}, 1: {0: 0, 1: 0.5, 2: 0.5}, 2: {0: 0.3, 1: 0.3, 2: 0.4}} >>> model.add_transition_model('intel', intel_tm) >>> diff_tm = {0: {0: 0.5, 1: 0.5}, 1: {0: 0.25, 1: 0.75}} >>> model.add_transition_model('diff', diff_tm) >>> model.set_start_state([State('intel', 0), State('diff', 1)]) >>> model_copy = model.copy() >>> model_copy.transition_models {'intel': {0: {0: 0.2, 1: 0.4, 2: 0.4}, 1: {0: 0, 1: 0.5, 2: 0.5}, 2: {0: 0.3, 1: 0.3, 2: 0.4}}, 'diff': {0: {0: 0.5, 1: 0.5}, 1: {0: 0.25, 1: 0.75}}} """ markovchain_copy = MarkovChain(variables=list(self.cardinalities.keys()), card=list(self.cardinalities.values()), start_state=self.state) if self.transition_models: markovchain_copy.transition_models = self.transition_models.copy() return markovchain_copy
[ "def", "copy", "(", "self", ")", ":", "markovchain_copy", "=", "MarkovChain", "(", "variables", "=", "list", "(", "self", ".", "cardinalities", ".", "keys", "(", ")", ")", ",", "card", "=", "list", "(", "self", ".", "cardinalities", ".", "values", "(", ")", ")", ",", "start_state", "=", "self", ".", "state", ")", "if", "self", ".", "transition_models", ":", "markovchain_copy", ".", "transition_models", "=", "self", ".", "transition_models", ".", "copy", "(", ")", "return", "markovchain_copy" ]
Returns a copy of Markov Chain Model. Return Type: ------------ MarkovChain : Copy of MarkovChain. Examples: --------- >>> from pgmpy.models import MarkovChain >>> from pgmpy.factors.discrete import State >>> model = MarkovChain() >>> model.add_variables_from(['intel', 'diff'], [3, 2]) >>> intel_tm = {0: {0: 0.2, 1: 0.4, 2:0.4}, 1: {0: 0, 1: 0.5, 2: 0.5}, 2: {0: 0.3, 1: 0.3, 2: 0.4}} >>> model.add_transition_model('intel', intel_tm) >>> diff_tm = {0: {0: 0.5, 1: 0.5}, 1: {0: 0.25, 1:0.75}} >>> model.add_transition_model('diff', diff_tm) >>> model.set_start_state([State('intel', 0), State('diff', 2)]) >>> model_copy = model.copy() >>> model_copy.transition_models >>> {'diff': {0: {0: 0.5, 1: 0.5}, 1: {0: 0.25, 1: 0.75}}, 'intel': {0: {0: 0.2, 1: 0.4, 2: 0.4}, 1: {0: 0, 1: 0.5, 2: 0.5}, 2: {0: 0.3, 1: 0.3, 2: 0.4}}}
[ "Returns", "a", "copy", "of", "Markov", "Chain", "Model", "." ]
python
train
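Worth noting about the record above: dict.copy() is shallow, so the per-variable transition dictionaries are shared between the original and the copy. A short sketch against the pgmpy API used in the docstring; the variable name 'coin' is illustrative:

from pgmpy.models import MarkovChain

model = MarkovChain()
model.add_variables_from(['coin'], [2])
model.add_transition_model('coin', {0: {0: 0.5, 1: 0.5}, 1: {0: 0.5, 1: 0.5}})
model_copy = model.copy()
# shallow copy: the inner per-variable dicts are the same objects
assert model_copy.transition_models['coin'] is model.transition_models['coin']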
marcinn/sqltemplate
sqltemplate/contrib/django/loader.py
https://github.com/marcinn/sqltemplate/blob/28d26ebcc474b8c37942218adfa9f54e89327ce0/sqltemplate/contrib/django/loader.py#L53-L62
def render_to_string(template_name, context=None, request=None, using=None): """ Loads a template and renders it with a context. Returns a string. template_name may be a string or a list of strings. """ if isinstance(template_name, (list, tuple)): template = select_template(template_name, using=using) else: template = get_template(template_name, using=using) return template.render(context, request)
[ "def", "render_to_string", "(", "template_name", ",", "context", "=", "None", ",", "request", "=", "None", ",", "using", "=", "None", ")", ":", "if", "isinstance", "(", "template_name", ",", "(", "list", ",", "tuple", ")", ")", ":", "template", "=", "select_template", "(", "template_name", ",", "using", "=", "using", ")", "else", ":", "template", "=", "get_template", "(", "template_name", ",", "using", "=", "using", ")", "return", "template", ".", "render", "(", "context", ",", "request", ")" ]
Loads a template and renders it with a context. Returns a string. template_name may be a string or a list of strings.
[ "Loads", "a", "template", "and", "renders", "it", "with", "a", "context", ".", "Returns", "a", "string", ".", "template_name", "may", "be", "a", "string", "or", "a", "list", "of", "strings", "." ]
python
train
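A hypothetical call against the wrapper above; the template names and context are made up:

sql = render_to_string('reports/top_users.sql', context={'limit': 10})
# a list of names falls through to select_template(); the first match wins
sql = render_to_string(['reports/top_users.sql', 'reports/default.sql'])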
UCBerkeleySETI/blimpy
blimpy/filterbank.py
https://github.com/UCBerkeleySETI/blimpy/blob/b8822d3e3e911944370d84371a91fa0c29e9772e/blimpy/filterbank.py#L968-L974
def calibrate_band_pass_N1(self): """ One way to calibrate the band pass is to take the median value for every frequency fine channel, and divide by it. """ band_pass = np.median(self.data.squeeze(),axis=0) self.data = self.data/band_pass
[ "def", "calibrate_band_pass_N1", "(", "self", ")", ":", "band_pass", "=", "np", ".", "median", "(", "self", ".", "data", ".", "squeeze", "(", ")", ",", "axis", "=", "0", ")", "self", ".", "data", "=", "self", ".", "data", "/", "band_pass" ]
One way to calibrate the band pass is to take the median value for every frequency fine channel, and divide by it.
[ "One", "way", "to", "calibrate", "the", "band", "pass", "is", "to", "take", "the", "median", "value", "for", "every", "frequency", "fine", "channel", "and", "divide", "by", "it", "." ]
python
test
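The calibration above is a two-line numpy operation; a self-contained sketch with a hypothetical (time, pol, channel) data cube:

import numpy as np

data = np.random.rand(16, 1, 1024)             # hypothetical filterbank block
band_pass = np.median(data.squeeze(), axis=0)  # per-channel median over time
flattened = data / band_pass                   # broadcasting scales every spectrum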
inasafe/inasafe
safe/gui/tools/wizard/step_fc00_functions1.py
https://github.com/inasafe/inasafe/blob/831d60abba919f6d481dc94a8d988cc205130724/safe/gui/tools/wizard/step_fc00_functions1.py#L159-L166
def set_widgets(self): """Set widgets on the Impact Functions Table 1 tab.""" self.tblFunctions1.horizontalHeader().setSectionResizeMode( QHeaderView.Stretch) self.tblFunctions1.verticalHeader().setSectionResizeMode( QHeaderView.Stretch) self.populate_function_table_1()
[ "def", "set_widgets", "(", "self", ")", ":", "self", ".", "tblFunctions1", ".", "horizontalHeader", "(", ")", ".", "setSectionResizeMode", "(", "QHeaderView", ".", "Stretch", ")", "self", ".", "tblFunctions1", ".", "verticalHeader", "(", ")", ".", "setSectionResizeMode", "(", "QHeaderView", ".", "Stretch", ")", "self", ".", "populate_function_table_1", "(", ")" ]
Set widgets on the Impact Functions Table 1 tab.
[ "Set", "widgets", "on", "the", "Impact", "Functions", "Table", "1", "tab", "." ]
python
train
CivicSpleen/ambry
ambry/orm/column.py
https://github.com/CivicSpleen/ambry/blob/d7f2be4bf1f7ffd086f3fadd4fcae60c32473e42/ambry/orm/column.py#L285-L295
def label(self): """Return first child of the column that is marked as a label. Returns self if the column is a label""" if self.valuetype_class.is_label(): return self for c in self.table.columns: if c.parent == self.name and c.valuetype_class.is_label(): return c return None
[ "def", "label", "(", "self", ")", ":", "if", "self", ".", "valuetype_class", ".", "is_label", "(", ")", ":", "return", "self", "for", "c", "in", "self", ".", "table", ".", "columns", ":", "if", "c", ".", "parent", "==", "self", ".", "name", "and", "c", ".", "valuetype_class", ".", "is_label", "(", ")", ":", "return", "c", "return", "None" ]
Return first child of the column that is marked as a label. Returns self if the column is a label
[ "Return", "first", "child", "of", "the", "column", "that", "is", "marked", "as", "a", "label", ".", "Returns", "self", "if", "the", "column", "is", "a", "label" ]
python
train
nmdp-bioinformatics/SeqAnn
seqann/util.py
https://github.com/nmdp-bioinformatics/SeqAnn/blob/5ce91559b0a4fbe4fb7758e034eb258202632463/seqann/util.py#L36-L45
def checkseq(sequence: str=None, code="ATGC") -> bool: """ :param sequence: The input sequence. :type sequence: Seq :rtype: bool """ for base in sequence: if base not in code: return False return True
[ "def", "checkseq", "(", "sequence", ":", "str", "=", "None", ",", "code", "=", "\"ATGC\"", ")", "->", "bool", ":", "for", "base", "in", "sequence", ":", "if", "base", "not", "in", "code", ":", "return", "False", "return", "True" ]
:param sequence: The input sequence. :type sequence: Seq :rtype: bool
[ ":", "param", "sequence", ":", "The", "input", "sequence", ".", ":", "type", "sequence", ":", "Seq", ":", "rtype", ":", "bool" ]
python
train
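Two illustrative calls against the validator above, following directly from its loop over `code`:

assert checkseq("ATGCCGTA") is True
assert checkseq("ATGXN") is False  # 'X' and 'N' are outside the default "ATGC" code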
Autodesk/aomi
aomi/cli.py
https://github.com/Autodesk/aomi/blob/84da2dfb0424837adf9c4ddc1aa352e942bb7a4a/aomi/cli.py#L370-L385
def template_runner(client, parser, args): """Executes template related operations""" if args.builtin_list: aomi.template.builtin_list() elif args.builtin_info: aomi.template.builtin_info(args.builtin_info) elif args.template and args.destination and args.vault_paths: aomi.render.template(client, args.template, args.destination, args.vault_paths, args) else: parser.print_usage() sys.exit(2) sys.exit(0)
[ "def", "template_runner", "(", "client", ",", "parser", ",", "args", ")", ":", "if", "args", ".", "builtin_list", ":", "aomi", ".", "template", ".", "builtin_list", "(", ")", "elif", "args", ".", "builtin_info", ":", "aomi", ".", "template", ".", "builtin_info", "(", "args", ".", "builtin_info", ")", "elif", "args", ".", "template", "and", "args", ".", "destination", "and", "args", ".", "vault_paths", ":", "aomi", ".", "render", ".", "template", "(", "client", ",", "args", ".", "template", ",", "args", ".", "destination", ",", "args", ".", "vault_paths", ",", "args", ")", "else", ":", "parser", ".", "print_usage", "(", ")", "sys", ".", "exit", "(", "2", ")", "sys", ".", "exit", "(", "0", ")" ]
Executes template related operations
[ "Executes", "template", "related", "operations" ]
python
train
JohnDoee/thomas
thomas/outputs/http.py
https://github.com/JohnDoee/thomas/blob/51916dd110098b189a1c2fbcb71794fd9ec94832/thomas/outputs/http.py#L316-L334
def render_GET(self, request): """ Begin sending the contents of this L{File} (or a subset of the contents, based on the 'range' header) to the given request. """ request.setHeader(b'accept-ranges', b'bytes') producer = self.makeProducer(request, self.fileObject) if request.method == b'HEAD': return b'' def done(ign): producer.stopProducing() request.notifyFinish().addCallbacks(done, done) producer.start() # and make sure the connection doesn't get closed return server.NOT_DONE_YET
[ "def", "render_GET", "(", "self", ",", "request", ")", ":", "request", ".", "setHeader", "(", "b'accept-ranges'", ",", "b'bytes'", ")", "producer", "=", "self", ".", "makeProducer", "(", "request", ",", "self", ".", "fileObject", ")", "if", "request", ".", "method", "==", "b'HEAD'", ":", "return", "b''", "def", "done", "(", "ign", ")", ":", "producer", ".", "stopProducing", "(", ")", "request", ".", "notifyFinish", "(", ")", ".", "addCallbacks", "(", "done", ",", "done", ")", "producer", ".", "start", "(", ")", "# and make sure the connection doesn't get closed", "return", "server", ".", "NOT_DONE_YET" ]
Begin sending the contents of this L{File} (or a subset of the contents, based on the 'range' header) to the given request.
[ "Begin", "sending", "the", "contents", "of", "this", "L", "{", "File", "}", "(", "or", "a", "subset", "of", "the", "contents", "based", "on", "the", "range", "header", ")", "to", "the", "given", "request", "." ]
python
train
BD2KGenomics/protect
docker/pipelineWrapper.py
https://github.com/BD2KGenomics/protect/blob/06310682c50dcf8917b912c8e551299ff7ee41ce/docker/pipelineWrapper.py#L170-L188
def get_args(self): """ Use this context manager to add arguments to an argparse object with the add_argument method. Arguments must be defined before the command is defined. Note that no-clean and resume are added upon exit and should not be added in the context manager. For more info about these default arguments see below. """ parser = argparse.ArgumentParser(description=self._desc, formatter_class=MyUniversalHelpFormatter) # default args if self._no_clean: parser.add_argument('--no-clean', action='store_true', help='If this flag is used, temporary work directory is not ' 'cleaned.') if self._resume: parser.add_argument('--resume', action='store_true', help='If this flag is used, a previously uncleaned workflow in the' ' same directory will be resumed') return parser
[ "def", "get_args", "(", "self", ")", ":", "parser", "=", "argparse", ".", "ArgumentParser", "(", "description", "=", "self", ".", "_desc", ",", "formatter_class", "=", "MyUniversalHelpFormatter", ")", "# default args", "if", "self", ".", "_no_clean", ":", "parser", ".", "add_argument", "(", "'--no-clean'", ",", "action", "=", "'store_true'", ",", "help", "=", "'If this flag is used, temporary work directory is not '", "'cleaned.'", ")", "if", "self", ".", "_resume", ":", "parser", ".", "add_argument", "(", "'--resume'", ",", "action", "=", "'store_true'", ",", "help", "=", "'If this flag is used, a previously uncleaned workflow in the'", "' same directory will be resumed'", ")", "return", "parser" ]
Use this context manager to add arguments to an argparse object with the add_argument method. Arguments must be defined before the command is defined. Note that no-clean and resume are added upon exit and should not be added in the context manager. For more info about these default arguments see below.
[ "Use", "this", "context", "manager", "to", "add", "arguments", "to", "an", "argparse", "object", "with", "the", "add_argument", "method", ".", "Arguments", "must", "be", "defined", "before", "the", "command", "is", "defined", ".", "Note", "that", "no", "-", "clean", "and", "resume", "are", "added", "upon", "exit", "and", "should", "not", "be", "added", "in", "the", "context", "manager", ".", "For", "more", "info", "about", "these", "default", "arguments", "see", "below", "." ]
python
train
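A minimal sketch of the default-flag pattern used above, reduced to plain argparse (the parser description and test argv are illustrative):

import argparse

parser = argparse.ArgumentParser(description='demo pipeline')
parser.add_argument('--no-clean', action='store_true',
                    help='If this flag is used, temporary work directory is not cleaned.')
args = parser.parse_args(['--no-clean'])
print(args.no_clean)  # True; argparse exposes '--no-clean' as the attribute 'no_clean'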
marrow/WebCore
web/server/fcgi.py
https://github.com/marrow/WebCore/blob/38d50f8022ca62976a1e5ff23f7714bd647b6532/web/server/fcgi.py#L18-L31
def serve(application, host='127.0.0.1', port=8080, socket=None, **options): """Basic FastCGI support via flup. This web server has many, many options. Please see the Flup project documentation for details. """ # Allow either on-disk socket (recommended) or TCP/IP socket use. if not socket: bindAddress = (host, int(port)) else: bindAddress = socket # Bind and start the blocking web server interface. WSGIServer(application, bindAddress=bindAddress, **options).run()
[ "def", "serve", "(", "application", ",", "host", "=", "'127.0.0.1'", ",", "port", "=", "8080", ",", "socket", "=", "None", ",", "*", "*", "options", ")", ":", "# Allow either on-disk socket (recommended) or TCP/IP socket use.", "if", "not", "socket", ":", "bindAddress", "=", "(", "host", ",", "int", "(", "port", ")", ")", "else", ":", "bindAddress", "=", "socket", "# Bind and start the blocking web server interface.", "WSGIServer", "(", "application", ",", "bindAddress", "=", "bindAddress", ",", "*", "*", "options", ")", ".", "run", "(", ")" ]
Basic FastCGI support via flup. This web server has many, many options. Please see the Flup project documentation for details.
[ "Basic", "FastCGI", "support", "via", "flup", ".", "This", "web", "server", "has", "many", "many", "options", ".", "Please", "see", "the", "Flup", "project", "documentation", "for", "details", "." ]
python
train
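A hedged usage sketch for the flup wrapper above, with a throwaway WSGI app and a hypothetical socket path:

def app(environ, start_response):
    start_response('200 OK', [('Content-Type', 'text/plain')])
    return [b'hello from FastCGI']

serve(app, socket='/tmp/myapp.sock')  # on-disk socket; omit it to bind host:port instead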
draperjames/qtpandas
qtpandas/ui/fallback/easygui/boxes/base_boxes.py
https://github.com/draperjames/qtpandas/blob/64294fb69f1839e53dee5ea453337266bfaf24f4/qtpandas/ui/fallback/easygui/boxes/base_boxes.py#L828-L916
def fileopenbox(msg=None, title=None, default='*', filetypes=None, multiple=False): """ A dialog to get a file name. **About the "default" argument** The "default" argument specifies a filepath that (normally) contains one or more wildcards. fileopenbox will display only files that match the default filepath. If omitted, defaults to "\*" (all files in the current directory). WINDOWS EXAMPLE:: ...default="c:/myjunk/*.py" will open in directory c:\\myjunk\\ and show all Python files. WINDOWS EXAMPLE:: ...default="c:/myjunk/test*.py" will open in directory c:\\myjunk\\ and show all Python files whose names begin with "test". Note that on Windows, fileopenbox automatically changes the path separator to the Windows path separator (backslash). **About the "filetypes" argument** If specified, it should contain a list of items, where each item is either: - a string containing a filemask # e.g. "\*.txt" - a list of strings, where all of the strings except the last one are filemasks (each beginning with "\*.", such as "\*.txt" for text files, "\*.py" for Python files, etc.). and the last string contains a filetype description EXAMPLE:: filetypes = ["*.css", ["*.htm", "*.html", "HTML files"] ] .. note:: If the filetypes list does not contain ("All files","*"), it will be added. If the filetypes list does not contain a filemask that includes the extension of the "default" argument, it will be added. For example, if default="\*abc.py" and no filetypes argument was specified, then "\*.py" will automatically be added to the filetypes argument. :param str msg: the msg to be displayed. :param str title: the window title :param str default: filepath with wildcards :param object filetypes: filemasks that a user can choose, e.g. "\*.txt" :param bool multiple: If true, more than one file can be selected :return: the name of a file, or None if user chose to cancel """ localRoot = Tk() localRoot.withdraw() initialbase, initialfile, initialdir, filetypes = fileboxSetup( default, filetypes) # ------------------------------------------------------------ # if initialfile contains no wildcards; we don't want an # initial file. It won't be used anyway. # Also: if initialbase is simply "*", we don't want an # initialfile; it is not doing any useful work. # ------------------------------------------------------------ if (initialfile.find("*") < 0) and (initialfile.find("?") < 0): initialfile = None elif initialbase == "*": initialfile = None func = ut.tk_FileDialog.askopenfilenames if multiple else ut.tk_FileDialog.askopenfilename ret_val = func(parent=localRoot, title=getFileDialogTitle(msg, title), initialdir=initialdir, initialfile=initialfile, filetypes=filetypes ) if multiple: f = [os.path.normpath(x) for x in localRoot.tk.splitlist(ret_val)] else: f = os.path.normpath(ret_val) localRoot.destroy() if not f: return None return f
[ "def", "fileopenbox", "(", "msg", "=", "None", ",", "title", "=", "None", ",", "default", "=", "'*'", ",", "filetypes", "=", "None", ",", "multiple", "=", "False", ")", ":", "localRoot", "=", "Tk", "(", ")", "localRoot", ".", "withdraw", "(", ")", "initialbase", ",", "initialfile", ",", "initialdir", ",", "filetypes", "=", "fileboxSetup", "(", "default", ",", "filetypes", ")", "# ------------------------------------------------------------", "# if initialfile contains no wildcards; we don't want an", "# initial file. It won't be used anyway.", "# Also: if initialbase is simply \"*\", we don't want an", "# initialfile; it is not doing any useful work.", "# ------------------------------------------------------------", "if", "(", "initialfile", ".", "find", "(", "\"*\"", ")", "<", "0", ")", "and", "(", "initialfile", ".", "find", "(", "\"?\"", ")", "<", "0", ")", ":", "initialfile", "=", "None", "elif", "initialbase", "==", "\"*\"", ":", "initialfile", "=", "None", "func", "=", "ut", ".", "tk_FileDialog", ".", "askopenfilenames", "if", "multiple", "else", "ut", ".", "tk_FileDialog", ".", "askopenfilename", "ret_val", "=", "func", "(", "parent", "=", "localRoot", ",", "title", "=", "getFileDialogTitle", "(", "msg", ",", "title", ")", ",", "initialdir", "=", "initialdir", ",", "initialfile", "=", "initialfile", ",", "filetypes", "=", "filetypes", ")", "if", "multiple", ":", "f", "=", "[", "os", ".", "path", ".", "normpath", "(", "x", ")", "for", "x", "in", "localRoot", ".", "tk", ".", "splitlist", "(", "ret_val", ")", "]", "else", ":", "f", "=", "os", ".", "path", ".", "normpath", "(", "ret_val", ")", "localRoot", ".", "destroy", "(", ")", "if", "not", "f", ":", "return", "None", "return", "f" ]
A dialog to get a file name. **About the "default" argument** The "default" argument specifies a filepath that (normally) contains one or more wildcards. fileopenbox will display only files that match the default filepath. If omitted, defaults to "\*" (all files in the current directory). WINDOWS EXAMPLE:: ...default="c:/myjunk/*.py" will open in directory c:\\myjunk\\ and show all Python files. WINDOWS EXAMPLE:: ...default="c:/myjunk/test*.py" will open in directory c:\\myjunk\\ and show all Python files whose names begin with "test". Note that on Windows, fileopenbox automatically changes the path separator to the Windows path separator (backslash). **About the "filetypes" argument** If specified, it should contain a list of items, where each item is either: - a string containing a filemask # e.g. "\*.txt" - a list of strings, where all of the strings except the last one are filemasks (each beginning with "\*.", such as "\*.txt" for text files, "\*.py" for Python files, etc.). and the last string contains a filetype description EXAMPLE:: filetypes = ["*.css", ["*.htm", "*.html", "HTML files"] ] .. note:: If the filetypes list does not contain ("All files","*"), it will be added. If the filetypes list does not contain a filemask that includes the extension of the "default" argument, it will be added. For example, if default="\*abc.py" and no filetypes argument was specified, then "\*.py" will automatically be added to the filetypes argument. :param str msg: the msg to be displayed. :param str title: the window title :param str default: filepath with wildcards :param object filetypes: filemasks that a user can choose, e.g. "\*.txt" :param bool multiple: If true, more than one file can be selected :return: the name of a file, or None if user chose to cancel
[ "A", "dialog", "to", "get", "a", "file", "name", "." ]
python
train
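A short call against the dialog above; the default mask and filetypes are illustrative:

path = fileopenbox(msg='Pick a script', title='Open',
                   default='*.py',
                   filetypes=[['*.py', '*.pyw', 'Python files']])
if path is not None:  # None means the user cancelled
    print(path)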
brian-rose/climlab
climlab/domain/xarray.py
https://github.com/brian-rose/climlab/blob/eae188a2ae9308229b8cbb8fe0b65f51b50ee1e6/climlab/domain/xarray.py#L32-L60
def state_to_xarray(state): '''Convert a dictionary of climlab.Field objects to xarray.Dataset Input: dictionary of climlab.Field objects (e.g. process.state or process.diagnostics dictionary) Output: xarray.Dataset object with all spatial axes, including 'bounds' axes indicating cell boundaries in each spatial dimension. Any items in the dictionary that are not instances of climlab.Field are ignored.''' from climlab.domain.field import Field ds = Dataset() for name, field in state.items(): if isinstance(field, Field): ds[name] = Field_to_xarray(field) dom = field.domain for axname, ax in dom.axes.items(): bounds_name = axname + '_bounds' ds.coords[bounds_name] = DataArray(ax.bounds, dims=[bounds_name], coords={bounds_name:ax.bounds}) try: ds[bounds_name].attrs['units'] = ax.units except: pass else: warnings.warn('{} excluded from Dataset because it is not a Field variable.'.format(name)) return ds
[ "def", "state_to_xarray", "(", "state", ")", ":", "from", "climlab", ".", "domain", ".", "field", "import", "Field", "ds", "=", "Dataset", "(", ")", "for", "name", ",", "field", "in", "state", ".", "items", "(", ")", ":", "if", "isinstance", "(", "field", ",", "Field", ")", ":", "ds", "[", "name", "]", "=", "Field_to_xarray", "(", "field", ")", "dom", "=", "field", ".", "domain", "for", "axname", ",", "ax", "in", "dom", ".", "axes", ".", "items", "(", ")", ":", "bounds_name", "=", "axname", "+", "'_bounds'", "ds", ".", "coords", "[", "bounds_name", "]", "=", "DataArray", "(", "ax", ".", "bounds", ",", "dims", "=", "[", "bounds_name", "]", ",", "coords", "=", "{", "bounds_name", ":", "ax", ".", "bounds", "}", ")", "try", ":", "ds", "[", "bounds_name", "]", ".", "attrs", "[", "'units'", "]", "=", "ax", ".", "units", "except", ":", "pass", "else", ":", "warnings", ".", "warn", "(", "'{} excluded from Dataset because it is not a Field variable.'", ".", "format", "(", "name", ")", ")", "return", "ds" ]
Convert a dictionary of climlab.Field objects to xarray.Dataset Input: dictionary of climlab.Field objects (e.g. process.state or process.diagnostics dictionary) Output: xarray.Dataset object with all spatial axes, including 'bounds' axes indicating cell boundaries in each spatial dimension. Any items in the dictionary that are not instances of climlab.Field are ignored.
[ "Convert", "a", "dictionary", "of", "climlab", ".", "Field", "objects", "to", "xarray", ".", "Dataset" ]
python
train
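Assuming a climlab process whose .state dict holds Field objects (EBM is used here as a stand-in), the conversion is a single call:

import climlab

model = climlab.EBM()              # any process with a state dict of Fields
ds = state_to_xarray(model.state)  # Dataset with data plus '*_bounds' coordinates
print(ds)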
PMEAL/OpenPNM
openpnm/core/Base.py
https://github.com/PMEAL/OpenPNM/blob/0547b5724ffedc0a593aae48639d36fe10e0baed/openpnm/core/Base.py#L327-L395
def keys(self, element=None, mode=None): r""" This subclass works exactly like ``keys`` when no arguments are passed, but optionally accepts an ``element`` and/or a ``mode``, which filters the output to only the requested keys. The default behavior is exactly equivalent to the normal ``keys`` method. Parameters ---------- element : string Can be either 'pore' or 'throat', which limits the returned list of keys to only 'pore' or 'throat' keys. If neither is given, then both are assumed. mode : string (optional, default is None) Controls which keys are returned. Options are: **``None``** : This mode (default) bypasses this subclassed method and just returns the normal KeysView object. **'labels'** : Limits the returned list of keys to only 'labels' (boolean arrays) **'props'** : Limits the returned list of keys to only 'props' (numerical arrays). **'all'** : Returns both 'labels' and 'props'. This is equivalent to sending a list of both 'labels' and 'props'. See Also -------- props labels Notes ----- This subclass can be used to get dictionary keys of specific kinds of data. Its use augments ``props`` and ``labels`` by returning a list containing both types, but possibly limited by element type ('pores' or 'throats'). Examples -------- >>> import openpnm as op >>> pn = op.network.Cubic([5, 5, 5]) >>> pn.keys(mode='props') # Get all props ['pore.coords', 'throat.conns'] >>> pn.keys(mode='props', element='pore') # Get only pore props ['pore.coords'] """ if mode is None: return super().keys() element = self._parse_element(element=element) allowed = ['props', 'labels'] if 'all' in mode: mode = allowed mode = self._parse_mode(mode=mode, allowed=allowed) keys = super().keys() temp = [] if 'props' in mode: temp.extend([i for i in keys if self.get(i).dtype != bool]) if 'labels' in mode: temp.extend([i for i in keys if self.get(i).dtype == bool]) if element: temp = [i for i in temp if i.split('.')[0] in element] return temp
[ "def", "keys", "(", "self", ",", "element", "=", "None", ",", "mode", "=", "None", ")", ":", "if", "mode", "is", "None", ":", "return", "super", "(", ")", ".", "keys", "(", ")", "element", "=", "self", ".", "_parse_element", "(", "element", "=", "element", ")", "allowed", "=", "[", "'props'", ",", "'labels'", "]", "if", "'all'", "in", "mode", ":", "mode", "=", "allowed", "mode", "=", "self", ".", "_parse_mode", "(", "mode", "=", "mode", ",", "allowed", "=", "allowed", ")", "keys", "=", "super", "(", ")", ".", "keys", "(", ")", "temp", "=", "[", "]", "if", "'props'", "in", "mode", ":", "temp", ".", "extend", "(", "[", "i", "for", "i", "in", "keys", "if", "self", ".", "get", "(", "i", ")", ".", "dtype", "!=", "bool", "]", ")", "if", "'labels'", "in", "mode", ":", "temp", ".", "extend", "(", "[", "i", "for", "i", "in", "keys", "if", "self", ".", "get", "(", "i", ")", ".", "dtype", "==", "bool", "]", ")", "if", "element", ":", "temp", "=", "[", "i", "for", "i", "in", "temp", "if", "i", ".", "split", "(", "'.'", ")", "[", "0", "]", "in", "element", "]", "return", "temp" ]
r""" This subclass works exactly like ``keys`` when no arguments are passed, but optionally accepts an ``element`` and/or a ``mode``, which filters the output to only the requested keys. The default behavior is exactly equivalent to the normal ``keys`` method. Parameters ---------- element : string Can be either 'pore' or 'throat', which limits the returned list of keys to only 'pore' or 'throat' keys. If neither is given, then both are assumed. mode : string (optional, default is 'skip') Controls which keys are returned. Options are: **``None``** : This mode (default) bypasses this subclassed method and just returns the normal KeysView object. **'labels'** : Limits the returned list of keys to only 'labels' (boolean arrays) **'props'** : Limits he return list of keys to only 'props' (numerical arrays). **'all'** : Returns both 'labels' and 'props'. This is equivalent to sending a list of both 'labels' and 'props'. See Also -------- props labels Notes ----- This subclass can be used to get dictionary keys of specific kinds of data. It's use augments ``props`` and ``labels`` by returning a list containing both types, but possibly limited by element type ('pores' or 'throats'.) Examples -------- >>> import openpnm as op >>> pn = op.network.Cubic([5, 5, 5]) >>> pn.keys(mode='props') # Get all props ['pore.coords', 'throat.conns'] >>> pn.keys(mode='props', element='pore') # Get only pore props ['pore.coords']
[ "r", "This", "subclass", "works", "exactly", "like", "keys", "when", "no", "arguments", "are", "passed", "but", "optionally", "accepts", "an", "element", "and", "/", "or", "a", "mode", "which", "filters", "the", "output", "to", "only", "the", "requested", "keys", "." ]
python
train
pywbem/pywbem
wbemcli.py
https://github.com/pywbem/pywbem/blob/e54ecb82c2211e289a268567443d60fdd489f1e4/wbemcli.py#L173-L241
def _remote_connection(server, opts, argparser_): """Initiate a remote connection, via PyWBEM. Arguments for the request are part of the command line arguments and include user name, password, namespace, etc. """ global CONN # pylint: disable=global-statement if opts.timeout is not None: if opts.timeout < 0 or opts.timeout > 300: argparser_.error('timeout option(%s) out of range' % opts.timeout) # mock only uses the namespace timeout and statistics options from the # original set of options. It ignores the url if opts.mock_server: CONN = FakedWBEMConnection( default_namespace=opts.namespace, timeout=opts.timeout, stats_enabled=opts.statistics) try: build_mock_repository(CONN, opts.mock_server, opts.verbose) except ValueError as ve: argparser_.error('Build Repository failed: %s' % ve) return CONN if server[0] == '/': url = server elif re.match(r"^https{0,1}://", server) is not None: url = server elif re.match(r"^[a-zA-Z0-9]+://", server) is not None: argparser_.error('Invalid scheme on server argument.' ' Use "http" or "https"') else: url = '%s://%s' % ('https', server) creds = None if opts.key_file is not None and opts.cert_file is None: argparser_.error('keyfile option requires certfile option') if opts.user is not None and opts.password is None: opts.password = _getpass.getpass('Enter password for %s: ' % opts.user) if opts.user is not None or opts.password is not None: creds = (opts.user, opts.password) # if client cert and key provided, create dictionary for # wbem connection x509_dict = None if opts.cert_file is not None: x509_dict = {"cert_file": opts.cert_file} if opts.key_file is not None: x509_dict.update({'key_file': opts.key_file}) CONN = WBEMConnection(url, creds, default_namespace=opts.namespace, no_verification=opts.no_verify_cert, x509=x509_dict, ca_certs=opts.ca_certs, timeout=opts.timeout, stats_enabled=opts.statistics) CONN.debug = True return CONN
[ "def", "_remote_connection", "(", "server", ",", "opts", ",", "argparser_", ")", ":", "global", "CONN", "# pylint: disable=global-statement", "if", "opts", ".", "timeout", "is", "not", "None", ":", "if", "opts", ".", "timeout", "<", "0", "or", "opts", ".", "timeout", ">", "300", ":", "argparser_", ".", "error", "(", "'timeout option(%s) out of range'", "%", "opts", ".", "timeout", ")", "# mock only uses the namespace timeout and statistics options from the", "# original set of options. It ignores the url", "if", "opts", ".", "mock_server", ":", "CONN", "=", "FakedWBEMConnection", "(", "default_namespace", "=", "opts", ".", "namespace", ",", "timeout", "=", "opts", ".", "timeout", ",", "stats_enabled", "=", "opts", ".", "statistics", ")", "try", ":", "build_mock_repository", "(", "CONN", ",", "opts", ".", "mock_server", ",", "opts", ".", "verbose", ")", "except", "ValueError", "as", "ve", ":", "argparser_", ".", "error", "(", "'Build Repository failed: %s'", "%", "ve", ")", "return", "CONN", "if", "server", "[", "0", "]", "==", "'/'", ":", "url", "=", "server", "elif", "re", ".", "match", "(", "r\"^https{0,1}://\"", ",", "server", ")", "is", "not", "None", ":", "url", "=", "server", "elif", "re", ".", "match", "(", "r\"^[a-zA-Z0-9]+://\"", ",", "server", ")", "is", "not", "None", ":", "argparser_", ".", "error", "(", "'Invalid scheme on server argument.'", "' Use \"http\" or \"https\"'", ")", "else", ":", "url", "=", "'%s://%s'", "%", "(", "'https'", ",", "server", ")", "creds", "=", "None", "if", "opts", ".", "key_file", "is", "not", "None", "and", "opts", ".", "cert_file", "is", "None", ":", "argparser_", ".", "error", "(", "'keyfile option requires certfile option'", ")", "if", "opts", ".", "user", "is", "not", "None", "and", "opts", ".", "password", "is", "None", ":", "opts", ".", "password", "=", "_getpass", ".", "getpass", "(", "'Enter password for %s: '", "%", "opts", ".", "user", ")", "if", "opts", ".", "user", "is", "not", "None", "or", "opts", ".", "password", "is", "not", "None", ":", "creds", "=", "(", "opts", ".", "user", ",", "opts", ".", "password", ")", "# if client cert and key provided, create dictionary for", "# wbem connection", "x509_dict", "=", "None", "if", "opts", ".", "cert_file", "is", "not", "None", ":", "x509_dict", "=", "{", "\"cert_file\"", ":", "opts", ".", "cert_file", "}", "if", "opts", ".", "key_file", "is", "not", "None", ":", "x509_dict", ".", "update", "(", "{", "'key_file'", ":", "opts", ".", "key_file", "}", ")", "CONN", "=", "WBEMConnection", "(", "url", ",", "creds", ",", "default_namespace", "=", "opts", ".", "namespace", ",", "no_verification", "=", "opts", ".", "no_verify_cert", ",", "x509", "=", "x509_dict", ",", "ca_certs", "=", "opts", ".", "ca_certs", ",", "timeout", "=", "opts", ".", "timeout", ",", "stats_enabled", "=", "opts", ".", "statistics", ")", "CONN", ".", "debug", "=", "True", "return", "CONN" ]
Initiate a remote connection, via PyWBEM. Arguments for the request are part of the command line arguments and include user name, password, namespace, etc.
[ "Initiate", "a", "remote", "connection", "via", "PyWBEM", ".", "Arguments", "for", "the", "request", "are", "part", "of", "the", "command", "line", "arguments", "and", "include", "user", "name", "password", "namespace", "etc", "." ]
python
train
nats-io/asyncio-nats
nats/aio/client.py
https://github.com/nats-io/asyncio-nats/blob/39e840be0b12ce326edac0bba69aeb1be930dcb8/nats/aio/client.py#L338-L380
def drain(self, sid=None): """ Drain will put a connection into a drain state. All subscriptions will immediately be put into a drain state. Upon completion, the publishers will be drained and can not publish any additional messages. Upon draining of the publishers, the connection will be closed. Use the `closed_cb' option to know when the connection has moved from draining to closed. If a sid is passed, just the subscription with that sid will be drained without closing the connection. """ if self.is_draining: return if self.is_closed: raise ErrConnectionClosed if self.is_connecting or self.is_reconnecting: raise ErrConnectionReconnecting if sid is not None: return self._drain_sub(sid) # Start draining the subscriptions self._status = Client.DRAINING_SUBS drain_tasks = [] for ssid, sub in self._subs.items(): task = self._drain_sub(ssid) drain_tasks.append(task) drain_is_done = asyncio.gather(*drain_tasks) try: yield from asyncio.wait_for(drain_is_done, self.options["drain_timeout"]) except asyncio.TimeoutError: drain_is_done.exception() drain_is_done.cancel() if self._error_cb is not None: yield from self._error_cb(ErrDrainTimeout) except asyncio.CancelledError: pass finally: self._status = Client.DRAINING_PUBS yield from self.flush() yield from self._close(Client.CLOSED)
[ "def", "drain", "(", "self", ",", "sid", "=", "None", ")", ":", "if", "self", ".", "is_draining", ":", "return", "if", "self", ".", "is_closed", ":", "raise", "ErrConnectionClosed", "if", "self", ".", "is_connecting", "or", "self", ".", "is_reconnecting", ":", "raise", "ErrConnectionReconnecting", "if", "sid", "is", "not", "None", ":", "return", "self", ".", "_drain_sub", "(", "sid", ")", "# Start draining the subscriptions", "self", ".", "_status", "=", "Client", ".", "DRAINING_SUBS", "drain_tasks", "=", "[", "]", "for", "ssid", ",", "sub", "in", "self", ".", "_subs", ".", "items", "(", ")", ":", "task", "=", "self", ".", "_drain_sub", "(", "ssid", ")", "drain_tasks", ".", "append", "(", "task", ")", "drain_is_done", "=", "asyncio", ".", "gather", "(", "*", "drain_tasks", ")", "try", ":", "yield", "from", "asyncio", ".", "wait_for", "(", "drain_is_done", ",", "self", ".", "options", "[", "\"drain_timeout\"", "]", ")", "except", "asyncio", ".", "TimeoutError", ":", "drain_is_done", ".", "exception", "(", ")", "drain_is_done", ".", "cancel", "(", ")", "if", "self", ".", "_error_cb", "is", "not", "None", ":", "yield", "from", "self", ".", "_error_cb", "(", "ErrDrainTimeout", ")", "except", "asyncio", ".", "CancelledError", ":", "pass", "finally", ":", "self", ".", "_status", "=", "Client", ".", "DRAINING_PUBS", "yield", "from", "self", ".", "flush", "(", ")", "yield", "from", "self", ".", "_close", "(", "Client", ".", "CLOSED", ")" ]
Drain will put a connection into a drain state. All subscriptions will immediately be put into a drain state. Upon completion, the publishers will be drained and can not publish any additional messages. Upon draining of the publishers, the connection will be closed. Use the `closed_cb' option to know when the connection has moved from draining to closed. If a sid is passed, just the subscription with that sid will be drained without closing the connection.
[ "Drain", "will", "put", "a", "connection", "into", "a", "drain", "state", ".", "All", "subscriptions", "will", "immediately", "be", "put", "into", "a", "drain", "state", ".", "Upon", "completion", "the", "publishers", "will", "be", "drained", "and", "can", "not", "publish", "any", "additional", "messages", ".", "Upon", "draining", "of", "the", "publishers", "the", "connection", "will", "be", "closed", ".", "Use", "the", "closed_cb", "option", "to", "know", "when", "the", "connection", "has", "moved", "from", "draining", "to", "closed", "." ]
python
test
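The client above is generator-based (`yield from`), so a usage sketch follows that style; the server URL and subject are placeholders:

import asyncio
from nats.aio.client import Client as NATS

@asyncio.coroutine
def main():
    nc = NATS()
    yield from nc.connect(servers=['nats://127.0.0.1:4222'])
    yield from nc.subscribe('updates')
    yield from nc.drain()  # drains subscriptions, flushes publishers, then closes

asyncio.get_event_loop().run_until_complete(main())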
skulumani/kinematics
kinematics/attitude.py
https://github.com/skulumani/kinematics/blob/e8cb45efb40539982025ed0f85d6561f9f10fef0/kinematics/attitude.py#L388-L509
def normalize(num_in, lower=0, upper=360, b=False): """Normalize number to range [lower, upper) or [lower, upper]. Parameters ---------- num : float The number to be normalized. lower : int Lower limit of range. Default is 0. upper : int Upper limit of range. Default is 360. b : bool Type of normalization. Default is False. See notes. When b=True, the range must be symmetric about 0. When b=False, the range must be symmetric about 0 or ``lower`` must be equal to 0. Returns ------- n : float A number in the range [lower, upper) or [lower, upper]. Raises ------ ValueError If lower >= upper. Notes ----- If the keyword `b == False`, then the normalization is done in the following way. Consider the numbers to be arranged in a circle, with the lower and upper ends sitting on top of each other. Moving past one limit, takes the number into the beginning of the other end. For example, if range is [0 - 360), then 361 becomes 1 and 360 becomes 0. Negative numbers move from higher to lower numbers. So, -1 normalized to [0 - 360) becomes 359. When b=False range must be symmetric about 0 or lower=0. If the keyword `b == True`, then the given number is considered to "bounce" between the two limits. So, -91 normalized to [-90, 90], becomes -89, instead of 89. In this case the range is [lower, upper]. This code is based on the function `fmt_delta` of `TPM`. When b=True range must be symmetric about 0. Examples -------- >>> normalize(-270,-180,180) 90.0 >>> import math >>> math.degrees(normalize(-2*math.pi,-math.pi,math.pi)) 0.0 >>> normalize(-180, -180, 180) -180.0 >>> normalize(180, -180, 180) -180.0 >>> normalize(180, -180, 180, b=True) 180.0 >>> normalize(181,-180,180) -179.0 >>> normalize(181, -180, 180, b=True) 179.0 >>> normalize(-180,0,360) 180.0 >>> normalize(36,0,24) 12.0 >>> normalize(368.5,-180,180) 8.5 >>> normalize(-100, -90, 90) 80.0 >>> normalize(-100, -90, 90, b=True) -80.0 >>> normalize(100, -90, 90, b=True) 80.0 >>> normalize(181, -90, 90, b=True) -1.0 >>> normalize(270, -90, 90, b=True) -90.0 >>> normalize(271, -90, 90, b=True) -89.0 """ if lower >= upper: ValueError("lower must be lesser than upper") if not b: if not ((lower + upper == 0) or (lower == 0)): raise ValueError('When b=False lower=0 or range must be symmetric about 0.') else: if not (lower + upper == 0): raise ValueError('When b=True range must be symmetric about 0.') # abs(num + upper) and abs(num - lower) are needed, instead of # abs(num), since the lower and upper limits need not be 0. We need # to add half size of the range, so that the final result is lower + # <value> or upper - <value>, respectively. if not hasattr(num_in, "__iter__"): num_in = np.asarray([num_in], dtype=np.float) res = [] for num in num_in: if not b: if num > upper or num == lower: num = lower + abs(num + upper) % (abs(lower) + abs(upper)) if num < lower or num == upper: num = upper - abs(num - lower) % (abs(lower) + abs(upper)) res.append(lower if num == upper else num) else: total_length = abs(lower) + abs(upper) if num < -total_length: num += ceil(num / (-2 * total_length)) * 2 * total_length if num > total_length: num -= floor(num / (2 * total_length)) * 2 * total_length if num > upper: num = total_length - num if num < lower: num = -total_length - num res.append(num) return np.asarray(res, dtype=np.float)
[ "def", "normalize", "(", "num_in", ",", "lower", "=", "0", ",", "upper", "=", "360", ",", "b", "=", "False", ")", ":", "if", "lower", ">=", "upper", ":", "ValueError", "(", "\"lower must be lesser than upper\"", ")", "if", "not", "b", ":", "if", "not", "(", "(", "lower", "+", "upper", "==", "0", ")", "or", "(", "lower", "==", "0", ")", ")", ":", "raise", "ValueError", "(", "'When b=False lower=0 or range must be symmetric about 0.'", ")", "else", ":", "if", "not", "(", "lower", "+", "upper", "==", "0", ")", ":", "raise", "ValueError", "(", "'When b=True range must be symmetric about 0.'", ")", "# abs(num + upper) and abs(num - lower) are needed, instead of", "# abs(num), since the lower and upper limits need not be 0. We need", "# to add half size of the range, so that the final result is lower +", "# <value> or upper - <value>, respectively.", "if", "not", "hasattr", "(", "num_in", ",", "\"__iter__\"", ")", ":", "num_in", "=", "np", ".", "asarray", "(", "[", "num_in", "]", ",", "dtype", "=", "np", ".", "float", ")", "res", "=", "[", "]", "for", "num", "in", "num_in", ":", "if", "not", "b", ":", "if", "num", ">", "upper", "or", "num", "==", "lower", ":", "num", "=", "lower", "+", "abs", "(", "num", "+", "upper", ")", "%", "(", "abs", "(", "lower", ")", "+", "abs", "(", "upper", ")", ")", "if", "num", "<", "lower", "or", "num", "==", "upper", ":", "num", "=", "upper", "-", "abs", "(", "num", "-", "lower", ")", "%", "(", "abs", "(", "lower", ")", "+", "abs", "(", "upper", ")", ")", "res", ".", "append", "(", "lower", "if", "num", "==", "upper", "else", "num", ")", "else", ":", "total_length", "=", "abs", "(", "lower", ")", "+", "abs", "(", "upper", ")", "if", "num", "<", "-", "total_length", ":", "num", "+=", "ceil", "(", "num", "/", "(", "-", "2", "*", "total_length", ")", ")", "*", "2", "*", "total_length", "if", "num", ">", "total_length", ":", "num", "-=", "floor", "(", "num", "/", "(", "2", "*", "total_length", ")", ")", "*", "2", "*", "total_length", "if", "num", ">", "upper", ":", "num", "=", "total_length", "-", "num", "if", "num", "<", "lower", ":", "num", "=", "-", "total_length", "-", "num", "res", ".", "append", "(", "num", ")", "return", "np", ".", "asarray", "(", "res", ",", "dtype", "=", "np", ".", "float", ")" ]
Normalize number to range [lower, upper) or [lower, upper]. Parameters ---------- num : float The number to be normalized. lower : int Lower limit of range. Default is 0. upper : int Upper limit of range. Default is 360. b : bool Type of normalization. Default is False. See notes. When b=True, the range must be symmetric about 0. When b=False, the range must be symmetric about 0 or ``lower`` must be equal to 0. Returns ------- n : float A number in the range [lower, upper) or [lower, upper]. Raises ------ ValueError If lower >= upper. Notes ----- If the keyword `b == False`, then the normalization is done in the following way. Consider the numbers to be arranged in a circle, with the lower and upper ends sitting on top of each other. Moving past one limit, takes the number into the beginning of the other end. For example, if range is [0 - 360), then 361 becomes 1 and 360 becomes 0. Negative numbers move from higher to lower numbers. So, -1 normalized to [0 - 360) becomes 359. When b=False range must be symmetric about 0 or lower=0. If the keyword `b == True`, then the given number is considered to "bounce" between the two limits. So, -91 normalized to [-90, 90], becomes -89, instead of 89. In this case the range is [lower, upper]. This code is based on the function `fmt_delta` of `TPM`. When b=True range must be symmetric about 0. Examples -------- >>> normalize(-270,-180,180) 90.0 >>> import math >>> math.degrees(normalize(-2*math.pi,-math.pi,math.pi)) 0.0 >>> normalize(-180, -180, 180) -180.0 >>> normalize(180, -180, 180) -180.0 >>> normalize(180, -180, 180, b=True) 180.0 >>> normalize(181,-180,180) -179.0 >>> normalize(181, -180, 180, b=True) 179.0 >>> normalize(-180,0,360) 180.0 >>> normalize(36,0,24) 12.0 >>> normalize(368.5,-180,180) 8.5 >>> normalize(-100, -90, 90) 80.0 >>> normalize(-100, -90, 90, b=True) -80.0 >>> normalize(100, -90, 90, b=True) 80.0 >>> normalize(181, -90, 90, b=True) -1.0 >>> normalize(270, -90, 90, b=True) -90.0 >>> normalize(271, -90, 90, b=True) -89.0
[ "Normalize", "number", "to", "range", "[", "lower", "upper", ")", "or", "[", "lower", "upper", "]", "." ]
python
train
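One detail in the record above: the lower >= upper guard constructs a ValueError but never raises it, even though the docstring's Raises section promises one. The intended guard is presumably:

if lower >= upper:
    raise ValueError("lower must be lesser than upper")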
InfoAgeTech/django-core
django_core/forms/mixins/common.py
https://github.com/InfoAgeTech/django-core/blob/9664a145473b75120bf71e1644e9c8086e7e8955/django_core/forms/mixins/common.py#L36-L58
def get_default_prefix(self, instance=None): """Gets the prefix for this form. :param instance: the form model instance. When calling this method directly this should almost always stay None so it looks for self.instance. """ if instance is None and hasattr(self, 'instance'): instance = self.instance if instance and instance.id is not None: # it's an existing instance, use the instance prefix instance_prefix = self.default_instance_prefix if instance_prefix is None: instance_prefix = self.__class__.__name__.lower() + 'i-' return '{0}{1}'.format(instance_prefix, instance.id) if self.default_new_prefix is not None: return self.default_new_prefix return self.__class__.__name__.lower() + 'new-'
[ "def", "get_default_prefix", "(", "self", ",", "instance", "=", "None", ")", ":", "if", "instance", "is", "None", "and", "hasattr", "(", "self", ",", "'instance'", ")", ":", "instance", "=", "self", ".", "instance", "if", "instance", "and", "instance", ".", "id", "is", "not", "None", ":", "# it's an existing instance, use the instance prefix", "instance_prefix", "=", "self", ".", "default_instance_prefix", "if", "instance_prefix", "is", "None", ":", "instance_prefix", "=", "self", ".", "__class__", ".", "__name__", ".", "lower", "(", ")", "+", "'i-'", "return", "'{0}{1}'", ".", "format", "(", "instance_prefix", ",", "instance", ".", "id", ")", "if", "self", ".", "default_new_prefix", "is", "not", "None", ":", "return", "self", ".", "default_new_prefix", "return", "self", ".", "__class__", ".", "__name__", ".", "lower", "(", ")", "+", "'new-'" ]
Gets the prefix for this form. :param instance: the form model instance. When calling this method directly this should almost always stay None so it looks for self.instance.
[ "Gets", "the", "prefix", "for", "this", "form", "." ]
python
train
dwavesystems/dwave_networkx
dwave_networkx/drawing/qubit_layout.py
https://github.com/dwavesystems/dwave_networkx/blob/9ea1223ddbc7e86db2f90b8b23e250e6642c3d68/dwave_networkx/drawing/qubit_layout.py#L44-L148
def draw_qubit_graph(G, layout, linear_biases={}, quadratic_biases={}, nodelist=None, edgelist=None, cmap=None, edge_cmap=None, vmin=None, vmax=None, edge_vmin=None, edge_vmax=None, **kwargs): """Draws graph G according to layout. If `linear_biases` and/or `quadratic_biases` are provided, these are visualized on the plot. Parameters ---------- G : NetworkX graph The graph to be drawn layout : dict A dict of coordinates associated with each node in G. Should be of the form {node: coordinate, ...}. Coordinates will be treated as vectors, and should all have the same length. linear_biases : dict (optional, default {}) A dict of biases associated with each node in G. Should be of form {node: bias, ...}. Each bias should be numeric. quadratic_biases : dict (optional, default {}) A dict of biases associated with each edge in G. Should be of form {edge: bias, ...}. Each bias should be numeric. Self-loop edges (i.e., :math:`i=j`) are treated as linear biases. kwargs : optional keywords See networkx.draw_networkx() for a description of optional keywords, with the exception of the `pos` parameter which is not used by this function. If `linear_biases` or `quadratic_biases` are provided, any provided `node_color` or `edge_color` arguments are ignored. """ if linear_biases or quadratic_biases: # if linear biases and/or quadratic biases are provided, then color accordingly. try: import matplotlib.pyplot as plt import matplotlib as mpl except ImportError: raise ImportError("Matplotlib and numpy required for draw_qubit_graph()") if nodelist is None: nodelist = G.nodes() if edgelist is None: edgelist = G.edges() if cmap is None: cmap = plt.get_cmap('coolwarm') if edge_cmap is None: edge_cmap = plt.get_cmap('coolwarm') # any edges or nodes with an unspecified bias default to 0 def edge_color(u, v): c = 0. if (u, v) in quadratic_biases: c += quadratic_biases[(u, v)] if (v, u) in quadratic_biases: c += quadratic_biases[(v, u)] return c def node_color(v): c = 0. if v in linear_biases: c += linear_biases[v] if (v, v) in quadratic_biases: c += quadratic_biases[(v, v)] return c node_color = [node_color(v) for v in nodelist] edge_color = [edge_color(u, v) for u, v in edgelist] kwargs['edge_color'] = edge_color kwargs['node_color'] = node_color # the range of the color map is shared for nodes/edges and is symmetric # around 0. vmag = max(max(abs(c) for c in node_color), max(abs(c) for c in edge_color)) if vmin is None: vmin = -1 * vmag if vmax is None: vmax = vmag if edge_vmin is None: edge_vmin = -1 * vmag if edge_vmax is None: edge_vmax = vmag draw(G, layout, nodelist=nodelist, edgelist=edgelist, cmap=cmap, edge_cmap=edge_cmap, vmin=vmin, vmax=vmax, edge_vmin=edge_vmin, edge_vmax=edge_vmax, **kwargs) # if the biases are provided, then add a legend explaining the color map if linear_biases or quadratic_biases: fig = plt.figure(1) # cax = fig.add_axes([]) cax = fig.add_axes([.9, 0.2, 0.04, 0.6]) # left, bottom, width, height mpl.colorbar.ColorbarBase(cax, cmap=cmap, norm=mpl.colors.Normalize(vmin=-1 * vmag, vmax=vmag, clip=False), orientation='vertical')
[ "def", "draw_qubit_graph", "(", "G", ",", "layout", ",", "linear_biases", "=", "{", "}", ",", "quadratic_biases", "=", "{", "}", ",", "nodelist", "=", "None", ",", "edgelist", "=", "None", ",", "cmap", "=", "None", ",", "edge_cmap", "=", "None", ",", "vmin", "=", "None", ",", "vmax", "=", "None", ",", "edge_vmin", "=", "None", ",", "edge_vmax", "=", "None", ",", "*", "*", "kwargs", ")", ":", "if", "linear_biases", "or", "quadratic_biases", ":", "# if linear biases and/or quadratic biases are provided, then color accordingly.", "try", ":", "import", "matplotlib", ".", "pyplot", "as", "plt", "import", "matplotlib", "as", "mpl", "except", "ImportError", ":", "raise", "ImportError", "(", "\"Matplotlib and numpy required for draw_qubit_graph()\"", ")", "if", "nodelist", "is", "None", ":", "nodelist", "=", "G", ".", "nodes", "(", ")", "if", "edgelist", "is", "None", ":", "edgelist", "=", "G", ".", "edges", "(", ")", "if", "cmap", "is", "None", ":", "cmap", "=", "plt", ".", "get_cmap", "(", "'coolwarm'", ")", "if", "edge_cmap", "is", "None", ":", "edge_cmap", "=", "plt", ".", "get_cmap", "(", "'coolwarm'", ")", "# any edges or nodes with an unspecified bias default to 0", "def", "edge_color", "(", "u", ",", "v", ")", ":", "c", "=", "0.", "if", "(", "u", ",", "v", ")", "in", "quadratic_biases", ":", "c", "+=", "quadratic_biases", "[", "(", "u", ",", "v", ")", "]", "if", "(", "v", ",", "u", ")", "in", "quadratic_biases", ":", "c", "+=", "quadratic_biases", "[", "(", "v", ",", "u", ")", "]", "return", "c", "def", "node_color", "(", "v", ")", ":", "c", "=", "0.", "if", "v", "in", "linear_biases", ":", "c", "+=", "linear_biases", "[", "v", "]", "if", "(", "v", ",", "v", ")", "in", "quadratic_biases", ":", "c", "+=", "quadratic_biases", "[", "(", "v", ",", "v", ")", "]", "return", "c", "node_color", "=", "[", "node_color", "(", "v", ")", "for", "v", "in", "nodelist", "]", "edge_color", "=", "[", "edge_color", "(", "u", ",", "v", ")", "for", "u", ",", "v", "in", "edgelist", "]", "kwargs", "[", "'edge_color'", "]", "=", "edge_color", "kwargs", "[", "'node_color'", "]", "=", "node_color", "# the range of the color map is shared for nodes/edges and is symmetric", "# around 0.", "vmag", "=", "max", "(", "max", "(", "abs", "(", "c", ")", "for", "c", "in", "node_color", ")", ",", "max", "(", "abs", "(", "c", ")", "for", "c", "in", "edge_color", ")", ")", "if", "vmin", "is", "None", ":", "vmin", "=", "-", "1", "*", "vmag", "if", "vmax", "is", "None", ":", "vmax", "=", "vmag", "if", "edge_vmin", "is", "None", ":", "edge_vmin", "=", "-", "1", "*", "vmag", "if", "edge_vmax", "is", "None", ":", "edge_vmax", "=", "vmag", "draw", "(", "G", ",", "layout", ",", "nodelist", "=", "nodelist", ",", "edgelist", "=", "edgelist", ",", "cmap", "=", "cmap", ",", "edge_cmap", "=", "edge_cmap", ",", "vmin", "=", "vmin", ",", "vmax", "=", "vmax", ",", "edge_vmin", "=", "edge_vmin", ",", "edge_vmax", "=", "edge_vmax", ",", "*", "*", "kwargs", ")", "# if the biases are provided, then add a legend explaining the color map", "if", "linear_biases", "or", "quadratic_biases", ":", "fig", "=", "plt", ".", "figure", "(", "1", ")", "# cax = fig.add_axes([])", "cax", "=", "fig", ".", "add_axes", "(", "[", ".9", ",", "0.2", ",", "0.04", ",", "0.6", "]", ")", "# left, bottom, width, height", "mpl", ".", "colorbar", ".", "ColorbarBase", "(", "cax", ",", "cmap", "=", "cmap", ",", "norm", "=", "mpl", ".", "colors", ".", "Normalize", "(", "vmin", "=", "-", "1", "*", "vmag", ",", "vmax", "=", "vmag", ",", "clip", "=", "False", ")", ",", 
"orientation", "=", "'vertical'", ")" ]
Draws graph G according to layout. If `linear_biases` and/or `quadratic_biases` are provided, these are visualized on the plot. Parameters ---------- G : NetworkX graph The graph to be drawn layout : dict A dict of coordinates associated with each node in G. Should be of the form {node: coordinate, ...}. Coordinates will be treated as vectors, and should all have the same length. linear_biases : dict (optional, default {}) A dict of biases associated with each node in G. Should be of form {node: bias, ...}. Each bias should be numeric. quadratic_biases : dict (optional, default {}) A dict of biases associated with each edge in G. Should be of form {edge: bias, ...}. Each bias should be numeric. Self-loop edges (i.e., :math:`i=j`) are treated as linear biases. kwargs : optional keywords See networkx.draw_networkx() for a description of optional keywords, with the exception of the `pos` parameter which is not used by this function. If `linear_biases` or `quadratic_biases` are provided, any provided `node_color` or `edge_color` arguments are ignored.
[ "Draws", "graph", "G", "according", "to", "layout", "." ]
python
train
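A small self-contained call for the drawing helper above; the graph, layout, and biases are made up:

import networkx as nx
import matplotlib.pyplot as plt

G = nx.path_graph(3)
layout = {0: (0, 0), 1: (1, 0), 2: (2, 0)}
h = {0: -1.0, 1: 0.5, 2: 0.0}    # linear bias per node
J = {(0, 1): 0.3, (1, 2): -0.7}  # quadratic bias per edge
draw_qubit_graph(G, layout, linear_biases=h, quadratic_biases=J)
plt.show()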
senaite/senaite.jsonapi
src/senaite/jsonapi/api.py
https://github.com/senaite/senaite.jsonapi/blob/871959f4b1c9edbb477e9456325527ca78e13ec6/src/senaite/jsonapi/api.py#L1447-L1463
def validate_object(brain_or_object, data): """Validate the entire object :param brain_or_object: A single catalog brain or content object :type brain_or_object: ATContentType/DexterityContentType/CatalogBrain :param data: The sharing dictionary as returned from the API :type data: dict :returns: invalidity status :rtype: dict """ obj = get_object(brain_or_object) # Call the validator of AT Content Types if is_at_content(obj): return obj.validate(data=data) return {}
[ "def", "validate_object", "(", "brain_or_object", ",", "data", ")", ":", "obj", "=", "get_object", "(", "brain_or_object", ")", "# Call the validator of AT Content Types", "if", "is_at_content", "(", "obj", ")", ":", "return", "obj", ".", "validate", "(", "data", "=", "data", ")", "return", "{", "}" ]
Validate the entire object :param brain_or_object: A single catalog brain or content object :type brain_or_object: ATContentType/DexterityContentType/CatalogBrain :param data: The sharing dictionary as returned from the API :type data: dict :returns: invalidity status :rtype: dict
[ "Validate", "the", "entire", "object" ]
python
train
andycasey/sick
sick/models/base.py
https://github.com/andycasey/sick/blob/6c37686182794c4cafea45abf7062b30b789b1a2/sick/models/base.py#L250-L265
def _overlapping_channels(self, wavelengths): """ Return the channels that match the given wavelength array. """ sizes = self.meta["channel_sizes"] min_a, max_a = wavelengths.min(), wavelengths.max() matched_channel_names = [] for i, (name, size) in enumerate(zip(self.channel_names, sizes)): si = sum(sizes[:i]) min_b, max_b = self.wavelengths[[si, si + size - 1]] if max_a > min_b and min_a < max_b: matched_channel_names.append(name) return matched_channel_names
[ "def", "_overlapping_channels", "(", "self", ",", "wavelengths", ")", ":", "sizes", "=", "self", ".", "meta", "[", "\"channel_sizes\"", "]", "min_a", ",", "max_a", "=", "wavelengths", ".", "min", "(", ")", ",", "wavelengths", ".", "max", "(", ")", "matched_channel_names", "=", "[", "]", "for", "i", ",", "(", "name", ",", "size", ")", "in", "enumerate", "(", "zip", "(", "self", ".", "channel_names", ",", "sizes", ")", ")", ":", "si", "=", "sum", "(", "sizes", "[", ":", "i", "]", ")", "min_b", ",", "max_b", "=", "self", ".", "wavelengths", "[", "[", "si", ",", "si", "+", "size", "-", "1", "]", "]", "if", "max_a", ">", "min_b", "and", "min_a", "<", "max_b", ":", "matched_channel_names", ".", "append", "(", "name", ")", "return", "matched_channel_names" ]
Return the channels that match the given wavelength array.
[ "Return", "the", "channels", "that", "match", "the", "given", "wavelength", "array", "." ]
python
train
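_overlapping_channels reduces to the standard interval-intersection test max_a > min_b and min_a < max_b, applied per channel to contiguous slices of one flat wavelength array. A self-contained sketch with made-up channel data:

import numpy as np

channel_names = ["blue", "red"]
channel_sizes = [3, 3]
# Channels are stored back to back in a single flat array, as in the model class.
wavelengths = np.array([4000., 4100., 4200., 6000., 6100., 6200.])

def overlapping_channels(obs):
    min_a, max_a = obs.min(), obs.max()
    matched = []
    for i, (name, size) in enumerate(zip(channel_names, channel_sizes)):
        si = sum(channel_sizes[:i])                    # start of this channel's slice
        min_b, max_b = wavelengths[[si, si + size - 1]]
        if max_a > min_b and min_a < max_b:            # intervals intersect
            matched.append(name)
    return matched

print(overlapping_channels(np.array([4150., 4250.])))  # ['blue']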
materialsproject/pymatgen
pymatgen/core/molecular_orbitals.py
https://github.com/materialsproject/pymatgen/blob/4ca558cf72f8d5f8a1f21dfdfc0181a971c186da/pymatgen/core/molecular_orbitals.py#L83-L119
def obtain_band_edges(self): ''' Fill up the atomic orbitals with available electrons. Return HOMO, LUMO, and whether it's a metal. ''' orbitals = self.aos_as_list() electrons = Composition(self.composition).total_electrons partial_filled = [] for orbital in orbitals: if electrons <= 0: break if 's' in orbital[1]: electrons += -2 elif 'p' in orbital[1]: electrons += -6 elif 'd' in orbital[1]: electrons += -10 elif 'f' in orbital[1]: electrons += -14 partial_filled.append(orbital) if electrons != 0: homo = partial_filled[-1] lumo = partial_filled[-1] else: homo = partial_filled[-1] try: lumo = orbitals[len(partial_filled)] except: lumo = None if homo == lumo: metal = True else: metal = False return {'HOMO': homo, 'LUMO': lumo, 'metal': metal}
[ "def", "obtain_band_edges", "(", "self", ")", ":", "orbitals", "=", "self", ".", "aos_as_list", "(", ")", "electrons", "=", "Composition", "(", "self", ".", "composition", ")", ".", "total_electrons", "partial_filled", "=", "[", "]", "for", "orbital", "in", "orbitals", ":", "if", "electrons", "<=", "0", ":", "break", "if", "'s'", "in", "orbital", "[", "1", "]", ":", "electrons", "+=", "-", "2", "elif", "'p'", "in", "orbital", "[", "1", "]", ":", "electrons", "+=", "-", "6", "elif", "'d'", "in", "orbital", "[", "1", "]", ":", "electrons", "+=", "-", "10", "elif", "'f'", "in", "orbital", "[", "1", "]", ":", "electrons", "+=", "-", "14", "partial_filled", ".", "append", "(", "orbital", ")", "if", "electrons", "!=", "0", ":", "homo", "=", "partial_filled", "[", "-", "1", "]", "lumo", "=", "partial_filled", "[", "-", "1", "]", "else", ":", "homo", "=", "partial_filled", "[", "-", "1", "]", "try", ":", "lumo", "=", "orbitals", "[", "len", "(", "partial_filled", ")", "]", "except", ":", "lumo", "=", "None", "if", "homo", "==", "lumo", ":", "metal", "=", "True", "else", ":", "metal", "=", "False", "return", "{", "'HOMO'", ":", "homo", ",", "'LUMO'", ":", "lumo", ",", "'metal'", ":", "metal", "}" ]
Fill up the atomic orbitals with available electrons. Return HOMO, LUMO, and whether it's a metal.
[ "Fill", "up", "the", "atomic", "orbitals", "with", "available", "electrons", ".", "Return", "HOMO", "LUMO", "and", "whether", "it", "s", "a", "metal", "." ]
python
train
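obtain_band_edges fills subshells in list order, subtracting the s/p/d/f capacities (2, 6, 10, 14) from the total electron count; the HOMO is the last orbital touched, and the LUMO differs from it only when the count lands exactly on a subshell boundary. A self-contained sketch of just the filling step, with a toy orbital list standing in for self.aos_as_list():

CAPACITY = {'s': 2, 'p': 6, 'd': 10, 'f': 14}

orbitals = [('Na', '1s'), ('Na', '2s'), ('Na', '2p'), ('Na', '3s')]
electrons = 11  # neutral Na

partial_filled = []
for element, label in orbitals:
    if electrons <= 0:
        break
    electrons -= CAPACITY[label[-1]]   # may go negative: subshell partly filled
    partial_filled.append((element, label))

homo = partial_filled[-1]
# ('Na', '3s') remaining: -1 -> count did not land on a boundary, so
# HOMO == LUMO and the original method would flag this as a metal.
print(homo, 'remaining:', electrons)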
rycus86/prometheus_flask_exporter
prometheus_flask_exporter/__init__.py
https://github.com/rycus86/prometheus_flask_exporter/blob/678dbf3097e82a0ddb697268406004cc1f4a26bc/prometheus_flask_exporter/__init__.py#L353-L370
def gauge(self, name, description, labels=None, **kwargs): """ Use a Gauge to track the number of invocations in progress for the method. :param name: the name of the metric :param description: the description of the metric :param labels: a dictionary of `{labelname: callable_or_value}` for labels :param kwargs: additional keyword arguments for creating the Gauge """ return self._track( Gauge, lambda metric, time: metric.dec(), kwargs, name, description, labels, registry=self.registry, before=lambda metric: metric.inc() )
[ "def", "gauge", "(", "self", ",", "name", ",", "description", ",", "labels", "=", "None", ",", "*", "*", "kwargs", ")", ":", "return", "self", ".", "_track", "(", "Gauge", ",", "lambda", "metric", ",", "time", ":", "metric", ".", "dec", "(", ")", ",", "kwargs", ",", "name", ",", "description", ",", "labels", ",", "registry", "=", "self", ".", "registry", ",", "before", "=", "lambda", "metric", ":", "metric", ".", "inc", "(", ")", ")" ]
Use a Gauge to track the number of invocations in progress for the method. :param name: the name of the metric :param description: the description of the metric :param labels: a dictionary of `{labelname: callable_or_value}` for labels :param kwargs: additional keyword arguments for creating the Gauge
[ "Use", "a", "Gauge", "to", "track", "the", "number", "of", "invocations", "in", "progress", "for", "the", "method", "." ]
python
train
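In use, gauge() is a decorator factory: the tracker it returns calls metric.inc() on entry (the before callback) and metric.dec() on exit, so the gauge counts requests currently in flight. The project README shows exactly this pattern:

from flask import Flask
from prometheus_flask_exporter import PrometheusMetrics

app = Flask(__name__)
metrics = PrometheusMetrics(app)

@app.route('/long-running')
@metrics.gauge('in_progress', 'Long running requests in progress')
def long_running():
    return 'OK'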
line/line-bot-sdk-python
linebot/api.py
https://github.com/line/line-bot-sdk-python/blob/1b38bfc2497ff3e3c75be4b50e0f1b7425a07ce0/linebot/api.py#L293-L314
def get_message_content(self, message_id, timeout=None): """Call get content API. https://devdocs.line.me/en/#get-content Retrieve image, video, and audio data sent by users. :param str message_id: Message ID :param timeout: (optional) How long to wait for the server to send data before giving up, as a float, or a (connect timeout, read timeout) float tuple. Default is self.http_client.timeout :type timeout: float | tuple(float, float) :rtype: :py:class:`linebot.models.responses.Content` :return: Content instance """ response = self._get( '/v2/bot/message/{message_id}/content'.format(message_id=message_id), stream=True, timeout=timeout ) return Content(response)
[ "def", "get_message_content", "(", "self", ",", "message_id", ",", "timeout", "=", "None", ")", ":", "response", "=", "self", ".", "_get", "(", "'/v2/bot/message/{message_id}/content'", ".", "format", "(", "message_id", "=", "message_id", ")", ",", "stream", "=", "True", ",", "timeout", "=", "timeout", ")", "return", "Content", "(", "response", ")" ]
Call get content API. https://devdocs.line.me/en/#get-content Retrieve image, video, and audio data sent by users. :param str message_id: Message ID :param timeout: (optional) How long to wait for the server to send data before giving up, as a float, or a (connect timeout, read timeout) float tuple. Default is self.http_client.timeout :type timeout: float | tuple(float, float) :rtype: :py:class:`linebot.models.responses.Content` :return: Content instance
[ "Call", "get", "content", "API", "." ]
python
train
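Because the request is made with stream=True, the returned Content is meant to be consumed incrementally via iter_content(). The SDK's documented usage — the access token, message_id, and file_path below are placeholders:

from linebot import LineBotApi

line_bot_api = LineBotApi('CHANNEL_ACCESS_TOKEN')   # placeholder credential
message_id = '...'                                  # comes from a webhook event
file_path = 'downloaded.bin'                        # placeholder output path

message_content = line_bot_api.get_message_content(message_id)
with open(file_path, 'wb') as fd:
    for chunk in message_content.iter_content():
        fd.write(chunk)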
christophertbrown/bioscripts
ctbBio/16SfromHMM.py
https://github.com/christophertbrown/bioscripts/blob/83b2566b3a5745437ec651cd6cafddd056846240/ctbBio/16SfromHMM.py#L51-L61
def check_overlap(current, hit, overlap = 200): """ determine if sequence has already hit the same part of the model, indicating that this hit is for another 16S rRNA gene """ for prev in current: p_coords = prev[2:4] coords = hit[2:4] if get_overlap(coords, p_coords) >= overlap: return True return False
[ "def", "check_overlap", "(", "current", ",", "hit", ",", "overlap", "=", "200", ")", ":", "for", "prev", "in", "current", ":", "p_coords", "=", "prev", "[", "2", ":", "4", "]", "coords", "=", "hit", "[", "2", ":", "4", "]", "if", "get_overlap", "(", "coords", ",", "p_coords", ")", ">=", "overlap", ":", "return", "True", "return", "False" ]
determine if sequence has already hit the same part of the model, indicating that this hit is for another 16S rRNA gene
[ "determine", "if", "sequence", "has", "already", "hit", "the", "same", "part", "of", "the", "model", "indicating", "that", "this", "hit", "is", "for", "another", "16S", "rRNA", "gene" ]
python
train
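check_overlap depends on a get_overlap() helper defined elsewhere in the script; a plausible stand-in (an assumption, not the original) returns the length of the intersection of two model-coordinate intervals:

def get_overlap(a, b):
    # Assumed helper: length of the intersection of intervals a and b.
    return max(0, min(a[1], b[1]) - max(a[0], b[0]))

def check_overlap(current, hit, overlap=200):
    for prev in current:
        if get_overlap(hit[2:4], prev[2:4]) >= overlap:
            return True
    return False

# Hits covering model positions 100-900 and 850-1600 share only ~50 bases,
# so the second hit is treated as another 16S copy rather than a re-hit.
print(check_overlap([['seq', 'model', 100, 900]], ['seq', 'model', 850, 1600]))  # False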
archman/beamline
beamline/lattice.py
https://github.com/archman/beamline/blob/417bc5dc13e754bc89d246427984590fced64d07/beamline/lattice.py#L165-L211
def getKw(self, kw): """ Extract doc snippet for element configuration, :param kw: element name :return: instance itself 1 call getKwAsDict() to return config as a dict 2 call getKwAsJson() to return config as json string 3 call getKwAsString() to return config as a raw string USAGE: getKw('Q10') """ ikw = kw.lower() line_continue_flag = '' appendflag = False try: for line in self.file_lines: if line.strip() == '': continue line = ' '.join(line.strip().split()).strip('\n; ') if line.startswith('!'): continue if line.lower().startswith(ikw + ' :') or line.lower().startswith(ikw + ':'): conflist = [] # list to put into element configuration conflist.append(line) appendflag = True elif appendflag and line_continue_flag == '&': conflist.append(line) line_continue_flag = line[-1] if line_continue_flag != '&': appendflag = False conf_str = ''.join(conflist).replace('&', ',') if 'line' in conf_str.lower().split('=')[0]: # if bl defines lattice conf_str = conf_str.lower().replace(',', ' ')[::-1].replace('enil', 'beamline,lattice'[::-1], 1)[ ::-1] # avoid the case with bl keyword has 'line' except: conf_str = '' # print conf_str # split('!epics'): second part is epics control conf splitedparts = conf_str.split('!epics') self.confstr = splitedparts[0] try: self.confstr_epics = splitedparts[1].strip() except IndexError: self.confstr_epics = '' return self
[ "def", "getKw", "(", "self", ",", "kw", ")", ":", "ikw", "=", "kw", ".", "lower", "(", ")", "line_continue_flag", "=", "''", "appendflag", "=", "False", "try", ":", "for", "line", "in", "self", ".", "file_lines", ":", "if", "line", ".", "strip", "(", ")", "==", "''", ":", "continue", "line", "=", "' '", ".", "join", "(", "line", ".", "strip", "(", ")", ".", "split", "(", ")", ")", ".", "strip", "(", "'\\n; '", ")", "if", "line", ".", "startswith", "(", "'!'", ")", ":", "continue", "if", "line", ".", "lower", "(", ")", ".", "startswith", "(", "ikw", "+", "' :'", ")", "or", "line", ".", "lower", "(", ")", ".", "startswith", "(", "ikw", "+", "':'", ")", ":", "conflist", "=", "[", "]", "# list to put into element configuration", "conflist", ".", "append", "(", "line", ")", "appendflag", "=", "True", "elif", "appendflag", "and", "line_continue_flag", "==", "'&'", ":", "conflist", ".", "append", "(", "line", ")", "line_continue_flag", "=", "line", "[", "-", "1", "]", "if", "line_continue_flag", "!=", "'&'", ":", "appendflag", "=", "False", "conf_str", "=", "''", ".", "join", "(", "conflist", ")", ".", "replace", "(", "'&'", ",", "','", ")", "if", "'line'", "in", "conf_str", ".", "lower", "(", ")", ".", "split", "(", "'='", ")", "[", "0", "]", ":", "# if bl defines lattice", "conf_str", "=", "conf_str", ".", "lower", "(", ")", ".", "replace", "(", "','", ",", "' '", ")", "[", ":", ":", "-", "1", "]", ".", "replace", "(", "'enil'", ",", "'beamline,lattice'", "[", ":", ":", "-", "1", "]", ",", "1", ")", "[", ":", ":", "-", "1", "]", "# avoid the case with bl keyword has 'line'", "except", ":", "conf_str", "=", "''", "# print conf_str", "# split('!epics'): second part is epics control conf", "splitedparts", "=", "conf_str", ".", "split", "(", "'!epics'", ")", "self", ".", "confstr", "=", "splitedparts", "[", "0", "]", "try", ":", "self", ".", "confstr_epics", "=", "splitedparts", "[", "1", "]", ".", "strip", "(", ")", "except", "IndexError", ":", "self", ".", "confstr_epics", "=", "''", "return", "self" ]
Extract doc snippet for element configuration, :param kw: element name :return: instance itself 1 call getKwAsDict() to return config as a dict 2 call getKwAsJson() to return config as json string 3 call getKwAsString() to return config as a raw string USAGE: getKw('Q10')
[ "Extract", "doc", "snippet", "for", "element", "configuration", ":", "param", "kw", ":", "element", "name", ":", "return", ":", "instance", "itself", "1", "call", "getKwAsDict", "()", "to", "return", "config", "as", "a", "dict", "2", "call", "getKwAsJson", "()", "to", "return", "config", "as", "json", "string", "3", "call", "getKwAsString", "()", "to", "return", "config", "as", "a", "raw", "string" ]
python
train
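The heart of getKw() is a scan that finds a "NAME :" definition and keeps appending lines while the previous line ended in the elegant continuation character '&'. A self-contained sketch of that scan over a toy lattice (the file content is invented; the doubled comma in the output mirrors the original's replace('&', ',') behaviour):

file_lines = [
    "Q10 : QUAD, L=0.5, &",
    "      K1=1.2",
    "BL : LINE=(Q10)",
]

def get_kw(kw):
    ikw = kw.lower()
    conf, appending, cont = [], False, ''
    for line in file_lines:
        line = ' '.join(line.strip().split()).strip('\n; ')
        if not line or line.startswith('!'):
            continue
        if line.lower().startswith((ikw + ' :', ikw + ':')):
            conf, appending = [line], True            # start of the definition
        elif appending and cont == '&':
            conf.append(line)                         # continuation line
        cont = line[-1]
        if cont != '&':
            appending = False
    return ''.join(conf).replace('&', ',')

print(get_kw('Q10'))  # Q10 : QUAD, L=0.5, ,K1=1.2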
mhe/pynrrd
nrrd/reader.py
https://github.com/mhe/pynrrd/blob/96dd875b302031ea27e2d3aaa611dc6f2dfc7979/nrrd/reader.py#L301-L466
def read_data(header, fh=None, filename=None, index_order='F'): """Read data from file into :class:`numpy.ndarray` The two parameters :obj:`fh` and :obj:`filename` are optional depending on the parameters but it never hurts to specify both. The file handle (:obj:`fh`) is necessary if the header is attached with the NRRD data. However, if the NRRD data is detached from the header, then the :obj:`filename` parameter is required to obtain the absolute path to the data file. See :ref:`user-guide:Reading NRRD files` for more information on reading NRRD files. Parameters ---------- header : :class:`dict` (:class:`str`, :obj:`Object`) Parsed fields/values obtained from :meth:`read_header` function fh : file-object, optional File object pointing to first byte of data. Only necessary if data is attached to header. filename : :class:`str`, optional Filename of the header file. Only necessary if data is detached from the header. This is used to get the absolute data path. index_order : {'C', 'F'}, optional Specifies the index order of the resulting data array. Either 'C' (C-order) where the dimensions are ordered from slowest-varying to fastest-varying (e.g. (z, y, x)), or 'F' (Fortran-order) where the dimensions are ordered from fastest-varying to slowest-varying (e.g. (x, y, z)). Returns ------- data : :class:`numpy.ndarray` Data read from NRRD file See Also -------- :meth:`read`, :meth:`read_header` """ if index_order not in ['F', 'C']: raise NRRDError('Invalid index order') # Check that the required fields are in the header for field in _NRRD_REQUIRED_FIELDS: if field not in header: raise NRRDError('Header is missing required field: "%s".' % field) if header['dimension'] != len(header['sizes']): raise NRRDError('Number of elements in sizes does not match dimension. Dimension: %i, len(sizes): %i' % ( header['dimension'], len(header['sizes']))) # Determine the data type from the header dtype = _determine_datatype(header) # Determine the byte skip, line skip and the data file # These all can be written with or without the space according to the NRRD spec, so we check them both line_skip = header.get('lineskip', header.get('line skip', 0)) byte_skip = header.get('byteskip', header.get('byte skip', 0)) data_filename = header.get('datafile', header.get('data file', None)) # If the data file is separate from the header file, then open the data file to read from that instead if data_filename is not None: # If the pathname is relative, then append the current directory from the filename if not os.path.isabs(data_filename): if filename is None: raise NRRDError('Filename parameter must be specified when a relative data file path is given') data_filename = os.path.join(os.path.dirname(filename), data_filename) # Override the fh parameter with the data filename # Note that this is opened without a "with" block, thus it must be closed manually in all circumstances fh = open(data_filename, 'rb') # Get the total number of data points by multiplying the size of each dimension together total_data_points = header['sizes'].prod() # Skip the number of lines requested when line_skip >= 0 # Irrespective of the NRRD file having attached/detached header # Lines are skipped before getting to the beginning of the data if line_skip >= 0: for _ in range(line_skip): fh.readline() else: # Must close the file because if the file was opened above from detached filename, there is no "with" block to # close it for us fh.close() raise NRRDError('Invalid lineskip, allowed values are greater than or equal to 0') # Skip the requested number of bytes or seek backward, and then parse the data using NumPy if byte_skip < -1: # Must close the file because if the file was opened above from detached filename, there is no "with" block to # close it for us fh.close() raise NRRDError('Invalid byteskip, allowed values are greater than or equal to -1') elif byte_skip >= 0: fh.seek(byte_skip, os.SEEK_CUR) elif byte_skip == -1 and header['encoding'] not in ['gzip', 'gz', 'bzip2', 'bz2']: fh.seek(-dtype.itemsize * total_data_points, os.SEEK_END) else: # The only case left should be: byte_skip == -1 and header['encoding'] == 'gzip' byte_skip = -dtype.itemsize * total_data_points # If a compression encoding is used, then byte skip AFTER decompressing if header['encoding'] == 'raw': data = np.fromfile(fh, dtype) elif header['encoding'] in ['ASCII', 'ascii', 'text', 'txt']: data = np.fromfile(fh, dtype, sep=' ') else: # Handle compressed data now # Construct the decompression object based on encoding if header['encoding'] in ['gzip', 'gz']: decompobj = zlib.decompressobj(zlib.MAX_WBITS | 16) elif header['encoding'] in ['bzip2', 'bz2']: decompobj = bz2.BZ2Decompressor() else: # Must close the file because if the file was opened above from detached filename, there is no "with" block # to close it for us fh.close() raise NRRDError('Unsupported encoding: "%s"' % header['encoding']) # Loop through the file and read a chunk at a time (see _READ_CHUNKSIZE why it is read in chunks) decompressed_data = bytearray() # Read all of the remaining data from the file # Obtain the length of the compressed data since we will be using it repeatedly, more efficient compressed_data = fh.read() compressed_data_len = len(compressed_data) start_index = 0 # Loop through data and decompress it chunk by chunk while start_index < compressed_data_len: # Calculate the end index = start index plus chunk size # Set to the string length to read the remaining chunk at the end end_index = min(start_index + _READ_CHUNKSIZE, compressed_data_len) # Decompress and append data decompressed_data += decompobj.decompress(compressed_data[start_index:end_index]) # Update start index start_index = end_index # Delete the compressed data since we do not need it anymore # This could potentially be using a lot of memory del compressed_data # Byte skip is applied AFTER the decompression. Skip first x bytes of the decompressed data and parse it using # NumPy data = np.frombuffer(decompressed_data[byte_skip:], dtype) # Close the file, even if opened using "with" block, closing it manually does not hurt fh.close() if total_data_points != data.size: raise NRRDError('Size of the data does not equal the product of all the dimensions: {0}-{1}={2}' .format(total_data_points, data.size, total_data_points - data.size)) # In the NRRD header, the fields are specified in Fortran order, i.e, the first index is the one that changes # fastest and last index changes slowest. This needs to be taken into consideration since numpy uses C-order # indexing. # The array shape from NRRD (x,y,z) needs to be reversed as numpy expects (z,y,x). data = np.reshape(data, tuple(header['sizes'][::-1])) # Transpose data to enable Fortran indexing if requested. if index_order == 'F': data = data.T return data
[ "def", "read_data", "(", "header", ",", "fh", "=", "None", ",", "filename", "=", "None", ",", "index_order", "=", "'F'", ")", ":", "if", "index_order", "not", "in", "[", "'F'", ",", "'C'", "]", ":", "raise", "NRRDError", "(", "'Invalid index order'", ")", "# Check that the required fields are in the header", "for", "field", "in", "_NRRD_REQUIRED_FIELDS", ":", "if", "field", "not", "in", "header", ":", "raise", "NRRDError", "(", "'Header is missing required field: \"%s\".'", "%", "field", ")", "if", "header", "[", "'dimension'", "]", "!=", "len", "(", "header", "[", "'sizes'", "]", ")", ":", "raise", "NRRDError", "(", "'Number of elements in sizes does not match dimension. Dimension: %i, len(sizes): %i'", "%", "(", "header", "[", "'dimension'", "]", ",", "len", "(", "header", "[", "'sizes'", "]", ")", ")", ")", "# Determine the data type from the header", "dtype", "=", "_determine_datatype", "(", "header", ")", "# Determine the byte skip, line skip and the data file", "# These all can be written with or without the space according to the NRRD spec, so we check them both", "line_skip", "=", "header", ".", "get", "(", "'lineskip'", ",", "header", ".", "get", "(", "'line skip'", ",", "0", ")", ")", "byte_skip", "=", "header", ".", "get", "(", "'byteskip'", ",", "header", ".", "get", "(", "'byte skip'", ",", "0", ")", ")", "data_filename", "=", "header", ".", "get", "(", "'datafile'", ",", "header", ".", "get", "(", "'data file'", ",", "None", ")", ")", "# If the data file is separate from the header file, then open the data file to read from that instead", "if", "data_filename", "is", "not", "None", ":", "# If the pathname is relative, then append the current directory from the filename", "if", "not", "os", ".", "path", ".", "isabs", "(", "data_filename", ")", ":", "if", "filename", "is", "None", ":", "raise", "NRRDError", "(", "'Filename parameter must be specified when a relative data file path is given'", ")", "data_filename", "=", "os", ".", "path", ".", "join", "(", "os", ".", "path", ".", "dirname", "(", "filename", ")", ",", "data_filename", ")", "# Override the fh parameter with the data filename", "# Note that this is opened without a \"with\" block, thus it must be closed manually in all circumstances", "fh", "=", "open", "(", "data_filename", ",", "'rb'", ")", "# Get the total number of data points by multiplying the size of each dimension together", "total_data_points", "=", "header", "[", "'sizes'", "]", ".", "prod", "(", ")", "# Skip the number of lines requested when line_skip >= 0", "# Irrespective of the NRRD file having attached/detached header", "# Lines are skipped before getting to the beginning of the data", "if", "line_skip", ">=", "0", ":", "for", "_", "in", "range", "(", "line_skip", ")", ":", "fh", ".", "readline", "(", ")", "else", ":", "# Must close the file because if the file was opened above from detached filename, there is no \"with\" block to", "# close it for us", "fh", ".", "close", "(", ")", "raise", "NRRDError", "(", "'Invalid lineskip, allowed values are greater than or equal to 0'", ")", "# Skip the requested number of bytes or seek backward, and then parse the data using NumPy", "if", "byte_skip", "<", "-", "1", ":", "# Must close the file because if the file was opened above from detached filename, there is no \"with\" block to", "# close it for us", "fh", ".", "close", "(", ")", "raise", "NRRDError", "(", "'Invalid byteskip, allowed values are greater than or equal to -1'", ")", "elif", "byte_skip", ">=", "0", ":", "fh", ".", "seek", "(", 
"byte_skip", ",", "os", ".", "SEEK_CUR", ")", "elif", "byte_skip", "==", "-", "1", "and", "header", "[", "'encoding'", "]", "not", "in", "[", "'gzip'", ",", "'gz'", ",", "'bzip2'", ",", "'bz2'", "]", ":", "fh", ".", "seek", "(", "-", "dtype", ".", "itemsize", "*", "total_data_points", ",", "os", ".", "SEEK_END", ")", "else", ":", "# The only case left should be: byte_skip == -1 and header['encoding'] == 'gzip'", "byte_skip", "=", "-", "dtype", ".", "itemsize", "*", "total_data_points", "# If a compression encoding is used, then byte skip AFTER decompressing", "if", "header", "[", "'encoding'", "]", "==", "'raw'", ":", "data", "=", "np", ".", "fromfile", "(", "fh", ",", "dtype", ")", "elif", "header", "[", "'encoding'", "]", "in", "[", "'ASCII'", ",", "'ascii'", ",", "'text'", ",", "'txt'", "]", ":", "data", "=", "np", ".", "fromfile", "(", "fh", ",", "dtype", ",", "sep", "=", "' '", ")", "else", ":", "# Handle compressed data now", "# Construct the decompression object based on encoding", "if", "header", "[", "'encoding'", "]", "in", "[", "'gzip'", ",", "'gz'", "]", ":", "decompobj", "=", "zlib", ".", "decompressobj", "(", "zlib", ".", "MAX_WBITS", "|", "16", ")", "elif", "header", "[", "'encoding'", "]", "in", "[", "'bzip2'", ",", "'bz2'", "]", ":", "decompobj", "=", "bz2", ".", "BZ2Decompressor", "(", ")", "else", ":", "# Must close the file because if the file was opened above from detached filename, there is no \"with\" block", "# to close it for us", "fh", ".", "close", "(", ")", "raise", "NRRDError", "(", "'Unsupported encoding: \"%s\"'", "%", "header", "[", "'encoding'", "]", ")", "# Loop through the file and read a chunk at a time (see _READ_CHUNKSIZE why it is read in chunks)", "decompressed_data", "=", "bytearray", "(", ")", "# Read all of the remaining data from the file", "# Obtain the length of the compressed data since we will be using it repeatedly, more efficient", "compressed_data", "=", "fh", ".", "read", "(", ")", "compressed_data_len", "=", "len", "(", "compressed_data", ")", "start_index", "=", "0", "# Loop through data and decompress it chunk by chunk", "while", "start_index", "<", "compressed_data_len", ":", "# Calculate the end index = start index plus chunk size", "# Set to the string length to read the remaining chunk at the end", "end_index", "=", "min", "(", "start_index", "+", "_READ_CHUNKSIZE", ",", "compressed_data_len", ")", "# Decompress and append data", "decompressed_data", "+=", "decompobj", ".", "decompress", "(", "compressed_data", "[", "start_index", ":", "end_index", "]", ")", "# Update start index", "start_index", "=", "end_index", "# Delete the compressed data since we do not need it anymore", "# This could potentially be using a lot of memory", "del", "compressed_data", "# Byte skip is applied AFTER the decompression. 
Skip first x bytes of the decompressed data and parse it using", "# NumPy", "data", "=", "np", ".", "frombuffer", "(", "decompressed_data", "[", "byte_skip", ":", "]", ",", "dtype", ")", "# Close the file, even if opened using \"with\" block, closing it manually does not hurt", "fh", ".", "close", "(", ")", "if", "total_data_points", "!=", "data", ".", "size", ":", "raise", "NRRDError", "(", "'Size of the data does not equal the product of all the dimensions: {0}-{1}={2}'", ".", "format", "(", "total_data_points", ",", "data", ".", "size", ",", "total_data_points", "-", "data", ".", "size", ")", ")", "# In the NRRD header, the fields are specified in Fortran order, i.e, the first index is the one that changes", "# fastest and last index changes slowest. This needs to be taken into consideration since numpy uses C-order", "# indexing.", "# The array shape from NRRD (x,y,z) needs to be reversed as numpy expects (z,y,x).", "data", "=", "np", ".", "reshape", "(", "data", ",", "tuple", "(", "header", "[", "'sizes'", "]", "[", ":", ":", "-", "1", "]", ")", ")", "# Transpose data to enable Fortran indexing if requested.", "if", "index_order", "==", "'F'", ":", "data", "=", "data", ".", "T", "return", "data" ]
Read data from file into :class:`numpy.ndarray` The two parameters :obj:`fh` and :obj:`filename` are optional depending on the parameters but it never hurts to specify both. The file handle (:obj:`fh`) is necessary if the header is attached with the NRRD data. However, if the NRRD data is detached from the header, then the :obj:`filename` parameter is required to obtain the absolute path to the data file. See :ref:`user-guide:Reading NRRD files` for more information on reading NRRD files. Parameters ---------- header : :class:`dict` (:class:`str`, :obj:`Object`) Parsed fields/values obtained from :meth:`read_header` function fh : file-object, optional File object pointing to first byte of data. Only necessary if data is attached to header. filename : :class:`str`, optional Filename of the header file. Only necessary if data is detached from the header. This is used to get the absolute data path. index_order : {'C', 'F'}, optional Specifies the index order of the resulting data array. Either 'C' (C-order) where the dimensions are ordered from slowest-varying to fastest-varying (e.g. (z, y, x)), or 'F' (Fortran-order) where the dimensions are ordered from fastest-varying to slowest-varying (e.g. (x, y, z)). Returns ------- data : :class:`numpy.ndarray` Data read from NRRD file See Also -------- :meth:`read`, :meth:`read_header`
[ "Read", "data", "from", "file", "into", ":", "class", ":", "numpy", ".", "ndarray" ]
python
train
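In normal use read_data() is reached through the package's top-level helpers rather than called directly; a minimal round trip (the filename is a placeholder):

import numpy as np
import nrrd

data = np.linspace(0, 1, 24).reshape(2, 3, 4)
nrrd.write('demo.nrrd', data)                # attached header + data in one file

readback, header = nrrd.read('demo.nrrd')    # calls read_header() then read_data()
assert readback.shape == (2, 3, 4)
assert list(header['sizes']) == [2, 3, 4]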