Dataset columns:

  repo              string, length 7 to 54
  path              string, length 4 to 192
  url               string, length 87 to 284
  code              string, length 78 to 104k
  code_tokens       sequence
  docstring         string, length 1 to 46.9k
  docstring_tokens  sequence
  language          string, 1 class
  partition         string, 3 classes

Each record below lists these fields in order: repo, path, url, code, code_tokens, docstring, docstring_tokens, language, partition.
facelessuser/soupsieve
soupsieve/css_match.py
https://github.com/facelessuser/soupsieve/blob/24859cc3e756ebf46b75547d49c6b4a7bf35ee82/soupsieve/css_match.py#L611-L629
def match_attributes(self, el, attributes): """Match attributes.""" match = True if attributes: for a in attributes: value = self.match_attribute_name(el, a.attribute, a.prefix) pattern = a.xml_type_pattern if self.is_xml and a.xml_type_pattern else a.pattern if isinstance(value, list): value = ' '.join(value) if value is None: match = False break elif pattern is None: continue elif pattern.match(value) is None: match = False break return match
[ "def", "match_attributes", "(", "self", ",", "el", ",", "attributes", ")", ":", "match", "=", "True", "if", "attributes", ":", "for", "a", "in", "attributes", ":", "value", "=", "self", ".", "match_attribute_name", "(", "el", ",", "a", ".", "attribute", ",", "a", ".", "prefix", ")", "pattern", "=", "a", ".", "xml_type_pattern", "if", "self", ".", "is_xml", "and", "a", ".", "xml_type_pattern", "else", "a", ".", "pattern", "if", "isinstance", "(", "value", ",", "list", ")", ":", "value", "=", "' '", ".", "join", "(", "value", ")", "if", "value", "is", "None", ":", "match", "=", "False", "break", "elif", "pattern", "is", "None", ":", "continue", "elif", "pattern", ".", "match", "(", "value", ")", "is", "None", ":", "match", "=", "False", "break", "return", "match" ]
Match attributes.
[ "Match", "attributes", "." ]
python
train
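match_attributes above is soupsieve's internal attribute matcher; it is reached through the library's public select() API. A minimal usage sketch follows, with the HTML snippet and selectors being illustrative assumptions rather than anything taken from the record. Note that BeautifulSoup exposes multi-valued attributes such as rel as lists, which is exactly what the isinstance(value, list) branch above handles.

```python
# Illustrative sketch only: assumed HTML and selectors, exercising soupsieve's
# public API, which in turn drives the internal match_attributes() shown above.
from bs4 import BeautifulSoup
import soupsieve as sv

html = '<div><a href="/home" rel="nofollow">home</a><a name="top">top</a></div>'
soup = BeautifulSoup(html, 'html.parser')

links_with_href = sv.select('a[href]', soup)            # attribute presence
nofollow_links = sv.select('a[rel~="nofollow"]', soup)  # value in a space-separated list
print(len(links_with_href), len(nofollow_links))        # 1 1
```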
pkgw/pwkit
pwkit/phoenix.py
https://github.com/pkgw/pwkit/blob/d40957a1c3d2ea34e7ceac2267ee9635135f2793/pwkit/phoenix.py#L44-L103
def load_spectrum(path, smoothing=181, DF=-8.): """Load a Phoenix model atmosphere spectrum. path : string The file path to load. smoothing : integer Smoothing to apply. If None, do not smooth. If an integer, smooth with a Hamming window. Otherwise, the variable is assumed to be a different smoothing window, and the data will be convolved with it. DF: float Numerical factor used to compute the emergent flux density. Returns a Pandas DataFrame containing the columns: wlen Sample wavelength in Angstrom. flam Flux density in erg/cm²/s/Å. See `pwkit.synphot` for related tools. The values of *flam* returned by this function are computed from the second column of the data file as specified in the documentation: ``flam = 10**(col2 + DF)``. The documentation states that the default value, -8, is appropriate for most modern models; but some older models use other values. Loading takes about 5 seconds on my current laptop. Un-smoothed spectra have about 630,000 samples. """ try: ang, lflam = np.loadtxt(path, usecols=(0,1)).T except ValueError: # In some files, the numbers in the first columns fill up the # whole 12-character column width, and are given in exponential # notation with a 'D' character, so we must be more careful: with open(path, 'rb') as f: def lines(): for line in f: yield line.replace(b'D', b'e') ang, lflam = np.genfromtxt(lines(), delimiter=(13, 12)).T # Data files do not come sorted! z = ang.argsort() ang = ang[z] flam = 10**(lflam[z] + DF) del z if smoothing is not None: if isinstance(smoothing, int): smoothing = np.hamming(smoothing) else: smoothing = np.asarray(smoothing) wnorm = np.convolve(np.ones_like(smoothing), smoothing, mode='valid') smoothing = smoothing / wnorm # do not alter original array. smooth = lambda a: np.convolve(a, smoothing, mode='valid')[::smoothing.size] ang = smooth(ang) flam = smooth(flam) return pd.DataFrame({'wlen': ang, 'flam': flam})
[ "def", "load_spectrum", "(", "path", ",", "smoothing", "=", "181", ",", "DF", "=", "-", "8.", ")", ":", "try", ":", "ang", ",", "lflam", "=", "np", ".", "loadtxt", "(", "path", ",", "usecols", "=", "(", "0", ",", "1", ")", ")", ".", "T", "except", "ValueError", ":", "# In some files, the numbers in the first columns fill up the", "# whole 12-character column width, and are given in exponential", "# notation with a 'D' character, so we must be more careful:", "with", "open", "(", "path", ",", "'rb'", ")", "as", "f", ":", "def", "lines", "(", ")", ":", "for", "line", "in", "f", ":", "yield", "line", ".", "replace", "(", "b'D'", ",", "b'e'", ")", "ang", ",", "lflam", "=", "np", ".", "genfromtxt", "(", "lines", "(", ")", ",", "delimiter", "=", "(", "13", ",", "12", ")", ")", ".", "T", "# Data files do not come sorted!", "z", "=", "ang", ".", "argsort", "(", ")", "ang", "=", "ang", "[", "z", "]", "flam", "=", "10", "**", "(", "lflam", "[", "z", "]", "+", "DF", ")", "del", "z", "if", "smoothing", "is", "not", "None", ":", "if", "isinstance", "(", "smoothing", ",", "int", ")", ":", "smoothing", "=", "np", ".", "hamming", "(", "smoothing", ")", "else", ":", "smoothing", "=", "np", ".", "asarray", "(", "smoothing", ")", "wnorm", "=", "np", ".", "convolve", "(", "np", ".", "ones_like", "(", "smoothing", ")", ",", "smoothing", ",", "mode", "=", "'valid'", ")", "smoothing", "=", "smoothing", "/", "wnorm", "# do not alter original array.", "smooth", "=", "lambda", "a", ":", "np", ".", "convolve", "(", "a", ",", "smoothing", ",", "mode", "=", "'valid'", ")", "[", ":", ":", "smoothing", ".", "size", "]", "ang", "=", "smooth", "(", "ang", ")", "flam", "=", "smooth", "(", "flam", ")", "return", "pd", ".", "DataFrame", "(", "{", "'wlen'", ":", "ang", ",", "'flam'", ":", "flam", "}", ")" ]
Load a Phoenix model atmosphere spectrum. path : string The file path to load. smoothing : integer Smoothing to apply. If None, do not smooth. If an integer, smooth with a Hamming window. Otherwise, the variable is assumed to be a different smoothing window, and the data will be convolved with it. DF: float Numerical factor used to compute the emergent flux density. Returns a Pandas DataFrame containing the columns: wlen Sample wavelength in Angstrom. flam Flux density in erg/cm²/s/Å. See `pwkit.synphot` for related tools. The values of *flam* returned by this function are computed from the second column of the data file as specified in the documentation: ``flam = 10**(col2 + DF)``. The documentation states that the default value, -8, is appropriate for most modern models; but some older models use other values. Loading takes about 5 seconds on my current laptop. Un-smoothed spectra have about 630,000 samples.
[ "Load", "a", "Phoenix", "model", "atmosphere", "spectrum", "." ]
python
train
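The docstring's note about exponential notation written with a 'D' character is the non-obvious part of this loader. Below is a minimal sketch of that workaround on two lines of synthetic data; the numbers are assumptions, not a real Phoenix spectrum.

```python
# Fortran-style floats such as '1.0D+03' are not parsed by NumPy, so each raw
# line is rewritten with 'D' replaced by 'e' before handing it to genfromtxt.
import io
import numpy as np

raw = b"1.000000D+03 -8.12D+00\n2.000000D+03 -7.95D+00\n"

def lines(stream):
    for line in stream:
        yield line.replace(b'D', b'e')

ang, lflam = np.genfromtxt(lines(io.BytesIO(raw))).T
flam = 10 ** (lflam + (-8.0))   # DF = -8, the loader's default scaling factor
print(ang, flam)
```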
StanfordVL/robosuite
robosuite/models/tasks/table_top_task.py
https://github.com/StanfordVL/robosuite/blob/65cd16810e2ed647e3ec88746af3412065b7f278/robosuite/models/tasks/table_top_task.py#L65-L70
def place_objects(self): """Places objects randomly until no collisions or max iterations hit.""" pos_arr, quat_arr = self.initializer.sample() for i in range(len(self.objects)): self.objects[i].set("pos", array_to_string(pos_arr[i])) self.objects[i].set("quat", array_to_string(quat_arr[i]))
[ "def", "place_objects", "(", "self", ")", ":", "pos_arr", ",", "quat_arr", "=", "self", ".", "initializer", ".", "sample", "(", ")", "for", "i", "in", "range", "(", "len", "(", "self", ".", "objects", ")", ")", ":", "self", ".", "objects", "[", "i", "]", ".", "set", "(", "\"pos\"", ",", "array_to_string", "(", "pos_arr", "[", "i", "]", ")", ")", "self", ".", "objects", "[", "i", "]", ".", "set", "(", "\"quat\"", ",", "array_to_string", "(", "quat_arr", "[", "i", "]", ")", ")" ]
Places objects randomly until no collisions or max iterations hit.
[ "Places", "objects", "randomly", "until", "no", "collisions", "or", "max", "iterations", "hit", "." ]
python
train
abseil/abseil-py
absl/flags/_validators.py
https://github.com/abseil/abseil-py/blob/9d73fdaa23a6b6726aa5731390f388c0c6250ee5/absl/flags/_validators.py#L387-L421
def mark_flags_as_mutual_exclusive(flag_names, required=False, flag_values=_flagvalues.FLAGS): """Ensures that only one flag among flag_names is not None. Important note: This validator checks if flag values are None, and it does not distinguish between default and explicit values. Therefore, this validator does not make sense when applied to flags with default values other than None, including other false values (e.g. False, 0, '', []). That includes multi flags with a default value of [] instead of None. Args: flag_names: [str], names of the flags. required: bool. If true, exactly one of the flags must have a value other than None. Otherwise, at most one of the flags can have a value other than None, and it is valid for all of the flags to be None. flag_values: flags.FlagValues, optional FlagValues instance where the flags are defined. """ for flag_name in flag_names: if flag_values[flag_name].default is not None: warnings.warn( 'Flag --{} has a non-None default value. That does not make sense ' 'with mark_flags_as_mutual_exclusive, which checks whether the ' 'listed flags have a value other than None.'.format(flag_name)) def validate_mutual_exclusion(flags_dict): flag_count = sum(1 for val in flags_dict.values() if val is not None) if flag_count == 1 or (not required and flag_count == 0): return True raise _exceptions.ValidationError( '{} one of ({}) must have a value other than None.'.format( 'Exactly' if required else 'At most', ', '.join(flag_names))) register_multi_flags_validator( flag_names, validate_mutual_exclusion, flag_values=flag_values)
[ "def", "mark_flags_as_mutual_exclusive", "(", "flag_names", ",", "required", "=", "False", ",", "flag_values", "=", "_flagvalues", ".", "FLAGS", ")", ":", "for", "flag_name", "in", "flag_names", ":", "if", "flag_values", "[", "flag_name", "]", ".", "default", "is", "not", "None", ":", "warnings", ".", "warn", "(", "'Flag --{} has a non-None default value. That does not make sense '", "'with mark_flags_as_mutual_exclusive, which checks whether the '", "'listed flags have a value other than None.'", ".", "format", "(", "flag_name", ")", ")", "def", "validate_mutual_exclusion", "(", "flags_dict", ")", ":", "flag_count", "=", "sum", "(", "1", "for", "val", "in", "flags_dict", ".", "values", "(", ")", "if", "val", "is", "not", "None", ")", "if", "flag_count", "==", "1", "or", "(", "not", "required", "and", "flag_count", "==", "0", ")", ":", "return", "True", "raise", "_exceptions", ".", "ValidationError", "(", "'{} one of ({}) must have a value other than None.'", ".", "format", "(", "'Exactly'", "if", "required", "else", "'At most'", ",", "', '", ".", "join", "(", "flag_names", ")", ")", ")", "register_multi_flags_validator", "(", "flag_names", ",", "validate_mutual_exclusion", ",", "flag_values", "=", "flag_values", ")" ]
Ensures that only one flag among flag_names is not None. Important note: This validator checks if flag values are None, and it does not distinguish between default and explicit values. Therefore, this validator does not make sense when applied to flags with default values other than None, including other false values (e.g. False, 0, '', []). That includes multi flags with a default value of [] instead of None. Args: flag_names: [str], names of the flags. required: bool. If true, exactly one of the flags must have a value other than None. Otherwise, at most one of the flags can have a value other than None, and it is valid for all of the flags to be None. flag_values: flags.FlagValues, optional FlagValues instance where the flags are defined.
[ "Ensures", "that", "only", "one", "flag", "among", "flag_names", "is", "not", "None", "." ]
python
train
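A short usage sketch of this validator through absl's public API; the flag names are assumptions chosen for illustration. Both flags default to None, which is the condition the docstring says the validator relies on.

```python
# Hypothetical flag names; exactly one of the two must be passed on the
# command line because required=True.
from absl import app, flags

FLAGS = flags.FLAGS
flags.DEFINE_string('config_file', None, 'Path to a configuration file.')
flags.DEFINE_string('config_json', None, 'Inline JSON configuration.')
flags.mark_flags_as_mutual_exclusive(['config_file', 'config_json'], required=True)

def main(argv):
    del argv  # unused
    print(FLAGS.config_file or FLAGS.config_json)

if __name__ == '__main__':
    app.run(main)
```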
arne-cl/discoursegraphs
src/discoursegraphs/readwrite/exmaralda.py
https://github.com/arne-cl/discoursegraphs/blob/842f0068a3190be2c75905754521b176b25a54fb/src/discoursegraphs/readwrite/exmaralda.py#L437-L449
def gen_token_range(self, start_id, stop_id): """ returns a list of all token IDs in the given, left-closed, right-open interval (i.e. includes start_id, but excludes stop_id) >>> gen_token_range('T0', 'T1') ['T0'] >>> gen_token_range('T1', 'T5') ['T1', 'T2', 'T3', 'T4'] """ index_range = range(self.tokenid2index(start_id), self.tokenid2index(stop_id)) return ["T{}".format(index) for index in index_range]
[ "def", "gen_token_range", "(", "self", ",", "start_id", ",", "stop_id", ")", ":", "index_range", "=", "range", "(", "self", ".", "tokenid2index", "(", "start_id", ")", ",", "self", ".", "tokenid2index", "(", "stop_id", ")", ")", "return", "[", "\"T{}\"", ".", "format", "(", "index", ")", "for", "index", "in", "index_range", "]" ]
returns a list of all token IDs in the given, left-closed, right-open interval (i.e. includes start_id, but excludes stop_id) >>> gen_token_range('T0', 'T1') ['T0'] >>> gen_token_range('T1', 'T5') ['T1', 'T2', 'T3', 'T4']
[ "returns", "a", "list", "of", "all", "token", "IDs", "in", "the", "given", "left", "-", "closed", "right", "-", "open", "interval", "(", "i", ".", "e", ".", "includes", "start_id", "but", "excludes", "stop_id", ")" ]
python
train
ewels/MultiQC
multiqc/utils/mqc_colour.py
https://github.com/ewels/MultiQC/blob/2037d6322b2554146a74efbf869156ad20d4c4ec/multiqc/utils/mqc_colour.py#L41-L64
def get_colour(self, val, colformat='hex'): """ Given a value, return a colour within the colour scale """ try: # Sanity checks val = re.sub("[^0-9\.]", "", str(val)) if val == '': val = self.minval val = float(val) val = max(val, self.minval) val = min(val, self.maxval) domain_nums = list( np.linspace(self.minval, self.maxval, len(self.colours)) ) my_scale = spectra.scale(self.colours).domain(domain_nums) # Weird, I know. I ported this from the original JavaScript for continuity # Seems to work better than adjusting brightness / saturation / luminosity rgb_converter = lambda x: max(0, min(1, 1+((x-1)*0.3))) thecolour = spectra.rgb( *[rgb_converter(v) for v in my_scale(val).rgb] ) return thecolour.hexcode except: # Shouldn't crash all of MultiQC just for colours return ''
[ "def", "get_colour", "(", "self", ",", "val", ",", "colformat", "=", "'hex'", ")", ":", "try", ":", "# Sanity checks", "val", "=", "re", ".", "sub", "(", "\"[^0-9\\.]\"", ",", "\"\"", ",", "str", "(", "val", ")", ")", "if", "val", "==", "''", ":", "val", "=", "self", ".", "minval", "val", "=", "float", "(", "val", ")", "val", "=", "max", "(", "val", ",", "self", ".", "minval", ")", "val", "=", "min", "(", "val", ",", "self", ".", "maxval", ")", "domain_nums", "=", "list", "(", "np", ".", "linspace", "(", "self", ".", "minval", ",", "self", ".", "maxval", ",", "len", "(", "self", ".", "colours", ")", ")", ")", "my_scale", "=", "spectra", ".", "scale", "(", "self", ".", "colours", ")", ".", "domain", "(", "domain_nums", ")", "# Weird, I know. I ported this from the original JavaScript for continuity", "# Seems to work better than adjusting brightness / saturation / luminosity", "rgb_converter", "=", "lambda", "x", ":", "max", "(", "0", ",", "min", "(", "1", ",", "1", "+", "(", "(", "x", "-", "1", ")", "*", "0.3", ")", ")", ")", "thecolour", "=", "spectra", ".", "rgb", "(", "*", "[", "rgb_converter", "(", "v", ")", "for", "v", "in", "my_scale", "(", "val", ")", ".", "rgb", "]", ")", "return", "thecolour", ".", "hexcode", "except", ":", "# Shouldn't crash all of MultiQC just for colours", "return", "''" ]
Given a value, return a colour within the colour scale
[ "Given", "a", "value", "return", "a", "colour", "within", "the", "colour", "scale" ]
python
train
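get_colour delegates the actual interpolation to the spectra package. As a rough illustration of the underlying idea (clamp the value, spread the colour stops over an evenly spaced domain, interpolate between the neighbouring stops), here is a simplified standalone re-implementation; it is an assumption-level sketch, not the MultiQC code path.

```python
import numpy as np

def value_to_hex(val, minval, maxval, colours):
    """Map val onto a list of (r, g, b) stops (0-255) by linear interpolation."""
    rgb = np.array(colours, dtype=float)
    val = min(max(float(val), minval), maxval)          # clamp, as in get_colour()
    domain = np.linspace(minval, maxval, len(colours))  # one domain point per stop
    channels = [np.interp(val, domain, rgb[:, i]) for i in range(3)]
    return '#{:02x}{:02x}{:02x}'.format(*(int(round(c)) for c in channels))

# Three-stop scale from green through yellow to red over the range 0..100.
print(value_to_hex(75, 0, 100, [(0, 128, 0), (255, 255, 0), (255, 0, 0)]))  # '#ff8000'
```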
SheffieldML/GPy
GPy/likelihoods/likelihood.py
https://github.com/SheffieldML/GPy/blob/54c32d79d289d622fb18b898aee65a2a431d90cf/GPy/likelihoods/likelihood.py#L597-L622
def d2logpdf_df2(self, f, y, Y_metadata=None): """ Evaluates the link function link(f) then computes the second derivative of log likelihood using it Uses the Faa di Bruno's formula for the chain rule .. math:: \\frac{d^{2}\\log p(y|\\lambda(f))}{df^{2}} = \\frac{d^{2}\\log p(y|\\lambda(f))}{d^{2}\\lambda(f)}\\left(\\frac{d\\lambda(f)}{df}\\right)^{2} + \\frac{d\\log p(y|\\lambda(f))}{d\\lambda(f)}\\frac{d^{2}\\lambda(f)}{df^{2}} :param f: latent variables f :type f: Nx1 array :param y: data :type y: Nx1 array :param Y_metadata: Y_metadata which is not used in student t distribution - not used :returns: second derivative of log likelihood evaluated for this point (diagonal only) :rtype: 1xN array """ if isinstance(self.gp_link, link_functions.Identity): d2logpdf_df2 = self.d2logpdf_dlink2(f, y, Y_metadata=Y_metadata) else: inv_link_f = self.gp_link.transf(f) d2logpdf_dlink2 = self.d2logpdf_dlink2(inv_link_f, y, Y_metadata=Y_metadata) dlink_df = self.gp_link.dtransf_df(f) dlogpdf_dlink = self.dlogpdf_dlink(inv_link_f, y, Y_metadata=Y_metadata) d2link_df2 = self.gp_link.d2transf_df2(f) d2logpdf_df2 = chain_2(d2logpdf_dlink2, dlink_df, dlogpdf_dlink, d2link_df2) return d2logpdf_df2
[ "def", "d2logpdf_df2", "(", "self", ",", "f", ",", "y", ",", "Y_metadata", "=", "None", ")", ":", "if", "isinstance", "(", "self", ".", "gp_link", ",", "link_functions", ".", "Identity", ")", ":", "d2logpdf_df2", "=", "self", ".", "d2logpdf_dlink2", "(", "f", ",", "y", ",", "Y_metadata", "=", "Y_metadata", ")", "else", ":", "inv_link_f", "=", "self", ".", "gp_link", ".", "transf", "(", "f", ")", "d2logpdf_dlink2", "=", "self", ".", "d2logpdf_dlink2", "(", "inv_link_f", ",", "y", ",", "Y_metadata", "=", "Y_metadata", ")", "dlink_df", "=", "self", ".", "gp_link", ".", "dtransf_df", "(", "f", ")", "dlogpdf_dlink", "=", "self", ".", "dlogpdf_dlink", "(", "inv_link_f", ",", "y", ",", "Y_metadata", "=", "Y_metadata", ")", "d2link_df2", "=", "self", ".", "gp_link", ".", "d2transf_df2", "(", "f", ")", "d2logpdf_df2", "=", "chain_2", "(", "d2logpdf_dlink2", ",", "dlink_df", ",", "dlogpdf_dlink", ",", "d2link_df2", ")", "return", "d2logpdf_df2" ]
Evaluates the link function link(f) then computes the second derivative of log likelihood using it Uses the Faa di Bruno's formula for the chain rule .. math:: \\frac{d^{2}\\log p(y|\\lambda(f))}{df^{2}} = \\frac{d^{2}\\log p(y|\\lambda(f))}{d^{2}\\lambda(f)}\\left(\\frac{d\\lambda(f)}{df}\\right)^{2} + \\frac{d\\log p(y|\\lambda(f))}{d\\lambda(f)}\\frac{d^{2}\\lambda(f)}{df^{2}} :param f: latent variables f :type f: Nx1 array :param y: data :type y: Nx1 array :param Y_metadata: Y_metadata which is not used in student t distribution - not used :returns: second derivative of log likelihood evaluated for this point (diagonal only) :rtype: 1xN array
[ "Evaluates", "the", "link", "function", "link", "(", "f", ")", "then", "computes", "the", "second", "derivative", "of", "log", "likelihood", "using", "it", "Uses", "the", "Faa", "di", "Bruno", "s", "formula", "for", "the", "chain", "rule" ]
python
train
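The docstring's Faa di Bruno identity, d2 log p/df2 = (d2 log p/dl2)(dl/df)^2 + (d log p/dl)(d2l/df2), is what chain_2 combines. A quick numeric sanity check of that identity with a toy choice of log-likelihood g(l) = -l**2 and link l(f) = exp(f), both assumptions made only for this check:

```python
import numpy as np

f = np.linspace(-1.0, 1.0, 5)
lam = np.exp(f)                      # l(f); conveniently also l'(f) and l''(f)
d2g_dl2 = -2.0 * np.ones_like(lam)   # g''(l) for g(l) = -l**2
dg_dl = -2.0 * lam                   # g'(l)

chain = d2g_dl2 * lam**2 + dg_dl * lam   # the chain-rule combination
direct = -4.0 * np.exp(2.0 * f)          # d2/df2 of g(l(f)) = -exp(2f), by hand
print(np.allclose(chain, direct))        # True
```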
apple/turicreate
src/unity/python/turicreate/toolkits/nearest_neighbors/_nearest_neighbors.py
https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/unity/python/turicreate/toolkits/nearest_neighbors/_nearest_neighbors.py#L937-L1060
def similarity_graph(self, k=5, radius=None, include_self_edges=False, output_type='SGraph', verbose=True): """ Construct the similarity graph on the reference dataset, which is already stored in the model. This is conceptually very similar to running `query` with the reference set, but this method is optimized for the purpose, syntactically simpler, and automatically removes self-edges. Parameters ---------- k : int, optional Maximum number of neighbors to return for each point in the dataset. Setting this to ``None`` deactivates the constraint, so that all neighbors are returned within ``radius`` of a given point. radius : float, optional For a given point, only neighbors within this distance are returned. The default is ``None``, in which case the ``k`` nearest neighbors are returned for each query point, regardless of distance. include_self_edges : bool, optional For most distance functions, each point in the model's reference dataset is its own nearest neighbor. If this parameter is set to False, this result is ignored, and the nearest neighbors are returned *excluding* the point itself. output_type : {'SGraph', 'SFrame'}, optional By default, the results are returned in the form of an SGraph, where each point in the reference dataset is a vertex and an edge A -> B indicates that vertex B is a nearest neighbor of vertex A. If 'output_type' is set to 'SFrame', the output is in the same form as the results of the 'query' method: an SFrame with columns indicating the query label (in this case the query data is the same as the reference data), reference label, distance between the two points, and the rank of the neighbor. verbose : bool, optional If True, print progress updates and model details. Returns ------- out : SFrame or SGraph The type of the output object depends on the 'output_type' parameter. See the parameter description for more detail. Notes ----- - If both ``k`` and ``radius`` are set to ``None``, each data point is matched to the entire dataset. If the reference dataset has :math:`n` rows, the output is an SFrame with :math:`n^2` rows (or an SGraph with :math:`n^2` edges). - For models created with the 'lsh' method, the output similarity graph may have fewer vertices than there are data points in the original reference set. Because LSH is an approximate method, a query point may have fewer than 'k' neighbors. If LSH returns no neighbors at all for a query and self-edges are excluded, the query point is omitted from the results. Examples -------- First construct an SFrame and create a nearest neighbors model: >>> sf = turicreate.SFrame({'x1': [0.98, 0.62, 0.11], ... 'x2': [0.69, 0.58, 0.36]}) ... >>> model = turicreate.nearest_neighbors.create(sf, distance='euclidean') Unlike the ``query`` method, there is no need for a second dataset with ``similarity_graph``. >>> g = model.similarity_graph(k=1) # an SGraph >>> g.edges +----------+----------+----------------+------+ | __src_id | __dst_id | distance | rank | +----------+----------+----------------+------+ | 0 | 1 | 0.376430604494 | 1 | | 2 | 1 | 0.55542776308 | 1 | | 1 | 0 | 0.376430604494 | 1 | +----------+----------+----------------+------+ """ ## Validate inputs. if k is not None: if not isinstance(k, int): raise ValueError("Input 'k' must be an integer.") if k <= 0: raise ValueError("Input 'k' must be larger than 0.") if radius is not None: if not isinstance(radius, (int, float)): raise ValueError("Input 'radius' must be an integer or float.") if radius < 0: raise ValueError("Input 'radius' must be non-negative.") ## Set k and radius to special values to indicate 'None' if k is None: k = -1 if radius is None: radius = -1.0 opts = {'model': self.__proxy__, 'model_name': self.__name__, 'k': k, 'radius': radius, 'include_self_edges': include_self_edges} with QuietProgress(verbose): result = _turicreate.extensions._nearest_neighbors.similarity_graph(opts) knn = result['neighbors'] if output_type == "SFrame": return knn else: sg = _SGraph(edges=knn, src_field='query_label', dst_field='reference_label') return sg
[ "def", "similarity_graph", "(", "self", ",", "k", "=", "5", ",", "radius", "=", "None", ",", "include_self_edges", "=", "False", ",", "output_type", "=", "'SGraph'", ",", "verbose", "=", "True", ")", ":", "## Validate inputs.", "if", "k", "is", "not", "None", ":", "if", "not", "isinstance", "(", "k", ",", "int", ")", ":", "raise", "ValueError", "(", "\"Input 'k' must be an integer.\"", ")", "if", "k", "<=", "0", ":", "raise", "ValueError", "(", "\"Input 'k' must be larger than 0.\"", ")", "if", "radius", "is", "not", "None", ":", "if", "not", "isinstance", "(", "radius", ",", "(", "int", ",", "float", ")", ")", ":", "raise", "ValueError", "(", "\"Input 'radius' must be an integer or float.\"", ")", "if", "radius", "<", "0", ":", "raise", "ValueError", "(", "\"Input 'radius' must be non-negative.\"", ")", "## Set k and radius to special values to indicate 'None'", "if", "k", "is", "None", ":", "k", "=", "-", "1", "if", "radius", "is", "None", ":", "radius", "=", "-", "1.0", "opts", "=", "{", "'model'", ":", "self", ".", "__proxy__", ",", "'model_name'", ":", "self", ".", "__name__", ",", "'k'", ":", "k", ",", "'radius'", ":", "radius", ",", "'include_self_edges'", ":", "include_self_edges", "}", "with", "QuietProgress", "(", "verbose", ")", ":", "result", "=", "_turicreate", ".", "extensions", ".", "_nearest_neighbors", ".", "similarity_graph", "(", "opts", ")", "knn", "=", "result", "[", "'neighbors'", "]", "if", "output_type", "==", "\"SFrame\"", ":", "return", "knn", "else", ":", "sg", "=", "_SGraph", "(", "edges", "=", "knn", ",", "src_field", "=", "'query_label'", ",", "dst_field", "=", "'reference_label'", ")", "return", "sg" ]
Construct the similarity graph on the reference dataset, which is already stored in the model. This is conceptually very similar to running `query` with the reference set, but this method is optimized for the purpose, syntactically simpler, and automatically removes self-edges. Parameters ---------- k : int, optional Maximum number of neighbors to return for each point in the dataset. Setting this to ``None`` deactivates the constraint, so that all neighbors are returned within ``radius`` of a given point. radius : float, optional For a given point, only neighbors within this distance are returned. The default is ``None``, in which case the ``k`` nearest neighbors are returned for each query point, regardless of distance. include_self_edges : bool, optional For most distance functions, each point in the model's reference dataset is its own nearest neighbor. If this parameter is set to False, this result is ignored, and the nearest neighbors are returned *excluding* the point itself. output_type : {'SGraph', 'SFrame'}, optional By default, the results are returned in the form of an SGraph, where each point in the reference dataset is a vertex and an edge A -> B indicates that vertex B is a nearest neighbor of vertex A. If 'output_type' is set to 'SFrame', the output is in the same form as the results of the 'query' method: an SFrame with columns indicating the query label (in this case the query data is the same as the reference data), reference label, distance between the two points, and the rank of the neighbor. verbose : bool, optional If True, print progress updates and model details. Returns ------- out : SFrame or SGraph The type of the output object depends on the 'output_type' parameter. See the parameter description for more detail. Notes ----- - If both ``k`` and ``radius`` are set to ``None``, each data point is matched to the entire dataset. If the reference dataset has :math:`n` rows, the output is an SFrame with :math:`n^2` rows (or an SGraph with :math:`n^2` edges). - For models created with the 'lsh' method, the output similarity graph may have fewer vertices than there are data points in the original reference set. Because LSH is an approximate method, a query point may have fewer than 'k' neighbors. If LSH returns no neighbors at all for a query and self-edges are excluded, the query point is omitted from the results. Examples -------- First construct an SFrame and create a nearest neighbors model: >>> sf = turicreate.SFrame({'x1': [0.98, 0.62, 0.11], ... 'x2': [0.69, 0.58, 0.36]}) ... >>> model = turicreate.nearest_neighbors.create(sf, distance='euclidean') Unlike the ``query`` method, there is no need for a second dataset with ``similarity_graph``. >>> g = model.similarity_graph(k=1) # an SGraph >>> g.edges +----------+----------+----------------+------+ | __src_id | __dst_id | distance | rank | +----------+----------+----------------+------+ | 0 | 1 | 0.376430604494 | 1 | | 2 | 1 | 0.55542776308 | 1 | | 1 | 0 | 0.376430604494 | 1 | +----------+----------+----------------+------+
[ "Construct", "the", "similarity", "graph", "on", "the", "reference", "dataset", "which", "is", "already", "stored", "in", "the", "model", ".", "This", "is", "conceptually", "very", "similar", "to", "running", "query", "with", "the", "reference", "set", "but", "this", "method", "is", "optimized", "for", "the", "purpose", "syntactically", "simpler", "and", "automatically", "removes", "self", "-", "edges", "." ]
python
train
tsroten/dragonmapper
dragonmapper/transcriptions.py
https://github.com/tsroten/dragonmapper/blob/68eaf43c32725f4b4923c01284cfc0112079e8ab/dragonmapper/transcriptions.py#L438-L448
def to_zhuyin(s): """Convert *s* to Zhuyin.""" identity = identify(s) if identity == ZHUYIN: return s elif identity == PINYIN: return pinyin_to_zhuyin(s) elif identity == IPA: return ipa_to_zhuyin(s) else: raise ValueError("String is not a valid Chinese transcription.")
[ "def", "to_zhuyin", "(", "s", ")", ":", "identity", "=", "identify", "(", "s", ")", "if", "identity", "==", "ZHUYIN", ":", "return", "s", "elif", "identity", "==", "PINYIN", ":", "return", "pinyin_to_zhuyin", "(", "s", ")", "elif", "identity", "==", "IPA", ":", "return", "ipa_to_zhuyin", "(", "s", ")", "else", ":", "raise", "ValueError", "(", "\"String is not a valid Chinese transcription.\"", ")" ]
Convert *s* to Zhuyin.
[ "Convert", "*", "s", "*", "to", "Zhuyin", "." ]
python
train
bcbio/bcbio-nextgen
bcbio/chipseq/__init__.py
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/chipseq/__init__.py#L40-L57
def _keep_assembled_chrom(bam_file, genome, config): """Remove contigs from the BAM file""" fai = "%s.fai" % genome chrom = [] with open(fai) as inh: for line in inh: c = line.split("\t")[0] if c.find("_") < 0: chrom.append(c) chroms = " ".join(chrom) out_file = utils.append_stem(bam_file, '_chrom') samtools = config_utils.get_program("samtools", config) if not utils.file_exists(out_file): with file_transaction(out_file) as tx_out: cmd = "{samtools} view -b {bam_file} {chroms} > {tx_out}" do.run(cmd.format(**locals()), "Remove contigs from %s" % bam_file) bam.index(out_file, config) return out_file
[ "def", "_keep_assembled_chrom", "(", "bam_file", ",", "genome", ",", "config", ")", ":", "fai", "=", "\"%s.fai\"", "%", "genome", "chrom", "=", "[", "]", "with", "open", "(", "fai", ")", "as", "inh", ":", "for", "line", "in", "inh", ":", "c", "=", "line", ".", "split", "(", "\"\\t\"", ")", "[", "0", "]", "if", "c", ".", "find", "(", "\"_\"", ")", "<", "0", ":", "chrom", ".", "append", "(", "c", ")", "chroms", "=", "\" \"", ".", "join", "(", "chrom", ")", "out_file", "=", "utils", ".", "append_stem", "(", "bam_file", ",", "'_chrom'", ")", "samtools", "=", "config_utils", ".", "get_program", "(", "\"samtools\"", ",", "config", ")", "if", "not", "utils", ".", "file_exists", "(", "out_file", ")", ":", "with", "file_transaction", "(", "out_file", ")", "as", "tx_out", ":", "cmd", "=", "\"{samtools} view -b {bam_file} {chroms} > {tx_out}\"", "do", ".", "run", "(", "cmd", ".", "format", "(", "*", "*", "locals", "(", ")", ")", ",", "\"Remove contigs from %s\"", "%", "bam_file", ")", "bam", ".", "index", "(", "out_file", ",", "config", ")", "return", "out_file" ]
Remove contigs from the BAM file
[ "Remove", "contigs", "from", "the", "BAM", "file" ]
python
train
lmjohns3/theanets
theanets/layers/base.py
https://github.com/lmjohns3/theanets/blob/79db9f878ef2071f2f576a1cf5d43a752a55894a/theanets/layers/base.py#L396-L430
def add_weights(self, name, nin, nout, mean=0, std=0, sparsity=0, diagonal=0): '''Helper method to create a new weight matrix. Parameters ---------- name : str Name of the parameter to add. nin : int Size of "input" for this weight matrix. nout : int Size of "output" for this weight matrix. mean : float, optional Mean value for randomly-initialized weights. Defaults to 0. std : float, optional Standard deviation of initial matrix values. Defaults to :math:`1 / sqrt(n_i + n_o)`. sparsity : float, optional Fraction of weights to be set to zero. Defaults to 0. diagonal : float, optional Initialize weights to a matrix of zeros with this value along the diagonal. Defaults to None, which initializes all weights randomly. ''' glorot = 1 / np.sqrt(nin + nout) m = self.kwargs.get( 'mean_{}'.format(name), self.kwargs.get('mean', mean)) s = self.kwargs.get( 'std_{}'.format(name), self.kwargs.get('std', std or glorot)) p = self.kwargs.get( 'sparsity_{}'.format(name), self.kwargs.get('sparsity', sparsity)) d = self.kwargs.get( 'diagonal_{}'.format(name), self.kwargs.get('diagonal', diagonal)) self._params.append(theano.shared( util.random_matrix(nin, nout, mean=m, std=s, sparsity=p, diagonal=d, rng=self.rng), name=self._fmt(name)))
[ "def", "add_weights", "(", "self", ",", "name", ",", "nin", ",", "nout", ",", "mean", "=", "0", ",", "std", "=", "0", ",", "sparsity", "=", "0", ",", "diagonal", "=", "0", ")", ":", "glorot", "=", "1", "/", "np", ".", "sqrt", "(", "nin", "+", "nout", ")", "m", "=", "self", ".", "kwargs", ".", "get", "(", "'mean_{}'", ".", "format", "(", "name", ")", ",", "self", ".", "kwargs", ".", "get", "(", "'mean'", ",", "mean", ")", ")", "s", "=", "self", ".", "kwargs", ".", "get", "(", "'std_{}'", ".", "format", "(", "name", ")", ",", "self", ".", "kwargs", ".", "get", "(", "'std'", ",", "std", "or", "glorot", ")", ")", "p", "=", "self", ".", "kwargs", ".", "get", "(", "'sparsity_{}'", ".", "format", "(", "name", ")", ",", "self", ".", "kwargs", ".", "get", "(", "'sparsity'", ",", "sparsity", ")", ")", "d", "=", "self", ".", "kwargs", ".", "get", "(", "'diagonal_{}'", ".", "format", "(", "name", ")", ",", "self", ".", "kwargs", ".", "get", "(", "'diagonal'", ",", "diagonal", ")", ")", "self", ".", "_params", ".", "append", "(", "theano", ".", "shared", "(", "util", ".", "random_matrix", "(", "nin", ",", "nout", ",", "mean", "=", "m", ",", "std", "=", "s", ",", "sparsity", "=", "p", ",", "diagonal", "=", "d", ",", "rng", "=", "self", ".", "rng", ")", ",", "name", "=", "self", ".", "_fmt", "(", "name", ")", ")", ")" ]
Helper method to create a new weight matrix. Parameters ---------- name : str Name of the parameter to add. nin : int Size of "input" for this weight matrix. nout : int Size of "output" for this weight matrix. mean : float, optional Mean value for randomly-initialized weights. Defaults to 0. std : float, optional Standard deviation of initial matrix values. Defaults to :math:`1 / sqrt(n_i + n_o)`. sparsity : float, optional Fraction of weights to be set to zero. Defaults to 0. diagonal : float, optional Initialize weights to a matrix of zeros with this value along the diagonal. Defaults to None, which initializes all weights randomly.
[ "Helper", "method", "to", "create", "a", "new", "weight", "matrix", "." ]
python
test
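A standalone sketch of the initialization policy described in the docstring (Gaussian weights with std defaulting to 1/sqrt(nin + nout), optional sparsification, optional constant diagonal). This is a hypothetical stand-in for theanets' util.random_matrix, written only to make the parameter semantics concrete.

```python
import numpy as np

def random_matrix(nin, nout, mean=0.0, std=0.0, sparsity=0.0, diagonal=0.0,
                  rng=np.random):
    std = std or 1.0 / np.sqrt(nin + nout)    # Glorot-style default scale
    w = rng.normal(loc=mean, scale=std, size=(nin, nout))
    if sparsity > 0:
        w[rng.uniform(size=w.shape) < sparsity] = 0.0   # zero a random fraction
    if diagonal:
        w = np.zeros((nin, nout))
        np.fill_diagonal(w, diagonal)         # zeros with a constant diagonal
    return w

w = random_matrix(256, 128, sparsity=0.1)
print(w.shape, round(float((w == 0).mean()), 2))   # (256, 128) and roughly 0.1
```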
wandb/client
wandb/vendor/prompt_toolkit/buffer_mapping.py
https://github.com/wandb/client/blob/7d08954ed5674fee223cd85ed0d8518fe47266b2/wandb/vendor/prompt_toolkit/buffer_mapping.py#L85-L92
def pop_focus(self, cli): """ Pop buffer from the focus stack. """ if len(self.focus_stack) > 1: self.focus_stack.pop() else: raise IndexError('Cannot pop last item from the focus stack.')
[ "def", "pop_focus", "(", "self", ",", "cli", ")", ":", "if", "len", "(", "self", ".", "focus_stack", ")", ">", "1", ":", "self", ".", "focus_stack", ".", "pop", "(", ")", "else", ":", "raise", "IndexError", "(", "'Cannot pop last item from the focus stack.'", ")" ]
Pop buffer from the focus stack.
[ "Pop", "buffer", "from", "the", "focus", "stack", "." ]
python
train
JukeboxPipeline/jukebox-core
src/jukeboxcore/addons/guerilla/guerillamgmt.py
https://github.com/JukeboxPipeline/jukebox-core/blob/bac2280ca49940355270e4b69400ce9976ab2e6f/src/jukeboxcore/addons/guerilla/guerillamgmt.py#L1218-L1230
def prj_create_atype(self, *args, **kwargs): """Create a new project :returns: None :rtype: None :raises: None """ if not self.cur_prj: return atype = self.create_atype(projects=[self.cur_prj]) if atype: atypedata = djitemdata.AtypeItemData(atype) treemodel.TreeItem(atypedata, self.prj_atype_model.root)
[ "def", "prj_create_atype", "(", "self", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "if", "not", "self", ".", "cur_prj", ":", "return", "atype", "=", "self", ".", "create_atype", "(", "projects", "=", "[", "self", ".", "cur_prj", "]", ")", "if", "atype", ":", "atypedata", "=", "djitemdata", ".", "AtypeItemData", "(", "atype", ")", "treemodel", ".", "TreeItem", "(", "atypedata", ",", "self", ".", "prj_atype_model", ".", "root", ")" ]
Create a new project :returns: None :rtype: None :raises: None
[ "Create", "a", "new", "project" ]
python
train
DLR-RM/RAFCON
source/rafcon/core/states/container_state.py
https://github.com/DLR-RM/RAFCON/blob/24942ef1a904531f49ab8830a1dbb604441be498/source/rafcon/core/states/container_state.py#L904-L934
def related_linkage_states_and_scoped_variables(self, state_ids, scoped_variables): """ TODO: document """ # find all related transitions related_transitions = {'enclosed': [], 'ingoing': [], 'outgoing': []} for t in self.transitions.values(): # check if internal of new hierarchy state if t.from_state in state_ids and t.to_state in state_ids: related_transitions['enclosed'].append(t) elif t.to_state in state_ids: related_transitions['ingoing'].append(t) elif t.from_state in state_ids: related_transitions['outgoing'].append(t) # find all related data flows related_data_flows = {'enclosed': [], 'ingoing': [], 'outgoing': []} for df in self.data_flows.values(): # check if internal of new hierarchy state if df.from_state in state_ids and df.to_state in state_ids or \ df.from_state in state_ids and self.state_id == df.to_state and df.to_key in scoped_variables or \ self.state_id == df.from_state and df.from_key in scoped_variables and df.to_state in state_ids: related_data_flows['enclosed'].append(df) elif df.to_state in state_ids or \ self.state_id == df.to_state and df.to_key in scoped_variables: related_data_flows['ingoing'].append(df) elif df.from_state in state_ids or \ self.state_id == df.from_state and df.from_key in scoped_variables: related_data_flows['outgoing'].append(df) return related_transitions, related_data_flows
[ "def", "related_linkage_states_and_scoped_variables", "(", "self", ",", "state_ids", ",", "scoped_variables", ")", ":", "# find all related transitions", "related_transitions", "=", "{", "'enclosed'", ":", "[", "]", ",", "'ingoing'", ":", "[", "]", ",", "'outgoing'", ":", "[", "]", "}", "for", "t", "in", "self", ".", "transitions", ".", "values", "(", ")", ":", "# check if internal of new hierarchy state", "if", "t", ".", "from_state", "in", "state_ids", "and", "t", ".", "to_state", "in", "state_ids", ":", "related_transitions", "[", "'enclosed'", "]", ".", "append", "(", "t", ")", "elif", "t", ".", "to_state", "in", "state_ids", ":", "related_transitions", "[", "'ingoing'", "]", ".", "append", "(", "t", ")", "elif", "t", ".", "from_state", "in", "state_ids", ":", "related_transitions", "[", "'outgoing'", "]", ".", "append", "(", "t", ")", "# find all related data flows", "related_data_flows", "=", "{", "'enclosed'", ":", "[", "]", ",", "'ingoing'", ":", "[", "]", ",", "'outgoing'", ":", "[", "]", "}", "for", "df", "in", "self", ".", "data_flows", ".", "values", "(", ")", ":", "# check if internal of new hierarchy state", "if", "df", ".", "from_state", "in", "state_ids", "and", "df", ".", "to_state", "in", "state_ids", "or", "df", ".", "from_state", "in", "state_ids", "and", "self", ".", "state_id", "==", "df", ".", "to_state", "and", "df", ".", "to_key", "in", "scoped_variables", "or", "self", ".", "state_id", "==", "df", ".", "from_state", "and", "df", ".", "from_key", "in", "scoped_variables", "and", "df", ".", "to_state", "in", "state_ids", ":", "related_data_flows", "[", "'enclosed'", "]", ".", "append", "(", "df", ")", "elif", "df", ".", "to_state", "in", "state_ids", "or", "self", ".", "state_id", "==", "df", ".", "to_state", "and", "df", ".", "to_key", "in", "scoped_variables", ":", "related_data_flows", "[", "'ingoing'", "]", ".", "append", "(", "df", ")", "elif", "df", ".", "from_state", "in", "state_ids", "or", "self", ".", "state_id", "==", "df", ".", "from_state", "and", "df", ".", "from_key", "in", "scoped_variables", ":", "related_data_flows", "[", "'outgoing'", "]", ".", "append", "(", "df", ")", "return", "related_transitions", ",", "related_data_flows" ]
TODO: document
[ "TODO", ":", "document" ]
python
train
abilian/abilian-core
abilian/core/sqlalchemy.py
https://github.com/abilian/abilian-core/blob/0a71275bf108c3d51e13ca9e093c0249235351e3/abilian/core/sqlalchemy.py#L187-L196
def coerce(cls, key, value): """Convert list to MutationList.""" if not isinstance(value, MutationList): if isinstance(value, list): return MutationList(value) # this call will raise ValueError return Mutable.coerce(key, value) else: return value
[ "def", "coerce", "(", "cls", ",", "key", ",", "value", ")", ":", "if", "not", "isinstance", "(", "value", ",", "MutationList", ")", ":", "if", "isinstance", "(", "value", ",", "list", ")", ":", "return", "MutationList", "(", "value", ")", "# this call will raise ValueError", "return", "Mutable", ".", "coerce", "(", "key", ",", "value", ")", "else", ":", "return", "value" ]
Convert list to MutationList.
[ "Convert", "list", "to", "MutationList", "." ]
python
train
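For context, coerce() is the hook SQLAlchemy's mutation-tracking extension calls whenever a plain value is assigned to a tracked attribute. The pattern with SQLAlchemy's stock MutableList looks like the sketch below (model and column names are assumptions); abilian's MutationList plays the same role for its own column types.

```python
from sqlalchemy import Column, Integer, PickleType
from sqlalchemy.ext.mutable import MutableList
from sqlalchemy.orm import declarative_base

Base = declarative_base()

class Item(Base):
    __tablename__ = 'item'
    id = Column(Integer, primary_key=True)
    # Plain lists assigned to 'tags' are coerced into MutableList instances,
    # so in-place edits (append, remove, ...) flag the attribute as changed.
    tags = Column(MutableList.as_mutable(PickleType), default=list)

item = Item(tags=['a'])
item.tags.append('b')   # tracked, thanks to the coercion on assignment
print(item.tags)        # ['a', 'b']
```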
JamesRamm/longclaw
longclaw/basket/forms.py
https://github.com/JamesRamm/longclaw/blob/8bbf2e6d703271b815ec111813c7c5d1d4e4e810/longclaw/basket/forms.py#L13-L19
def clean(self): """ Check user has cookies enabled """ if self.request: if not self.request.session.test_cookie_worked(): raise forms.ValidationError("Cookies must be enabled.") return self.cleaned_data
[ "def", "clean", "(", "self", ")", ":", "if", "self", ".", "request", ":", "if", "not", "self", ".", "request", ".", "session", ".", "test_cookie_worked", "(", ")", ":", "raise", "forms", ".", "ValidationError", "(", "\"Cookies must be enabled.\"", ")", "return", "self", ".", "cleaned_data" ]
Check user has cookies enabled
[ "Check", "user", "has", "cookies", "enabled" ]
python
train
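clean() only makes sense if an earlier response actually set the test cookie. A hedged sketch of that round trip in a plain Django view follows; the view, form, and template names are assumptions, not longclaw's code.

```python
from django import forms
from django.shortcuts import render

class BasketForm(forms.Form):
    quantity = forms.IntegerField(min_value=1)

def basket_view(request):
    if request.method == 'POST':
        form = BasketForm(request.POST)
        if form.is_valid() and request.session.test_cookie_worked():
            request.session.delete_test_cookie()
            # ... add the item to the basket here ...
    else:
        form = BasketForm()
        request.session.set_test_cookie()   # paired with the check in clean()
    return render(request, 'basket.html', {'form': form})
```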
WalletGuild/desw
desw/server.py
https://github.com/WalletGuild/desw/blob/f966c612e675961d9dbd8268749e349ba10a47c2/desw/server.py#L51-L58
def get_user_by_key(app, key): """ An SQLAlchemy User getting function. Get a user by public key. :param str key: the public key the user belongs to """ user = ses.query(um.User).join(um.UserKey).filter(um.UserKey.key==key).first() return user
[ "def", "get_user_by_key", "(", "app", ",", "key", ")", ":", "user", "=", "ses", ".", "query", "(", "um", ".", "User", ")", ".", "join", "(", "um", ".", "UserKey", ")", ".", "filter", "(", "um", ".", "UserKey", ".", "key", "==", "key", ")", ".", "first", "(", ")", "return", "user" ]
An SQLAlchemy User getting function. Get a user by public key. :param str key: the public key the user belongs to
[ "An", "SQLAlchemy", "User", "getting", "function", ".", "Get", "a", "user", "by", "public", "key", "." ]
python
train
bcbio/bcbio-nextgen
bcbio/server/main.py
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/server/main.py#L8-L15
def start(args): """Run server with provided command line arguments. """ application = tornado.web.Application([(r"/run", run.get_handler(args)), (r"/status", run.StatusHandler)]) application.runmonitor = RunMonitor() application.listen(args.port) tornado.ioloop.IOLoop.instance().start()
[ "def", "start", "(", "args", ")", ":", "application", "=", "tornado", ".", "web", ".", "Application", "(", "[", "(", "r\"/run\"", ",", "run", ".", "get_handler", "(", "args", ")", ")", ",", "(", "r\"/status\"", ",", "run", ".", "StatusHandler", ")", "]", ")", "application", ".", "runmonitor", "=", "RunMonitor", "(", ")", "application", ".", "listen", "(", "args", ".", "port", ")", "tornado", ".", "ioloop", ".", "IOLoop", ".", "instance", "(", ")", ".", "start", "(", ")" ]
Run server with provided command line arguments.
[ "Run", "server", "with", "provided", "command", "line", "arguments", "." ]
python
train
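A minimal runnable sketch of the same Tornado pattern with a stand-in handler; the real /run and /status handlers live in bcbio.server.run and are not reproduced here.

```python
import tornado.ioloop
import tornado.web

class StatusHandler(tornado.web.RequestHandler):
    def get(self):
        self.write({"status": "idle"})   # placeholder payload

def start(port=8080):
    application = tornado.web.Application([(r"/status", StatusHandler)])
    application.listen(port)
    tornado.ioloop.IOLoop.current().start()

if __name__ == "__main__":
    start()
```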
softlayer/softlayer-python
SoftLayer/CLI/cdn/origin_list.py
https://github.com/softlayer/softlayer-python/blob/9f181be08cc3668353b05a6de0cb324f52cff6fa/SoftLayer/CLI/cdn/origin_list.py#L14-L28
def cli(env, account_id): """List origin pull mappings.""" manager = SoftLayer.CDNManager(env.client) origins = manager.get_origins(account_id) table = formatting.Table(['id', 'media_type', 'cname', 'origin_url']) for origin in origins: table.add_row([origin['id'], origin['mediaType'], origin.get('cname', formatting.blank()), origin['originUrl']]) env.fout(table)
[ "def", "cli", "(", "env", ",", "account_id", ")", ":", "manager", "=", "SoftLayer", ".", "CDNManager", "(", "env", ".", "client", ")", "origins", "=", "manager", ".", "get_origins", "(", "account_id", ")", "table", "=", "formatting", ".", "Table", "(", "[", "'id'", ",", "'media_type'", ",", "'cname'", ",", "'origin_url'", "]", ")", "for", "origin", "in", "origins", ":", "table", ".", "add_row", "(", "[", "origin", "[", "'id'", "]", ",", "origin", "[", "'mediaType'", "]", ",", "origin", ".", "get", "(", "'cname'", ",", "formatting", ".", "blank", "(", ")", ")", ",", "origin", "[", "'originUrl'", "]", "]", ")", "env", ".", "fout", "(", "table", ")" ]
List origin pull mappings.
[ "List", "origin", "pull", "mappings", "." ]
python
train
kislyuk/aegea
aegea/audit.py
https://github.com/kislyuk/aegea/blob/94957e9dba036eae3052e2662c208b259c08399a/aegea/audit.py#L79-L87
def audit_1_4(self): """1.4 Ensure access keys are rotated every 90 days or less (Scored)""" for row in self.credential_report: for access_key in "1", "2": if json.loads(row["access_key_{}_active".format(access_key)]): last_rotated = row["access_key_{}_last_rotated".format(access_key)] if self.parse_date(last_rotated) < datetime.now(tzutc()) - timedelta(days=90): msg = "Active access key {} in account {} last rotated over 90 days ago" raise Exception(msg.format(access_key, row["user"]))
[ "def", "audit_1_4", "(", "self", ")", ":", "for", "row", "in", "self", ".", "credential_report", ":", "for", "access_key", "in", "\"1\"", ",", "\"2\"", ":", "if", "json", ".", "loads", "(", "row", "[", "\"access_key_{}_active\"", ".", "format", "(", "access_key", ")", "]", ")", ":", "last_rotated", "=", "row", "[", "\"access_key_{}_last_rotated\"", ".", "format", "(", "access_key", ")", "]", "if", "self", ".", "parse_date", "(", "last_rotated", ")", "<", "datetime", ".", "now", "(", "tzutc", "(", ")", ")", "-", "timedelta", "(", "days", "=", "90", ")", ":", "msg", "=", "\"Active access key {} in account {} last rotated over 90 days ago\"", "raise", "Exception", "(", "msg", ".", "format", "(", "access_key", ",", "row", "[", "\"user\"", "]", ")", ")" ]
1.4 Ensure access keys are rotated every 90 days or less (Scored)
[ "1", ".", "4", "Ensure", "access", "keys", "are", "rotated", "every", "90", "days", "or", "less", "(", "Scored", ")" ]
python
train
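The core of the check is plain date arithmetic: a key whose last rotation is older than 90 days fails the audit. A standalone sketch with an assumed ISO 8601 timestamp in the style of the AWS credential report:

```python
from datetime import datetime, timedelta
from dateutil.parser import parse
from dateutil.tz import tzutc

last_rotated = "2018-01-15T10:30:00+00:00"   # hypothetical report value

if parse(last_rotated) < datetime.now(tzutc()) - timedelta(days=90):
    print("Active access key last rotated over 90 days ago")
```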
esheldon/fitsio
fitsio/hdu/table.py
https://github.com/esheldon/fitsio/blob/a6f07919f457a282fe240adad9d2c30906b71a15/fitsio/hdu/table.py#L562-L606
def read(self, **keys): """ read data from this HDU By default, all data are read. send columns= and rows= to select subsets of the data. Table data are read into a recarray; use read_column() to get a single column as an ordinary array. You can alternatively use slice notation fits=fitsio.FITS(filename) fits[ext][:] fits[ext][2:5] fits[ext][200:235:2] fits[ext][rows] fits[ext][cols][rows] parameters ---------- columns: optional An optional set of columns to read from table HDUs. Default is to read all. Can be string or number. If a sequence, a recarray is always returned. If a scalar, an ordinary array is returned. rows: optional An optional list of rows to read from table HDUS. Default is to read all. vstorage: string, optional Over-ride the default method to store variable length columns. Can be 'fixed' or 'object'. See docs on fitsio.FITS for details. """ columns = keys.get('columns', None) rows = keys.get('rows', None) if columns is not None: if 'columns' in keys: del keys['columns'] data = self.read_columns(columns, **keys) elif rows is not None: if 'rows' in keys: del keys['rows'] data = self.read_rows(rows, **keys) else: data = self._read_all(**keys) return data
[ "def", "read", "(", "self", ",", "*", "*", "keys", ")", ":", "columns", "=", "keys", ".", "get", "(", "'columns'", ",", "None", ")", "rows", "=", "keys", ".", "get", "(", "'rows'", ",", "None", ")", "if", "columns", "is", "not", "None", ":", "if", "'columns'", "in", "keys", ":", "del", "keys", "[", "'columns'", "]", "data", "=", "self", ".", "read_columns", "(", "columns", ",", "*", "*", "keys", ")", "elif", "rows", "is", "not", "None", ":", "if", "'rows'", "in", "keys", ":", "del", "keys", "[", "'rows'", "]", "data", "=", "self", ".", "read_rows", "(", "rows", ",", "*", "*", "keys", ")", "else", ":", "data", "=", "self", ".", "_read_all", "(", "*", "*", "keys", ")", "return", "data" ]
read data from this HDU By default, all data are read. send columns= and rows= to select subsets of the data. Table data are read into a recarray; use read_column() to get a single column as an ordinary array. You can alternatively use slice notation fits=fitsio.FITS(filename) fits[ext][:] fits[ext][2:5] fits[ext][200:235:2] fits[ext][rows] fits[ext][cols][rows] parameters ---------- columns: optional An optional set of columns to read from table HDUs. Default is to read all. Can be string or number. If a sequence, a recarray is always returned. If a scalar, an ordinary array is returned. rows: optional An optional list of rows to read from table HDUS. Default is to read all. vstorage: string, optional Over-ride the default method to store variable length columns. Can be 'fixed' or 'object'. See docs on fitsio.FITS for details.
[ "read", "data", "from", "this", "HDU" ]
python
train
PMEAL/OpenPNM
openpnm/models/phases/viscosity.py
https://github.com/PMEAL/OpenPNM/blob/0547b5724ffedc0a593aae48639d36fe10e0baed/openpnm/models/phases/viscosity.py#L4-L67
def water(target, temperature='pore.temperature', salinity='pore.salinity'): r""" Calculates viscosity of pure water or seawater at atmospheric pressure using Eq. (22) given by Sharqawy et. al [1]. Values at temperature higher than the normal boiling temperature are calculated at the saturation pressure. Parameters ---------- target : OpenPNM Object The object for which these values are being calculated. This controls the length of the calculated array, and also provides access to other necessary thermofluid properties. temperature : string The dictionary key containing the temperature values. Temperature must be in Kelvin for this emperical equation to work. Can be either a pore or throat array. salinity : string The dictionary key containing the salinity values. Salinity must be expressed in g of salt per kg of solution (ppt). Can be either a pore or throat array, but must be consistent with ``temperature``. Returns ------- mu_sw, the viscosity of water/seawater in [kg/m.s] Notes ----- T must be in K, and S in g of salt per kg of phase, or ppt (parts per thousand) VALIDITY: 273 < T < 453 K; 0 < S < 150 g/kg; ACCURACY: 1.5 % References ---------- [1] Sharqawy M. H., Lienhard J. H., and Zubair, S. M., Desalination and Water Treatment, 2010. """ T = target[temperature] if salinity in target.keys(): S = target[salinity] else: S = 0 TC = T-273.15 S = S/1000 a1 = 1.5700386464E-01 a2 = 6.4992620050E+01 a3 = -9.1296496657E+01 a4 = 4.2844324477E-05 mu_w = a4 + 1/(a1*(TC+a2)**2+a3) a5 = 1.5409136040E+00 a6 = 1.9981117208E-02 a7 = -9.5203865864E-05 a8 = 7.9739318223E+00 a9 = -7.5614568881E-02 a10 = 4.7237011074E-04 A = a5 + a6*T + a7*T**2 B = a8 + a9*T + a10*T**2 mu_sw = mu_w*(1 + A*S + B*S**2) value = mu_sw return value
[ "def", "water", "(", "target", ",", "temperature", "=", "'pore.temperature'", ",", "salinity", "=", "'pore.salinity'", ")", ":", "T", "=", "target", "[", "temperature", "]", "if", "salinity", "in", "target", ".", "keys", "(", ")", ":", "S", "=", "target", "[", "salinity", "]", "else", ":", "S", "=", "0", "TC", "=", "T", "-", "273.15", "S", "=", "S", "/", "1000", "a1", "=", "1.5700386464E-01", "a2", "=", "6.4992620050E+01", "a3", "=", "-", "9.1296496657E+01", "a4", "=", "4.2844324477E-05", "mu_w", "=", "a4", "+", "1", "/", "(", "a1", "*", "(", "TC", "+", "a2", ")", "**", "2", "+", "a3", ")", "a5", "=", "1.5409136040E+00", "a6", "=", "1.9981117208E-02", "a7", "=", "-", "9.5203865864E-05", "a8", "=", "7.9739318223E+00", "a9", "=", "-", "7.5614568881E-02", "a10", "=", "4.7237011074E-04", "A", "=", "a5", "+", "a6", "*", "T", "+", "a7", "*", "T", "**", "2", "B", "=", "a8", "+", "a9", "*", "T", "+", "a10", "*", "T", "**", "2", "mu_sw", "=", "mu_w", "*", "(", "1", "+", "A", "*", "S", "+", "B", "*", "S", "**", "2", ")", "value", "=", "mu_sw", "return", "value" ]
r""" Calculates viscosity of pure water or seawater at atmospheric pressure using Eq. (22) given by Sharqawy et. al [1]. Values at temperature higher than the normal boiling temperature are calculated at the saturation pressure. Parameters ---------- target : OpenPNM Object The object for which these values are being calculated. This controls the length of the calculated array, and also provides access to other necessary thermofluid properties. temperature : string The dictionary key containing the temperature values. Temperature must be in Kelvin for this emperical equation to work. Can be either a pore or throat array. salinity : string The dictionary key containing the salinity values. Salinity must be expressed in g of salt per kg of solution (ppt). Can be either a pore or throat array, but must be consistent with ``temperature``. Returns ------- mu_sw, the viscosity of water/seawater in [kg/m.s] Notes ----- T must be in K, and S in g of salt per kg of phase, or ppt (parts per thousand) VALIDITY: 273 < T < 453 K; 0 < S < 150 g/kg; ACCURACY: 1.5 % References ---------- [1] Sharqawy M. H., Lienhard J. H., and Zubair, S. M., Desalination and Water Treatment, 2010.
[ "r", "Calculates", "viscosity", "of", "pure", "water", "or", "seawater", "at", "atmospheric", "pressure", "using", "Eq", ".", "(", "22", ")", "given", "by", "Sharqawy", "et", ".", "al", "[", "1", "]", ".", "Values", "at", "temperature", "higher", "than", "the", "normal", "boiling", "temperature", "are", "calculated", "at", "the", "saturation", "pressure", "." ]
python
train
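A self-contained numeric check of Eq. (22) as implemented above, using the same coefficients; T is in kelvin and S in grams of salt per kilogram of solution (ppt). At 25 degrees C and zero salinity the result should land near the textbook value of about 8.9e-4 kg/m.s.

```python
def water_viscosity(T, S=0.0):
    """Water/seawater viscosity [kg/m.s] per Sharqawy et al. Eq. (22), as above."""
    TC = T - 273.15
    S = S / 1000.0
    mu_w = 4.2844324477E-05 + 1.0 / (1.5700386464E-01 * (TC + 6.4992620050E+01)**2
                                     - 9.1296496657E+01)
    A = 1.5409136040E+00 + 1.9981117208E-02 * T - 9.5203865864E-05 * T**2
    B = 7.9739318223E+00 - 7.5614568881E-02 * T + 4.7237011074E-04 * T**2
    return mu_w * (1 + A * S + B * S**2)

print(water_viscosity(298.15))        # pure water at 25 C, roughly 8.9e-4
print(water_viscosity(298.15, 35.0))  # typical seawater salinity of 35 g/kg
```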
ambv/flake8-pyi
pyi.py
https://github.com/ambv/flake8-pyi/blob/19e8028b44b6305dff1bfb9a51a23a029c546993/pyi.py#L29-L43
def ASSIGN(self, node): """This is a custom implementation of ASSIGN derived from handleChildren() in pyflakes 1.3.0. The point here is that on module level, there's type aliases that we want to bind eagerly, but defer computation of the values of the assignments (the type aliases might have forward references). """ if not isinstance(self.scope, ModuleScope): return super().ASSIGN(node) for target in node.targets: self.handleNode(target, node) self.deferHandleNode(node.value, node)
[ "def", "ASSIGN", "(", "self", ",", "node", ")", ":", "if", "not", "isinstance", "(", "self", ".", "scope", ",", "ModuleScope", ")", ":", "return", "super", "(", ")", ".", "ASSIGN", "(", "node", ")", "for", "target", "in", "node", ".", "targets", ":", "self", ".", "handleNode", "(", "target", ",", "node", ")", "self", ".", "deferHandleNode", "(", "node", ".", "value", ",", "node", ")" ]
This is a custom implementation of ASSIGN derived from handleChildren() in pyflakes 1.3.0. The point here is that on module level, there's type aliases that we want to bind eagerly, but defer computation of the values of the assignments (the type aliases might have forward references).
[ "This", "is", "a", "custom", "implementation", "of", "ASSIGN", "derived", "from", "handleChildren", "()", "in", "pyflakes", "1", ".", "3", ".", "0", "." ]
python
train
biolink/ontobio
ontobio/golr/golr_associations.py
https://github.com/biolink/ontobio/blob/4e512a7831cfe6bc1b32f2c3be2ba41bc5cf7345/ontobio/golr/golr_associations.py#L126-L152
def bulk_fetch(subject_category, object_category, taxon, rows=MAX_ROWS, **kwargs): """ Fetch associations for a species and pair of categories in bulk. Arguments: - subject_category: String (not None) - object_category: String (not None) - taxon: String - rows: int Additionally, any argument for search_associations can be passed """ assert subject_category is not None assert object_category is not None time.sleep(1) logging.info("Bulk query: {} {} {}".format(subject_category, object_category, taxon)) assocs = search_associations_compact(subject_category=subject_category, object_category=object_category, subject_taxon=taxon, rows=rows, iterate=True, **kwargs) logging.info("Rows retrieved: {}".format(len(assocs))) if len(assocs) == 0: logging.error("No associations returned for query: {} {} {}".format(subject_category, object_category, taxon)) return assocs
[ "def", "bulk_fetch", "(", "subject_category", ",", "object_category", ",", "taxon", ",", "rows", "=", "MAX_ROWS", ",", "*", "*", "kwargs", ")", ":", "assert", "subject_category", "is", "not", "None", "assert", "object_category", "is", "not", "None", "time", ".", "sleep", "(", "1", ")", "logging", ".", "info", "(", "\"Bulk query: {} {} {}\"", ".", "format", "(", "subject_category", ",", "object_category", ",", "taxon", ")", ")", "assocs", "=", "search_associations_compact", "(", "subject_category", "=", "subject_category", ",", "object_category", "=", "object_category", ",", "subject_taxon", "=", "taxon", ",", "rows", "=", "rows", ",", "iterate", "=", "True", ",", "*", "*", "kwargs", ")", "logging", ".", "info", "(", "\"Rows retrieved: {}\"", ".", "format", "(", "len", "(", "assocs", ")", ")", ")", "if", "len", "(", "assocs", ")", "==", "0", ":", "logging", ".", "error", "(", "\"No associations returned for query: {} {} {}\"", ".", "format", "(", "subject_category", ",", "object_category", ",", "taxon", ")", ")", "return", "assocs" ]
Fetch associations for a species and pair of categories in bulk. Arguments: - subject_category: String (not None) - object_category: String (not None) - taxon: String - rows: int Additionally, any argument for search_associations can be passed
[ "Fetch", "associations", "for", "a", "species", "and", "pair", "of", "categories", "in", "bulk", "." ]
python
train
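A minimal usage sketch for the bulk_fetch record above; the import path is taken from the record's file location, while the category names and taxon CURIE are illustrative placeholders, and the call issues a live query against the configured GOlr service.

from ontobio.golr.golr_associations import bulk_fetch

# 'gene' / 'phenotype' and the NCBITaxon CURIE are placeholder values for illustration.
assocs = bulk_fetch('gene', 'phenotype', 'NCBITaxon:9606', rows=1000)
print(len(assocs))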
knipknap/exscript
Exscript/util/ipv4.py
https://github.com/knipknap/exscript/blob/72718eee3e87b345d5a5255be9824e867e42927b/Exscript/util/ipv4.py#L221-L235
def remote_ip(local_ip): """ Given an IP address, this function calculates the remaining available IP address under the assumption that it is a /30 network. In other words, given one link net address, this function returns the other link net address. :type local_ip: string :param local_ip: An IP address. :rtype: string :return: The other IP address of the link address pair. """ local_ip = ip2int(local_ip) network = local_ip & pfxlen2mask_int(30) return int2ip(network + 3 - (local_ip - network))
[ "def", "remote_ip", "(", "local_ip", ")", ":", "local_ip", "=", "ip2int", "(", "local_ip", ")", "network", "=", "local_ip", "&", "pfxlen2mask_int", "(", "30", ")", "return", "int2ip", "(", "network", "+", "3", "-", "(", "local_ip", "-", "network", ")", ")" ]
Given an IP address, this function calculates the remaining available IP address under the assumption that it is a /30 network. In other words, given one link net address, this function returns the other link net address. :type local_ip: string :param local_ip: An IP address. :rtype: string :return: The other IP address of the link address pair.
[ "Given", "an", "IP", "address", "this", "function", "calculates", "the", "remaining", "available", "IP", "address", "under", "the", "assumption", "that", "it", "is", "a", "/", "30", "network", ".", "In", "other", "words", "given", "one", "link", "net", "address", "this", "function", "returns", "the", "other", "link", "net", "address", "." ]
python
train
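To make the /30 pairing in remote_ip above concrete, here is a small standalone sketch of the same arithmetic using only the standard library; it mirrors the network + 3 - offset trick rather than calling Exscript itself.

import ipaddress

def link_peer(ip):
    # Clear the low two bits to get the /30 network, then flip the offset (0<->3, 1<->2).
    addr = int(ipaddress.ip_address(ip))
    network = addr & 0xFFFFFFFC
    return str(ipaddress.ip_address(network + 3 - (addr - network)))

print(link_peer("10.0.0.1"))  # -> 10.0.0.2
print(link_peer("10.0.0.2"))  # -> 10.0.0.1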
Azure/blobxfer
blobxfer/util.py
https://github.com/Azure/blobxfer/blob/3eccbe7530cc6a20ab2d30f9e034b6f021817f34/blobxfer/util.py#L267-L277
def page_align_content_length(length): # type: (int) -> int """Compute page boundary alignment :param int length: content length :rtype: int :return: aligned byte boundary """ mod = length % _PAGEBLOB_BOUNDARY if mod != 0: return length + (_PAGEBLOB_BOUNDARY - mod) return length
[ "def", "page_align_content_length", "(", "length", ")", ":", "# type: (int) -> int", "mod", "=", "length", "%", "_PAGEBLOB_BOUNDARY", "if", "mod", "!=", "0", ":", "return", "length", "+", "(", "_PAGEBLOB_BOUNDARY", "-", "mod", ")", "return", "length" ]
Compute page boundary alignment :param int length: content length :rtype: int :return: aligned byte boundary
[ "Compute", "page", "boundary", "alignment", ":", "param", "int", "length", ":", "content", "length", ":", "rtype", ":", "int", ":", "return", ":", "aligned", "byte", "boundary" ]
python
train
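A standalone sketch of the rounding logic in page_align_content_length above; the 512-byte boundary is an assumption for illustration, since the actual _PAGEBLOB_BOUNDARY constant is defined elsewhere in blobxfer.util.

BOUNDARY = 512  # assumed boundary value for this illustration

def page_align(length, boundary=BOUNDARY):
    # Round length up to the next multiple of the boundary.
    mod = length % boundary
    return length if mod == 0 else length + (boundary - mod)

print(page_align(1000))  # -> 1024
print(page_align(1024))  # -> 1024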
HazyResearch/metal
metal/tuners/tuner.py
https://github.com/HazyResearch/metal/blob/c24e3772e25ac6d0917b8b7af4c1bcb92928f84a/metal/tuners/tuner.py#L258-L351
def config_generator(search_space, max_search, rng, shuffle=True): """Generates config dicts from the given search space Args: search_space: (dict) A dictionary of parameters to search over. See note below for more details. max_search: (int) The maximum number of configurations to search. If max_search is None, do a full grid search of all discrete parameters, filling in range parameters as needed. Otherwise, do a full grid search of all discrete parameters and then cycle through again filling in new range parameters values; if there are no range parameters, stop after yielding the full cross product of parameters once. shuffle: (bool) If True, shuffle the order of generated configs Yields: configs: each config is a dict of parameter values based on the provided search space The search_space dictionary may consist of two types of parameters: --discrete: a discrete parameter is either a single value or a list of values. Use single values, for example, to override a default model parameter or set a flag such as 'verbose'=True. --range: a range parameter is a dict of the form: {'range': [<min>, <max>], 'scale': <scale>} where <min> and <max> are the min/max values to search between and scale is one of ['linear', 'log'] (defaulting to 'linear') representing the scale to use when searching the given range Example: search_space = { 'verbose': True, # discrete 'n_epochs': 100, # discrete 'momentum': [0.0, 0.9, 0.99], # discrete 'l2': {'range': [0.0001, 10]} # linear range 'lr': {'range': [0.001, 1], 'scale': 'log'}, # log range } If max_search is None, this will return 3 configurations (enough to just cover the full cross-product of discrete values, filled in with sampled range values) Otherewise, this will return max_search configurations (cycling through the discrete value combinations multiple time if necessary) """ def dict_product(d): keys = d.keys() for element in product(*d.values()): yield dict(zip(keys, element)) def range_param_func(v): scale = v.get("scale", "linear") mini = min(v["range"]) maxi = max(v["range"]) if scale == "linear": func = lambda rand: mini + (maxi - mini) * rand elif scale == "log": mini = np.log(mini) maxi = np.log(maxi) func = lambda rand: np.exp(mini + (maxi - mini) * rand) else: raise ValueError(f"Unrecognized scale '{scale}' for " "parameter {k}") return func discretes = {} ranges = {} for k, v in search_space.items(): if isinstance(v, dict): ranges[k] = range_param_func(v) elif isinstance(v, list): discretes[k] = v else: discretes[k] = [v] discrete_configs = list(dict_product(discretes)) if shuffle: rng.shuffle(discrete_configs) # If there are range parameters and a non-None max_search, cycle # through the discrete_configs (with new range values) until # max_search is met if ranges and max_search: discrete_configs = cycle(discrete_configs) for i, config in enumerate(discrete_configs): # We may see the same config twice due to cycle config = config.copy() if max_search and i == max_search: break for k, v in ranges.items(): config[k] = float(v(rng.random())) yield config
[ "def", "config_generator", "(", "search_space", ",", "max_search", ",", "rng", ",", "shuffle", "=", "True", ")", ":", "def", "dict_product", "(", "d", ")", ":", "keys", "=", "d", ".", "keys", "(", ")", "for", "element", "in", "product", "(", "*", "d", ".", "values", "(", ")", ")", ":", "yield", "dict", "(", "zip", "(", "keys", ",", "element", ")", ")", "def", "range_param_func", "(", "v", ")", ":", "scale", "=", "v", ".", "get", "(", "\"scale\"", ",", "\"linear\"", ")", "mini", "=", "min", "(", "v", "[", "\"range\"", "]", ")", "maxi", "=", "max", "(", "v", "[", "\"range\"", "]", ")", "if", "scale", "==", "\"linear\"", ":", "func", "=", "lambda", "rand", ":", "mini", "+", "(", "maxi", "-", "mini", ")", "*", "rand", "elif", "scale", "==", "\"log\"", ":", "mini", "=", "np", ".", "log", "(", "mini", ")", "maxi", "=", "np", ".", "log", "(", "maxi", ")", "func", "=", "lambda", "rand", ":", "np", ".", "exp", "(", "mini", "+", "(", "maxi", "-", "mini", ")", "*", "rand", ")", "else", ":", "raise", "ValueError", "(", "f\"Unrecognized scale '{scale}' for \"", "\"parameter {k}\"", ")", "return", "func", "discretes", "=", "{", "}", "ranges", "=", "{", "}", "for", "k", ",", "v", "in", "search_space", ".", "items", "(", ")", ":", "if", "isinstance", "(", "v", ",", "dict", ")", ":", "ranges", "[", "k", "]", "=", "range_param_func", "(", "v", ")", "elif", "isinstance", "(", "v", ",", "list", ")", ":", "discretes", "[", "k", "]", "=", "v", "else", ":", "discretes", "[", "k", "]", "=", "[", "v", "]", "discrete_configs", "=", "list", "(", "dict_product", "(", "discretes", ")", ")", "if", "shuffle", ":", "rng", ".", "shuffle", "(", "discrete_configs", ")", "# If there are range parameters and a non-None max_search, cycle", "# through the discrete_configs (with new range values) until", "# max_search is met", "if", "ranges", "and", "max_search", ":", "discrete_configs", "=", "cycle", "(", "discrete_configs", ")", "for", "i", ",", "config", "in", "enumerate", "(", "discrete_configs", ")", ":", "# We may see the same config twice due to cycle", "config", "=", "config", ".", "copy", "(", ")", "if", "max_search", "and", "i", "==", "max_search", ":", "break", "for", "k", ",", "v", "in", "ranges", ".", "items", "(", ")", ":", "config", "[", "k", "]", "=", "float", "(", "v", "(", "rng", ".", "random", "(", ")", ")", ")", "yield", "config" ]
Generates config dicts from the given search space Args: search_space: (dict) A dictionary of parameters to search over. See note below for more details. max_search: (int) The maximum number of configurations to search. If max_search is None, do a full grid search of all discrete parameters, filling in range parameters as needed. Otherwise, do a full grid search of all discrete parameters and then cycle through again filling in new range parameter values; if there are no range parameters, stop after yielding the full cross product of parameters once. shuffle: (bool) If True, shuffle the order of generated configs Yields: configs: each config is a dict of parameter values based on the provided search space The search_space dictionary may consist of two types of parameters: --discrete: a discrete parameter is either a single value or a list of values. Use single values, for example, to override a default model parameter or set a flag such as 'verbose'=True. --range: a range parameter is a dict of the form: {'range': [<min>, <max>], 'scale': <scale>} where <min> and <max> are the min/max values to search between and scale is one of ['linear', 'log'] (defaulting to 'linear') representing the scale to use when searching the given range Example: search_space = { 'verbose': True, # discrete 'n_epochs': 100, # discrete 'momentum': [0.0, 0.9, 0.99], # discrete 'l2': {'range': [0.0001, 10]}, # linear range 'lr': {'range': [0.001, 1], 'scale': 'log'}, # log range } If max_search is None, this will return 3 configurations (enough to just cover the full cross-product of discrete values, filled in with sampled range values) Otherwise, this will return max_search configurations (cycling through the discrete value combinations multiple times if necessary)
[ "Generates", "config", "dicts", "from", "the", "given", "search", "space" ]
python
train
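The search_space format documented for config_generator above can be illustrated with a small standalone sketch; it builds a space in that shape and shows how a log-scaled range parameter is sampled, without importing the tuner itself (whose exact import path is not shown in the record).

import numpy as np

# A search space in the documented shape; the parameter names are illustrative.
search_space = {
    "verbose": True,                              # discrete single value
    "momentum": [0.0, 0.9, 0.99],                 # discrete list
    "lr": {"range": [0.001, 1], "scale": "log"},  # log-scaled range
}

# Sampling the log-scaled range works as in the function body:
rng = np.random.RandomState(0)
lo, hi = np.log(0.001), np.log(1)
lr_sample = float(np.exp(lo + (hi - lo) * rng.random_sample()))
print(lr_sample)  # a value between 0.001 and 1, uniform in log space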
iotile/coretools
transport_plugins/awsiot/iotile_transport_awsiot/connection_manager.py
https://github.com/iotile/coretools/blob/2d794f5f1346b841b0dcd16c9d284e9bf2f3c6ec/transport_plugins/awsiot/iotile_transport_awsiot/connection_manager.py#L205-L234
def _get_connection(self, conn_or_int_id): """Get the data for a connection by either conn_id or internal_id Args: conn_or_int_id (int, string): The external integer connection id or and internal string connection id Returns: dict: The context data associated with that connection or None if it cannot be found. Raises: ArgumentError: When the key is not found in the list of active connections or is invalid. """ key = conn_or_int_id if isinstance(key, str): table = self._int_connections elif isinstance(key, int): table = self._connections else: return None try: data = table[key] except KeyError: return None return data
[ "def", "_get_connection", "(", "self", ",", "conn_or_int_id", ")", ":", "key", "=", "conn_or_int_id", "if", "isinstance", "(", "key", ",", "str", ")", ":", "table", "=", "self", ".", "_int_connections", "elif", "isinstance", "(", "key", ",", "int", ")", ":", "table", "=", "self", ".", "_connections", "else", ":", "return", "None", "try", ":", "data", "=", "table", "[", "key", "]", "except", "KeyError", ":", "return", "None", "return", "data" ]
Get the data for a connection by either conn_id or internal_id Args: conn_or_int_id (int, string): The external integer connection id or an internal string connection id Returns: dict: The context data associated with that connection or None if it cannot be found. Raises: ArgumentError: When the key is not found in the list of active connections or is invalid.
[ "Get", "the", "data", "for", "a", "connection", "by", "either", "conn_id", "or", "internal_id" ]
python
train
Kortemme-Lab/klab
klab/bio/bonsai.py
https://github.com/Kortemme-Lab/klab/blob/6d410ad08f1bd9f7cbbb28d7d946e94fbaaa2b6b/klab/bio/bonsai.py#L506-L544
def find_sidechain_atoms_within_radius_of_residue_objects(self, source_residues, search_radius, find_ATOM_atoms = True, find_HETATM_atoms = False, restrict_to_CA = False): '''for residue in source_residues: for all heavy atoms in residue find all heavy atoms within radius which are within residues (ATOM records) for all heavy atoms found determing the associated residue for all found residues not in source_residues identify all non-backbone atoms return the non-backbone atoms''' atom_hit_cache = set() for residue in source_residues: if find_ATOM_atoms: for aatom in residue.get('ATOM'): self.find_heavy_atoms_near_atom(aatom, search_radius, atom_hit_cache = atom_hit_cache, restrict_to_CA = restrict_to_CA) if find_HETATM_atoms: for hatom in residue.get('HETATM'): self.find_heavy_atoms_near_atom(hatom, search_radius, atom_hit_cache = atom_hit_cache, restrict_to_CA = restrict_to_CA) # Get the list of source_residues loop_residue_ids = set() for sres in source_residues: loop_residue_ids.add(sres.id()) # Get the list of atoms to be removed (all sidechain atoms - including non-heavy atoms - of the found residues which are not in source_residues) sidechain_atom_serial_numbers = set() nearby_residues = set() nearby_residue_ids = set() for a in atom_hit_cache: residue_id = a.residue.id() if residue_id not in loop_residue_ids: nearby_residues.add(a.residue) nearby_residue_ids.add(residue_id) for nearby_residue in nearby_residues: for aatom in nearby_residue.get('ATOM'): if aatom.name not in backbone_atoms: sidechain_atom_serial_numbers.add(aatom.serial_number) assert(len(nearby_residue_ids.intersection(loop_residue_ids)) == 0) return sidechain_atom_serial_numbers
[ "def", "find_sidechain_atoms_within_radius_of_residue_objects", "(", "self", ",", "source_residues", ",", "search_radius", ",", "find_ATOM_atoms", "=", "True", ",", "find_HETATM_atoms", "=", "False", ",", "restrict_to_CA", "=", "False", ")", ":", "atom_hit_cache", "=", "set", "(", ")", "for", "residue", "in", "source_residues", ":", "if", "find_ATOM_atoms", ":", "for", "aatom", "in", "residue", ".", "get", "(", "'ATOM'", ")", ":", "self", ".", "find_heavy_atoms_near_atom", "(", "aatom", ",", "search_radius", ",", "atom_hit_cache", "=", "atom_hit_cache", ",", "restrict_to_CA", "=", "restrict_to_CA", ")", "if", "find_HETATM_atoms", ":", "for", "hatom", "in", "residue", ".", "get", "(", "'HETATM'", ")", ":", "self", ".", "find_heavy_atoms_near_atom", "(", "hatom", ",", "search_radius", ",", "atom_hit_cache", "=", "atom_hit_cache", ",", "restrict_to_CA", "=", "restrict_to_CA", ")", "# Get the list of source_residues", "loop_residue_ids", "=", "set", "(", ")", "for", "sres", "in", "source_residues", ":", "loop_residue_ids", ".", "add", "(", "sres", ".", "id", "(", ")", ")", "# Get the list of atoms to be removed (all sidechain atoms - including non-heavy atoms - of the found residues which are not in source_residues)", "sidechain_atom_serial_numbers", "=", "set", "(", ")", "nearby_residues", "=", "set", "(", ")", "nearby_residue_ids", "=", "set", "(", ")", "for", "a", "in", "atom_hit_cache", ":", "residue_id", "=", "a", ".", "residue", ".", "id", "(", ")", "if", "residue_id", "not", "in", "loop_residue_ids", ":", "nearby_residues", ".", "add", "(", "a", ".", "residue", ")", "nearby_residue_ids", ".", "add", "(", "residue_id", ")", "for", "nearby_residue", "in", "nearby_residues", ":", "for", "aatom", "in", "nearby_residue", ".", "get", "(", "'ATOM'", ")", ":", "if", "aatom", ".", "name", "not", "in", "backbone_atoms", ":", "sidechain_atom_serial_numbers", ".", "add", "(", "aatom", ".", "serial_number", ")", "assert", "(", "len", "(", "nearby_residue_ids", ".", "intersection", "(", "loop_residue_ids", ")", ")", "==", "0", ")", "return", "sidechain_atom_serial_numbers" ]
for residue in source_residues: for all heavy atoms in residue find all heavy atoms within radius which are within residues (ATOM records) for all heavy atoms found determining the associated residue for all found residues not in source_residues identify all non-backbone atoms return the non-backbone atoms
[ "for", "residue", "in", "source_residues", ":", "for", "all", "heavy", "atoms", "in", "residue", "find", "all", "heavy", "atoms", "within", "radius", "which", "are", "within", "residues", "(", "ATOM", "records", ")", "for", "all", "heavy", "atoms", "found", "determing", "the", "associated", "residue", "for", "all", "found", "residues", "not", "in", "source_residues", "identify", "all", "non", "-", "backbone", "atoms", "return", "the", "non", "-", "backbone", "atoms" ]
python
train
csparpa/pyowm
pyowm/stationsapi30/stations_manager.py
https://github.com/csparpa/pyowm/blob/cdd59eb72f32f7238624ceef9b2e2329a5ebd472/pyowm/stationsapi30/stations_manager.py#L138-L153
def send_measurement(self, measurement): """ Posts the provided Measurement object's data to the Station API. :param measurement: the *pyowm.stationsapi30.measurement.Measurement* object to be posted :type measurement: *pyowm.stationsapi30.measurement.Measurement* instance :returns: `None` if creation is successful, an exception otherwise """ assert measurement is not None assert measurement.station_id is not None status, _ = self.http_client.post( MEASUREMENTS_URI, params={'appid': self.API_key}, data=[self._structure_dict(measurement)], headers={'Content-Type': 'application/json'})
[ "def", "send_measurement", "(", "self", ",", "measurement", ")", ":", "assert", "measurement", "is", "not", "None", "assert", "measurement", ".", "station_id", "is", "not", "None", "status", ",", "_", "=", "self", ".", "http_client", ".", "post", "(", "MEASUREMENTS_URI", ",", "params", "=", "{", "'appid'", ":", "self", ".", "API_key", "}", ",", "data", "=", "[", "self", ".", "_structure_dict", "(", "measurement", ")", "]", ",", "headers", "=", "{", "'Content-Type'", ":", "'application/json'", "}", ")" ]
Posts the provided Measurement object's data to the Station API. :param measurement: the *pyowm.stationsapi30.measurement.Measurement* object to be posted :type measurement: *pyowm.stationsapi30.measurement.Measurement* instance :returns: `None` if creation is successful; raises an exception otherwise
[ "Posts", "the", "provided", "Measurement", "object", "s", "data", "to", "the", "Station", "API", "." ]
python
train
rosenbrockc/fortpy
fortpy/parsers/executable.py
https://github.com/rosenbrockc/fortpy/blob/1ed0757c52d549e41d9d44bdea68cb89529293a5/fortpy/parsers/executable.py#L348-L362
def _remove_dependency(self, dependlist, i, isSubroutine, anexec): """Removes the specified dependency from the executable if it exists and matches the call signature.""" if dependlist[i] in anexec.dependencies: all_depends = anexec.dependencies[dependlist[i]] if len(all_depends) > 0: clean_args = all_depends[0].clean(dependlist[i + 1]) for idepend in range(len(all_depends)): #Make sure we match across all relevant parameters if (all_depends[idepend].argslist == clean_args and all_depends[idepend].isSubroutine == isSubroutine): del anexec.dependencies[dependlist[i]][idepend] #We only need to delete one, even if there are multiple #identical calls from elsewhere in the body. break
[ "def", "_remove_dependency", "(", "self", ",", "dependlist", ",", "i", ",", "isSubroutine", ",", "anexec", ")", ":", "if", "dependlist", "[", "i", "]", "in", "anexec", ".", "dependencies", ":", "all_depends", "=", "anexec", ".", "dependencies", "[", "dependlist", "[", "i", "]", "]", "if", "len", "(", "all_depends", ")", ">", "0", ":", "clean_args", "=", "all_depends", "[", "0", "]", ".", "clean", "(", "dependlist", "[", "i", "+", "1", "]", ")", "for", "idepend", "in", "range", "(", "len", "(", "all_depends", ")", ")", ":", "#Make sure we match across all relevant parameters", "if", "(", "all_depends", "[", "idepend", "]", ".", "argslist", "==", "clean_args", "and", "all_depends", "[", "idepend", "]", ".", "isSubroutine", "==", "isSubroutine", ")", ":", "del", "anexec", ".", "dependencies", "[", "dependlist", "[", "i", "]", "]", "[", "idepend", "]", "#We only need to delete one, even if there are multiple", "#identical calls from elsewhere in the body.", "break" ]
Removes the specified dependency from the executable if it exists and matches the call signature.
[ "Removes", "the", "specified", "dependency", "from", "the", "executable", "if", "it", "exists", "and", "matches", "the", "call", "signature", "." ]
python
train
bcbio/bcbio-nextgen
bcbio/ngsalign/alignprep.py
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/ngsalign/alignprep.py#L627-L657
def _bgzip_from_fastq(data): """Prepare a bgzipped file from a fastq input, potentially gzipped (or bgzipped already). """ in_file = data["in_file"] if isinstance(in_file, (list, tuple)): in_file = in_file[0] needs_convert = dd.get_quality_format(data).lower() == "illumina" # special case, empty files that have been cleaned if not objectstore.is_remote(in_file) and os.path.getsize(in_file) == 0: needs_bgzip, needs_gunzip = False, False elif in_file.endswith(".gz") and not objectstore.is_remote(in_file): if needs_convert or dd.get_trim_ends(data): needs_bgzip, needs_gunzip = True, True else: needs_bgzip, needs_gunzip = _check_gzipped_input(in_file, data) elif in_file.endswith(".bz2"): needs_bgzip, needs_gunzip = True, True elif objectstore.is_remote(in_file) and not tz.get_in(["config", "algorithm", "align_split_size"], data): needs_bgzip, needs_gunzip = False, False else: needs_bgzip, needs_gunzip = True, False work_dir = utils.safe_makedir(os.path.join(data["dirs"]["work"], "align_prep")) if (needs_bgzip or needs_gunzip or needs_convert or dd.get_trim_ends(data) or objectstore.is_remote(in_file) or (isinstance(data["in_file"], (tuple, list)) and len(data["in_file"]) > 1)): out_file = _bgzip_file(data["in_file"], data["config"], work_dir, needs_bgzip, needs_gunzip, needs_convert, data) else: out_file = os.path.join(work_dir, "%s_%s" % (dd.get_sample_name(data), os.path.basename(in_file))) out_file = _symlink_or_copy_grabix(in_file, out_file, data) return out_file
[ "def", "_bgzip_from_fastq", "(", "data", ")", ":", "in_file", "=", "data", "[", "\"in_file\"", "]", "if", "isinstance", "(", "in_file", ",", "(", "list", ",", "tuple", ")", ")", ":", "in_file", "=", "in_file", "[", "0", "]", "needs_convert", "=", "dd", ".", "get_quality_format", "(", "data", ")", ".", "lower", "(", ")", "==", "\"illumina\"", "# special case, empty files that have been cleaned", "if", "not", "objectstore", ".", "is_remote", "(", "in_file", ")", "and", "os", ".", "path", ".", "getsize", "(", "in_file", ")", "==", "0", ":", "needs_bgzip", ",", "needs_gunzip", "=", "False", ",", "False", "elif", "in_file", ".", "endswith", "(", "\".gz\"", ")", "and", "not", "objectstore", ".", "is_remote", "(", "in_file", ")", ":", "if", "needs_convert", "or", "dd", ".", "get_trim_ends", "(", "data", ")", ":", "needs_bgzip", ",", "needs_gunzip", "=", "True", ",", "True", "else", ":", "needs_bgzip", ",", "needs_gunzip", "=", "_check_gzipped_input", "(", "in_file", ",", "data", ")", "elif", "in_file", ".", "endswith", "(", "\".bz2\"", ")", ":", "needs_bgzip", ",", "needs_gunzip", "=", "True", ",", "True", "elif", "objectstore", ".", "is_remote", "(", "in_file", ")", "and", "not", "tz", ".", "get_in", "(", "[", "\"config\"", ",", "\"algorithm\"", ",", "\"align_split_size\"", "]", ",", "data", ")", ":", "needs_bgzip", ",", "needs_gunzip", "=", "False", ",", "False", "else", ":", "needs_bgzip", ",", "needs_gunzip", "=", "True", ",", "False", "work_dir", "=", "utils", ".", "safe_makedir", "(", "os", ".", "path", ".", "join", "(", "data", "[", "\"dirs\"", "]", "[", "\"work\"", "]", ",", "\"align_prep\"", ")", ")", "if", "(", "needs_bgzip", "or", "needs_gunzip", "or", "needs_convert", "or", "dd", ".", "get_trim_ends", "(", "data", ")", "or", "objectstore", ".", "is_remote", "(", "in_file", ")", "or", "(", "isinstance", "(", "data", "[", "\"in_file\"", "]", ",", "(", "tuple", ",", "list", ")", ")", "and", "len", "(", "data", "[", "\"in_file\"", "]", ")", ">", "1", ")", ")", ":", "out_file", "=", "_bgzip_file", "(", "data", "[", "\"in_file\"", "]", ",", "data", "[", "\"config\"", "]", ",", "work_dir", ",", "needs_bgzip", ",", "needs_gunzip", ",", "needs_convert", ",", "data", ")", "else", ":", "out_file", "=", "os", ".", "path", ".", "join", "(", "work_dir", ",", "\"%s_%s\"", "%", "(", "dd", ".", "get_sample_name", "(", "data", ")", ",", "os", ".", "path", ".", "basename", "(", "in_file", ")", ")", ")", "out_file", "=", "_symlink_or_copy_grabix", "(", "in_file", ",", "out_file", ",", "data", ")", "return", "out_file" ]
Prepare a bgzipped file from a fastq input, potentially gzipped (or bgzipped already).
[ "Prepare", "a", "bgzipped", "file", "from", "a", "fastq", "input", "potentially", "gzipped", "(", "or", "bgzipped", "already", ")", "." ]
python
train
yinkaisheng/Python-UIAutomation-for-Windows
uiautomation/uiautomation.py
https://github.com/yinkaisheng/Python-UIAutomation-for-Windows/blob/2cc91060982cc8b777152e698d677cc2989bf263/uiautomation/uiautomation.py#L3056-L3066
def GetPixelColorsHorizontally(self, x: int, y: int, count: int) -> ctypes.Array: """ x: int. y: int. count: int. Return `ctypes.Array`, an iterable array of int values in argb form point x,y horizontally. """ arrayType = ctypes.c_uint32 * count values = arrayType() _DllClient.instance().dll.BitmapGetPixelsHorizontally(ctypes.c_size_t(self._bitmap), x, y, values, count) return values
[ "def", "GetPixelColorsHorizontally", "(", "self", ",", "x", ":", "int", ",", "y", ":", "int", ",", "count", ":", "int", ")", "->", "ctypes", ".", "Array", ":", "arrayType", "=", "ctypes", ".", "c_uint32", "*", "count", "values", "=", "arrayType", "(", ")", "_DllClient", ".", "instance", "(", ")", ".", "dll", ".", "BitmapGetPixelsHorizontally", "(", "ctypes", ".", "c_size_t", "(", "self", ".", "_bitmap", ")", ",", "x", ",", "y", ",", "values", ",", "count", ")", "return", "values" ]
x: int. y: int. count: int. Return `ctypes.Array`, an iterable array of int pixel values in ARGB form, starting at point x,y and extending horizontally.
[ "x", ":", "int", ".", "y", ":", "int", ".", "count", ":", "int", ".", "Return", "ctypes", ".", "Array", "an", "iterable", "array", "of", "int", "values", "in", "argb", "form", "point", "x", "y", "horizontally", "." ]
python
valid
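Since GetPixelColorsHorizontally above returns packed integers in ARGB form, a small helper like the following (assuming the conventional byte order, alpha in the most significant byte) shows how each value can be split into channels.

def argb_components(value):
    # Split a 32-bit ARGB integer into its alpha, red, green and blue bytes.
    a = (value >> 24) & 0xFF
    r = (value >> 16) & 0xFF
    g = (value >> 8) & 0xFF
    b = value & 0xFF
    return a, r, g, b

print(argb_components(0xFF336699))  # -> (255, 51, 102, 153)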
mikedh/trimesh
trimesh/intersections.py
https://github.com/mikedh/trimesh/blob/25e059bf6d4caa74f62ffd58ce4f61a90ee4e518/trimesh/intersections.py#L340-L386
def planes_lines(plane_origins, plane_normals, line_origins, line_directions): """ Given one line per plane, find the intersection points. Parameters ----------- plane_origins : (n,3) float Point on each plane plane_normals : (n,3) float Normal vector of each plane line_origins : (n,3) float Point at origin of each line line_directions : (n,3) float Direction vector of each line Returns ---------- on_plane : (n,3) float Points on specified planes valid : (n,) bool Did plane intersect line or not """ # check input types plane_origins = np.asanyarray(plane_origins, dtype=np.float64) plane_normals = np.asanyarray(plane_normals, dtype=np.float64) line_origins = np.asanyarray(line_origins, dtype=np.float64) line_directions = np.asanyarray(line_directions, dtype=np.float64) # vector from line to plane origin_vectors = plane_origins - line_origins projection_ori = util.diagonal_dot(origin_vectors, plane_normals) projection_dir = util.diagonal_dot(line_directions, plane_normals) valid = np.abs(projection_dir) > tol.merge distance = np.divide(projection_ori[valid], projection_dir[valid]) on_plane = line_directions[valid] * distance.reshape((-1, 1)) on_plane += line_origins[valid] return on_plane, valid
[ "def", "planes_lines", "(", "plane_origins", ",", "plane_normals", ",", "line_origins", ",", "line_directions", ")", ":", "# check input types", "plane_origins", "=", "np", ".", "asanyarray", "(", "plane_origins", ",", "dtype", "=", "np", ".", "float64", ")", "plane_normals", "=", "np", ".", "asanyarray", "(", "plane_normals", ",", "dtype", "=", "np", ".", "float64", ")", "line_origins", "=", "np", ".", "asanyarray", "(", "line_origins", ",", "dtype", "=", "np", ".", "float64", ")", "line_directions", "=", "np", ".", "asanyarray", "(", "line_directions", ",", "dtype", "=", "np", ".", "float64", ")", "# vector from line to plane", "origin_vectors", "=", "plane_origins", "-", "line_origins", "projection_ori", "=", "util", ".", "diagonal_dot", "(", "origin_vectors", ",", "plane_normals", ")", "projection_dir", "=", "util", ".", "diagonal_dot", "(", "line_directions", ",", "plane_normals", ")", "valid", "=", "np", ".", "abs", "(", "projection_dir", ")", ">", "tol", ".", "merge", "distance", "=", "np", ".", "divide", "(", "projection_ori", "[", "valid", "]", ",", "projection_dir", "[", "valid", "]", ")", "on_plane", "=", "line_directions", "[", "valid", "]", "*", "distance", ".", "reshape", "(", "(", "-", "1", ",", "1", ")", ")", "on_plane", "+=", "line_origins", "[", "valid", "]", "return", "on_plane", ",", "valid" ]
Given one line per plane, find the intersection points. Parameters ----------- plane_origins : (n,3) float Point on each plane plane_normals : (n,3) float Normal vector of each plane line_origins : (n,3) float Point at origin of each line line_directions : (n,3) float Direction vector of each line Returns ---------- on_plane : (n,3) float Points on specified planes valid : (n,) bool Did plane intersect line or not
[ "Given", "one", "line", "per", "plane", "find", "the", "intersection", "points", "." ]
python
train
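The projection arithmetic in planes_lines above can be checked for a single plane/line pair with plain NumPy; this sketch repeats the same dot-product computation rather than calling trimesh.

import numpy as np

plane_origin = np.array([0.0, 0.0, 1.0])   # the plane z = 1
plane_normal = np.array([0.0, 0.0, 1.0])
line_origin = np.array([0.0, 0.0, 0.0])    # a line through the origin along +z
line_direction = np.array([0.0, 0.0, 1.0])

proj_ori = np.dot(plane_origin - line_origin, plane_normal)
proj_dir = np.dot(line_direction, plane_normal)
point = line_origin + line_direction * (proj_ori / proj_dir)
print(point)  # -> [0. 0. 1.]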
fermiPy/fermipy
fermipy/castro.py
https://github.com/fermiPy/fermipy/blob/9df5e7e3728307fd58c5bba36fd86783c39fbad4/fermipy/castro.py#L404-L441
def create_from_table(cls, tab_e): """ Parameters ---------- tab_e : `~astropy.table.Table` EBOUNDS table. """ convert_sed_cols(tab_e) try: emin = np.array(tab_e['e_min'].to(u.MeV)) emax = np.array(tab_e['e_max'].to(u.MeV)) except: emin = np.array(tab_e['e_min']) emax = np.array(tab_e['e_max']) ne = len(emin) try: ref_dnde = np.array(tab_e['ref_dnde']) except: ref_dnde = np.ones((ne)) try: ref_flux = np.array(tab_e['ref_flux']) except: ref_flux = np.ones((ne)) try: ref_eflux = np.array(tab_e['ref_eflux']) except: ref_eflux = np.ones((ne)) try: ref_npred = np.array(tab_e['ref_npred']) except: ref_npred = np.ones((ne)) return cls(emin, emax, ref_dnde, ref_flux, ref_eflux, ref_npred)
[ "def", "create_from_table", "(", "cls", ",", "tab_e", ")", ":", "convert_sed_cols", "(", "tab_e", ")", "try", ":", "emin", "=", "np", ".", "array", "(", "tab_e", "[", "'e_min'", "]", ".", "to", "(", "u", ".", "MeV", ")", ")", "emax", "=", "np", ".", "array", "(", "tab_e", "[", "'e_max'", "]", ".", "to", "(", "u", ".", "MeV", ")", ")", "except", ":", "emin", "=", "np", ".", "array", "(", "tab_e", "[", "'e_min'", "]", ")", "emax", "=", "np", ".", "array", "(", "tab_e", "[", "'e_max'", "]", ")", "ne", "=", "len", "(", "emin", ")", "try", ":", "ref_dnde", "=", "np", ".", "array", "(", "tab_e", "[", "'ref_dnde'", "]", ")", "except", ":", "ref_dnde", "=", "np", ".", "ones", "(", "(", "ne", ")", ")", "try", ":", "ref_flux", "=", "np", ".", "array", "(", "tab_e", "[", "'ref_flux'", "]", ")", "except", ":", "ref_flux", "=", "np", ".", "ones", "(", "(", "ne", ")", ")", "try", ":", "ref_eflux", "=", "np", ".", "array", "(", "tab_e", "[", "'ref_eflux'", "]", ")", "except", ":", "ref_eflux", "=", "np", ".", "ones", "(", "(", "ne", ")", ")", "try", ":", "ref_npred", "=", "np", ".", "array", "(", "tab_e", "[", "'ref_npred'", "]", ")", "except", ":", "ref_npred", "=", "np", ".", "ones", "(", "(", "ne", ")", ")", "return", "cls", "(", "emin", ",", "emax", ",", "ref_dnde", ",", "ref_flux", ",", "ref_eflux", ",", "ref_npred", ")" ]
Parameters ---------- tab_e : `~astropy.table.Table` EBOUNDS table.
[ "Parameters", "----------", "tab_e", ":", "~astropy", ".", "table", ".", "Table", "EBOUNDS", "table", "." ]
python
train
algorithmiaio/algorithmia-python
Algorithmia/datadirectory.py
https://github.com/algorithmiaio/algorithmia-python/blob/fe33e6524272ff7ca11c43d1d6985890e6c48a79/Algorithmia/datadirectory.py#L40-L48
def create(self, acl=None): '''Creates a directory, optionally include Acl argument to set permissions''' parent, name = getParentAndBase(self.path) json = { 'name': name } if acl is not None: json['acl'] = acl.to_api_param() response = self.client.postJsonHelper(DataDirectory._getUrl(parent), json, False) if (response.status_code != 200): raise DataApiError("Directory creation failed: " + str(response.content))
[ "def", "create", "(", "self", ",", "acl", "=", "None", ")", ":", "parent", ",", "name", "=", "getParentAndBase", "(", "self", ".", "path", ")", "json", "=", "{", "'name'", ":", "name", "}", "if", "acl", "is", "not", "None", ":", "json", "[", "'acl'", "]", "=", "acl", ".", "to_api_param", "(", ")", "response", "=", "self", ".", "client", ".", "postJsonHelper", "(", "DataDirectory", ".", "_getUrl", "(", "parent", ")", ",", "json", ",", "False", ")", "if", "(", "response", ".", "status_code", "!=", "200", ")", ":", "raise", "DataApiError", "(", "\"Directory creation failed: \"", "+", "str", "(", "response", ".", "content", ")", ")" ]
Creates a directory; optionally include an Acl argument to set permissions
[ "Creates", "a", "directory", "optionally", "include", "Acl", "argument", "to", "set", "permissions" ]
python
train
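A hedged usage sketch for the DataDirectory.create record above: the API key and data path are placeholders, and the client/dir accessors are assumed from the Algorithmia Python client's usual entry points.

import Algorithmia

client = Algorithmia.client("YOUR_API_KEY")        # placeholder credentials
new_dir = client.dir("data://.my/example_dir")     # placeholder collection path
new_dir.create()                                   # raises DataApiError if the call fails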
Bachmann1234/diff-cover
diff_cover/diff_quality_tool.py
https://github.com/Bachmann1234/diff-cover/blob/901cb3fc986982961785e841658085ead453c6c9/diff_cover/diff_quality_tool.py#L149-L178
def generate_quality_report(tool, compare_branch, html_report=None, css_file=None, ignore_staged=False, ignore_unstaged=False, exclude=None): """ Generate the quality report, using kwargs from `parse_args()`. """ diff = GitDiffReporter( compare_branch, git_diff=GitDiffTool(), ignore_staged=ignore_staged, ignore_unstaged=ignore_unstaged, supported_extensions=tool.driver.supported_extensions, exclude=exclude) if html_report is not None: css_url = css_file if css_url is not None: css_url = os.path.relpath(css_file, os.path.dirname(html_report)) reporter = HtmlQualityReportGenerator(tool, diff, css_url=css_url) with open(html_report, "wb") as output_file: reporter.generate_report(output_file) if css_file is not None: with open(css_file, "wb") as output_file: reporter.generate_css(output_file) # Generate the report for stdout reporter = StringQualityReportGenerator(tool, diff) output_file = sys.stdout if six.PY2 else sys.stdout.buffer reporter.generate_report(output_file) return reporter.total_percent_covered()
[ "def", "generate_quality_report", "(", "tool", ",", "compare_branch", ",", "html_report", "=", "None", ",", "css_file", "=", "None", ",", "ignore_staged", "=", "False", ",", "ignore_unstaged", "=", "False", ",", "exclude", "=", "None", ")", ":", "diff", "=", "GitDiffReporter", "(", "compare_branch", ",", "git_diff", "=", "GitDiffTool", "(", ")", ",", "ignore_staged", "=", "ignore_staged", ",", "ignore_unstaged", "=", "ignore_unstaged", ",", "supported_extensions", "=", "tool", ".", "driver", ".", "supported_extensions", ",", "exclude", "=", "exclude", ")", "if", "html_report", "is", "not", "None", ":", "css_url", "=", "css_file", "if", "css_url", "is", "not", "None", ":", "css_url", "=", "os", ".", "path", ".", "relpath", "(", "css_file", ",", "os", ".", "path", ".", "dirname", "(", "html_report", ")", ")", "reporter", "=", "HtmlQualityReportGenerator", "(", "tool", ",", "diff", ",", "css_url", "=", "css_url", ")", "with", "open", "(", "html_report", ",", "\"wb\"", ")", "as", "output_file", ":", "reporter", ".", "generate_report", "(", "output_file", ")", "if", "css_file", "is", "not", "None", ":", "with", "open", "(", "css_file", ",", "\"wb\"", ")", "as", "output_file", ":", "reporter", ".", "generate_css", "(", "output_file", ")", "# Generate the report for stdout", "reporter", "=", "StringQualityReportGenerator", "(", "tool", ",", "diff", ")", "output_file", "=", "sys", ".", "stdout", "if", "six", ".", "PY2", "else", "sys", ".", "stdout", ".", "buffer", "reporter", ".", "generate_report", "(", "output_file", ")", "return", "reporter", ".", "total_percent_covered", "(", ")" ]
Generate the quality report, using kwargs from `parse_args()`.
[ "Generate", "the", "quality", "report", "using", "kwargs", "from", "parse_args", "()", "." ]
python
train
googledatalab/pydatalab
google/datalab/bigquery/_table.py
https://github.com/googledatalab/pydatalab/blob/d9031901d5bca22fe0d5925d204e6698df9852e1/google/datalab/bigquery/_table.py#L501-L529
def load(self, source, mode='create', source_format='csv', csv_options=None, ignore_unknown_values=False, max_bad_records=0): """ Load the table from GCS. Args: source: the URL of the source objects(s). Can include a wildcard '*' at the end of the item name. Can be a single source or a list. mode: one of 'create', 'append', or 'overwrite'. 'append' or 'overwrite' will fail if the table does not already exist, while 'create' will fail if it does. The default is 'create'. If 'create' the schema will be inferred if necessary. source_format: the format of the data, 'csv' or 'json'; default 'csv'. csv_options: if source format is 'csv', additional options as a CSVOptions object. ignore_unknown_values: if True, accept rows that contain values that do not match the schema; the unknown values are ignored (default False). max_bad_records: the maximum number of bad records that are allowed (and ignored) before returning an 'invalid' error in the Job result (default 0). Returns: A Job object for the completed load Job if it was started successfully; else None. """ job = self.load_async(source, mode=mode, source_format=source_format, csv_options=csv_options, ignore_unknown_values=ignore_unknown_values, max_bad_records=max_bad_records) if job is not None: job.wait() return job
[ "def", "load", "(", "self", ",", "source", ",", "mode", "=", "'create'", ",", "source_format", "=", "'csv'", ",", "csv_options", "=", "None", ",", "ignore_unknown_values", "=", "False", ",", "max_bad_records", "=", "0", ")", ":", "job", "=", "self", ".", "load_async", "(", "source", ",", "mode", "=", "mode", ",", "source_format", "=", "source_format", ",", "csv_options", "=", "csv_options", ",", "ignore_unknown_values", "=", "ignore_unknown_values", ",", "max_bad_records", "=", "max_bad_records", ")", "if", "job", "is", "not", "None", ":", "job", ".", "wait", "(", ")", "return", "job" ]
Load the table from GCS. Args: source: the URL of the source object(s). Can include a wildcard '*' at the end of the item name. Can be a single source or a list. mode: one of 'create', 'append', or 'overwrite'. 'append' or 'overwrite' will fail if the table does not already exist, while 'create' will fail if it does. The default is 'create'. If 'create' the schema will be inferred if necessary. source_format: the format of the data, 'csv' or 'json'; default 'csv'. csv_options: if source format is 'csv', additional options as a CSVOptions object. ignore_unknown_values: if True, accept rows that contain values that do not match the schema; the unknown values are ignored (default False). max_bad_records: the maximum number of bad records that are allowed (and ignored) before returning an 'invalid' error in the Job result (default 0). Returns: A Job object for the completed load Job if it was started successfully; else None.
[ "Load", "the", "table", "from", "GCS", "." ]
python
train
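A hedged usage sketch for the Table.load record above; the table name and GCS URL are placeholders, and the import alias assumes the module layout shown in the record's path.

import google.datalab.bigquery as bq

tbl = bq.Table('my_dataset.my_table')               # placeholder table name
job = tbl.load('gs://my-bucket/data*.csv',          # wildcard source, as described above
               mode='append',
               source_format='csv',
               max_bad_records=10)
print(job)                                          # None if the job could not be started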
JoeVirtual/KonFoo
konfoo/options.py
https://github.com/JoeVirtual/KonFoo/blob/0c62ef5c2bed4deaf908b34082e4de2544532fdc/konfoo/options.py#L49-L64
def nested_option(default=False): """ Attaches the option ``nested`` with its *default* value to the keyword arguments when the option does not exist. All positional arguments and keyword arguments are forwarded unchanged. """ def decorator(method): @wraps(method) def wrapper(*args, **kwargs): option = Option.nested.value kwargs[option] = kwargs.get(option, bool(default)) return method(*args, **kwargs) return wrapper return decorator
[ "def", "nested_option", "(", "default", "=", "False", ")", ":", "def", "decorator", "(", "method", ")", ":", "@", "wraps", "(", "method", ")", "def", "wrapper", "(", "*", "args", ",", "*", "*", "kwargs", ")", ":", "option", "=", "Option", ".", "nested", ".", "value", "kwargs", "[", "option", "]", "=", "kwargs", ".", "get", "(", "option", ",", "bool", "(", "default", ")", ")", "return", "method", "(", "*", "args", ",", "*", "*", "kwargs", ")", "return", "wrapper", "return", "decorator" ]
Attaches the option ``nested`` with its *default* value to the keyword arguments when the option does not exist. All positional arguments and keyword arguments are forwarded unchanged.
[ "Attaches", "the", "option", "nested", "with", "its", "*", "default", "*", "value", "to", "the", "keyword", "arguments", "when", "the", "option", "does", "not", "exist", ".", "All", "positional", "arguments", "and", "keyword", "arguments", "are", "forwarded", "unchanged", "." ]
python
train
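To show what the nested_option decorator above does to keyword arguments, here is a small sketch; the import path is assumed from the record's file location.

from konfoo.options import nested_option   # assumed import path

class Item:
    @nested_option(default=True)
    def describe(self, **options):
        # 'nested' is injected with its default when the caller omits it.
        return options['nested']

print(Item().describe())               # -> True
print(Item().describe(nested=False))   # -> False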
NYUCCL/psiTurk
psiturk/amt_services_wrapper.py
https://github.com/NYUCCL/psiTurk/blob/7170b992a0b5f56c165929cf87b3d3a1f3336c36/psiturk/amt_services_wrapper.py#L533-L543
def db_aws_list_regions(self): ''' List AWS DB regions ''' regions = self.db_services.list_regions() if regions != []: print "Avaliable AWS regions:" for reg in regions: print '\t' + reg, if reg == self.db_services.get_region(): print "(currently selected)" else: print ''
[ "def", "db_aws_list_regions", "(", "self", ")", ":", "regions", "=", "self", ".", "db_services", ".", "list_regions", "(", ")", "if", "regions", "!=", "[", "]", ":", "print", "\"Avaliable AWS regions:\"", "for", "reg", "in", "regions", ":", "print", "'\\t'", "+", "reg", ",", "if", "reg", "==", "self", ".", "db_services", ".", "get_region", "(", ")", ":", "print", "\"(currently selected)\"", "else", ":", "print", "''" ]
List AWS DB regions
[ "List", "AWS", "DB", "regions" ]
python
train
SethMMorton/natsort
natsort/utils.py
https://github.com/SethMMorton/natsort/blob/ea0d37ef790b42c424a096e079edd9ea0d5717e3/natsort/utils.py#L729-L791
def path_splitter(s, _d_match=re.compile(r"\.\d").match): """ Split a string into its path components. Assumes a string is a path or is path-like. Parameters ---------- s : str | pathlib.Path Returns ------- split : tuple The path split by directory components and extensions. Examples -------- >>> tuple(path_splitter("this/thing.ext")) ({u}'this', {u}'thing', {u}'.ext') """ if has_pathlib and isinstance(s, PurePath): s = py23_str(s) path_parts = deque() p_appendleft = path_parts.appendleft # Continue splitting the path from the back until we have reached # '..' or '.', or until there is nothing left to split. path_location = s while path_location != os_curdir and path_location != os_pardir: parent_path = path_location path_location, child_path = path_split(parent_path) if path_location == parent_path: break p_appendleft(child_path) # This last append is the base path. # Only append if the string is non-empty. # Make sure the proper path separator for this OS is used # no matter what was actually given. if path_location: p_appendleft(py23_str(os_sep)) # Now, split off the file extensions using a similar method to above. # Continue splitting off file extensions until we reach a decimal number # or there are no more extensions. # We are not using built-in functionality of PathLib here because of # the recursive splitting up to a decimal. base = path_parts.pop() base_parts = deque() b_appendleft = base_parts.appendleft while True: front = base base, ext = path_splitext(front) if _d_match(ext) or not ext: # Reset base to before the split if the split is invalid. base = front break b_appendleft(ext) b_appendleft(base) # Return the split parent paths and then the split basename. return ichain(path_parts, base_parts)
[ "def", "path_splitter", "(", "s", ",", "_d_match", "=", "re", ".", "compile", "(", "r\"\\.\\d\"", ")", ".", "match", ")", ":", "if", "has_pathlib", "and", "isinstance", "(", "s", ",", "PurePath", ")", ":", "s", "=", "py23_str", "(", "s", ")", "path_parts", "=", "deque", "(", ")", "p_appendleft", "=", "path_parts", ".", "appendleft", "# Continue splitting the path from the back until we have reached", "# '..' or '.', or until there is nothing left to split.", "path_location", "=", "s", "while", "path_location", "!=", "os_curdir", "and", "path_location", "!=", "os_pardir", ":", "parent_path", "=", "path_location", "path_location", ",", "child_path", "=", "path_split", "(", "parent_path", ")", "if", "path_location", "==", "parent_path", ":", "break", "p_appendleft", "(", "child_path", ")", "# This last append is the base path.", "# Only append if the string is non-empty.", "# Make sure the proper path separator for this OS is used", "# no matter what was actually given.", "if", "path_location", ":", "p_appendleft", "(", "py23_str", "(", "os_sep", ")", ")", "# Now, split off the file extensions using a similar method to above.", "# Continue splitting off file extensions until we reach a decimal number", "# or there are no more extensions.", "# We are not using built-in functionality of PathLib here because of", "# the recursive splitting up to a decimal.", "base", "=", "path_parts", ".", "pop", "(", ")", "base_parts", "=", "deque", "(", ")", "b_appendleft", "=", "base_parts", ".", "appendleft", "while", "True", ":", "front", "=", "base", "base", ",", "ext", "=", "path_splitext", "(", "front", ")", "if", "_d_match", "(", "ext", ")", "or", "not", "ext", ":", "# Reset base to before the split if the split is invalid.", "base", "=", "front", "break", "b_appendleft", "(", "ext", ")", "b_appendleft", "(", "base", ")", "# Return the split parent paths and then the split basename.", "return", "ichain", "(", "path_parts", ",", "base_parts", ")" ]
Split a string into its path components. Assumes a string is a path or is path-like. Parameters ---------- s : str | pathlib.Path Returns ------- split : tuple The path split by directory components and extensions. Examples -------- >>> tuple(path_splitter("this/thing.ext")) ({u}'this', {u}'thing', {u}'.ext')
[ "Split", "a", "string", "into", "its", "path", "components", "." ]
python
train
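The behaviour documented for path_splitter above can be exercised directly; the import path is assumed from the record's module location, and the example mirrors the one in the docstring.

from natsort.utils import path_splitter   # assumed import path

print(tuple(path_splitter("this/thing.ext")))  # -> ('this', 'thing', '.ext')
# Extensions keep being split off until a component that looks like '.<digit>' is reached.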
user-cont/conu
conu/backend/docker/utils.py
https://github.com/user-cont/conu/blob/08caae7bb6bdd265b55bb106c3da6a7946a5a352/conu/backend/docker/utils.py#L65-L132
def inspect_to_container_metadata(c_metadata_object, inspect_data, image_instance): """ process data from `docker container inspect` and update provided container metadata object :param c_metadata_object: instance of ContainerMetadata :param inspect_data: dict, metadata from `docker inspect` or `dockert_client.images()` :param image_instance: instance of DockerImage :return: instance of ContainerMetadata """ inspect_to_metadata(c_metadata_object, inspect_data) status = ContainerStatus.get_from_docker( graceful_get(inspect_data, "State", "Status"), graceful_get(inspect_data, "State", "ExitCode"), ) image_id = graceful_get(inspect_data, "Image") if image_id: if ":" in image_id: # format of image name from docker inspect: # sha256:8f0e66c924c0c169352de487a3c2463d82da24e9442fc097dddaa5f800df7129 image_instance.identifier = image_id.split(':')[1] else: # container image_instance.identifier = image_id # format of Port mappings from docker inspect: # {'12345/tcp': [ # {'HostIp': '0.0.0.0', 'HostPort': '123'}, # {'HostIp': '0.0.0.0', 'HostPort': '1234'}]} port_mappings = dict() raw_port_mappings = graceful_get(inspect_data, 'HostConfig', 'PortBindings') or {} for key, value in raw_port_mappings.items(): for item in value: logger.debug("parsing ports: key = %s, item = %s", key, item) li = port_mappings.get(key, []) raw_host_port = item['HostPort'] if raw_host_port == "": int_port = None else: try: int_port = int(raw_host_port) except ValueError as ex: logger.error("could not parse port: %s", ex) continue li.append(int_port) port_mappings.update({key: li}) c_metadata_object.status = status c_metadata_object.port_mappings = port_mappings c_metadata_object.hostname = graceful_get(inspect_data, 'Config', 'Hostname') raw_networks = graceful_get(inspect_data, "NetworkSettings", "Networks").values() if raw_networks: c_metadata_object.ipv4_addresses = [ graceful_get(x, "IPAddress") for x in raw_networks if graceful_get(x, "IPAddress")] c_metadata_object.ipv6_addresses = [ graceful_get(x, "GlobalIPv6Address") for x in raw_networks if graceful_get(x, "GlobalIPv6Address")] c_metadata_object.image = image_instance name = graceful_get(inspect_data, "Name") if name: name = name[1:] if name.startswith("/") else name # remove / at the beginning c_metadata_object.name = name return c_metadata_object
[ "def", "inspect_to_container_metadata", "(", "c_metadata_object", ",", "inspect_data", ",", "image_instance", ")", ":", "inspect_to_metadata", "(", "c_metadata_object", ",", "inspect_data", ")", "status", "=", "ContainerStatus", ".", "get_from_docker", "(", "graceful_get", "(", "inspect_data", ",", "\"State\"", ",", "\"Status\"", ")", ",", "graceful_get", "(", "inspect_data", ",", "\"State\"", ",", "\"ExitCode\"", ")", ",", ")", "image_id", "=", "graceful_get", "(", "inspect_data", ",", "\"Image\"", ")", "if", "image_id", ":", "if", "\":\"", "in", "image_id", ":", "# format of image name from docker inspect:", "# sha256:8f0e66c924c0c169352de487a3c2463d82da24e9442fc097dddaa5f800df7129", "image_instance", ".", "identifier", "=", "image_id", ".", "split", "(", "':'", ")", "[", "1", "]", "else", ":", "# container", "image_instance", ".", "identifier", "=", "image_id", "# format of Port mappings from docker inspect:", "# {'12345/tcp': [", "# {'HostIp': '0.0.0.0', 'HostPort': '123'},", "# {'HostIp': '0.0.0.0', 'HostPort': '1234'}]}", "port_mappings", "=", "dict", "(", ")", "raw_port_mappings", "=", "graceful_get", "(", "inspect_data", ",", "'HostConfig'", ",", "'PortBindings'", ")", "or", "{", "}", "for", "key", ",", "value", "in", "raw_port_mappings", ".", "items", "(", ")", ":", "for", "item", "in", "value", ":", "logger", ".", "debug", "(", "\"parsing ports: key = %s, item = %s\"", ",", "key", ",", "item", ")", "li", "=", "port_mappings", ".", "get", "(", "key", ",", "[", "]", ")", "raw_host_port", "=", "item", "[", "'HostPort'", "]", "if", "raw_host_port", "==", "\"\"", ":", "int_port", "=", "None", "else", ":", "try", ":", "int_port", "=", "int", "(", "raw_host_port", ")", "except", "ValueError", "as", "ex", ":", "logger", ".", "error", "(", "\"could not parse port: %s\"", ",", "ex", ")", "continue", "li", ".", "append", "(", "int_port", ")", "port_mappings", ".", "update", "(", "{", "key", ":", "li", "}", ")", "c_metadata_object", ".", "status", "=", "status", "c_metadata_object", ".", "port_mappings", "=", "port_mappings", "c_metadata_object", ".", "hostname", "=", "graceful_get", "(", "inspect_data", ",", "'Config'", ",", "'Hostname'", ")", "raw_networks", "=", "graceful_get", "(", "inspect_data", ",", "\"NetworkSettings\"", ",", "\"Networks\"", ")", ".", "values", "(", ")", "if", "raw_networks", ":", "c_metadata_object", ".", "ipv4_addresses", "=", "[", "graceful_get", "(", "x", ",", "\"IPAddress\"", ")", "for", "x", "in", "raw_networks", "if", "graceful_get", "(", "x", ",", "\"IPAddress\"", ")", "]", "c_metadata_object", ".", "ipv6_addresses", "=", "[", "graceful_get", "(", "x", ",", "\"GlobalIPv6Address\"", ")", "for", "x", "in", "raw_networks", "if", "graceful_get", "(", "x", ",", "\"GlobalIPv6Address\"", ")", "]", "c_metadata_object", ".", "image", "=", "image_instance", "name", "=", "graceful_get", "(", "inspect_data", ",", "\"Name\"", ")", "if", "name", ":", "name", "=", "name", "[", "1", ":", "]", "if", "name", ".", "startswith", "(", "\"/\"", ")", "else", "name", "# remove / at the beginning", "c_metadata_object", ".", "name", "=", "name", "return", "c_metadata_object" ]
process data from `docker container inspect` and update the provided container metadata object :param c_metadata_object: instance of ContainerMetadata :param inspect_data: dict, metadata from `docker inspect` or `docker_client.images()` :param image_instance: instance of DockerImage :return: instance of ContainerMetadata
[ "process", "data", "from", "docker", "container", "inspect", "and", "update", "provided", "container", "metadata", "object" ]
python
train
theiviaxx/Frog
frog/common.py
https://github.com/theiviaxx/Frog/blob/a9475463a8eed1323fe3ef5d51f9751fb1dc9edd/frog/common.py#L57-L65
def append(self, val): """Appends the object to the end of the values list. Will also set the value to the first item in the values list :param val: Object to append :type val: primitive """ self.values.append(val) self.value = self.values[0]
[ "def", "append", "(", "self", ",", "val", ")", ":", "self", ".", "values", ".", "append", "(", "val", ")", "self", ".", "value", "=", "self", ".", "values", "[", "0", "]" ]
Appends the object to the end of the values list. Will also set the value to the first item in the values list :param val: Object to append :type val: primitive
[ "Appends", "the", "object", "to", "the", "end", "of", "the", "values", "list", ".", "Will", "also", "set", "the", "value", "to", "the", "first", "item", "in", "the", "values", "list" ]
python
train
GoogleCloudPlatform/appengine-mapreduce
python/src/mapreduce/input_readers.py
https://github.com/GoogleCloudPlatform/appengine-mapreduce/blob/2045eb3605b6ecb40c83d11dd5442a89fe5c5dd6/python/src/mapreduce/input_readers.py#L2151-L2178
def split_input(cls, mapper_spec): """Returns a list of input readers for the given input specification. Args: mapper_spec: The MapperSpec for this InputReader. Returns: A list of InputReaders. """ params = _get_params(mapper_spec) shard_count = mapper_spec.shard_count # Pick out the overall start and end times and time step per shard. start_time = params[cls.START_TIME_PARAM] end_time = params[cls.END_TIME_PARAM] seconds_per_shard = (end_time - start_time) / shard_count # Create a LogInputReader for each shard, modulating the params as we go. shards = [] for _ in xrange(shard_count - 1): params[cls.END_TIME_PARAM] = (params[cls.START_TIME_PARAM] + seconds_per_shard) shards.append(LogInputReader(**params)) params[cls.START_TIME_PARAM] = params[cls.END_TIME_PARAM] # Create a final shard to complete the time range. params[cls.END_TIME_PARAM] = end_time return shards + [LogInputReader(**params)]
[ "def", "split_input", "(", "cls", ",", "mapper_spec", ")", ":", "params", "=", "_get_params", "(", "mapper_spec", ")", "shard_count", "=", "mapper_spec", ".", "shard_count", "# Pick out the overall start and end times and time step per shard.", "start_time", "=", "params", "[", "cls", ".", "START_TIME_PARAM", "]", "end_time", "=", "params", "[", "cls", ".", "END_TIME_PARAM", "]", "seconds_per_shard", "=", "(", "end_time", "-", "start_time", ")", "/", "shard_count", "# Create a LogInputReader for each shard, modulating the params as we go.", "shards", "=", "[", "]", "for", "_", "in", "xrange", "(", "shard_count", "-", "1", ")", ":", "params", "[", "cls", ".", "END_TIME_PARAM", "]", "=", "(", "params", "[", "cls", ".", "START_TIME_PARAM", "]", "+", "seconds_per_shard", ")", "shards", ".", "append", "(", "LogInputReader", "(", "*", "*", "params", ")", ")", "params", "[", "cls", ".", "START_TIME_PARAM", "]", "=", "params", "[", "cls", ".", "END_TIME_PARAM", "]", "# Create a final shard to complete the time range.", "params", "[", "cls", ".", "END_TIME_PARAM", "]", "=", "end_time", "return", "shards", "+", "[", "LogInputReader", "(", "*", "*", "params", ")", "]" ]
Returns a list of input readers for the given input specification. Args: mapper_spec: The MapperSpec for this InputReader. Returns: A list of InputReaders.
[ "Returns", "a", "list", "of", "input", "readers", "for", "the", "given", "input", "specification", "." ]
python
train
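The time-range sharding performed by split_input above reduces to a small piece of arithmetic; this standalone sketch reproduces it without the App Engine machinery.

def shard_time_range(start_time, end_time, shard_count):
    # Divide [start_time, end_time) into shard_count contiguous windows,
    # letting the last window absorb any rounding remainder.
    step = (end_time - start_time) / shard_count
    windows = []
    cursor = start_time
    for _ in range(shard_count - 1):
        windows.append((cursor, cursor + step))
        cursor += step
    windows.append((cursor, end_time))
    return windows

print(shard_time_range(0, 100, 4))  # -> [(0, 25.0), (25.0, 50.0), (50.0, 75.0), (75.0, 100)]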
twilio/twilio-python
twilio/rest/preview/wireless/command.py
https://github.com/twilio/twilio-python/blob/c867895f55dcc29f522e6e8b8868d0d18483132f/twilio/rest/preview/wireless/command.py#L71-L99
def list(self, device=values.unset, sim=values.unset, status=values.unset, direction=values.unset, limit=None, page_size=None): """ Lists CommandInstance records from the API as a list. Unlike stream(), this operation is eager and will load `limit` records into memory before returning. :param unicode device: The device :param unicode sim: The sim :param unicode status: The status :param unicode direction: The direction :param int limit: Upper limit for the number of records to return. list() guarantees never to return more than limit. Default is no limit :param int page_size: Number of records to fetch per request, when not set will use the default value of 50 records. If no page_size is defined but a limit is defined, list() will attempt to read the limit with the most efficient page size, i.e. min(limit, 1000) :returns: Generator that will yield up to limit results :rtype: list[twilio.rest.preview.wireless.command.CommandInstance] """ return list(self.stream( device=device, sim=sim, status=status, direction=direction, limit=limit, page_size=page_size, ))
[ "def", "list", "(", "self", ",", "device", "=", "values", ".", "unset", ",", "sim", "=", "values", ".", "unset", ",", "status", "=", "values", ".", "unset", ",", "direction", "=", "values", ".", "unset", ",", "limit", "=", "None", ",", "page_size", "=", "None", ")", ":", "return", "list", "(", "self", ".", "stream", "(", "device", "=", "device", ",", "sim", "=", "sim", ",", "status", "=", "status", ",", "direction", "=", "direction", ",", "limit", "=", "limit", ",", "page_size", "=", "page_size", ",", ")", ")" ]
Lists CommandInstance records from the API as a list. Unlike stream(), this operation is eager and will load `limit` records into memory before returning. :param unicode device: The device :param unicode sim: The sim :param unicode status: The status :param unicode direction: The direction :param int limit: Upper limit for the number of records to return. list() guarantees never to return more than limit. Default is no limit :param int page_size: Number of records to fetch per request, when not set will use the default value of 50 records. If no page_size is defined but a limit is defined, list() will attempt to read the limit with the most efficient page size, i.e. min(limit, 1000) :returns: Generator that will yield up to limit results :rtype: list[twilio.rest.preview.wireless.command.CommandInstance]
[ "Lists", "CommandInstance", "records", "from", "the", "API", "as", "a", "list", ".", "Unlike", "stream", "()", "this", "operation", "is", "eager", "and", "will", "load", "limit", "records", "into", "memory", "before", "returning", "." ]
python
train
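A hypothetical call sketch for the list() method in the record above; the account SID, auth token and filter values are placeholders, and it assumes the preview.wireless commands resource is still exposed by the installed twilio version.

from twilio.rest import Client

client = Client("ACXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX", "your_auth_token")
commands = client.preview.wireless.commands.list(status="queued", limit=20)
for command in commands:
    print(command.sid)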
spyder-ide/spyder
spyder/plugins/editor/lsp/decorators.py
https://github.com/spyder-ide/spyder/blob/f76836ce1b924bcc4efd3f74f2960d26a4e528e0/spyder/plugins/editor/lsp/decorators.py#L12-L24
def send_request(req=None, method=None, requires_response=True): """Call function req and then send its results via ZMQ.""" if req is None: return functools.partial(send_request, method=method, requires_response=requires_response) @functools.wraps(req) def wrapper(self, *args, **kwargs): params = req(self, *args, **kwargs) _id = self.send(method, params, requires_response) return _id wrapper._sends = method return wrapper
[ "def", "send_request", "(", "req", "=", "None", ",", "method", "=", "None", ",", "requires_response", "=", "True", ")", ":", "if", "req", "is", "None", ":", "return", "functools", ".", "partial", "(", "send_request", ",", "method", "=", "method", ",", "requires_response", "=", "requires_response", ")", "@", "functools", ".", "wraps", "(", "req", ")", "def", "wrapper", "(", "self", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "params", "=", "req", "(", "self", ",", "*", "args", ",", "*", "*", "kwargs", ")", "_id", "=", "self", ".", "send", "(", "method", ",", "params", ",", "requires_response", ")", "return", "_id", "wrapper", ".", "_sends", "=", "method", "return", "wrapper" ]
Call function req and then send its results via ZMQ.
[ "Call", "function", "req", "and", "then", "send", "its", "results", "via", "ZMQ", "." ]
python
train
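A minimal standalone illustration of the decorator pattern in the record above, re-sketched here rather than imported from spyder; the FakeClient class and the method name are invented for the example.

import functools

def send_request(req=None, method=None, requires_response=True):
    if req is None:
        return functools.partial(send_request, method=method,
                                 requires_response=requires_response)
    @functools.wraps(req)
    def wrapper(self, *args, **kwargs):
        params = req(self, *args, **kwargs)
        return self.send(method, params, requires_response)
    wrapper._sends = method
    return wrapper

class FakeClient:
    def send(self, method, params, requires_response):
        print("sending", method, params, requires_response)
        return 1  # pretend request id

    @send_request(method="textDocument/didOpen", requires_response=False)
    def document_open(self, path):
        return {"uri": path}

FakeClient().document_open("file.py")
# sending textDocument/didOpen {'uri': 'file.py'} False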
skymill/automated-ebs-snapshots
automated_ebs_snapshots/volume_manager.py
https://github.com/skymill/automated-ebs-snapshots/blob/9595bc49d458f6ffb93430722757d2284e878fab/automated_ebs_snapshots/volume_manager.py#L113-L152
def watch(connection, volume_id, interval='daily', retention=0): """ Start watching a new volume :type connection: boto.ec2.connection.EC2Connection :param connection: EC2 connection object :type volume_id: str :param volume_id: VolumeID to add to the watchlist :type interval: str :param interval: Backup interval [hourly|daily|weekly|monthly|yearly] :type retention: int :param retention: Number of snapshots to keep. 0 == keep all :returns: bool - True if the watch was successful """ try: volume = connection.get_all_volumes(volume_ids=[volume_id])[0] except EC2ResponseError: logger.warning('Volume {} not found'.format(volume_id)) return False if interval not in VALID_INTERVALS: logger.warning( '{} is not a valid interval. Valid intervals are {}'.format( interval, ', '.join(VALID_INTERVALS))) # Remove the tag first volume.remove_tag('AutomatedEBSSnapshots') # Re-add the tag volume.add_tag('AutomatedEBSSnapshots', value=interval) # Remove the tag first volume.remove_tag('AutomatedEBSSnapshotsRetention') # Re-add the tag volume.add_tag('AutomatedEBSSnapshotsRetention', value=int(retention)) logger.info('Updated the rotation interval to {} for {}'.format( interval, volume_id)) return True
[ "def", "watch", "(", "connection", ",", "volume_id", ",", "interval", "=", "'daily'", ",", "retention", "=", "0", ")", ":", "try", ":", "volume", "=", "connection", ".", "get_all_volumes", "(", "volume_ids", "=", "[", "volume_id", "]", ")", "[", "0", "]", "except", "EC2ResponseError", ":", "logger", ".", "warning", "(", "'Volume {} not found'", ".", "format", "(", "volume_id", ")", ")", "return", "False", "if", "interval", "not", "in", "VALID_INTERVALS", ":", "logger", ".", "warning", "(", "'{} is not a valid interval. Valid intervals are {}'", ".", "format", "(", "interval", ",", "', '", ".", "join", "(", "VALID_INTERVALS", ")", ")", ")", "# Remove the tag first", "volume", ".", "remove_tag", "(", "'AutomatedEBSSnapshots'", ")", "# Re-add the tag", "volume", ".", "add_tag", "(", "'AutomatedEBSSnapshots'", ",", "value", "=", "interval", ")", "# Remove the tag first", "volume", ".", "remove_tag", "(", "'AutomatedEBSSnapshotsRetention'", ")", "# Re-add the tag", "volume", ".", "add_tag", "(", "'AutomatedEBSSnapshotsRetention'", ",", "value", "=", "int", "(", "retention", ")", ")", "logger", ".", "info", "(", "'Updated the rotation interval to {} for {}'", ".", "format", "(", "interval", ",", "volume_id", ")", ")", "return", "True" ]
Start watching a new volume :type connection: boto.ec2.connection.EC2Connection :param connection: EC2 connection object :type volume_id: str :param volume_id: VolumeID to add to the watchlist :type interval: str :param interval: Backup interval [hourly|daily|weekly|monthly|yearly] :type retention: int :param retention: Number of snapshots to keep. 0 == keep all :returns: bool - True if the watch was successful
[ "Start", "watching", "a", "new", "volume" ]
python
train
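A hypothetical call sketch for watch() above; the region, volume ID and retention are placeholders, and a boto (v2) EC2 connection with valid AWS credentials is assumed.

import boto.ec2
from automated_ebs_snapshots.volume_manager import watch

connection = boto.ec2.connect_to_region('us-east-1')   # uses your AWS credentials
watch(connection, 'vol-0123456789abcdef0', interval='weekly', retention=4)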
ojake/django-tracked-model
tracked_model/serializer.py
https://github.com/ojake/django-tracked-model/blob/19bc48874dd2e5fb5defedc6b8c5c3915cce1424/tracked_model/serializer.py#L86-L94
def restore_model(cls, data):
    """Returns instance of ``cls``
    with attributes loaded from ``data`` dict.
    """
    obj = cls()
    for field in data:
        setattr(obj, field, data[field][Field.VALUE])
    return obj
[ "def", "restore_model", "(", "cls", ",", "data", ")", ":", "obj", "=", "cls", "(", ")", "for", "field", "in", "data", ":", "setattr", "(", "obj", ",", "field", ",", "data", "[", "field", "]", "[", "Field", ".", "VALUE", "]", ")", "return", "obj" ]
Returns instance of ``cls`` with attributes loaded from ``data`` dict.
[ "Returns", "instance", "of", "cls", "with", "attributed", "loaded", "from", "data", "dict", "." ]
python
train
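A standalone sketch of the ``data`` shape restore_model expects; Field.VALUE is mimicked with a plain constant (the real one lives in tracked_model), and the Article class and values are invented for the example.

class Field:                 # stand-in for tracked_model's Field constants
    VALUE = 'value'

def restore_model(cls, data):
    obj = cls()
    for field in data:
        setattr(obj, field, data[field][Field.VALUE])
    return obj

class Article:
    pass

data = {'title': {Field.VALUE: 'Hello'}, 'views': {Field.VALUE: 42}}
article = restore_model(Article, data)
print(article.title, article.views)   # Hello 42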
CalebBell/thermo
thermo/activity.py
https://github.com/CalebBell/thermo/blob/3857ed023a3e64fd3039a32d53576c24990ef1c3/thermo/activity.py#L39-L168
def K_value(P=None, Psat=None, phi_l=None, phi_g=None, gamma=None, Poynting=1): r'''Calculates the equilibrium K-value assuming Raoult's law, or an equation of state model, or an activity coefficient model, or a combined equation of state-activity model. The calculation procedure will use the most advanced approach with the provided inputs: * If `P`, `Psat`, `phi_l`, `phi_g`, and `gamma` are provided, use the combined approach. * If `P`, `Psat`, and `gamma` are provided, use the modified Raoult's law. * If `phi_l` and `phi_g` are provided, use the EOS only method. * If `P` and `Psat` are provided, use Raoult's law. Definitions: .. math:: K_i=\frac{y_i}{x_i} Raoult's law: .. math:: K_i = \frac{P_{i}^{sat}}{P} Activity coefficient, no EOS (modified Raoult's law): .. math:: K_i = \frac{\gamma_i P_{i}^{sat}}{P} Equation of state only: .. math:: K_i = \frac{\phi_i^l}{\phi_i^v} = \frac{f_i^l}{f_i^v} Combined approach (liquid reference fugacity coefficient is normally calculated the saturation pressure for it as a pure species; vapor fugacity coefficient calculated normally): .. math:: K_i = \frac{\gamma_i P_i^{sat} \phi_i^{l,ref}}{\phi_i^v P} Combined approach, with Poynting Correction Factor (liquid molar volume in the integral is for i as a pure species only): .. math:: K_i = \frac{\gamma_i P_i^{sat} \phi_i^{l, ref} \exp\left[\frac{ \int_{P_i^{sat}}^P V_i^l dP}{RT}\right]}{\phi_i^v P} Parameters ---------- P : float System pressure, optional Psat : float Vapor pressure of species i, [Pa] phi_l : float Fugacity coefficient of species i in the liquid phase, either at the system conditions (EOS-only case) or at the saturation pressure of species i as a pure species (reference condition for the combined approach), optional [-] phi_g : float Fugacity coefficient of species i in the vapor phase at the system conditions, optional [-] gamma : float Activity coefficient of species i in the liquid phase, optional [-] Poynting : float Poynting correction factor, optional [-] Returns ------- K : float Equilibrium K value of component i, calculated with an approach depending on the provided inputs [-] Notes ----- The Poynting correction factor is normally simplified as follows, due to a liquid's low pressure dependency: .. math:: K_i = \frac{\gamma_i P_i^{sat} \phi_i^{l, ref} \exp\left[\frac{V_l (P-P_i^{sat})}{RT}\right]}{\phi_i^v P} Examples -------- Raoult's law: >>> K_value(101325, 3000.) 0.029607698001480384 Modified Raoult's law: >>> K_value(P=101325, Psat=3000, gamma=0.9) 0.026646928201332347 EOS-only approach: >>> K_value(phi_l=1.6356, phi_g=0.88427) 1.8496613025433408 Gamma-phi combined approach: >>> K_value(P=1E6, Psat=1938800, phi_l=1.4356, phi_g=0.88427, gamma=0.92) 2.8958055544121137 Gamma-phi combined approach with a Poynting factor: >>> K_value(P=1E6, Psat=1938800, phi_l=1.4356, phi_g=0.88427, gamma=0.92, ... Poynting=0.999) 2.8929097488577016 References ---------- .. [1] Gmehling, Jurgen, Barbel Kolbe, Michael Kleiber, and Jurgen Rarey. Chemical Thermodynamics for Process Simulation. 1st edition. Weinheim: Wiley-VCH, 2012. .. [2] Skogestad, Sigurd. Chemical and Energy Process Engineering. 1st edition. Boca Raton, FL: CRC Press, 2008. ''' try: if gamma: if phi_l: return gamma*Psat*phi_l*Poynting/(phi_g*P) return gamma*Psat*Poynting/P elif phi_l: return phi_l/phi_g return Psat/P except TypeError: raise Exception('Input must consist of one set from (P, Psat, phi_l, \ phi_g, gamma), (P, Psat, gamma), (phi_l, phi_g), (P, Psat)')
[ "def", "K_value", "(", "P", "=", "None", ",", "Psat", "=", "None", ",", "phi_l", "=", "None", ",", "phi_g", "=", "None", ",", "gamma", "=", "None", ",", "Poynting", "=", "1", ")", ":", "try", ":", "if", "gamma", ":", "if", "phi_l", ":", "return", "gamma", "*", "Psat", "*", "phi_l", "*", "Poynting", "/", "(", "phi_g", "*", "P", ")", "return", "gamma", "*", "Psat", "*", "Poynting", "/", "P", "elif", "phi_l", ":", "return", "phi_l", "/", "phi_g", "return", "Psat", "/", "P", "except", "TypeError", ":", "raise", "Exception", "(", "'Input must consist of one set from (P, Psat, phi_l, \\\nphi_g, gamma), (P, Psat, gamma), (phi_l, phi_g), (P, Psat)'", ")" ]
r'''Calculates the equilibrium K-value assuming Raoult's law, or an equation of state model, or an activity coefficient model, or a combined equation of state-activity model. The calculation procedure will use the most advanced approach with the provided inputs: * If `P`, `Psat`, `phi_l`, `phi_g`, and `gamma` are provided, use the combined approach. * If `P`, `Psat`, and `gamma` are provided, use the modified Raoult's law. * If `phi_l` and `phi_g` are provided, use the EOS only method. * If `P` and `Psat` are provided, use Raoult's law. Definitions: .. math:: K_i=\frac{y_i}{x_i} Raoult's law: .. math:: K_i = \frac{P_{i}^{sat}}{P} Activity coefficient, no EOS (modified Raoult's law): .. math:: K_i = \frac{\gamma_i P_{i}^{sat}}{P} Equation of state only: .. math:: K_i = \frac{\phi_i^l}{\phi_i^v} = \frac{f_i^l}{f_i^v} Combined approach (liquid reference fugacity coefficient is normally calculated the saturation pressure for it as a pure species; vapor fugacity coefficient calculated normally): .. math:: K_i = \frac{\gamma_i P_i^{sat} \phi_i^{l,ref}}{\phi_i^v P} Combined approach, with Poynting Correction Factor (liquid molar volume in the integral is for i as a pure species only): .. math:: K_i = \frac{\gamma_i P_i^{sat} \phi_i^{l, ref} \exp\left[\frac{ \int_{P_i^{sat}}^P V_i^l dP}{RT}\right]}{\phi_i^v P} Parameters ---------- P : float System pressure, optional Psat : float Vapor pressure of species i, [Pa] phi_l : float Fugacity coefficient of species i in the liquid phase, either at the system conditions (EOS-only case) or at the saturation pressure of species i as a pure species (reference condition for the combined approach), optional [-] phi_g : float Fugacity coefficient of species i in the vapor phase at the system conditions, optional [-] gamma : float Activity coefficient of species i in the liquid phase, optional [-] Poynting : float Poynting correction factor, optional [-] Returns ------- K : float Equilibrium K value of component i, calculated with an approach depending on the provided inputs [-] Notes ----- The Poynting correction factor is normally simplified as follows, due to a liquid's low pressure dependency: .. math:: K_i = \frac{\gamma_i P_i^{sat} \phi_i^{l, ref} \exp\left[\frac{V_l (P-P_i^{sat})}{RT}\right]}{\phi_i^v P} Examples -------- Raoult's law: >>> K_value(101325, 3000.) 0.029607698001480384 Modified Raoult's law: >>> K_value(P=101325, Psat=3000, gamma=0.9) 0.026646928201332347 EOS-only approach: >>> K_value(phi_l=1.6356, phi_g=0.88427) 1.8496613025433408 Gamma-phi combined approach: >>> K_value(P=1E6, Psat=1938800, phi_l=1.4356, phi_g=0.88427, gamma=0.92) 2.8958055544121137 Gamma-phi combined approach with a Poynting factor: >>> K_value(P=1E6, Psat=1938800, phi_l=1.4356, phi_g=0.88427, gamma=0.92, ... Poynting=0.999) 2.8929097488577016 References ---------- .. [1] Gmehling, Jurgen, Barbel Kolbe, Michael Kleiber, and Jurgen Rarey. Chemical Thermodynamics for Process Simulation. 1st edition. Weinheim: Wiley-VCH, 2012. .. [2] Skogestad, Sigurd. Chemical and Energy Process Engineering. 1st edition. Boca Raton, FL: CRC Press, 2008.
[ "r", "Calculates", "the", "equilibrium", "K", "-", "value", "assuming", "Raoult", "s", "law", "or", "an", "equation", "of", "state", "model", "or", "an", "activity", "coefficient", "model", "or", "a", "combined", "equation", "of", "state", "-", "activity", "model", "." ]
python
valid
PyCQA/pylint
pylint/message/message_handler_mix_in.py
https://github.com/PyCQA/pylint/blob/2bf5c61a3ff6ae90613b81679de42c0f19aea600/pylint/message/message_handler_mix_in.py#L403-L459
def _print_checker_doc(checker_name, info, stream=None): """Helper method for print_full_documentation. Also used by doc/exts/pylint_extensions.py. """ if not stream: stream = sys.stdout doc = info.get("doc") module = info.get("module") msgs = info.get("msgs") options = info.get("options") reports = info.get("reports") checker_title = "%s checker" % (checker_name.replace("_", " ").title()) if module: # Provide anchor to link against print(".. _%s:\n" % module, file=stream) print(checker_title, file=stream) print("~" * len(checker_title), file=stream) print("", file=stream) if module: print("This checker is provided by ``%s``." % module, file=stream) print("Verbatim name of the checker is ``%s``." % checker_name, file=stream) print("", file=stream) if doc: # Provide anchor to link against title = "{} Documentation".format(checker_title) print(title, file=stream) print("^" * len(title), file=stream) print(cleandoc(doc), file=stream) print("", file=stream) if options: title = "{} Options".format(checker_title) print(title, file=stream) print("^" * len(title), file=stream) _rest_format_section(stream, None, options) print("", file=stream) if msgs: title = "{} Messages".format(checker_title) print(title, file=stream) print("^" * len(title), file=stream) for msgid, msg in sorted( msgs.items(), key=lambda kv: (_MSG_ORDER.index(kv[0][0]), kv[1]) ): msg = build_message_definition(checker_name, msgid, msg) print(msg.format_help(checkerref=False), file=stream) print("", file=stream) if reports: title = "{} Reports".format(checker_title) print(title, file=stream) print("^" * len(title), file=stream) for report in reports: print(":%s: %s" % report[:2], file=stream) print("", file=stream) print("", file=stream)
[ "def", "_print_checker_doc", "(", "checker_name", ",", "info", ",", "stream", "=", "None", ")", ":", "if", "not", "stream", ":", "stream", "=", "sys", ".", "stdout", "doc", "=", "info", ".", "get", "(", "\"doc\"", ")", "module", "=", "info", ".", "get", "(", "\"module\"", ")", "msgs", "=", "info", ".", "get", "(", "\"msgs\"", ")", "options", "=", "info", ".", "get", "(", "\"options\"", ")", "reports", "=", "info", ".", "get", "(", "\"reports\"", ")", "checker_title", "=", "\"%s checker\"", "%", "(", "checker_name", ".", "replace", "(", "\"_\"", ",", "\" \"", ")", ".", "title", "(", ")", ")", "if", "module", ":", "# Provide anchor to link against", "print", "(", "\".. _%s:\\n\"", "%", "module", ",", "file", "=", "stream", ")", "print", "(", "checker_title", ",", "file", "=", "stream", ")", "print", "(", "\"~\"", "*", "len", "(", "checker_title", ")", ",", "file", "=", "stream", ")", "print", "(", "\"\"", ",", "file", "=", "stream", ")", "if", "module", ":", "print", "(", "\"This checker is provided by ``%s``.\"", "%", "module", ",", "file", "=", "stream", ")", "print", "(", "\"Verbatim name of the checker is ``%s``.\"", "%", "checker_name", ",", "file", "=", "stream", ")", "print", "(", "\"\"", ",", "file", "=", "stream", ")", "if", "doc", ":", "# Provide anchor to link against", "title", "=", "\"{} Documentation\"", ".", "format", "(", "checker_title", ")", "print", "(", "title", ",", "file", "=", "stream", ")", "print", "(", "\"^\"", "*", "len", "(", "title", ")", ",", "file", "=", "stream", ")", "print", "(", "cleandoc", "(", "doc", ")", ",", "file", "=", "stream", ")", "print", "(", "\"\"", ",", "file", "=", "stream", ")", "if", "options", ":", "title", "=", "\"{} Options\"", ".", "format", "(", "checker_title", ")", "print", "(", "title", ",", "file", "=", "stream", ")", "print", "(", "\"^\"", "*", "len", "(", "title", ")", ",", "file", "=", "stream", ")", "_rest_format_section", "(", "stream", ",", "None", ",", "options", ")", "print", "(", "\"\"", ",", "file", "=", "stream", ")", "if", "msgs", ":", "title", "=", "\"{} Messages\"", ".", "format", "(", "checker_title", ")", "print", "(", "title", ",", "file", "=", "stream", ")", "print", "(", "\"^\"", "*", "len", "(", "title", ")", ",", "file", "=", "stream", ")", "for", "msgid", ",", "msg", "in", "sorted", "(", "msgs", ".", "items", "(", ")", ",", "key", "=", "lambda", "kv", ":", "(", "_MSG_ORDER", ".", "index", "(", "kv", "[", "0", "]", "[", "0", "]", ")", ",", "kv", "[", "1", "]", ")", ")", ":", "msg", "=", "build_message_definition", "(", "checker_name", ",", "msgid", ",", "msg", ")", "print", "(", "msg", ".", "format_help", "(", "checkerref", "=", "False", ")", ",", "file", "=", "stream", ")", "print", "(", "\"\"", ",", "file", "=", "stream", ")", "if", "reports", ":", "title", "=", "\"{} Reports\"", ".", "format", "(", "checker_title", ")", "print", "(", "title", ",", "file", "=", "stream", ")", "print", "(", "\"^\"", "*", "len", "(", "title", ")", ",", "file", "=", "stream", ")", "for", "report", "in", "reports", ":", "print", "(", "\":%s: %s\"", "%", "report", "[", ":", "2", "]", ",", "file", "=", "stream", ")", "print", "(", "\"\"", ",", "file", "=", "stream", ")", "print", "(", "\"\"", ",", "file", "=", "stream", ")" ]
Helper method for print_full_documentation. Also used by doc/exts/pylint_extensions.py.
[ "Helper", "method", "for", "print_full_documentation", "." ]
python
test
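A hypothetical call sketch, assuming _print_checker_doc is in scope (it is a private helper, so its import location can vary between pylint versions); the checker name and info values are invented. With only the "doc" and "module" keys present, the optional msgs/options/reports sections are simply skipped.

import sys

info = {
    "module": "pylint.extensions.docparams",
    "doc": "Checks that all parameters of a function are documented.",
    # "msgs", "options" and "reports" are optional and omitted here
}
_print_checker_doc("parameter_documentation", info, stream=sys.stdout)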
pantsbuild/pants
src/python/pants/base/exiter.py
https://github.com/pantsbuild/pants/blob/b72e650da0df685824ffdcc71988b8c282d0962d/src/python/pants/base/exiter.py#L83-L90
def exit_and_fail(self, msg=None, out=None):
    """Exits the runtime with a nonzero exit code, indicating failure.

    :param msg: A string message to print to stderr or another custom file descriptor
                before exiting. (Optional)
    :param out: The file descriptor to emit `msg` to. (Optional)
    """
    self.exit(result=PANTS_FAILED_EXIT_CODE, msg=msg, out=out)
[ "def", "exit_and_fail", "(", "self", ",", "msg", "=", "None", ",", "out", "=", "None", ")", ":", "self", ".", "exit", "(", "result", "=", "PANTS_FAILED_EXIT_CODE", ",", "msg", "=", "msg", ",", "out", "=", "out", ")" ]
Exits the runtime with a nonzero exit code, indicating failure.

    :param msg: A string message to print to stderr or another custom file descriptor
                before exiting. (Optional)
    :param out: The file descriptor to emit `msg` to. (Optional)
[ "Exits", "the", "runtime", "with", "a", "nonzero", "exit", "code", "indicating", "failure", "." ]
python
train
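A standalone sketch of the delegation pattern above (not the real pants Exiter); it only shows how exit_and_fail forwards to exit() with the failure code, and the exit-code value is assumed.

import sys

PANTS_FAILED_EXIT_CODE = 1   # assumed value for the sketch

class Exiter:
    def exit(self, result=0, msg=None, out=None):
        if msg:
            print(msg, file=out or sys.stderr)
        sys.exit(result)

    def exit_and_fail(self, msg=None, out=None):
        self.exit(result=PANTS_FAILED_EXIT_CODE, msg=msg, out=out)

# Exiter().exit_and_fail("goal failed")   # prints to stderr, then exits with code 1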
saltstack/salt
salt/utils/openstack/nova.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/utils/openstack/nova.py#L558-L573
def _volume_get(self, volume_id): ''' Organize information about a volume from the volume_id ''' if self.volume_conn is None: raise SaltCloudSystemExit('No cinder endpoint available') nt_ks = self.volume_conn volume = nt_ks.volumes.get(volume_id) response = {'name': volume.display_name, 'size': volume.size, 'id': volume.id, 'description': volume.display_description, 'attachments': volume.attachments, 'status': volume.status } return response
[ "def", "_volume_get", "(", "self", ",", "volume_id", ")", ":", "if", "self", ".", "volume_conn", "is", "None", ":", "raise", "SaltCloudSystemExit", "(", "'No cinder endpoint available'", ")", "nt_ks", "=", "self", ".", "volume_conn", "volume", "=", "nt_ks", ".", "volumes", ".", "get", "(", "volume_id", ")", "response", "=", "{", "'name'", ":", "volume", ".", "display_name", ",", "'size'", ":", "volume", ".", "size", ",", "'id'", ":", "volume", ".", "id", ",", "'description'", ":", "volume", ".", "display_description", ",", "'attachments'", ":", "volume", ".", "attachments", ",", "'status'", ":", "volume", ".", "status", "}", "return", "response" ]
Organize information about a volume from the volume_id
[ "Organize", "information", "about", "a", "volume", "from", "the", "volume_id" ]
python
train
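The method above returns a plain dictionary; the sketch below shows its shape with illustrative values only (no OpenStack call is made).

response = {
    'name': 'scratch-volume',
    'size': 100,                                          # GB
    'id': 'a1b2c3d4-0000-0000-0000-000000000000',
    'description': 'temporary scratch space',
    'attachments': [],
    'status': 'available',
}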
bitprophet/releases
releases/line_manager.py
https://github.com/bitprophet/releases/blob/97a763e41bbe7374106a1c648b89346a0d935429/releases/line_manager.py#L23-L37
def add_family(self, major_number): """ Expand to a new release line with given ``major_number``. This will flesh out mandatory buckets like ``unreleased_bugfix`` and do other necessary bookkeeping. """ # Normally, we have separate buckets for bugfixes vs features keys = ['unreleased_bugfix', 'unreleased_feature'] # But unstable prehistorical releases roll all up into just # 'unreleased' if major_number == 0 and self.config.releases_unstable_prehistory: keys = ['unreleased'] # Either way, the buckets default to an empty list self[major_number] = {key: [] for key in keys}
[ "def", "add_family", "(", "self", ",", "major_number", ")", ":", "# Normally, we have separate buckets for bugfixes vs features", "keys", "=", "[", "'unreleased_bugfix'", ",", "'unreleased_feature'", "]", "# But unstable prehistorical releases roll all up into just", "# 'unreleased'", "if", "major_number", "==", "0", "and", "self", ".", "config", ".", "releases_unstable_prehistory", ":", "keys", "=", "[", "'unreleased'", "]", "# Either way, the buckets default to an empty list", "self", "[", "major_number", "]", "=", "{", "key", ":", "[", "]", "for", "key", "in", "keys", "}" ]
Expand to a new release line with given ``major_number``. This will flesh out mandatory buckets like ``unreleased_bugfix`` and do other necessary bookkeeping.
[ "Expand", "to", "a", "new", "release", "line", "with", "given", "major_number", "." ]
python
train
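A plain-dict illustration of the bucket layout add_family creates; the real container is the releases line manager object, not a dict, and the major numbers are illustrative.

lines = {}

# normal release line: separate bugfix and feature buckets
lines[1] = {'unreleased_bugfix': [], 'unreleased_feature': []}

# a 0.x line with releases_unstable_prehistory=True collapses to one bucket
lines[0] = {'unreleased': []}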
pyamg/pyamg
pyamg/util/linalg.py
https://github.com/pyamg/pyamg/blob/89dc54aa27e278f65d2f54bdaf16ab97d7768fa6/pyamg/util/linalg.py#L58-L103
def infinity_norm(A): """Infinity norm of a matrix (maximum absolute row sum). Parameters ---------- A : csr_matrix, csc_matrix, sparse, or numpy matrix Sparse or dense matrix Returns ------- n : float Infinity norm of the matrix Notes ----- - This serves as an upper bound on spectral radius. - csr and csc avoid a deep copy - dense calls scipy.linalg.norm See Also -------- scipy.linalg.norm : dense matrix norms Examples -------- >>> import numpy as np >>> from scipy.sparse import spdiags >>> from pyamg.util.linalg import infinity_norm >>> n=10 >>> e = np.ones((n,1)).ravel() >>> data = [ -1*e, 2*e, -1*e ] >>> A = spdiags(data,[-1,0,1],n,n) >>> print infinity_norm(A) 4.0 """ if sparse.isspmatrix_csr(A) or sparse.isspmatrix_csc(A): # avoid copying index and ptr arrays abs_A = A.__class__((np.abs(A.data), A.indices, A.indptr), shape=A.shape) return (abs_A * np.ones((A.shape[1]), dtype=A.dtype)).max() elif sparse.isspmatrix(A): return (abs(A) * np.ones((A.shape[1]), dtype=A.dtype)).max() else: return np.dot(np.abs(A), np.ones((A.shape[1],), dtype=A.dtype)).max()
[ "def", "infinity_norm", "(", "A", ")", ":", "if", "sparse", ".", "isspmatrix_csr", "(", "A", ")", "or", "sparse", ".", "isspmatrix_csc", "(", "A", ")", ":", "# avoid copying index and ptr arrays", "abs_A", "=", "A", ".", "__class__", "(", "(", "np", ".", "abs", "(", "A", ".", "data", ")", ",", "A", ".", "indices", ",", "A", ".", "indptr", ")", ",", "shape", "=", "A", ".", "shape", ")", "return", "(", "abs_A", "*", "np", ".", "ones", "(", "(", "A", ".", "shape", "[", "1", "]", ")", ",", "dtype", "=", "A", ".", "dtype", ")", ")", ".", "max", "(", ")", "elif", "sparse", ".", "isspmatrix", "(", "A", ")", ":", "return", "(", "abs", "(", "A", ")", "*", "np", ".", "ones", "(", "(", "A", ".", "shape", "[", "1", "]", ")", ",", "dtype", "=", "A", ".", "dtype", ")", ")", ".", "max", "(", ")", "else", ":", "return", "np", ".", "dot", "(", "np", ".", "abs", "(", "A", ")", ",", "np", ".", "ones", "(", "(", "A", ".", "shape", "[", "1", "]", ",", ")", ",", "dtype", "=", "A", ".", "dtype", ")", ")", ".", "max", "(", ")" ]
Infinity norm of a matrix (maximum absolute row sum). Parameters ---------- A : csr_matrix, csc_matrix, sparse, or numpy matrix Sparse or dense matrix Returns ------- n : float Infinity norm of the matrix Notes ----- - This serves as an upper bound on spectral radius. - csr and csc avoid a deep copy - dense calls scipy.linalg.norm See Also -------- scipy.linalg.norm : dense matrix norms Examples -------- >>> import numpy as np >>> from scipy.sparse import spdiags >>> from pyamg.util.linalg import infinity_norm >>> n=10 >>> e = np.ones((n,1)).ravel() >>> data = [ -1*e, 2*e, -1*e ] >>> A = spdiags(data,[-1,0,1],n,n) >>> print infinity_norm(A) 4.0
[ "Infinity", "norm", "of", "a", "matrix", "(", "maximum", "absolute", "row", "sum", ")", "." ]
python
train
angr/angr
angr/analyses/cfg/cfg_emulated.py
https://github.com/angr/angr/blob/4e2f97d56af5419ee73bdb30482c8dd8ff5f3e40/angr/analyses/cfg/cfg_emulated.py#L1479-L1520
def _post_handle_job_debug(self, job, successors): """ Post job handling: print debugging information regarding the current job. :param CFGJob job: The current CFGJob instance. :param list successors: All successors of the analysis job. :return: None """ sim_successors = job.sim_successors call_stack_suffix = job.call_stack_suffix extra_info = job.extra_info successor_status = job.successor_status func = self.project.loader.find_symbol(job.func_addr) obj = self.project.loader.find_object_containing(job.addr) function_name = func.name if func is not None else None module_name = obj.provides if obj is not None else None node = self.model.get_node(job.block_id) depth_str = "(D:%s)" % node.depth if node.depth is not None else "" l.debug("%s [%#x%s | %s]", sim_successors.description, sim_successors.addr, depth_str, "->".join([hex(i) for i in call_stack_suffix if i is not None]) ) l.debug("(Function %s of binary %s)", function_name, module_name) l.debug("| Call jump: %s", extra_info['is_call_jump'] if extra_info is not None else 'unknown') for suc in successors: jumpkind = suc.history.jumpkind if jumpkind == "Ijk_FakeRet": exit_type_str = "Simulated Ret" else: exit_type_str = "-" try: l.debug("| target: %#x %s [%s] %s", suc.solver.eval_one(suc.ip), successor_status[suc], exit_type_str, jumpkind) except (SimValueError, SimSolverModeError): l.debug("| target cannot be concretized. %s [%s] %s", successor_status[suc], exit_type_str, jumpkind) l.debug("%d exits remaining, %d exits pending.", len(self._job_info_queue), len(self._pending_jobs)) l.debug("%d unique basic blocks are analyzed so far.", len(self._analyzed_addrs))
[ "def", "_post_handle_job_debug", "(", "self", ",", "job", ",", "successors", ")", ":", "sim_successors", "=", "job", ".", "sim_successors", "call_stack_suffix", "=", "job", ".", "call_stack_suffix", "extra_info", "=", "job", ".", "extra_info", "successor_status", "=", "job", ".", "successor_status", "func", "=", "self", ".", "project", ".", "loader", ".", "find_symbol", "(", "job", ".", "func_addr", ")", "obj", "=", "self", ".", "project", ".", "loader", ".", "find_object_containing", "(", "job", ".", "addr", ")", "function_name", "=", "func", ".", "name", "if", "func", "is", "not", "None", "else", "None", "module_name", "=", "obj", ".", "provides", "if", "obj", "is", "not", "None", "else", "None", "node", "=", "self", ".", "model", ".", "get_node", "(", "job", ".", "block_id", ")", "depth_str", "=", "\"(D:%s)\"", "%", "node", ".", "depth", "if", "node", ".", "depth", "is", "not", "None", "else", "\"\"", "l", ".", "debug", "(", "\"%s [%#x%s | %s]\"", ",", "sim_successors", ".", "description", ",", "sim_successors", ".", "addr", ",", "depth_str", ",", "\"->\"", ".", "join", "(", "[", "hex", "(", "i", ")", "for", "i", "in", "call_stack_suffix", "if", "i", "is", "not", "None", "]", ")", ")", "l", ".", "debug", "(", "\"(Function %s of binary %s)\"", ",", "function_name", ",", "module_name", ")", "l", ".", "debug", "(", "\"| Call jump: %s\"", ",", "extra_info", "[", "'is_call_jump'", "]", "if", "extra_info", "is", "not", "None", "else", "'unknown'", ")", "for", "suc", "in", "successors", ":", "jumpkind", "=", "suc", ".", "history", ".", "jumpkind", "if", "jumpkind", "==", "\"Ijk_FakeRet\"", ":", "exit_type_str", "=", "\"Simulated Ret\"", "else", ":", "exit_type_str", "=", "\"-\"", "try", ":", "l", ".", "debug", "(", "\"| target: %#x %s [%s] %s\"", ",", "suc", ".", "solver", ".", "eval_one", "(", "suc", ".", "ip", ")", ",", "successor_status", "[", "suc", "]", ",", "exit_type_str", ",", "jumpkind", ")", "except", "(", "SimValueError", ",", "SimSolverModeError", ")", ":", "l", ".", "debug", "(", "\"| target cannot be concretized. %s [%s] %s\"", ",", "successor_status", "[", "suc", "]", ",", "exit_type_str", ",", "jumpkind", ")", "l", ".", "debug", "(", "\"%d exits remaining, %d exits pending.\"", ",", "len", "(", "self", ".", "_job_info_queue", ")", ",", "len", "(", "self", ".", "_pending_jobs", ")", ")", "l", ".", "debug", "(", "\"%d unique basic blocks are analyzed so far.\"", ",", "len", "(", "self", ".", "_analyzed_addrs", ")", ")" ]
Post job handling: print debugging information regarding the current job. :param CFGJob job: The current CFGJob instance. :param list successors: All successors of the analysis job. :return: None
[ "Post", "job", "handling", ":", "print", "debugging", "information", "regarding", "the", "current", "job", "." ]
python
train
noahbenson/neuropythy
neuropythy/commands/atlas.py
https://github.com/noahbenson/neuropythy/blob/b588889f6db36ddb9602ae4a72c1c0d3f41586b2/neuropythy/commands/atlas.py#L158-L243
def calc_atlas_projections(subject_cortices, atlas_cortices, atlas_map, worklog, atlases=Ellipsis): ''' calc_atlas_projections calculates the lazy map of atlas projections. Afferent parameters: @ atlases The atlases that should be applied to the subject. This can be specified as a list/tuple of atlas names or as a string where the atlas names are separated by whitespace, commas, or semicolons. For example, to specify the 'benson14' atlas as well as the 'wang15' atlas, then ('benson14', 'wang15'), 'benson14 wang15' or 'benson14,wang15' would all be acceptable. To specify an atlas version, separate the atlas-name and the version with a colon (:), such as 'benson14:2.5'. If no version is provided, then the highest version found is used. If atlases is set to None or Ellipsis (the default), this is equivalent to 'benson14,wang15'. Efferent values: @ atlas_properties The atlas properties is a nested pimms lazy map whose key-path are like those of the atlas_map afferent parameter but which contains only those atlases requested via the atlases afferent parameter and whose deepest values are interpolated property vectors for the target subject. @ atlas_version_tags Each atlas can be specified as <atlas> or <atlas>:<version>; if the version is specified, then the version tag string (e.g., '.v1_5') is included in this dictionary; if only <atlas> was specified then this string is ''. If <atlas>: is specified, then the version string for whichever atlas was used is included. ''' # Parse the atlases argument first: if atlases is Ellipsis: atlases = ('benson14', 'wang15') if pimms.is_str(atlases): atlases = tuple(re.split(r'([,;]|\s)+', atlases)[::2]) def _atlas_to_atlver(atl): atl0 = atl if not pimms.is_vector(atl): if ':' in atl: atl = atl.split(':') if len(atl) != 2: raise ValueError('Cannot parse atlas spec: %s' % atl0) else: atl = [atl, None] if len(atl) != 2: raise ValueError('Improperly specified atlas: %s' % atl0) if pimms.is_str(atl[1]): if len(atl[1]) == 0: atl = (atl[0], None) else: if atl[1][0] == 'v': atl[1] = atl[1][1:] try: atl = (atl[0], tuple([int(x) for x in re.split(r'[-_.]+', atl[1])])) except Exception: raise ValueError('Could not parse atlas version string: %s' % atl[1]) elif pimms.is_int(atl[1]): atl = (atl[0], (atl[1],)) elif pimms.is_real(atl[1]): atl = (atl[0], (int(atl[1]), int(10*(atl[1] - int(atl[1]))),)) elif pimms.is_vector(atl[1], int): atl = (atl[0], tuple(atl[1])) elif atl[1] is not None: raise ValueError('atlas version must be a string (like "v1_5_1") or a list of ints') else: atl = tuple(atl) return atl + (atl0,) # Okay, let's find these versions of the atlases in the atlas_map... 
worklog('Preparing Atlases...') wl = worklog.indent() atl_props = AutoDict() avt = AutoDict() # keyfn is for sorting versions (newest version last) keyfn = lambda k:((np.inf,) if k is None else k + (np.inf,) if len(k) == 0 or k[-1] != 0 else k) for (atl,version,atl0) in [_atlas_to_atlver(atl) for atl in atlases]: if atl not in atlas_map: raise ValueError('Could not find an atlas named %s' % atl) atldat = atlas_map[atl] # if the version is None, we pick the highest of the available versions if version is None: v = sorted(atldat.keys(), key=keyfn)[-1] elif version in atldat: v = version else: raise ValueError('Could not find specific version %s of atlas %s' % (version,atl)) # update the atlas-version-tag data wl('Atlas: %s, Version: %s' % (atl, v)) avt[atl][v] = '' if v is None or ':' not in atl0 else ('.v' + '_'.join(map(str, v))) lmaps = atlas_map[atl][v] # convert these maps into interpolated properties... for (h,hmap) in six.iteritems(lmaps): hmap = pimms.lazy_map( {m:curry( lambda hmap,h,m: atlas_cortices[h].interpolate( subject_cortices[h], hmap[m]), hmap, h, m) for m in six.iterkeys(hmap)}) lmaps = lmaps.set(h, hmap) # add the lmaps (the persistent/lazy maps for this atlas version) in the atlprops atl_props[atl][v] = lmaps # That's all; we can return atl_props once we persist it return {'atlas_properties': pimms.persist(atl_props), 'atlas_version_tags': pimms.persist(avt)}
[ "def", "calc_atlas_projections", "(", "subject_cortices", ",", "atlas_cortices", ",", "atlas_map", ",", "worklog", ",", "atlases", "=", "Ellipsis", ")", ":", "# Parse the atlases argument first:", "if", "atlases", "is", "Ellipsis", ":", "atlases", "=", "(", "'benson14'", ",", "'wang15'", ")", "if", "pimms", ".", "is_str", "(", "atlases", ")", ":", "atlases", "=", "tuple", "(", "re", ".", "split", "(", "r'([,;]|\\s)+'", ",", "atlases", ")", "[", ":", ":", "2", "]", ")", "def", "_atlas_to_atlver", "(", "atl", ")", ":", "atl0", "=", "atl", "if", "not", "pimms", ".", "is_vector", "(", "atl", ")", ":", "if", "':'", "in", "atl", ":", "atl", "=", "atl", ".", "split", "(", "':'", ")", "if", "len", "(", "atl", ")", "!=", "2", ":", "raise", "ValueError", "(", "'Cannot parse atlas spec: %s'", "%", "atl0", ")", "else", ":", "atl", "=", "[", "atl", ",", "None", "]", "if", "len", "(", "atl", ")", "!=", "2", ":", "raise", "ValueError", "(", "'Improperly specified atlas: %s'", "%", "atl0", ")", "if", "pimms", ".", "is_str", "(", "atl", "[", "1", "]", ")", ":", "if", "len", "(", "atl", "[", "1", "]", ")", "==", "0", ":", "atl", "=", "(", "atl", "[", "0", "]", ",", "None", ")", "else", ":", "if", "atl", "[", "1", "]", "[", "0", "]", "==", "'v'", ":", "atl", "[", "1", "]", "=", "atl", "[", "1", "]", "[", "1", ":", "]", "try", ":", "atl", "=", "(", "atl", "[", "0", "]", ",", "tuple", "(", "[", "int", "(", "x", ")", "for", "x", "in", "re", ".", "split", "(", "r'[-_.]+'", ",", "atl", "[", "1", "]", ")", "]", ")", ")", "except", "Exception", ":", "raise", "ValueError", "(", "'Could not parse atlas version string: %s'", "%", "atl", "[", "1", "]", ")", "elif", "pimms", ".", "is_int", "(", "atl", "[", "1", "]", ")", ":", "atl", "=", "(", "atl", "[", "0", "]", ",", "(", "atl", "[", "1", "]", ",", ")", ")", "elif", "pimms", ".", "is_real", "(", "atl", "[", "1", "]", ")", ":", "atl", "=", "(", "atl", "[", "0", "]", ",", "(", "int", "(", "atl", "[", "1", "]", ")", ",", "int", "(", "10", "*", "(", "atl", "[", "1", "]", "-", "int", "(", "atl", "[", "1", "]", ")", ")", ")", ",", ")", ")", "elif", "pimms", ".", "is_vector", "(", "atl", "[", "1", "]", ",", "int", ")", ":", "atl", "=", "(", "atl", "[", "0", "]", ",", "tuple", "(", "atl", "[", "1", "]", ")", ")", "elif", "atl", "[", "1", "]", "is", "not", "None", ":", "raise", "ValueError", "(", "'atlas version must be a string (like \"v1_5_1\") or a list of ints'", ")", "else", ":", "atl", "=", "tuple", "(", "atl", ")", "return", "atl", "+", "(", "atl0", ",", ")", "# Okay, let's find these versions of the atlases in the atlas_map...", "worklog", "(", "'Preparing Atlases...'", ")", "wl", "=", "worklog", ".", "indent", "(", ")", "atl_props", "=", "AutoDict", "(", ")", "avt", "=", "AutoDict", "(", ")", "# keyfn is for sorting versions (newest version last)", "keyfn", "=", "lambda", "k", ":", "(", "(", "np", ".", "inf", ",", ")", "if", "k", "is", "None", "else", "k", "+", "(", "np", ".", "inf", ",", ")", "if", "len", "(", "k", ")", "==", "0", "or", "k", "[", "-", "1", "]", "!=", "0", "else", "k", ")", "for", "(", "atl", ",", "version", ",", "atl0", ")", "in", "[", "_atlas_to_atlver", "(", "atl", ")", "for", "atl", "in", "atlases", "]", ":", "if", "atl", "not", "in", "atlas_map", ":", "raise", "ValueError", "(", "'Could not find an atlas named %s'", "%", "atl", ")", "atldat", "=", "atlas_map", "[", "atl", "]", "# if the version is None, we pick the highest of the available versions", "if", "version", "is", "None", ":", "v", "=", "sorted", "(", "atldat", ".", "keys", "(", ")", 
",", "key", "=", "keyfn", ")", "[", "-", "1", "]", "elif", "version", "in", "atldat", ":", "v", "=", "version", "else", ":", "raise", "ValueError", "(", "'Could not find specific version %s of atlas %s'", "%", "(", "version", ",", "atl", ")", ")", "# update the atlas-version-tag data", "wl", "(", "'Atlas: %s, Version: %s'", "%", "(", "atl", ",", "v", ")", ")", "avt", "[", "atl", "]", "[", "v", "]", "=", "''", "if", "v", "is", "None", "or", "':'", "not", "in", "atl0", "else", "(", "'.v'", "+", "'_'", ".", "join", "(", "map", "(", "str", ",", "v", ")", ")", ")", "lmaps", "=", "atlas_map", "[", "atl", "]", "[", "v", "]", "# convert these maps into interpolated properties...", "for", "(", "h", ",", "hmap", ")", "in", "six", ".", "iteritems", "(", "lmaps", ")", ":", "hmap", "=", "pimms", ".", "lazy_map", "(", "{", "m", ":", "curry", "(", "lambda", "hmap", ",", "h", ",", "m", ":", "atlas_cortices", "[", "h", "]", ".", "interpolate", "(", "subject_cortices", "[", "h", "]", ",", "hmap", "[", "m", "]", ")", ",", "hmap", ",", "h", ",", "m", ")", "for", "m", "in", "six", ".", "iterkeys", "(", "hmap", ")", "}", ")", "lmaps", "=", "lmaps", ".", "set", "(", "h", ",", "hmap", ")", "# add the lmaps (the persistent/lazy maps for this atlas version) in the atlprops", "atl_props", "[", "atl", "]", "[", "v", "]", "=", "lmaps", "# That's all; we can return atl_props once we persist it", "return", "{", "'atlas_properties'", ":", "pimms", ".", "persist", "(", "atl_props", ")", ",", "'atlas_version_tags'", ":", "pimms", ".", "persist", "(", "avt", ")", "}" ]
calc_atlas_projections calculates the lazy map of atlas projections. Afferent parameters: @ atlases The atlases that should be applied to the subject. This can be specified as a list/tuple of atlas names or as a string where the atlas names are separated by whitespace, commas, or semicolons. For example, to specify the 'benson14' atlas as well as the 'wang15' atlas, then ('benson14', 'wang15'), 'benson14 wang15' or 'benson14,wang15' would all be acceptable. To specify an atlas version, separate the atlas-name and the version with a colon (:), such as 'benson14:2.5'. If no version is provided, then the highest version found is used. If atlases is set to None or Ellipsis (the default), this is equivalent to 'benson14,wang15'. Efferent values: @ atlas_properties The atlas properties is a nested pimms lazy map whose key-path are like those of the atlas_map afferent parameter but which contains only those atlases requested via the atlases afferent parameter and whose deepest values are interpolated property vectors for the target subject. @ atlas_version_tags Each atlas can be specified as <atlas> or <atlas>:<version>; if the version is specified, then the version tag string (e.g., '.v1_5') is included in this dictionary; if only <atlas> was specified then this string is ''. If <atlas>: is specified, then the version string for whichever atlas was used is included.
[ "calc_atlas_projections", "calculates", "the", "lazy", "map", "of", "atlas", "projections", "." ]
python
train
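The atlas specs accepted by the `atlases` argument above are plain strings or tuples; the snippet below only demonstrates the documented spelling and the whitespace/comma splitting used in the function, without calling neuropythy.

import re

atlases = 'benson14, wang15'                  # comma/space/semicolon separated
print(re.split(r'([,;]|\s)+', atlases)[::2])  # ['benson14', 'wang15']

atlases = ('benson14:2.5', 'wang15')          # explicit version after a colon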
aholkner/bacon
native/Vendor/FreeType/src/tools/docmaker/tohtml.py
https://github.com/aholkner/bacon/blob/edf3810dcb211942d392a8637945871399b0650d/native/Vendor/FreeType/src/tools/docmaker/tohtml.py#L245-L253
def make_html_words( self, words ): """ convert a series of simple words into some HTML text """ line = "" if words: line = html_quote( words[0] ) for w in words[1:]: line = line + " " + html_quote( w ) return line
[ "def", "make_html_words", "(", "self", ",", "words", ")", ":", "line", "=", "\"\"", "if", "words", ":", "line", "=", "html_quote", "(", "words", "[", "0", "]", ")", "for", "w", "in", "words", "[", "1", ":", "]", ":", "line", "=", "line", "+", "\" \"", "+", "html_quote", "(", "w", ")", "return", "line" ]
convert a series of simple words into some HTML text
[ "convert", "a", "series", "of", "simple", "words", "into", "some", "HTML", "text" ]
python
test
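A standalone sketch of the helper above; html_quote is mimicked here with html.escape from the standard library, which may differ slightly from the project's own quoting.

from html import escape as html_quote

def make_html_words(words):
    line = ""
    if words:
        line = html_quote(words[0])
        for w in words[1:]:
            line = line + " " + html_quote(w)
    return line

print(make_html_words(["a", "<b>", "&c"]))   # a &lt;b&gt; &amp;c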
rambo/python-holviapi
holviapi/utils.py
https://github.com/rambo/python-holviapi/blob/f57f44e7b0a1030786aafd6f387114abb546bb32/holviapi/utils.py#L167-L173
def iso_reference_valid_char(c, raise_error=True): """Helper to make sure the given character is valid for a reference number""" if c in ISO_REFERENCE_VALID: return True if raise_error: raise ValueError("'%s' is not in '%s'" % (c, ISO_REFERENCE_VALID)) return False
[ "def", "iso_reference_valid_char", "(", "c", ",", "raise_error", "=", "True", ")", ":", "if", "c", "in", "ISO_REFERENCE_VALID", ":", "return", "True", "if", "raise_error", ":", "raise", "ValueError", "(", "\"'%s' is not in '%s'\"", "%", "(", "c", ",", "ISO_REFERENCE_VALID", ")", ")", "return", "False" ]
Helper to make sure the given character is valid for a reference number
[ "Helper", "to", "make", "sure", "the", "given", "character", "is", "valid", "for", "a", "reference", "number" ]
python
valid
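A standalone sketch of the validator above; the real ISO_REFERENCE_VALID constant lives in holviapi.utils and is assumed here to be digits plus upper-case letters.

import string

ISO_REFERENCE_VALID = string.digits + string.ascii_uppercase   # assumption

def iso_reference_valid_char(c, raise_error=True):
    if c in ISO_REFERENCE_VALID:
        return True
    if raise_error:
        raise ValueError("'%s' is not in '%s'" % (c, ISO_REFERENCE_VALID))
    return False

print(iso_reference_valid_char('A'))                      # True
print(iso_reference_valid_char('!', raise_error=False))   # False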
aliyun/aliyun-odps-python-sdk
odps/df/expr/collections.py
https://github.com/aliyun/aliyun-odps-python-sdk/blob/4b0de18f5864386df6068f26f026e62f932c41e4/odps/df/expr/collections.py#L270-L363
def sample(expr, parts=None, columns=None, i=None, n=None, frac=None, replace=False, weights=None, strata=None, random_state=None): """ Sample collection. :param expr: collection :param parts: how many parts to hash :param columns: the columns to sample :param i: the part to sample out, can be a list of parts, must be from 0 to parts-1 :param n: how many rows to sample. If `strata` is specified, `n` should be a dict with values in the strata column as dictionary keys and corresponding sample size as values :param frac: how many fraction to sample. If `strata` is specified, `n` should be a dict with values in the strata column as dictionary keys and corresponding sample weight as values :param replace: whether to perform replace sampling :param weights: the column name of weights :param strata: the name of strata column :param random_state: the random seed when performing sampling :return: collection Note that n, frac, replace, weights, strata and random_state can only be used under Pandas DataFrames or XFlow. :Example: Sampling with parts: >>> df.sample(parts=1) >>> df.sample(parts=5, i=0) >>> df.sample(parts=10, columns=['name']) Sampling with fraction or weights, replacement option can be specified: >>> df.sample(n=100) >>> df.sample(frac=100) >>> df.sample(frac=100, replace=True) Sampling with weight column: >>> df.sample(n=100, weights='weight_col') >>> df.sample(n=100, weights='weight_col', replace=True) Stratified sampling. Note that currently we do not support stratified sampling with replacement. >>> df.sample(strata='category', frac={'Iris Setosa': 0.5, 'Iris Versicolour': 0.4}) """ if isinstance(expr, CollectionExpr): if n is None and frac is None and parts is None: raise ExpressionError('Either n or frac or parts should be provided') if i is not None and parts is None: raise ExpressionError('`parts` arg is required when `i` arg is specified') if len([arg for arg in (n, frac, parts) if arg is not None]) > 1: raise ExpressionError('You cannot specify `n` or `frac` or `parts` at the same time') if strata is None and n is not None and frac is not None: # strata can specify different types of strategies on different columns raise ExpressionError('You cannot specify `n` and `frac` at the same time.') if weights is not None and strata is not None: raise ExpressionError('You cannot specify `weights` and `strata` at the same time.') if strata is not None: if frac is not None and not isinstance(frac, (six.string_types, dict)): raise ExpressionError('`frac` should be a k-v string or a dictionary object.') if isinstance(frac, six.string_types): frac = str_to_kv(frac, float) if n is not None and not isinstance(n, (six.string_types, dict)): raise ExpressionError('`n` should be a k-v string or a dictionary object.') if isinstance(n, six.string_types): n = str_to_kv(n, int) for val in six.itervalues(frac or dict()): if val < 0 or val > 1: raise ExpressionError('Values in `frac` must be between 0 and 1') if n is not None and frac is not None: collides = set(six.iterkeys(n)).intersection(set(six.iterkeys(frac))) if collides: raise ExpressionError('Values in `frac` and `n` collides with each other.') else: if frac is not None and (not isinstance(frac, (six.integer_types, float)) or frac < 0 or frac > 1): raise ExpressionError('`frac` must be between 0 and 1') if parts is not None: if i is None: i = (0, ) elif isinstance(i, list): i = tuple(i) elif not isinstance(i, tuple): i = (i, ) for it in i: if it >= parts or it < 0: raise ExpressionError('`i` should be positive numbers that less than 
`parts`') elif hasattr(expr, '_xflow_sample'): return expr._xflow_sample(n=n, frac=frac, replace=replace, weights=weights, strata=strata, random_state=random_state) return expr.__sample(parts=parts, columns=columns, i=i, n=n, frac=frac, replace=replace, weights=weights, strata=strata, random_state=random_state)
[ "def", "sample", "(", "expr", ",", "parts", "=", "None", ",", "columns", "=", "None", ",", "i", "=", "None", ",", "n", "=", "None", ",", "frac", "=", "None", ",", "replace", "=", "False", ",", "weights", "=", "None", ",", "strata", "=", "None", ",", "random_state", "=", "None", ")", ":", "if", "isinstance", "(", "expr", ",", "CollectionExpr", ")", ":", "if", "n", "is", "None", "and", "frac", "is", "None", "and", "parts", "is", "None", ":", "raise", "ExpressionError", "(", "'Either n or frac or parts should be provided'", ")", "if", "i", "is", "not", "None", "and", "parts", "is", "None", ":", "raise", "ExpressionError", "(", "'`parts` arg is required when `i` arg is specified'", ")", "if", "len", "(", "[", "arg", "for", "arg", "in", "(", "n", ",", "frac", ",", "parts", ")", "if", "arg", "is", "not", "None", "]", ")", ">", "1", ":", "raise", "ExpressionError", "(", "'You cannot specify `n` or `frac` or `parts` at the same time'", ")", "if", "strata", "is", "None", "and", "n", "is", "not", "None", "and", "frac", "is", "not", "None", ":", "# strata can specify different types of strategies on different columns", "raise", "ExpressionError", "(", "'You cannot specify `n` and `frac` at the same time.'", ")", "if", "weights", "is", "not", "None", "and", "strata", "is", "not", "None", ":", "raise", "ExpressionError", "(", "'You cannot specify `weights` and `strata` at the same time.'", ")", "if", "strata", "is", "not", "None", ":", "if", "frac", "is", "not", "None", "and", "not", "isinstance", "(", "frac", ",", "(", "six", ".", "string_types", ",", "dict", ")", ")", ":", "raise", "ExpressionError", "(", "'`frac` should be a k-v string or a dictionary object.'", ")", "if", "isinstance", "(", "frac", ",", "six", ".", "string_types", ")", ":", "frac", "=", "str_to_kv", "(", "frac", ",", "float", ")", "if", "n", "is", "not", "None", "and", "not", "isinstance", "(", "n", ",", "(", "six", ".", "string_types", ",", "dict", ")", ")", ":", "raise", "ExpressionError", "(", "'`n` should be a k-v string or a dictionary object.'", ")", "if", "isinstance", "(", "n", ",", "six", ".", "string_types", ")", ":", "n", "=", "str_to_kv", "(", "n", ",", "int", ")", "for", "val", "in", "six", ".", "itervalues", "(", "frac", "or", "dict", "(", ")", ")", ":", "if", "val", "<", "0", "or", "val", ">", "1", ":", "raise", "ExpressionError", "(", "'Values in `frac` must be between 0 and 1'", ")", "if", "n", "is", "not", "None", "and", "frac", "is", "not", "None", ":", "collides", "=", "set", "(", "six", ".", "iterkeys", "(", "n", ")", ")", ".", "intersection", "(", "set", "(", "six", ".", "iterkeys", "(", "frac", ")", ")", ")", "if", "collides", ":", "raise", "ExpressionError", "(", "'Values in `frac` and `n` collides with each other.'", ")", "else", ":", "if", "frac", "is", "not", "None", "and", "(", "not", "isinstance", "(", "frac", ",", "(", "six", ".", "integer_types", ",", "float", ")", ")", "or", "frac", "<", "0", "or", "frac", ">", "1", ")", ":", "raise", "ExpressionError", "(", "'`frac` must be between 0 and 1'", ")", "if", "parts", "is", "not", "None", ":", "if", "i", "is", "None", ":", "i", "=", "(", "0", ",", ")", "elif", "isinstance", "(", "i", ",", "list", ")", ":", "i", "=", "tuple", "(", "i", ")", "elif", "not", "isinstance", "(", "i", ",", "tuple", ")", ":", "i", "=", "(", "i", ",", ")", "for", "it", "in", "i", ":", "if", "it", ">=", "parts", "or", "it", "<", "0", ":", "raise", "ExpressionError", "(", "'`i` should be positive numbers that less than `parts`'", ")", "elif", "hasattr", "(", "expr", ",", 
"'_xflow_sample'", ")", ":", "return", "expr", ".", "_xflow_sample", "(", "n", "=", "n", ",", "frac", "=", "frac", ",", "replace", "=", "replace", ",", "weights", "=", "weights", ",", "strata", "=", "strata", ",", "random_state", "=", "random_state", ")", "return", "expr", ".", "__sample", "(", "parts", "=", "parts", ",", "columns", "=", "columns", ",", "i", "=", "i", ",", "n", "=", "n", ",", "frac", "=", "frac", ",", "replace", "=", "replace", ",", "weights", "=", "weights", ",", "strata", "=", "strata", ",", "random_state", "=", "random_state", ")" ]
Sample collection. :param expr: collection :param parts: how many parts to hash :param columns: the columns to sample :param i: the part to sample out, can be a list of parts, must be from 0 to parts-1 :param n: how many rows to sample. If `strata` is specified, `n` should be a dict with values in the strata column as dictionary keys and corresponding sample size as values :param frac: how many fraction to sample. If `strata` is specified, `n` should be a dict with values in the strata column as dictionary keys and corresponding sample weight as values :param replace: whether to perform replace sampling :param weights: the column name of weights :param strata: the name of strata column :param random_state: the random seed when performing sampling :return: collection Note that n, frac, replace, weights, strata and random_state can only be used under Pandas DataFrames or XFlow. :Example: Sampling with parts: >>> df.sample(parts=1) >>> df.sample(parts=5, i=0) >>> df.sample(parts=10, columns=['name']) Sampling with fraction or weights, replacement option can be specified: >>> df.sample(n=100) >>> df.sample(frac=100) >>> df.sample(frac=100, replace=True) Sampling with weight column: >>> df.sample(n=100, weights='weight_col') >>> df.sample(n=100, weights='weight_col', replace=True) Stratified sampling. Note that currently we do not support stratified sampling with replacement. >>> df.sample(strata='category', frac={'Iris Setosa': 0.5, 'Iris Versicolour': 0.4})
[ "Sample", "collection", "." ]
python
train
openid/JWTConnect-Python-OidcService
src/oidcservice/state_interface.py
https://github.com/openid/JWTConnect-Python-OidcService/blob/759ab7adef30a7e3b9d75475e2971433b9613788/src/oidcservice/state_interface.py#L103-L118
def get_item(self, item_cls, item_type, key): """ Get a piece of information (a request or a response) from the state database. :param item_cls: The :py:class:`oidcmsg.message.Message` subclass that described the item. :param item_type: Which request/response that is wanted :param key: The key to the information in the state database :return: A :py:class:`oidcmsg.message.Message` instance """ _state = self.get_state(key) try: return item_cls(**_state[item_type]) except TypeError: return item_cls().from_json(_state[item_type])
[ "def", "get_item", "(", "self", ",", "item_cls", ",", "item_type", ",", "key", ")", ":", "_state", "=", "self", ".", "get_state", "(", "key", ")", "try", ":", "return", "item_cls", "(", "*", "*", "_state", "[", "item_type", "]", ")", "except", "TypeError", ":", "return", "item_cls", "(", ")", ".", "from_json", "(", "_state", "[", "item_type", "]", ")" ]
Get a piece of information (a request or a response) from the state database. :param item_cls: The :py:class:`oidcmsg.message.Message` subclass that described the item. :param item_type: Which request/response that is wanted :param key: The key to the information in the state database :return: A :py:class:`oidcmsg.message.Message` instance
[ "Get", "a", "piece", "of", "information", "(", "a", "request", "or", "a", "response", ")", "from", "the", "state", "database", "." ]
python
train
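A standalone illustration of the dict-vs-JSON fallback used in get_item above; a tiny stand-in class replaces oidcmsg.message.Message so the snippet runs on its own.

import json

class Msg(dict):                        # stand-in for oidcmsg.message.Message
    def from_json(self, txt):
        self.update(json.loads(txt))
        return self

def build(stored):
    try:
        return Msg(**stored)            # works when the state stored a dict
    except TypeError:
        return Msg().from_json(stored)  # fallback when it stored a JSON string

print(build({'state': 'abc'}))          # {'state': 'abc'}
print(build('{"state": "abc"}'))        # {'state': 'abc'}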
wilson-eft/wilson
wilson/run/smeft/classes.py
https://github.com/wilson-eft/wilson/blob/4164f55ff663d4f668c6e2b4575fd41562662cc9/wilson/run/smeft/classes.py#L197-L214
def run(self, scale, accuracy='integrate', **kwargs): """Return the Wilson coefficients (as wcxf.WC instance) evolved to the scale `scale`. Parameters: - `scale`: scale in GeV - accuracy: whether to use the numerical solution to the RGE ('integrate', the default, slow but precise) or the leading logarithmic approximation ('leadinglog', approximate but much faster). """ if accuracy == 'integrate': C_out = self._rgevolve(scale, **kwargs) elif accuracy == 'leadinglog': C_out = self._rgevolve_leadinglog(scale) else: raise ValueError("'{}' is not a valid value of 'accuracy' (must be either 'integrate' or 'leadinglog').".format(accuracy)) return self._to_wcxf(C_out, scale)
[ "def", "run", "(", "self", ",", "scale", ",", "accuracy", "=", "'integrate'", ",", "*", "*", "kwargs", ")", ":", "if", "accuracy", "==", "'integrate'", ":", "C_out", "=", "self", ".", "_rgevolve", "(", "scale", ",", "*", "*", "kwargs", ")", "elif", "accuracy", "==", "'leadinglog'", ":", "C_out", "=", "self", ".", "_rgevolve_leadinglog", "(", "scale", ")", "else", ":", "raise", "ValueError", "(", "\"'{}' is not a valid value of 'accuracy' (must be either 'integrate' or 'leadinglog').\"", ".", "format", "(", "accuracy", ")", ")", "return", "self", ".", "_to_wcxf", "(", "C_out", ",", "scale", ")" ]
Return the Wilson coefficients (as wcxf.WC instance) evolved to the scale `scale`. Parameters: - `scale`: scale in GeV - accuracy: whether to use the numerical solution to the RGE ('integrate', the default, slow but precise) or the leading logarithmic approximation ('leadinglog', approximate but much faster).
[ "Return", "the", "Wilson", "coefficients", "(", "as", "wcxf", ".", "WC", "instance", ")", "evolved", "to", "the", "scale", "scale", "." ]
python
train
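The `run` record above dispatches on the `accuracy` flag before converting the result back to a wcxf.WC instance. A rough standalone sketch of that dispatch; the toy `_rgevolve*` functions are placeholders, not the actual wilson RGE solvers.

def _rgevolve(scale):
    # placeholder for the numerically integrated RGE solution
    return {'C': 1.0 / scale}

def _rgevolve_leadinglog(scale):
    # placeholder for the leading-log approximation
    return {'C': 1.0 - (scale - 1.0)}

def run(scale, accuracy='integrate'):
    if accuracy == 'integrate':
        return _rgevolve(scale)
    elif accuracy == 'leadinglog':
        return _rgevolve_leadinglog(scale)
    raise ValueError("'{}' is not a valid value of 'accuracy' "
                     "(must be either 'integrate' or 'leadinglog').".format(accuracy))

print(run(100.0))                         # slow but precise path
print(run(100.0, accuracy='leadinglog'))  # fast approximate path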
spacetelescope/pysynphot
pysynphot/binning.py
https://github.com/spacetelescope/pysynphot/blob/a125ff956f4d94beb157bd51899747a13234bb97/pysynphot/binning.py#L7-L42
def calculate_bin_edges(centers): """ Calculate the edges of wavelength bins given the centers. The algorithm calculates bin edges as the midpoints between bin centers and treats the first and last bins as symmetric about their centers. Parameters ---------- centers : array_like Sequence of bin centers. Must be 1D and have at least two values. Returns ------- edges : ndarray Array of bin edges. Will be 1D and have one more value than ``centers``. """ centers = np.asanyarray(centers) if centers.ndim != 1: raise ValueError('centers input array must be 1D.') if centers.size < 2: raise ValueError('centers input must have at least two values.') edges = np.empty(centers.size + 1) edges[1:-1] = (centers[1:] + centers[:-1]) / 2. #compute the first and last by making them symmetric edges[0] = centers[0] - (edges[1] - centers[0]) edges[-1] = centers[-1] + (centers[-1] - edges[-2]) return edges
[ "def", "calculate_bin_edges", "(", "centers", ")", ":", "centers", "=", "np", ".", "asanyarray", "(", "centers", ")", "if", "centers", ".", "ndim", "!=", "1", ":", "raise", "ValueError", "(", "'centers input array must be 1D.'", ")", "if", "centers", ".", "size", "<", "2", ":", "raise", "ValueError", "(", "'centers input must have at least two values.'", ")", "edges", "=", "np", ".", "empty", "(", "centers", ".", "size", "+", "1", ")", "edges", "[", "1", ":", "-", "1", "]", "=", "(", "centers", "[", "1", ":", "]", "+", "centers", "[", ":", "-", "1", "]", ")", "/", "2.", "#compute the first and last by making them symmetric", "edges", "[", "0", "]", "=", "centers", "[", "0", "]", "-", "(", "edges", "[", "1", "]", "-", "centers", "[", "0", "]", ")", "edges", "[", "-", "1", "]", "=", "centers", "[", "-", "1", "]", "+", "(", "centers", "[", "-", "1", "]", "-", "edges", "[", "-", "2", "]", ")", "return", "edges" ]
Calculate the edges of wavelength bins given the centers. The algorithm calculates bin edges as the midpoints between bin centers and treats the first and last bins as symmetric about their centers. Parameters ---------- centers : array_like Sequence of bin centers. Must be 1D and have at least two values. Returns ------- edges : ndarray Array of bin edges. Will be 1D and have one more value than ``centers``.
[ "Calculate", "the", "edges", "of", "wavelength", "bins", "given", "the", "centers", "." ]
python
train
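A quick numeric check of the bin-edge rule documented above (interior edges are midpoints, the first and last bins are symmetric about their centers); this is a standalone NumPy re-derivation, not a call into pysynphot.

import numpy as np

centers = np.array([1.0, 2.0, 4.0, 8.0])
edges = np.empty(centers.size + 1)
edges[1:-1] = (centers[1:] + centers[:-1]) / 2.0     # interior edges are midpoints
edges[0] = centers[0] - (edges[1] - centers[0])      # first bin symmetric about its center
edges[-1] = centers[-1] + (centers[-1] - edges[-2])  # last bin symmetric about its center
print(edges)  # [ 0.5  1.5  3.   6.  10. ]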
KnightConan/sspdatatables
src/sspdatatables/utils/enum.py
https://github.com/KnightConan/sspdatatables/blob/1179a11358734e5e472e5eee703e8d34fa49e9bf/src/sspdatatables/utils/enum.py#L135-L157
def describe(cls) -> None: """ Prints in the console a table showing all the attributes for all the definitions inside the class :return: None """ max_lengths = [] for attr_name in cls.attr_names(): attr_func = "%ss" % attr_name attr_list = list(map(str, getattr(cls, attr_func)())) + [attr_name] max_lengths.append(max(list(map(len, attr_list)))) row_format = "{:>%d} | {:>%d} | {:>%d}" % tuple(max_lengths) headers = [attr_name.capitalize() for attr_name in cls.attr_names()] header_line = row_format.format(*headers) output = "Class: %s\n" % cls.__name__ output += header_line + "\n" output += "-"*(len(header_line)) + "\n" for item in cls: format_list = [str(getattr(item, attr_name)) for attr_name in cls.attr_names()] output += row_format.format(*format_list) + "\n" print(output)
[ "def", "describe", "(", "cls", ")", "->", "None", ":", "max_lengths", "=", "[", "]", "for", "attr_name", "in", "cls", ".", "attr_names", "(", ")", ":", "attr_func", "=", "\"%ss\"", "%", "attr_name", "attr_list", "=", "list", "(", "map", "(", "str", ",", "getattr", "(", "cls", ",", "attr_func", ")", "(", ")", ")", ")", "+", "[", "attr_name", "]", "max_lengths", ".", "append", "(", "max", "(", "list", "(", "map", "(", "len", ",", "attr_list", ")", ")", ")", ")", "row_format", "=", "\"{:>%d} | {:>%d} | {:>%d}\"", "%", "tuple", "(", "max_lengths", ")", "headers", "=", "[", "attr_name", ".", "capitalize", "(", ")", "for", "attr_name", "in", "cls", ".", "attr_names", "(", ")", "]", "header_line", "=", "row_format", ".", "format", "(", "*", "headers", ")", "output", "=", "\"Class: %s\\n\"", "%", "cls", ".", "__name__", "output", "+=", "header_line", "+", "\"\\n\"", "output", "+=", "\"-\"", "*", "(", "len", "(", "header_line", ")", ")", "+", "\"\\n\"", "for", "item", "in", "cls", ":", "format_list", "=", "[", "str", "(", "getattr", "(", "item", ",", "attr_name", ")", ")", "for", "attr_name", "in", "cls", ".", "attr_names", "(", ")", "]", "output", "+=", "row_format", ".", "format", "(", "*", "format_list", ")", "+", "\"\\n\"", "print", "(", "output", ")" ]
Prints in the console a table showing all the attributes for all the definitions inside the class :return: None
[ "Prints", "in", "the", "console", "a", "table", "showing", "all", "the", "attributes", "for", "all", "the", "definitions", "inside", "the", "class" ]
python
train
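The `describe` record above sizes each column from the widest cell before printing the table. A self-contained sketch of that width computation; the example rows are made up.

rows = [("RED", 1, "warm"), ("BLUE", 2, "cool"), ("GREEN", 3, "neutral")]
headers = ("Name", "Value", "Tone")

# Column width = widest cell in that column, including the header.
widths = [max(len(str(cell)) for cell in col) for col in zip(headers, *rows)]
row_format = " | ".join("{:>%d}" % w for w in widths)

header_line = row_format.format(*headers)
print(header_line)
print("-" * len(header_line))
for row in rows:
    print(row_format.format(*(str(c) for c in row)))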
quantumlib/Cirq
cirq/circuits/text_diagram_drawer.py
https://github.com/quantumlib/Cirq/blob/0827da80dd7880e5b923eb69407e980ed9bc0bd2/cirq/circuits/text_diagram_drawer.py#L144-L155
def transpose(self) -> 'TextDiagramDrawer': """Returns the same diagram, but mirrored across its diagonal.""" out = TextDiagramDrawer() out.entries = {(y, x): _DiagramText(v.transposed_text, v.text) for (x, y), v in self.entries.items()} out.vertical_lines = [_VerticalLine(*e) for e in self.horizontal_lines] out.horizontal_lines = [_HorizontalLine(*e) for e in self.vertical_lines] out.vertical_padding = self.horizontal_padding.copy() out.horizontal_padding = self.vertical_padding.copy() return out
[ "def", "transpose", "(", "self", ")", "->", "'TextDiagramDrawer'", ":", "out", "=", "TextDiagramDrawer", "(", ")", "out", ".", "entries", "=", "{", "(", "y", ",", "x", ")", ":", "_DiagramText", "(", "v", ".", "transposed_text", ",", "v", ".", "text", ")", "for", "(", "x", ",", "y", ")", ",", "v", "in", "self", ".", "entries", ".", "items", "(", ")", "}", "out", ".", "vertical_lines", "=", "[", "_VerticalLine", "(", "*", "e", ")", "for", "e", "in", "self", ".", "horizontal_lines", "]", "out", ".", "horizontal_lines", "=", "[", "_HorizontalLine", "(", "*", "e", ")", "for", "e", "in", "self", ".", "vertical_lines", "]", "out", ".", "vertical_padding", "=", "self", ".", "horizontal_padding", ".", "copy", "(", ")", "out", ".", "horizontal_padding", "=", "self", ".", "vertical_padding", ".", "copy", "(", ")", "return", "out" ]
Returns the same diagram, but mirrored across its diagonal.
[ "Returns", "the", "same", "diagram", "but", "mirrored", "across", "its", "diagonal", "." ]
python
train
helium/helium-python
helium/timeseries.py
https://github.com/helium/helium-python/blob/db73480b143da4fc48e95c4414bd69c576a3a390/helium/timeseries.py#L228-L269
def create(self, port, value, timestamp=None): """Post a new reading to a timeseries. A reading is comprised of a `port`, a `value` and a timestamp. A port is like a tag for the given reading and gives an indication of the meaning of the value. The value of the reading can be any valid json value. The timestamp is considered the time the reading was taken, as opposed to the `created` time of the data-point which represents when the data-point was stored in the Helium API. If the timestamp is not given the server will construct a timestemp upon receiving the new reading. Args: port(string): The port to use for the new data-point value: The value for the new data-point Keyword Args: timestamp(:class:`datetime`): An optional :class:`datetime` object """ session = self._session datapoint_class = self._datapoint_class attributes = { 'port': port, 'value': value, } if timestamp is not None: attributes['timestamp'] = to_iso_date(timestamp) attributes = build_request_body('data-point', None, attributes=attributes) def _process(json): data = json.get('data') return datapoint_class(data, session) return session.post(self._base_url, CB.json(201, _process), json=attributes)
[ "def", "create", "(", "self", ",", "port", ",", "value", ",", "timestamp", "=", "None", ")", ":", "session", "=", "self", ".", "_session", "datapoint_class", "=", "self", ".", "_datapoint_class", "attributes", "=", "{", "'port'", ":", "port", ",", "'value'", ":", "value", ",", "}", "if", "timestamp", "is", "not", "None", ":", "attributes", "[", "'timestamp'", "]", "=", "to_iso_date", "(", "timestamp", ")", "attributes", "=", "build_request_body", "(", "'data-point'", ",", "None", ",", "attributes", "=", "attributes", ")", "def", "_process", "(", "json", ")", ":", "data", "=", "json", ".", "get", "(", "'data'", ")", "return", "datapoint_class", "(", "data", ",", "session", ")", "return", "session", ".", "post", "(", "self", ".", "_base_url", ",", "CB", ".", "json", "(", "201", ",", "_process", ")", ",", "json", "=", "attributes", ")" ]
Post a new reading to a timeseries. A reading is comprised of a `port`, a `value` and a timestamp. A port is like a tag for the given reading and gives an indication of the meaning of the value. The value of the reading can be any valid json value. The timestamp is considered the time the reading was taken, as opposed to the `created` time of the data-point which represents when the data-point was stored in the Helium API. If the timestamp is not given the server will construct a timestamp upon receiving the new reading. Args: port(string): The port to use for the new data-point value: The value for the new data-point Keyword Args: timestamp(:class:`datetime`): An optional :class:`datetime` object
[ "Post", "a", "new", "reading", "to", "a", "timeseries", "." ]
python
train
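The timeseries `create` record above assembles an attributes payload and only includes a timestamp when the caller provides one. A sketch of that assembly; the `to_iso_date` helper and the JSON-API-style wrapper shown here are simplified assumptions, not the exact output of helium-python's `build_request_body`.

from datetime import datetime, timezone

def to_iso_date(ts):
    # simplified stand-in: render a datetime as an ISO-8601 string
    return ts.astimezone(timezone.utc).isoformat()

def build_datapoint(port, value, timestamp=None):
    attributes = {'port': port, 'value': value}
    if timestamp is not None:
        # only include a timestamp when the caller supplies one;
        # otherwise the server assigns its own
        attributes['timestamp'] = to_iso_date(timestamp)
    # assumed JSON-API-style body shape for illustration only
    return {'data': {'type': 'data-point', 'attributes': attributes}}

print(build_datapoint('temperature', 21.5))
print(build_datapoint('temperature', 21.5, datetime.now(timezone.utc)))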
Diviyan-Kalainathan/CausalDiscoveryToolbox
cdt/independence/graph/model.py
https://github.com/Diviyan-Kalainathan/CausalDiscoveryToolbox/blob/be228b078ba9eb76c01b3ccba9a1c0ad9e9e5ed1/cdt/independence/graph/model.py#L83-L100
def run_feature_selection(self, df_data, target, idx=0, **kwargs): """Run feature selection for one node: wrapper around ``self.predict_features``. Args: df_data (pandas.DataFrame): All the observational data target (str): Name of the target variable idx (int): (optional) For printing purposes Returns: list: scores of each feature relatively to the target """ list_features = list(df_data.columns.values) list_features.remove(target) df_target = pd.DataFrame(df_data[target], columns=[target]) df_features = df_data[list_features] return self.predict_features(df_features, df_target, idx=idx, **kwargs)
[ "def", "run_feature_selection", "(", "self", ",", "df_data", ",", "target", ",", "idx", "=", "0", ",", "*", "*", "kwargs", ")", ":", "list_features", "=", "list", "(", "df_data", ".", "columns", ".", "values", ")", "list_features", ".", "remove", "(", "target", ")", "df_target", "=", "pd", ".", "DataFrame", "(", "df_data", "[", "target", "]", ",", "columns", "=", "[", "target", "]", ")", "df_features", "=", "df_data", "[", "list_features", "]", "return", "self", ".", "predict_features", "(", "df_features", ",", "df_target", ",", "idx", "=", "idx", ",", "*", "*", "kwargs", ")" ]
Run feature selection for one node: wrapper around ``self.predict_features``. Args: df_data (pandas.DataFrame): All the observational data target (str): Name of the target variable idx (int): (optional) For printing purposes Returns: list: scores of each feature relative to the target
[ "Run", "feature", "selection", "for", "one", "node", ":", "wrapper", "around", "self", ".", "predict_features", "." ]
python
valid
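The feature-selection wrapper above splits the observational data into a one-column target frame and a frame with every other column. A minimal pandas sketch of that split; the column names are invented.

import pandas as pd

df_data = pd.DataFrame({'A': [1, 2, 3], 'B': [4, 5, 6], 'C': [7, 8, 9]})
target = 'B'

list_features = list(df_data.columns.values)
list_features.remove(target)                      # every column except the target
df_target = pd.DataFrame(df_data[target], columns=[target])
df_features = df_data[list_features]

print(df_features.columns.tolist())  # ['A', 'C']
print(df_target.columns.tolist())    # ['B']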
python-tap/tappy
tap/main.py
https://github.com/python-tap/tappy/blob/79a749313c61ea94ee49d67ba6a1534974bc03aa/tap/main.py#L21-L28
def build_suite(args): """Build a test suite by loading TAP files or a TAP stream.""" loader = Loader() if len(args.files) == 0 or args.files[0] == "-": suite = loader.load_suite_from_stdin() else: suite = loader.load(args.files) return suite
[ "def", "build_suite", "(", "args", ")", ":", "loader", "=", "Loader", "(", ")", "if", "len", "(", "args", ".", "files", ")", "==", "0", "or", "args", ".", "files", "[", "0", "]", "==", "\"-\"", ":", "suite", "=", "loader", ".", "load_suite_from_stdin", "(", ")", "else", ":", "suite", "=", "loader", ".", "load", "(", "args", ".", "files", ")", "return", "suite" ]
Build a test suite by loading TAP files or a TAP stream.
[ "Build", "a", "test", "suite", "by", "loading", "TAP", "files", "or", "a", "TAP", "stream", "." ]
python
train
user-cont/conu
conu/utils/filesystem.py
https://github.com/user-cont/conu/blob/08caae7bb6bdd265b55bb106c3da6a7946a5a352/conu/utils/filesystem.py#L177-L190
def _set_ownership(self): """ set ownership of the directory: user and group :return: None """ if self.owner or self.group: args = ( self.path, self.owner if self.owner else -1, self.group if self.group else -1, ) logger.debug("changing ownership bits of %s to %s", self.path, args) os.chown(*args)
[ "def", "_set_ownership", "(", "self", ")", ":", "if", "self", ".", "owner", "or", "self", ".", "group", ":", "args", "=", "(", "self", ".", "path", ",", "self", ".", "owner", "if", "self", ".", "owner", "else", "-", "1", ",", "self", ".", "group", "if", "self", ".", "group", "else", "-", "1", ",", ")", "logger", ".", "debug", "(", "\"changing ownership bits of %s to %s\"", ",", "self", ".", "path", ",", "args", ")", "os", ".", "chown", "(", "*", "args", ")" ]
set ownership of the directory: user and group :return: None
[ "set", "ownership", "of", "the", "directory", ":", "user", "and", "group" ]
python
train
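In `_set_ownership` above, passing -1 to `os.chown` leaves that id unchanged, which is how an unset owner or group is handled. A small POSIX-only demonstration against a temporary file; changing ownership of real paths usually needs elevated privileges.

import os
import tempfile

owner = None          # pretend only the group was configured
group = os.getgid()   # current group id, so the call is a safe no-op-style example

with tempfile.NamedTemporaryFile() as tmp:
    if owner or group:
        args = (tmp.name,
                owner if owner else -1,   # -1 keeps the current owner
                group if group else -1)   # -1 keeps the current group
        os.chown(*args)
        print("chown args:", args)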
polyaxon/polyaxon-schemas
polyaxon_schemas/specs/group.py
https://github.com/polyaxon/polyaxon-schemas/blob/a5360240316f4bbccfcdcb97a489cab14458277a/polyaxon_schemas/specs/group.py#L75-L80
def get_experiment_spec(self, matrix_declaration): """Returns an experiment spec for this group spec and the given matrix declaration.""" parsed_data = Parser.parse(self, self._data, matrix_declaration) del parsed_data[self.HP_TUNING] validator.validate(spec=self, data=parsed_data) return ExperimentSpecification(values=[parsed_data, {'kind': self._EXPERIMENT}])
[ "def", "get_experiment_spec", "(", "self", ",", "matrix_declaration", ")", ":", "parsed_data", "=", "Parser", ".", "parse", "(", "self", ",", "self", ".", "_data", ",", "matrix_declaration", ")", "del", "parsed_data", "[", "self", ".", "HP_TUNING", "]", "validator", ".", "validate", "(", "spec", "=", "self", ",", "data", "=", "parsed_data", ")", "return", "ExperimentSpecification", "(", "values", "=", "[", "parsed_data", ",", "{", "'kind'", ":", "self", ".", "_EXPERIMENT", "}", "]", ")" ]
Returns an experiment spec for this group spec and the given matrix declaration.
[ "Returns", "an", "experiment", "spec", "for", "this", "group", "spec", "and", "the", "given", "matrix", "declaration", "." ]
python
train
pipermerriam/flex
flex/core.py
https://github.com/pipermerriam/flex/blob/233f8149fb851a6255753bcec948cb6fefb2723b/flex/core.py#L121-L138
def validate_api_response(schema, raw_response, request_method='get', raw_request=None): """ Validate the response of an api call against a swagger schema. """ request = None if raw_request is not None: request = normalize_request(raw_request) response = None if raw_response is not None: response = normalize_response(raw_response, request=request) if response is not None: validate_response( response=response, request_method=request_method, schema=schema )
[ "def", "validate_api_response", "(", "schema", ",", "raw_response", ",", "request_method", "=", "'get'", ",", "raw_request", "=", "None", ")", ":", "request", "=", "None", "if", "raw_request", "is", "not", "None", ":", "request", "=", "normalize_request", "(", "raw_request", ")", "response", "=", "None", "if", "raw_response", "is", "not", "None", ":", "response", "=", "normalize_response", "(", "raw_response", ",", "request", "=", "request", ")", "if", "response", "is", "not", "None", ":", "validate_response", "(", "response", "=", "response", ",", "request_method", "=", "request_method", ",", "schema", "=", "schema", ")" ]
Validate the response of an api call against a swagger schema.
[ "Validate", "the", "response", "of", "an", "api", "call", "against", "a", "swagger", "schema", "." ]
python
train
materialsproject/pymatgen-db
matgendb/dbgroup.py
https://github.com/materialsproject/pymatgen-db/blob/02e4351c2cea431407644f49193e8bf43ed39b9a/matgendb/dbgroup.py#L110-L126
def _expand(self, name): """Perform real work of `expand()` function.""" cfg = self._d[name] if cfg.collection is None: base_coll = '' else: base_coll = cfg.collection + self.SEP qe = self._get_qe(name, cfg) coll, db = qe.collection, qe.db cur_coll = coll.name for coll_name in db.collection_names(): if coll_name == cur_coll or not coll_name.startswith(base_coll): continue ex_cfg = cfg.copy() ex_cfg.collection = coll_name group_name = name + self.SEP + coll_name[len(base_coll):] self.add(group_name, ex_cfg, expand=False)
[ "def", "_expand", "(", "self", ",", "name", ")", ":", "cfg", "=", "self", ".", "_d", "[", "name", "]", "if", "cfg", ".", "collection", "is", "None", ":", "base_coll", "=", "''", "else", ":", "base_coll", "=", "cfg", ".", "collection", "+", "self", ".", "SEP", "qe", "=", "self", ".", "_get_qe", "(", "name", ",", "cfg", ")", "coll", ",", "db", "=", "qe", ".", "collection", ",", "qe", ".", "db", "cur_coll", "=", "coll", ".", "name", "for", "coll_name", "in", "db", ".", "collection_names", "(", ")", ":", "if", "coll_name", "==", "cur_coll", "or", "not", "coll_name", ".", "startswith", "(", "base_coll", ")", ":", "continue", "ex_cfg", "=", "cfg", ".", "copy", "(", ")", "ex_cfg", ".", "collection", "=", "coll_name", "group_name", "=", "name", "+", "self", ".", "SEP", "+", "coll_name", "[", "len", "(", "base_coll", ")", ":", "]", "self", ".", "add", "(", "group_name", ",", "ex_cfg", ",", "expand", "=", "False", ")" ]
Perform real work of `expand()` function.
[ "Perform", "real", "work", "of", "expand", "()", "function", "." ]
python
train
maas/python-libmaas
maas/client/viscera/nodes.py
https://github.com/maas/python-libmaas/blob/4092c68ef7fb1753efc843569848e2bcc3415002/maas/client/viscera/nodes.py#L93-L101
def as_machine(self): """Convert to a `Machine` object. `node_type` must be `NodeType.MACHINE`. """ if self.node_type != NodeType.MACHINE: raise ValueError( 'Cannot convert to `Machine`, node_type is not a machine.') return self._origin.Machine(self._data)
[ "def", "as_machine", "(", "self", ")", ":", "if", "self", ".", "node_type", "!=", "NodeType", ".", "MACHINE", ":", "raise", "ValueError", "(", "'Cannot convert to `Machine`, node_type is not a machine.'", ")", "return", "self", ".", "_origin", ".", "Machine", "(", "self", ".", "_data", ")" ]
Convert to a `Machine` object. `node_type` must be `NodeType.MACHINE`.
[ "Convert", "to", "a", "Machine", "object", "." ]
python
train
chainer/chainerui
chainerui/views/project.py
https://github.com/chainer/chainerui/blob/87ad25e875bc332bfdad20197fd3d0cb81a078e8/chainerui/views/project.py#L73-L95
def put(self, id): """put.""" project = db.session.query(Project).filter_by(id=id).first() if project is None: return jsonify({ 'project': None, 'message': 'No interface defined for URL.' }), 404 request_project = request.get_json().get('project') project_name = request_project.get('name', None) if project_name is not None: project.name = project_name db.session.add(project) db.session.commit() return jsonify({ 'project': project.serialize })
[ "def", "put", "(", "self", ",", "id", ")", ":", "project", "=", "db", ".", "session", ".", "query", "(", "Project", ")", ".", "filter_by", "(", "id", "=", "id", ")", ".", "first", "(", ")", "if", "project", "is", "None", ":", "return", "jsonify", "(", "{", "'project'", ":", "None", ",", "'message'", ":", "'No interface defined for URL.'", "}", ")", ",", "404", "request_project", "=", "request", ".", "get_json", "(", ")", ".", "get", "(", "'project'", ")", "project_name", "=", "request_project", ".", "get", "(", "'name'", ",", "None", ")", "if", "project_name", "is", "not", "None", ":", "project", ".", "name", "=", "project_name", "db", ".", "session", ".", "add", "(", "project", ")", "db", ".", "session", ".", "commit", "(", ")", "return", "jsonify", "(", "{", "'project'", ":", "project", ".", "serialize", "}", ")" ]
put.
[ "put", "." ]
python
train
Kortemme-Lab/pull_into_place
pull_into_place/pipeline.py
https://github.com/Kortemme-Lab/pull_into_place/blob/247f303100a612cc90cf31c86e4fe5052eb28c8d/pull_into_place/pipeline.py#L811-L842
def root_from_dir(directory, recurse=True): """ Similar to workspace_from_dir, but this returns the root directory of a workspace rather than a workspace object. """ directory = os.path.abspath(directory) pickle_path = os.path.join(directory, 'workspace.pkl') # Make sure the given directory contains a 'workspace' file. This file is # needed to instantiate the right kind of workspace. if not os.path.exists(pickle_path): if recurse: parent_dir = os.path.dirname(directory) # Keep looking for a workspace as long as we haven't hit the root # of the file system. If an exception is raised, that means no # workspace was found. Catch and re-raise the exception so that # the name of the directory reported in the exception is meaningful # to the user. try: return root_from_dir(parent_dir, parent_dir != '/') except WorkspaceNotFound: raise WorkspaceNotFound(directory) else: raise WorkspaceNotFound(directory) # Return the directory in which the pkl file was found. return pickle_path[:-len('workspace.pkl')]
[ "def", "root_from_dir", "(", "directory", ",", "recurse", "=", "True", ")", ":", "directory", "=", "os", ".", "path", ".", "abspath", "(", "directory", ")", "pickle_path", "=", "os", ".", "path", ".", "join", "(", "directory", ",", "'workspace.pkl'", ")", "# Make sure the given directory contains a 'workspace' file. This file is", "# needed to instantiate the right kind of workspace.", "if", "not", "os", ".", "path", ".", "exists", "(", "pickle_path", ")", ":", "if", "recurse", ":", "parent_dir", "=", "os", ".", "path", ".", "dirname", "(", "directory", ")", "# Keep looking for a workspace as long as we haven't hit the root", "# of the file system. If an exception is raised, that means no", "# workspace was found. Catch and re-raise the exception so that", "# the name of the directory reported in the exception is meaningful", "# to the user.", "try", ":", "return", "root_from_dir", "(", "parent_dir", ",", "parent_dir", "!=", "'/'", ")", "except", "WorkspaceNotFound", ":", "raise", "WorkspaceNotFound", "(", "directory", ")", "else", ":", "raise", "WorkspaceNotFound", "(", "directory", ")", "# Return the directory in which the pkl file was found.", "return", "pickle_path", "[", ":", "-", "len", "(", "'workspace.pkl'", ")", "]" ]
Similar to workspace_from_dir, but this returns the root directory of a workspace rather than a workspace object.
[ "Similar", "to", "workspace_from_dir", "but", "this", "returns", "the", "root", "directory", "of", "a", "workspace", "rather", "than", "a", "workspace", "object", "." ]
python
train
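`root_from_dir` above walks up the directory tree until it finds `workspace.pkl`, stopping at the filesystem root. A stripped-down standalone version of that upward search; the custom exception is replaced with a plain `FileNotFoundError` for the sketch.

import os

def find_root(directory, marker='workspace.pkl'):
    directory = os.path.abspath(directory)
    while True:
        if os.path.exists(os.path.join(directory, marker)):
            return directory                   # found the workspace root
        parent = os.path.dirname(directory)
        if parent == directory:                # reached the filesystem root
            raise FileNotFoundError(directory)
        directory = parent

# Example: search upward from the current working directory.
try:
    print(find_root(os.getcwd()))
except FileNotFoundError as exc:
    print("no workspace above", exc)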
NoneGG/aredis
aredis/lock.py
https://github.com/NoneGG/aredis/blob/204caad740ac13e5760d46444a2ba7632982a046/aredis/lock.py#L309-L350
async def acquire(self, blocking=None, blocking_timeout=None): """ Use Redis to hold a shared, distributed lock named ``name``. Returns True once the lock is acquired. If ``blocking`` is False, always return immediately. If the lock was acquired, return True, otherwise return False. ``blocking_timeout`` specifies the maximum number of seconds to wait trying to acquire the lock. It should not be greater than expire time of the lock """ sleep = self.sleep token = b(uuid.uuid1().hex) if blocking is None: blocking = self.blocking if blocking_timeout is None: blocking_timeout = self.blocking_timeout blocking_timeout = blocking_timeout or self.timeout stop_trying_at = mod_time.time() + min(blocking_timeout, self.timeout) while True: if await self.do_acquire(token): lock_acquired_at = mod_time.time() if await self.check_lock_in_slaves(token): check_finished_at = mod_time.time() # if time expends on acquiring lock is greater than given time # the lock should be released manually if check_finished_at > stop_trying_at: await self.do_release(token) return False self.local.token = token # validity time is considered to be the # initial validity time minus the time elapsed during check await self.do_extend(lock_acquired_at - check_finished_at) return True else: await self.do_release(token) return False if not blocking or mod_time.time() > stop_trying_at: return False await asyncio.sleep(sleep, loop=self.redis.connection_pool.loop)
[ "async", "def", "acquire", "(", "self", ",", "blocking", "=", "None", ",", "blocking_timeout", "=", "None", ")", ":", "sleep", "=", "self", ".", "sleep", "token", "=", "b", "(", "uuid", ".", "uuid1", "(", ")", ".", "hex", ")", "if", "blocking", "is", "None", ":", "blocking", "=", "self", ".", "blocking", "if", "blocking_timeout", "is", "None", ":", "blocking_timeout", "=", "self", ".", "blocking_timeout", "blocking_timeout", "=", "blocking_timeout", "or", "self", ".", "timeout", "stop_trying_at", "=", "mod_time", ".", "time", "(", ")", "+", "min", "(", "blocking_timeout", ",", "self", ".", "timeout", ")", "while", "True", ":", "if", "await", "self", ".", "do_acquire", "(", "token", ")", ":", "lock_acquired_at", "=", "mod_time", ".", "time", "(", ")", "if", "await", "self", ".", "check_lock_in_slaves", "(", "token", ")", ":", "check_finished_at", "=", "mod_time", ".", "time", "(", ")", "# if time expends on acquiring lock is greater than given time", "# the lock should be released manually", "if", "check_finished_at", ">", "stop_trying_at", ":", "await", "self", ".", "do_release", "(", "token", ")", "return", "False", "self", ".", "local", ".", "token", "=", "token", "# validity time is considered to be the", "# initial validity time minus the time elapsed during check", "await", "self", ".", "do_extend", "(", "lock_acquired_at", "-", "check_finished_at", ")", "return", "True", "else", ":", "await", "self", ".", "do_release", "(", "token", ")", "return", "False", "if", "not", "blocking", "or", "mod_time", ".", "time", "(", ")", ">", "stop_trying_at", ":", "return", "False", "await", "asyncio", ".", "sleep", "(", "sleep", ",", "loop", "=", "self", ".", "redis", ".", "connection_pool", ".", "loop", ")" ]
Use Redis to hold a shared, distributed lock named ``name``. Returns True once the lock is acquired. If ``blocking`` is False, always return immediately. If the lock was acquired, return True, otherwise return False. ``blocking_timeout`` specifies the maximum number of seconds to wait trying to acquire the lock. It should not be greater than the expire time of the lock
[ "Use", "Redis", "to", "hold", "a", "shared", "distributed", "lock", "named", "name", ".", "Returns", "True", "once", "the", "lock", "is", "acquired", "." ]
python
train
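The `acquire` coroutine above is a retry loop bounded by `blocking_timeout`, plus a slave-consistency check once the lock is taken. A toy asyncio sketch of just the bounded retry part; the fake `do_acquire` below succeeds on the third attempt and is not the Redis call.

import asyncio
import time

attempts = {'n': 0}

async def do_acquire(token):
    # fake acquire: fail twice, then succeed
    attempts['n'] += 1
    return attempts['n'] >= 3

async def acquire(token, blocking_timeout=1.0, sleep=0.1):
    stop_trying_at = time.time() + blocking_timeout
    while True:
        if await do_acquire(token):
            return True
        if time.time() > stop_trying_at:
            return False                 # gave up within the timeout
        await asyncio.sleep(sleep)

print(asyncio.run(acquire(b'token')))    # True after three attempts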
eight04/pyAPNG
apng/__init__.py
https://github.com/eight04/pyAPNG/blob/b4d2927f7892a1de967b5cf57d434ed65f6a017e/apng/__init__.py#L208-L224
def open_any(cls, file): """Open an image file. If the image is not PNG format, it would convert the image into PNG with Pillow module. If the module is not installed, :class:`ImportError` would be raised. :arg file: Input file. :type file: path-like or file-like :rtype: :class:`PNG` """ with open_file(file, "rb") as f: header = f.read(8) f.seek(0) if header != PNG_SIGN: b = file_to_png(f) else: b = f.read() return cls.from_bytes(b)
[ "def", "open_any", "(", "cls", ",", "file", ")", ":", "with", "open_file", "(", "file", ",", "\"rb\"", ")", "as", "f", ":", "header", "=", "f", ".", "read", "(", "8", ")", "f", ".", "seek", "(", "0", ")", "if", "header", "!=", "PNG_SIGN", ":", "b", "=", "file_to_png", "(", "f", ")", "else", ":", "b", "=", "f", ".", "read", "(", ")", "return", "cls", ".", "from_bytes", "(", "b", ")" ]
Open an image file. If the image is not PNG format, it would convert the image into PNG with Pillow module. If the module is not installed, :class:`ImportError` would be raised. :arg file: Input file. :type file: path-like or file-like :rtype: :class:`PNG`
[ "Open", "an", "image", "file", ".", "If", "the", "image", "is", "not", "PNG", "format", "it", "would", "convert", "the", "image", "into", "PNG", "with", "Pillow", "module", ".", "If", "the", "module", "is", "not", "installed", ":", "class", ":", "ImportError", "would", "be", "raised", ".", ":", "arg", "file", ":", "Input", "file", ".", ":", "type", "file", ":", "path", "-", "like", "or", "file", "-", "like", ":", "rtype", ":", ":", "class", ":", "PNG" ]
python
train
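`open_any` above decides whether conversion is needed by comparing the first eight bytes of the file with the PNG signature. The check on its own; the signature constant is the standard PNG magic written out here rather than imported from apng.

PNG_SIGN = b'\x89PNG\r\n\x1a\n'   # standard 8-byte PNG signature

def is_png(path):
    with open(path, 'rb') as f:
        header = f.read(8)
    return header == PNG_SIGN

# Example usage (path is hypothetical):
# print(is_png('some_image.png'))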
willkg/everett
everett/ext/yamlfile.py
https://github.com/willkg/everett/blob/5653134af59f439d2b33f3939fab2b8544428f11/everett/ext/yamlfile.py#L159-L166
def get(self, key, namespace=None): """Retrieve value for key.""" if not self.path: return NO_VALUE logger.debug('Searching %r for key: %s, namepsace: %s', self, key, namespace) full_key = generate_uppercase_key(key, namespace) return get_key_from_envs(self.cfg, full_key)
[ "def", "get", "(", "self", ",", "key", ",", "namespace", "=", "None", ")", ":", "if", "not", "self", ".", "path", ":", "return", "NO_VALUE", "logger", ".", "debug", "(", "'Searching %r for key: %s, namepsace: %s'", ",", "self", ",", "key", ",", "namespace", ")", "full_key", "=", "generate_uppercase_key", "(", "key", ",", "namespace", ")", "return", "get_key_from_envs", "(", "self", ".", "cfg", ",", "full_key", ")" ]
Retrieve value for key.
[ "Retrieve", "value", "for", "key", "." ]
python
train
MisterWil/skybellpy
skybellpy/__init__.py
https://github.com/MisterWil/skybellpy/blob/ac966d9f590cda7654f6de7eecc94e2103459eef/skybellpy/__init__.py#L229-L241
def _load_cache(self): """Load existing cache and merge for updating if required.""" if not self._disable_cache: if os.path.exists(self._cache_path): _LOGGER.debug("Cache found at: %s", self._cache_path) if os.path.getsize(self._cache_path) > 0: loaded_cache = UTILS.load_cache(self._cache_path) UTILS.update(self._cache, loaded_cache) else: _LOGGER.debug("Cache file is empty. Removing it.") os.remove(self._cache_path) self._save_cache()
[ "def", "_load_cache", "(", "self", ")", ":", "if", "not", "self", ".", "_disable_cache", ":", "if", "os", ".", "path", ".", "exists", "(", "self", ".", "_cache_path", ")", ":", "_LOGGER", ".", "debug", "(", "\"Cache found at: %s\"", ",", "self", ".", "_cache_path", ")", "if", "os", ".", "path", ".", "getsize", "(", "self", ".", "_cache_path", ")", ">", "0", ":", "loaded_cache", "=", "UTILS", ".", "load_cache", "(", "self", ".", "_cache_path", ")", "UTILS", ".", "update", "(", "self", ".", "_cache", ",", "loaded_cache", ")", "else", ":", "_LOGGER", ".", "debug", "(", "\"Cache file is empty. Removing it.\"", ")", "os", ".", "remove", "(", "self", ".", "_cache_path", ")", "self", ".", "_save_cache", "(", ")" ]
Load existing cache and merge for updating if required.
[ "Load", "existing", "cache", "and", "merge", "for", "updating", "if", "required", "." ]
python
train
Azure/azure-multiapi-storage-python
azure/multiapi/storage/v2015_04_05/table/tableservice.py
https://github.com/Azure/azure-multiapi-storage-python/blob/bd5482547f993c6eb56fd09070e15c2e9616e440/azure/multiapi/storage/v2015_04_05/table/tableservice.py#L385-L419
def _list_tables(self, max_results=None, marker=None, timeout=None): ''' Returns a list of tables under the specified account. Makes a single list request to the service. Used internally by the list_tables method. :param int max_results: The maximum number of tables to return. A single list request may return up to 1000 tables and potentially a continuation token which should be followed to get additional resutls. :param marker: A dictionary which identifies the portion of the query to be returned with the next query operation. The operation returns a next_marker element within the response body if the list returned was not complete. This value may then be used as a query parameter in a subsequent call to request the next portion of the list of queues. The marker value is opaque to the client. :type marker: obj :param int timeout: The server timeout, expressed in seconds. :return: A list of tables, potentially with a next_marker property. :rtype: list of :class:`~azure.storage.models.table.Table`: ''' request = HTTPRequest() request.method = 'GET' request.host = self._get_host() request.path = '/Tables' request.headers = [('Accept', TablePayloadFormat.JSON_NO_METADATA)] request.query = [ ('$top', _int_to_str(max_results)), ('NextTableName', _to_str(marker)), ('timeout', _int_to_str(timeout)), ] response = self._perform_request(request) return _convert_json_response_to_tables(response)
[ "def", "_list_tables", "(", "self", ",", "max_results", "=", "None", ",", "marker", "=", "None", ",", "timeout", "=", "None", ")", ":", "request", "=", "HTTPRequest", "(", ")", "request", ".", "method", "=", "'GET'", "request", ".", "host", "=", "self", ".", "_get_host", "(", ")", "request", ".", "path", "=", "'/Tables'", "request", ".", "headers", "=", "[", "(", "'Accept'", ",", "TablePayloadFormat", ".", "JSON_NO_METADATA", ")", "]", "request", ".", "query", "=", "[", "(", "'$top'", ",", "_int_to_str", "(", "max_results", ")", ")", ",", "(", "'NextTableName'", ",", "_to_str", "(", "marker", ")", ")", ",", "(", "'timeout'", ",", "_int_to_str", "(", "timeout", ")", ")", ",", "]", "response", "=", "self", ".", "_perform_request", "(", "request", ")", "return", "_convert_json_response_to_tables", "(", "response", ")" ]
Returns a list of tables under the specified account. Makes a single list request to the service. Used internally by the list_tables method. :param int max_results: The maximum number of tables to return. A single list request may return up to 1000 tables and potentially a continuation token which should be followed to get additional results. :param marker: A dictionary which identifies the portion of the query to be returned with the next query operation. The operation returns a next_marker element within the response body if the list returned was not complete. This value may then be used as a query parameter in a subsequent call to request the next portion of the list of queues. The marker value is opaque to the client. :type marker: obj :param int timeout: The server timeout, expressed in seconds. :return: A list of tables, potentially with a next_marker property. :rtype: list of :class:`~azure.storage.models.table.Table`:
[ "Returns", "a", "list", "of", "tables", "under", "the", "specified", "account", ".", "Makes", "a", "single", "list", "request", "to", "the", "service", ".", "Used", "internally", "by", "the", "list_tables", "method", "." ]
python
train
ChrisTimperley/Kaskara
python/kaskara/insertions.py
https://github.com/ChrisTimperley/Kaskara/blob/3d182d95b2938508e5990eddd30321be15e2f2ef/python/kaskara/insertions.py#L98-L110
def at_line(self, line: FileLine) -> Iterator[InsertionPoint]: """ Returns an iterator over all of the insertion points located at a given line. """ logger.debug("finding insertion points at line: %s", str(line)) filename = line.filename # type: str line_num = line.num # type: int for ins in self.in_file(filename): if line_num == ins.location.line: logger.debug("found insertion point at line [%s]: %s", str(line), ins) yield ins
[ "def", "at_line", "(", "self", ",", "line", ":", "FileLine", ")", "->", "Iterator", "[", "InsertionPoint", "]", ":", "logger", ".", "debug", "(", "\"finding insertion points at line: %s\"", ",", "str", "(", "line", ")", ")", "filename", "=", "line", ".", "filename", "# type: str", "line_num", "=", "line", ".", "num", "# type: int", "for", "ins", "in", "self", ".", "in_file", "(", "filename", ")", ":", "if", "line_num", "==", "ins", ".", "location", ".", "line", ":", "logger", ".", "debug", "(", "\"found insertion point at line [%s]: %s\"", ",", "str", "(", "line", ")", ",", "ins", ")", "yield", "ins" ]
Returns an iterator over all of the insertion points located at a given line.
[ "Returns", "an", "iterator", "over", "all", "of", "the", "insertion", "points", "located", "at", "a", "given", "line", "." ]
python
train
fprimex/zdesk
zdesk/zdesk_api.py
https://github.com/fprimex/zdesk/blob/851611c13b4d530e9df31390b3ec709baf0a0188/zdesk/zdesk_api.py#L3924-L3928
def user_identities(self, user_id, **kwargs): "https://developer.zendesk.com/rest_api/docs/core/user_identities#list-identities" api_path = "/api/v2/users/{user_id}/identities.json" api_path = api_path.format(user_id=user_id) return self.call(api_path, **kwargs)
[ "def", "user_identities", "(", "self", ",", "user_id", ",", "*", "*", "kwargs", ")", ":", "api_path", "=", "\"/api/v2/users/{user_id}/identities.json\"", "api_path", "=", "api_path", ".", "format", "(", "user_id", "=", "user_id", ")", "return", "self", ".", "call", "(", "api_path", ",", "*", "*", "kwargs", ")" ]
https://developer.zendesk.com/rest_api/docs/core/user_identities#list-identities
[ "https", ":", "//", "developer", ".", "zendesk", ".", "com", "/", "rest_api", "/", "docs", "/", "core", "/", "user_identities#list", "-", "identities" ]
python
train
zaturox/glin
glin/zmq/messages.py
https://github.com/zaturox/glin/blob/55214a579c4e4b4d74765f3f6aa2eb815bac1c3b/glin/zmq/messages.py#L79-L85
def uint8_3(self, val1, val2, val3): """append a frame containing 3 uint8""" try: self.msg += [pack("BBB", val1, val2, val3)] except struct.error: raise ValueError("Expected uint8") return self
[ "def", "uint8_3", "(", "self", ",", "val1", ",", "val2", ",", "val3", ")", ":", "try", ":", "self", ".", "msg", "+=", "[", "pack", "(", "\"BBB\"", ",", "val1", ",", "val2", ",", "val3", ")", "]", "except", "struct", ".", "error", ":", "raise", "ValueError", "(", "\"Expected uint8\"", ")", "return", "self" ]
append a frame containing 3 uint8
[ "append", "a", "frame", "containing", "3", "uint8" ]
python
train
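`uint8_3` above relies on `struct.pack("BBB", ...)` raising `struct.error` for out-of-range values and converts that into a `ValueError`. A quick standalone check of that behaviour.

import struct

def pack_uint8_3(a, b, c):
    try:
        return struct.pack("BBB", a, b, c)   # three unsigned bytes
    except struct.error:
        raise ValueError("Expected uint8")

print(pack_uint8_3(255, 0, 128))   # b'\xff\x00\x80'
try:
    pack_uint8_3(256, 0, 0)        # 256 does not fit in a uint8
except ValueError as exc:
    print(exc)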
marrow/WebCore
web/server/tornado_.py
https://github.com/marrow/WebCore/blob/38d50f8022ca62976a1e5ff23f7714bd647b6532/web/server/tornado_.py#L18-L34
def serve(application, host='127.0.0.1', port=8080, **options): """Tornado's HTTPServer. This is a high quality asynchronous server with many options. For details, please visit: http://www.tornadoweb.org/en/stable/httpserver.html#http-server """ # Wrap our our WSGI application (potentially stack) in a Tornado adapter. container = tornado.wsgi.WSGIContainer(application) # Spin up a Tornado HTTP server using this container. http_server = tornado.httpserver.HTTPServer(container, **options) http_server.listen(int(port), host) # Start and block on the Tornado IO loop. tornado.ioloop.IOLoop.instance().start()
[ "def", "serve", "(", "application", ",", "host", "=", "'127.0.0.1'", ",", "port", "=", "8080", ",", "*", "*", "options", ")", ":", "# Wrap our our WSGI application (potentially stack) in a Tornado adapter.", "container", "=", "tornado", ".", "wsgi", ".", "WSGIContainer", "(", "application", ")", "# Spin up a Tornado HTTP server using this container.", "http_server", "=", "tornado", ".", "httpserver", ".", "HTTPServer", "(", "container", ",", "*", "*", "options", ")", "http_server", ".", "listen", "(", "int", "(", "port", ")", ",", "host", ")", "# Start and block on the Tornado IO loop.", "tornado", ".", "ioloop", ".", "IOLoop", ".", "instance", "(", ")", ".", "start", "(", ")" ]
Tornado's HTTPServer. This is a high quality asynchronous server with many options. For details, please visit: http://www.tornadoweb.org/en/stable/httpserver.html#http-server
[ "Tornado", "s", "HTTPServer", ".", "This", "is", "a", "high", "quality", "asynchronous", "server", "with", "many", "options", ".", "For", "details", "please", "visit", ":", "http", ":", "//", "www", ".", "tornadoweb", ".", "org", "/", "en", "/", "stable", "/", "httpserver", ".", "html#http", "-", "server" ]
python
train
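`serve` above hands any WSGI callable to Tornado. Below is a minimal WSGI application of the kind it expects, run here with the stdlib wsgiref server so the sketch needs no extra dependencies.

from wsgiref.simple_server import make_server

def application(environ, start_response):
    # the smallest possible WSGI app: one plain-text response
    start_response('200 OK', [('Content-Type', 'text/plain')])
    return [b'hello from a WSGI app\n']

if __name__ == '__main__':
    # serve(application, '127.0.0.1', 8080) would hand this same callable to Tornado;
    # wsgiref is used here only to keep the example dependency-free.
    with make_server('127.0.0.1', 8080, application) as httpd:
        httpd.handle_request()   # serve a single request, then exit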
iotile/coretools
iotilecore/iotile/core/utilities/rcfile.py
https://github.com/iotile/coretools/blob/2d794f5f1346b841b0dcd16c9d284e9bf2f3c6ec/iotilecore/iotile/core/utilities/rcfile.py#L52-L61
def save(self): """Update the configuration file on disk with the current contents of self.contents. Previous contents are overwritten. """ try: with open(self.path, "w") as f: f.writelines(self.contents) except IOError as e: raise InternalError("Could not write RCFile contents", name=self.name, path=self.path, error_message=str(e))
[ "def", "save", "(", "self", ")", ":", "try", ":", "with", "open", "(", "self", ".", "path", ",", "\"w\"", ")", "as", "f", ":", "f", ".", "writelines", "(", "self", ".", "contents", ")", "except", "IOError", "as", "e", ":", "raise", "InternalError", "(", "\"Could not write RCFile contents\"", ",", "name", "=", "self", ".", "name", ",", "path", "=", "self", ".", "path", ",", "error_message", "=", "str", "(", "e", ")", ")" ]
Update the configuration file on disk with the current contents of self.contents. Previous contents are overwritten.
[ "Update", "the", "configuration", "file", "on", "disk", "with", "the", "current", "contents", "of", "self", ".", "contents", ".", "Previous", "contents", "are", "overwritten", "." ]
python
train
nesdis/djongo
djongo/base.py
https://github.com/nesdis/djongo/blob/7f9d79455cf030cb5eee0b822502c50a0d9d3abb/djongo/base.py#L122-L157
def get_connection_params(self): """ Default method to acquire database connection parameters. Sets connection parameters to match settings.py, and sets default values to blank fields. """ valid_settings = { 'NAME': 'name', 'HOST': 'host', 'PORT': 'port', 'USER': 'username', 'PASSWORD': 'password', 'AUTH_SOURCE': 'authSource', 'AUTH_MECHANISM': 'authMechanism', 'ENFORCE_SCHEMA': 'enforce_schema', 'REPLICASET': 'replicaset', 'SSL': 'ssl', 'SSL_CERTFILE': 'ssl_certfile', 'SSL_CA_CERTS': 'ssl_ca_certs', 'READ_PREFERENCE': 'read_preference' } connection_params = { 'name': 'djongo_test', 'enforce_schema': True } for setting_name, kwarg in valid_settings.items(): try: setting = self.settings_dict[setting_name] except KeyError: continue if setting or setting is False: connection_params[kwarg] = setting return connection_params
[ "def", "get_connection_params", "(", "self", ")", ":", "valid_settings", "=", "{", "'NAME'", ":", "'name'", ",", "'HOST'", ":", "'host'", ",", "'PORT'", ":", "'port'", ",", "'USER'", ":", "'username'", ",", "'PASSWORD'", ":", "'password'", ",", "'AUTH_SOURCE'", ":", "'authSource'", ",", "'AUTH_MECHANISM'", ":", "'authMechanism'", ",", "'ENFORCE_SCHEMA'", ":", "'enforce_schema'", ",", "'REPLICASET'", ":", "'replicaset'", ",", "'SSL'", ":", "'ssl'", ",", "'SSL_CERTFILE'", ":", "'ssl_certfile'", ",", "'SSL_CA_CERTS'", ":", "'ssl_ca_certs'", ",", "'READ_PREFERENCE'", ":", "'read_preference'", "}", "connection_params", "=", "{", "'name'", ":", "'djongo_test'", ",", "'enforce_schema'", ":", "True", "}", "for", "setting_name", ",", "kwarg", "in", "valid_settings", ".", "items", "(", ")", ":", "try", ":", "setting", "=", "self", ".", "settings_dict", "[", "setting_name", "]", "except", "KeyError", ":", "continue", "if", "setting", "or", "setting", "is", "False", ":", "connection_params", "[", "kwarg", "]", "=", "setting", "return", "connection_params" ]
Default method to acquire database connection parameters. Sets connection parameters to match settings.py, and sets default values to blank fields.
[ "Default", "method", "to", "acquire", "database", "connection", "parameters", "." ]
python
test
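The connection-parameter mapping above copies only settings that are present and non-empty, while explicitly letting `False` through for flags such as SSL. A reduced sketch of that filtering with a made-up settings dict.

valid_settings = {'NAME': 'name', 'HOST': 'host', 'PORT': 'port', 'SSL': 'ssl'}
settings_dict = {'NAME': 'mydb', 'HOST': '', 'SSL': False}   # invented example values

connection_params = {'name': 'djongo_test', 'enforce_schema': True}
for setting_name, kwarg in valid_settings.items():
    try:
        setting = settings_dict[setting_name]
    except KeyError:
        continue                     # setting not configured at all
    if setting or setting is False:  # skip empty strings/None, but keep False
        connection_params[kwarg] = setting

print(connection_params)   # {'name': 'mydb', 'enforce_schema': True, 'ssl': False}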
prthkms/alex
alex/support.py
https://github.com/prthkms/alex/blob/79d3167c877e94cc07db0aab55a35857fac67ef7/alex/support.py#L53-L61
def get_path(query): """get_path(query) -> pathname -- return the path found in a given, found by matching a regular expression. """ match = re.search(r'/(.*/)+(\S*(\.[\d\w]{1,4})?)', query) if(os.path.isfile(match.group()) or os.path.isdir(match.group())): return match.group() else: return None
[ "def", "get_path", "(", "query", ")", ":", "match", "=", "re", ".", "search", "(", "r'/(.*/)+(\\S*(\\.[\\d\\w]{1,4})?)'", ",", "query", ")", "if", "(", "os", ".", "path", ".", "isfile", "(", "match", ".", "group", "(", ")", ")", "or", "os", ".", "path", ".", "isdir", "(", "match", ".", "group", "(", ")", ")", ")", ":", "return", "match", ".", "group", "(", ")", "else", ":", "return", "None" ]
get_path(query) -> pathname -- return the path found in a given query, found by matching a regular expression.
[ "get_path", "(", "query", ")", "-", ">", "pathname", "--", "return", "the", "path", "found", "in", "a", "given", "found", "by", "matching", "a", "regular", "expression", "." ]
python
train
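`get_path` above extracts a filesystem path from free-form text with a regular expression and then checks that the path exists. The regex step on its own; the sample sentence is invented and the existence check is dropped so the sketch runs anywhere.

import re

query = "please open /home/user/docs/report.pdf for me"   # invented example
match = re.search(r'/(.*/)+(\S*(\.[\d\w]{1,4})?)', query)
if match:
    print(match.group())   # /home/user/docs/report.pdf
else:
    print("no path found")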
ecell/ecell4
ecell4/util/simulation.py
https://github.com/ecell/ecell4/blob/a4a1229661c39b2059adbbacae9090e5ba664e01/ecell4/util/simulation.py#L96-L318
def run_simulation( t, y0=None, volume=1.0, model=None, solver='ode', is_netfree=False, species_list=None, without_reset=False, return_type='matplotlib', opt_args=(), opt_kwargs=None, structures=None, observers=(), progressbar=0, rndseed=None, factory=None, ## deprecated **kwargs): """Run a simulation with the given model and plot the result on IPython notebook with matplotlib. Parameters ---------- t : array or Real A sequence of time points for which to solve for 'm'. y0 : dict Initial condition. volume : Real or Real3, optional A size of the simulation volume. Keyword 'v' is a shortcut for specifying 'volume'. model : Model, optional Keyword 'm' is a shortcut for specifying 'model'. solver : str, tuple or Factory, optional Solver type. Choose one from 'ode', 'gillespie', 'spatiocyte', 'meso', 'bd' and 'egfrd'. Default is 'ode'. When tuple is given, the first value must be str as explained above. All the rest is used as arguments for the corresponding factory class. Keyword 's' is a shortcut for specifying 'solver'. species_list : list of str, optional A list of names of Species observed. If None, log all. Default is None. return_type : str, optional Choose a type of return value from 'array', 'observer', 'matplotlib', 'nyaplot', 'world', 'dataframe', 'none' or None. If None or 'none', return and plot nothing. Default is 'matplotlib'. 'dataframe' requires numpy and pandas libraries. Keyword 'r' is a shortcut for specifying 'return_type'. opt_args: list, tuple or dict, optional Arguments for plotting. If return_type suggests no plotting, just ignored. opt_kwargs: dict, optional Arguments for plotting. If return_type suggests no plotting or opt_args is a list or tuple, just ignored. i.e.) viz.plot_number_observer(obs, *opt_args, **opt_kwargs) is_netfree: bool, optional Whether the model is netfree or not. When a model is given as an argument, just ignored. Default is False. structures : list or dict, optional A dictionary which gives pairs of a name and shape of structures. Not fully supported yet. observers : Observer or list, optional A list of extra observer references. progressbar : float, optional A timeout for a progress bar in seconds. When the value is not more than 0, show nothing. Default is 0. rndseed : int, optional A random seed for a simulation. This argument will be ignored when 'solver' is given NOT as a string. Returns ------- value : list, TimingNumberObserver, World or None Return a value suggested by ``return_type``. When ``return_type`` is 'array', return a time course data. When ``return_type`` is 'observer', return an observer. When ``return_type`` is 'world', return the last state of ``World``. Return nothing if else. 
""" y0 = y0 or {} opt_kwargs = opt_kwargs or {} structures = structures or {} for key, value in kwargs.items(): if key == 'r': return_type = value elif key == 'v': volume = value elif key == 's': solver = value elif key == 'm': model = value else: raise ValueError( "An unknown keyword argument was given [{}={}]".format(key, value)) import ecell4_base if unit.HAS_PINT: if isinstance(t, unit._Quantity): if unit.STRICT and not unit.check_dimensionality(t, '[time]'): raise ValueError("Cannot convert [t] from '{}' ({}) to '[time]'".format(t.dimensionality, t.u)) t = t.to_base_units().magnitude if isinstance(volume, unit._Quantity): if unit.STRICT: if isinstance(volume.magnitude, ecell4_base.core.Real3) and not unit.check_dimensionality(volume, '[length]'): raise ValueError("Cannot convert [volume] from '{}' ({}) to '[length]'".format( volume.dimensionality, volume.u)) elif not unit.check_dimensionality(volume, '[volume]'): raise ValueError("Cannot convert [volume] from '{}' ({}) to '[volume]'".format( volume.dimensionality, volume.u)) volume = volume.to_base_units().magnitude if not isinstance(solver, str) and isinstance(solver, collections.Iterable): solver = [ value.to_base_units().magnitude if isinstance(value, unit._Quantity) else value for value in solver] if factory is not None: # f = factory #XXX: will be deprecated in the future. just use solver raise ValueError( "Argument 'factory' is no longer available. Use 'solver' instead.") elif isinstance(solver, str): f = get_factory(solver) elif isinstance(solver, collections.Iterable): f = get_factory(*solver) else: f = solver if rndseed is not None: f = f.rng(ecell4_base.core.GSLRandomNumberGenerator(rndseed)) if model is None: model = get_model(is_netfree, without_reset) w = f.world(volume) edge_lengths = w.edge_lengths() if unit.HAS_PINT: y0 = y0.copy() for key, value in y0.items(): if isinstance(value, unit._Quantity): if not unit.STRICT: y0[key] = value.to_base_units().magnitude elif unit.check_dimensionality(value, '[substance]'): y0[key] = value.to_base_units().magnitude elif unit.check_dimensionality(value, '[concentration]'): volume = w.volume() if not isinstance(w, ecell4_base.spatiocyte.SpatiocyteWorld) else w.actual_volume() y0[key] = value.to_base_units().magnitude * volume else: raise ValueError( "Cannot convert a quantity for [{}] from '{}' ({}) to '[substance]'".format( key, value.dimensionality, value.u)) if not isinstance(w, ecell4_base.ode.ODEWorld): w.bind_to(model) for (name, shape) in (structures.items() if isinstance(structures, dict) else structures): if isinstance(shape, str): w.add_structure(ecell4_base.core.Species(name), get_shape(shape)) elif isinstance(shape, collections.Iterable): w.add_structure(ecell4_base.core.Species(name), get_shape(*shape)) else: w.add_structure(ecell4_base.core.Species(name), shape) if isinstance(w, ecell4_base.ode.ODEWorld): # w.bind_to(model) # stop binding for ode for serial, n in y0.items(): w.set_value(ecell4_base.core.Species(serial), n) else: # w.bind_to(model) for serial, n in y0.items(): w.add_molecules(ecell4_base.core.Species(serial), n) if not isinstance(t, collections.Iterable): t = [float(t) * i / 100 for i in range(101)] if species_list is not None: obs = ecell4_base.core.TimingNumberObserver(t, species_list) else: obs = ecell4_base.core.TimingNumberObserver(t) sim = f.simulator(w, model) # sim = f.simulator(w) if not isinstance(observers, collections.Iterable): observers = (observers, ) if return_type not in ('world', 'none', None): observers = (obs, ) + tuple(observers) if 
progressbar > 0: from .progressbar import progressbar as pb pb(sim, timeout=progressbar, flush=True).run(t[-1], observers) else: sim.run(t[-1], observers) if return_type in ('matplotlib', 'm'): if isinstance(opt_args, (list, tuple)): viz.plot_number_observer(obs, *opt_args, **opt_kwargs) elif isinstance(opt_args, dict): # opt_kwargs is ignored viz.plot_number_observer(obs, **opt_args) else: raise ValueError('opt_args [{}] must be list or dict.'.format( repr(opt_args))) elif return_type in ('nyaplot', 'n'): if isinstance(opt_args, (list, tuple)): viz.plot_number_observer_with_nya(obs, *opt_args, **opt_kwargs) elif isinstance(opt_args, dict): # opt_kwargs is ignored viz.plot_number_observer_with_nya(obs, **opt_args) else: raise ValueError('opt_args [{}] must be list or dict.'.format( repr(opt_args))) elif return_type in ('observer', 'o'): return obs elif return_type in ('array', 'a'): return obs.data() elif return_type in ('dataframe', 'd'): import pandas import numpy data = numpy.array(obs.data()).T return pandas.concat([ pandas.DataFrame(dict(Time=data[0], Value=data[i + 1], Species=sp.serial(), **opt_kwargs)) for i, sp in enumerate(obs.targets())]) elif return_type in ('world', 'w'): return sim.world() elif return_type is None or return_type in ('none', ): return else: raise ValueError( 'An invald value for "return_type" was given [{}].'.format(str(return_type)) + 'Use "none" if you need nothing to be returned.')
[ "def", "run_simulation", "(", "t", ",", "y0", "=", "None", ",", "volume", "=", "1.0", ",", "model", "=", "None", ",", "solver", "=", "'ode'", ",", "is_netfree", "=", "False", ",", "species_list", "=", "None", ",", "without_reset", "=", "False", ",", "return_type", "=", "'matplotlib'", ",", "opt_args", "=", "(", ")", ",", "opt_kwargs", "=", "None", ",", "structures", "=", "None", ",", "observers", "=", "(", ")", ",", "progressbar", "=", "0", ",", "rndseed", "=", "None", ",", "factory", "=", "None", ",", "## deprecated", "*", "*", "kwargs", ")", ":", "y0", "=", "y0", "or", "{", "}", "opt_kwargs", "=", "opt_kwargs", "or", "{", "}", "structures", "=", "structures", "or", "{", "}", "for", "key", ",", "value", "in", "kwargs", ".", "items", "(", ")", ":", "if", "key", "==", "'r'", ":", "return_type", "=", "value", "elif", "key", "==", "'v'", ":", "volume", "=", "value", "elif", "key", "==", "'s'", ":", "solver", "=", "value", "elif", "key", "==", "'m'", ":", "model", "=", "value", "else", ":", "raise", "ValueError", "(", "\"An unknown keyword argument was given [{}={}]\"", ".", "format", "(", "key", ",", "value", ")", ")", "import", "ecell4_base", "if", "unit", ".", "HAS_PINT", ":", "if", "isinstance", "(", "t", ",", "unit", ".", "_Quantity", ")", ":", "if", "unit", ".", "STRICT", "and", "not", "unit", ".", "check_dimensionality", "(", "t", ",", "'[time]'", ")", ":", "raise", "ValueError", "(", "\"Cannot convert [t] from '{}' ({}) to '[time]'\"", ".", "format", "(", "t", ".", "dimensionality", ",", "t", ".", "u", ")", ")", "t", "=", "t", ".", "to_base_units", "(", ")", ".", "magnitude", "if", "isinstance", "(", "volume", ",", "unit", ".", "_Quantity", ")", ":", "if", "unit", ".", "STRICT", ":", "if", "isinstance", "(", "volume", ".", "magnitude", ",", "ecell4_base", ".", "core", ".", "Real3", ")", "and", "not", "unit", ".", "check_dimensionality", "(", "volume", ",", "'[length]'", ")", ":", "raise", "ValueError", "(", "\"Cannot convert [volume] from '{}' ({}) to '[length]'\"", ".", "format", "(", "volume", ".", "dimensionality", ",", "volume", ".", "u", ")", ")", "elif", "not", "unit", ".", "check_dimensionality", "(", "volume", ",", "'[volume]'", ")", ":", "raise", "ValueError", "(", "\"Cannot convert [volume] from '{}' ({}) to '[volume]'\"", ".", "format", "(", "volume", ".", "dimensionality", ",", "volume", ".", "u", ")", ")", "volume", "=", "volume", ".", "to_base_units", "(", ")", ".", "magnitude", "if", "not", "isinstance", "(", "solver", ",", "str", ")", "and", "isinstance", "(", "solver", ",", "collections", ".", "Iterable", ")", ":", "solver", "=", "[", "value", ".", "to_base_units", "(", ")", ".", "magnitude", "if", "isinstance", "(", "value", ",", "unit", ".", "_Quantity", ")", "else", "value", "for", "value", "in", "solver", "]", "if", "factory", "is", "not", "None", ":", "# f = factory #XXX: will be deprecated in the future. just use solver", "raise", "ValueError", "(", "\"Argument 'factory' is no longer available. 
Use 'solver' instead.\"", ")", "elif", "isinstance", "(", "solver", ",", "str", ")", ":", "f", "=", "get_factory", "(", "solver", ")", "elif", "isinstance", "(", "solver", ",", "collections", ".", "Iterable", ")", ":", "f", "=", "get_factory", "(", "*", "solver", ")", "else", ":", "f", "=", "solver", "if", "rndseed", "is", "not", "None", ":", "f", "=", "f", ".", "rng", "(", "ecell4_base", ".", "core", ".", "GSLRandomNumberGenerator", "(", "rndseed", ")", ")", "if", "model", "is", "None", ":", "model", "=", "get_model", "(", "is_netfree", ",", "without_reset", ")", "w", "=", "f", ".", "world", "(", "volume", ")", "edge_lengths", "=", "w", ".", "edge_lengths", "(", ")", "if", "unit", ".", "HAS_PINT", ":", "y0", "=", "y0", ".", "copy", "(", ")", "for", "key", ",", "value", "in", "y0", ".", "items", "(", ")", ":", "if", "isinstance", "(", "value", ",", "unit", ".", "_Quantity", ")", ":", "if", "not", "unit", ".", "STRICT", ":", "y0", "[", "key", "]", "=", "value", ".", "to_base_units", "(", ")", ".", "magnitude", "elif", "unit", ".", "check_dimensionality", "(", "value", ",", "'[substance]'", ")", ":", "y0", "[", "key", "]", "=", "value", ".", "to_base_units", "(", ")", ".", "magnitude", "elif", "unit", ".", "check_dimensionality", "(", "value", ",", "'[concentration]'", ")", ":", "volume", "=", "w", ".", "volume", "(", ")", "if", "not", "isinstance", "(", "w", ",", "ecell4_base", ".", "spatiocyte", ".", "SpatiocyteWorld", ")", "else", "w", ".", "actual_volume", "(", ")", "y0", "[", "key", "]", "=", "value", ".", "to_base_units", "(", ")", ".", "magnitude", "*", "volume", "else", ":", "raise", "ValueError", "(", "\"Cannot convert a quantity for [{}] from '{}' ({}) to '[substance]'\"", ".", "format", "(", "key", ",", "value", ".", "dimensionality", ",", "value", ".", "u", ")", ")", "if", "not", "isinstance", "(", "w", ",", "ecell4_base", ".", "ode", ".", "ODEWorld", ")", ":", "w", ".", "bind_to", "(", "model", ")", "for", "(", "name", ",", "shape", ")", "in", "(", "structures", ".", "items", "(", ")", "if", "isinstance", "(", "structures", ",", "dict", ")", "else", "structures", ")", ":", "if", "isinstance", "(", "shape", ",", "str", ")", ":", "w", ".", "add_structure", "(", "ecell4_base", ".", "core", ".", "Species", "(", "name", ")", ",", "get_shape", "(", "shape", ")", ")", "elif", "isinstance", "(", "shape", ",", "collections", ".", "Iterable", ")", ":", "w", ".", "add_structure", "(", "ecell4_base", ".", "core", ".", "Species", "(", "name", ")", ",", "get_shape", "(", "*", "shape", ")", ")", "else", ":", "w", ".", "add_structure", "(", "ecell4_base", ".", "core", ".", "Species", "(", "name", ")", ",", "shape", ")", "if", "isinstance", "(", "w", ",", "ecell4_base", ".", "ode", ".", "ODEWorld", ")", ":", "# w.bind_to(model) # stop binding for ode", "for", "serial", ",", "n", "in", "y0", ".", "items", "(", ")", ":", "w", ".", "set_value", "(", "ecell4_base", ".", "core", ".", "Species", "(", "serial", ")", ",", "n", ")", "else", ":", "# w.bind_to(model)", "for", "serial", ",", "n", "in", "y0", ".", "items", "(", ")", ":", "w", ".", "add_molecules", "(", "ecell4_base", ".", "core", ".", "Species", "(", "serial", ")", ",", "n", ")", "if", "not", "isinstance", "(", "t", ",", "collections", ".", "Iterable", ")", ":", "t", "=", "[", "float", "(", "t", ")", "*", "i", "/", "100", "for", "i", "in", "range", "(", "101", ")", "]", "if", "species_list", "is", "not", "None", ":", "obs", "=", "ecell4_base", ".", "core", ".", "TimingNumberObserver", "(", "t", ",", "species_list", ")", "else", ":", "obs", 
"=", "ecell4_base", ".", "core", ".", "TimingNumberObserver", "(", "t", ")", "sim", "=", "f", ".", "simulator", "(", "w", ",", "model", ")", "# sim = f.simulator(w)", "if", "not", "isinstance", "(", "observers", ",", "collections", ".", "Iterable", ")", ":", "observers", "=", "(", "observers", ",", ")", "if", "return_type", "not", "in", "(", "'world'", ",", "'none'", ",", "None", ")", ":", "observers", "=", "(", "obs", ",", ")", "+", "tuple", "(", "observers", ")", "if", "progressbar", ">", "0", ":", "from", ".", "progressbar", "import", "progressbar", "as", "pb", "pb", "(", "sim", ",", "timeout", "=", "progressbar", ",", "flush", "=", "True", ")", ".", "run", "(", "t", "[", "-", "1", "]", ",", "observers", ")", "else", ":", "sim", ".", "run", "(", "t", "[", "-", "1", "]", ",", "observers", ")", "if", "return_type", "in", "(", "'matplotlib'", ",", "'m'", ")", ":", "if", "isinstance", "(", "opt_args", ",", "(", "list", ",", "tuple", ")", ")", ":", "viz", ".", "plot_number_observer", "(", "obs", ",", "*", "opt_args", ",", "*", "*", "opt_kwargs", ")", "elif", "isinstance", "(", "opt_args", ",", "dict", ")", ":", "# opt_kwargs is ignored", "viz", ".", "plot_number_observer", "(", "obs", ",", "*", "*", "opt_args", ")", "else", ":", "raise", "ValueError", "(", "'opt_args [{}] must be list or dict.'", ".", "format", "(", "repr", "(", "opt_args", ")", ")", ")", "elif", "return_type", "in", "(", "'nyaplot'", ",", "'n'", ")", ":", "if", "isinstance", "(", "opt_args", ",", "(", "list", ",", "tuple", ")", ")", ":", "viz", ".", "plot_number_observer_with_nya", "(", "obs", ",", "*", "opt_args", ",", "*", "*", "opt_kwargs", ")", "elif", "isinstance", "(", "opt_args", ",", "dict", ")", ":", "# opt_kwargs is ignored", "viz", ".", "plot_number_observer_with_nya", "(", "obs", ",", "*", "*", "opt_args", ")", "else", ":", "raise", "ValueError", "(", "'opt_args [{}] must be list or dict.'", ".", "format", "(", "repr", "(", "opt_args", ")", ")", ")", "elif", "return_type", "in", "(", "'observer'", ",", "'o'", ")", ":", "return", "obs", "elif", "return_type", "in", "(", "'array'", ",", "'a'", ")", ":", "return", "obs", ".", "data", "(", ")", "elif", "return_type", "in", "(", "'dataframe'", ",", "'d'", ")", ":", "import", "pandas", "import", "numpy", "data", "=", "numpy", ".", "array", "(", "obs", ".", "data", "(", ")", ")", ".", "T", "return", "pandas", ".", "concat", "(", "[", "pandas", ".", "DataFrame", "(", "dict", "(", "Time", "=", "data", "[", "0", "]", ",", "Value", "=", "data", "[", "i", "+", "1", "]", ",", "Species", "=", "sp", ".", "serial", "(", ")", ",", "*", "*", "opt_kwargs", ")", ")", "for", "i", ",", "sp", "in", "enumerate", "(", "obs", ".", "targets", "(", ")", ")", "]", ")", "elif", "return_type", "in", "(", "'world'", ",", "'w'", ")", ":", "return", "sim", ".", "world", "(", ")", "elif", "return_type", "is", "None", "or", "return_type", "in", "(", "'none'", ",", ")", ":", "return", "else", ":", "raise", "ValueError", "(", "'An invald value for \"return_type\" was given [{}].'", ".", "format", "(", "str", "(", "return_type", ")", ")", "+", "'Use \"none\" if you need nothing to be returned.'", ")" ]
Run a simulation with the given model and plot the result on IPython
notebook with matplotlib.

Parameters
----------
t : array or Real
    A sequence of time points for which to solve for 'm'.
y0 : dict
    Initial condition.
volume : Real or Real3, optional
    A size of the simulation volume.
    Keyword 'v' is a shortcut for specifying 'volume'.
model : Model, optional
    Keyword 'm' is a shortcut for specifying 'model'.
solver : str, tuple or Factory, optional
    Solver type. Choose one from 'ode', 'gillespie', 'spatiocyte',
    'meso', 'bd' and 'egfrd'. Default is 'ode'.
    When tuple is given, the first value must be str as explained above.
    All the rest is used as arguments for the corresponding factory class.
    Keyword 's' is a shortcut for specifying 'solver'.
species_list : list of str, optional
    A list of names of Species observed. If None, log all.
    Default is None.
return_type : str, optional
    Choose a type of return value from 'array', 'observer',
    'matplotlib', 'nyaplot', 'world', 'dataframe', 'none' or None.
    If None or 'none', return and plot nothing. Default is 'matplotlib'.
    'dataframe' requires numpy and pandas libraries.
    Keyword 'r' is a shortcut for specifying 'return_type'.
opt_args: list, tuple or dict, optional
    Arguments for plotting. If return_type suggests no plotting, just ignored.
opt_kwargs: dict, optional
    Arguments for plotting. If return_type suggests no plotting or
    opt_args is a list or tuple, just ignored.
    i.e.) viz.plot_number_observer(obs, *opt_args, **opt_kwargs)
is_netfree: bool, optional
    Whether the model is netfree or not. When a model is given as an
    argument, just ignored. Default is False.
structures : list or dict, optional
    A dictionary which gives pairs of a name and shape of structures.
    Not fully supported yet.
observers : Observer or list, optional
    A list of extra observer references.
progressbar : float, optional
    A timeout for a progress bar in seconds.
    When the value is not more than 0, show nothing. Default is 0.
rndseed : int, optional
    A random seed for a simulation.
    This argument will be ignored when 'solver' is given NOT as a string.

Returns
-------
value : list, TimingNumberObserver, World or None
    Return a value suggested by ``return_type``.
    When ``return_type`` is 'array', return a time course data.
    When ``return_type`` is 'observer', return an observer.
    When ``return_type`` is 'world', return the last state of ``World``.
    Return nothing if else.
[ "Run", "a", "simulation", "with", "the", "given", "model", "and", "plot", "the", "result", "on", "IPython", "notebook", "with", "matplotlib", "." ]
python
train
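The record above documents ecell4's run_simulation in detail. A minimal usage sketch may make the call pattern concrete; it assumes the decorator-style model definition shown in ecell4 tutorials, and the species names and rate constants are purely illustrative placeholders, not taken from the record.

# Hedged usage sketch for run_simulation (illustrative; import path may differ by release).
from ecell4 import *

with reaction_rules():
    A + B == C | (0.01, 0.3)   # reversible binding with assumed rate constants

# Ask for the observer back instead of the default matplotlib plot.
obs = run_simulation(
    10.0,                        # simulate from t=0 to t=10
    y0={'A': 60, 'B': 60},       # initial copy numbers
    solver='gillespie',
    return_type='observer')
print(obs.data())                # time course as a list of rows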
cirruscluster/cirruscluster
cirruscluster/core.py
https://github.com/cirruscluster/cirruscluster/blob/977409929dd81322d886425cdced10608117d5d7/cirruscluster/core.py#L586-L597
def __WaitForVolume(volume, desired_state):
  """ Blocks until EBS volume is in desired state. """
  print 'Waiting for volume %s to be %s...' % (volume.id, desired_state)
  while True:
    volume.update()
    sys.stdout.write('.')
    sys.stdout.flush()
    #print 'status is: %s' % volume.status
    if volume.status == desired_state:
      break
    time.sleep(5)
  return
[ "def", "__WaitForVolume", "(", "volume", ",", "desired_state", ")", ":", "print", "'Waiting for volume %s to be %s...'", "%", "(", "volume", ".", "id", ",", "desired_state", ")", "while", "True", ":", "volume", ".", "update", "(", ")", "sys", ".", "stdout", ".", "write", "(", "'.'", ")", "sys", ".", "stdout", ".", "flush", "(", ")", "#print 'status is: %s' % volume.status", "if", "volume", ".", "status", "==", "desired_state", ":", "break", "time", ".", "sleep", "(", "5", ")", "return" ]
Blocks until EBS volume is in desired state.
[ "Blocks", "until", "EBS", "volume", "is", "in", "desired", "state", "." ]
python
train
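The helper above is Python 2 (print statement) and polls indefinitely. A self-contained Python 3 sketch of the same poll-until-state pattern, with an optional timeout added purely for illustration (it is not part of the original function):

# Generic poll-until-ready sketch (illustration only; not cirruscluster code).
import sys
import time

def wait_for_state(get_state, desired_state, poll_seconds=5, timeout=None):
    """Block until get_state() returns desired_state, or raise on timeout."""
    waited = 0
    while get_state() != desired_state:
        if timeout is not None and waited >= timeout:
            raise TimeoutError('gave up waiting for state %r' % desired_state)
        sys.stdout.write('.')
        sys.stdout.flush()
        time.sleep(poll_seconds)
        waited += poll_seconds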
scour-project/scour
scour/scour.py
https://github.com/scour-project/scour/blob/049264eba6b1a54ae5ba1d6a5077d8e7b80e8835/scour/scour.py#L1897-L1923
def removeDefaultAttributeValue(node, attribute):
    """
    Removes the DefaultAttribute 'attribute' from 'node' if specified conditions are fulfilled

    Warning: Does NOT check if the attribute is actually valid for the passed element type
             for increased performance!
    """
    if not node.hasAttribute(attribute.name):
        return 0

    # differentiate between text and numeric values
    if isinstance(attribute.value, str):
        if node.getAttribute(attribute.name) == attribute.value:
            if (attribute.conditions is None) or attribute.conditions(node):
                node.removeAttribute(attribute.name)
                return 1
    else:
        nodeValue = SVGLength(node.getAttribute(attribute.name))
        if ((attribute.value is None)
                or ((nodeValue.value == attribute.value) and not (nodeValue.units == Unit.INVALID))):
            if ((attribute.units is None) or (nodeValue.units == attribute.units)
                    or (isinstance(attribute.units, list) and nodeValue.units in attribute.units)):
                if (attribute.conditions is None) or attribute.conditions(node):
                    node.removeAttribute(attribute.name)
                    return 1

    return 0
[ "def", "removeDefaultAttributeValue", "(", "node", ",", "attribute", ")", ":", "if", "not", "node", ".", "hasAttribute", "(", "attribute", ".", "name", ")", ":", "return", "0", "# differentiate between text and numeric values", "if", "isinstance", "(", "attribute", ".", "value", ",", "str", ")", ":", "if", "node", ".", "getAttribute", "(", "attribute", ".", "name", ")", "==", "attribute", ".", "value", ":", "if", "(", "attribute", ".", "conditions", "is", "None", ")", "or", "attribute", ".", "conditions", "(", "node", ")", ":", "node", ".", "removeAttribute", "(", "attribute", ".", "name", ")", "return", "1", "else", ":", "nodeValue", "=", "SVGLength", "(", "node", ".", "getAttribute", "(", "attribute", ".", "name", ")", ")", "if", "(", "(", "attribute", ".", "value", "is", "None", ")", "or", "(", "(", "nodeValue", ".", "value", "==", "attribute", ".", "value", ")", "and", "not", "(", "nodeValue", ".", "units", "==", "Unit", ".", "INVALID", ")", ")", ")", ":", "if", "(", "(", "attribute", ".", "units", "is", "None", ")", "or", "(", "nodeValue", ".", "units", "==", "attribute", ".", "units", ")", "or", "(", "isinstance", "(", "attribute", ".", "units", ",", "list", ")", "and", "nodeValue", ".", "units", "in", "attribute", ".", "units", ")", ")", ":", "if", "(", "attribute", ".", "conditions", "is", "None", ")", "or", "attribute", ".", "conditions", "(", "node", ")", ":", "node", ".", "removeAttribute", "(", "attribute", ".", "name", ")", "return", "1", "return", "0" ]
Removes the DefaultAttribute 'attribute' from 'node' if specified conditions are fulfilled

Warning: Does NOT check if the attribute is actually valid for the passed element type
         for increased performance!
[ "Removes", "the", "DefaultAttribute", "attribute", "from", "node", "if", "specified", "conditions", "are", "fulfilled" ]
python
train
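To make the intent concrete without depending on scour's SVGLength/Unit helpers, here is a simplified, self-contained sketch of the same idea: drop an attribute when it only restates a known default. The default values chosen here are assumptions for illustration, not scour's actual tables.

# Simplified illustration of default-attribute removal (not scour's real API).
from xml.dom import minidom

DEFAULTS = {'fill-opacity': '1', 'stroke': 'none'}  # assumed defaults, for illustration

def strip_default_attributes(node, defaults=DEFAULTS):
    removed = 0
    for name, default in defaults.items():
        if node.getAttribute(name) == default:
            node.removeAttribute(name)
            removed += 1
    return removed

doc = minidom.parseString('<rect width="10" height="10" fill-opacity="1"/>')
print(strip_default_attributes(doc.documentElement))  # -> 1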
data61/clkhash
clkhash/field_formats.py
https://github.com/data61/clkhash/blob/ec6398d6708a063de83f7c3d6286587bff8e7121/clkhash/field_formats.py#L157-L188
def fhp_from_json_dict(
        json_dict  # type: Dict[str, Any]
    ):
    # type: (...) -> FieldHashingProperties
    """ Make a :class:`FieldHashingProperties` object from a dictionary.

        :param dict json_dict:
            The dictionary must have an 'ngram' key
            and one of k or num_bits. It may have 'positional' key; if
            missing a default is used.
            The encoding is always set to the default value.
        :return: A :class:`FieldHashingProperties` instance.
    """
    h = json_dict.get('hash', {'type': 'blakeHash'})
    num_bits = json_dict.get('numBits')
    k = json_dict.get('k')
    if not num_bits and not k:
        num_bits = 200  # default for v2 schema
    return FieldHashingProperties(
        ngram=json_dict['ngram'],
        positional=json_dict.get(
            'positional', FieldHashingProperties._DEFAULT_POSITIONAL),
        hash_type=h['type'],
        prevent_singularity=h.get('prevent_singularity'),
        num_bits=num_bits,
        k=k,
        missing_value=MissingValueSpec.from_json_dict(
            json_dict['missingValue']) if 'missingValue' in json_dict else None
    )
[ "def", "fhp_from_json_dict", "(", "json_dict", "# type: Dict[str, Any]", ")", ":", "# type: (...) -> FieldHashingProperties", "h", "=", "json_dict", ".", "get", "(", "'hash'", ",", "{", "'type'", ":", "'blakeHash'", "}", ")", "num_bits", "=", "json_dict", ".", "get", "(", "'numBits'", ")", "k", "=", "json_dict", ".", "get", "(", "'k'", ")", "if", "not", "num_bits", "and", "not", "k", ":", "num_bits", "=", "200", "# default for v2 schema", "return", "FieldHashingProperties", "(", "ngram", "=", "json_dict", "[", "'ngram'", "]", ",", "positional", "=", "json_dict", ".", "get", "(", "'positional'", ",", "FieldHashingProperties", ".", "_DEFAULT_POSITIONAL", ")", ",", "hash_type", "=", "h", "[", "'type'", "]", ",", "prevent_singularity", "=", "h", ".", "get", "(", "'prevent_singularity'", ")", ",", "num_bits", "=", "num_bits", ",", "k", "=", "k", ",", "missing_value", "=", "MissingValueSpec", ".", "from_json_dict", "(", "json_dict", "[", "'missingValue'", "]", ")", "if", "'missingValue'", "in", "json_dict", "else", "None", ")" ]
Make a :class:`FieldHashingProperties` object from a dictionary.

:param dict json_dict:
    The dictionary must have an 'ngram' key
    and one of k or num_bits. It may have 'positional' key; if
    missing a default is used.
    The encoding is always set to the default value.
:return: A :class:`FieldHashingProperties` instance.
[ "Make", "a", ":", "class", ":", "FieldHashingProperties", "object", "from", "a", "dictionary", "." ]
python
train
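A small sketch of the defaulting behaviour described above, using a plain dict and a namedtuple stand-in instead of clkhash's real classes (FieldSpec and spec_from_json_dict below are invented names for illustration only):

# Illustration of the numBits/k defaulting logic (plain-Python stand-in,
# not clkhash's FieldHashingProperties).
from collections import namedtuple

FieldSpec = namedtuple('FieldSpec', 'ngram positional num_bits k')

def spec_from_json_dict(json_dict):
    num_bits = json_dict.get('numBits')
    k = json_dict.get('k')
    if not num_bits and not k:
        num_bits = 200          # v2-schema default, as in the function above
    return FieldSpec(ngram=json_dict['ngram'],
                     positional=json_dict.get('positional', False),
                     num_bits=num_bits, k=k)

print(spec_from_json_dict({'ngram': 2}))
# FieldSpec(ngram=2, positional=False, num_bits=200, k=None)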
googledatalab/pydatalab
datalab/utils/_job.py
https://github.com/googledatalab/pydatalab/blob/d9031901d5bca22fe0d5925d204e6698df9852e1/datalab/utils/_job.py#L175-L199
def wait(self, timeout=None):
  """ Wait for the job to complete, or a timeout to happen.

  Args:
    timeout: how long to wait before giving up (in seconds); default None which means no timeout.

  Returns:
    The Job
  """
  if self._future:
    try:
      # Future.exception() will return rather than raise any exception so we use it.
      self._future.exception(timeout)
    except concurrent.futures.TimeoutError:
      self._timeout()
    self._refresh_state()
  else:
    # fall back to polling
    while not self.is_complete:
      if timeout is not None:
        if timeout <= 0:
          self._timeout()
        timeout -= Job._POLL_INTERVAL_SECONDS
      time.sleep(Job._POLL_INTERVAL_SECONDS)
  return self
[ "def", "wait", "(", "self", ",", "timeout", "=", "None", ")", ":", "if", "self", ".", "_future", ":", "try", ":", "# Future.exception() will return rather than raise any exception so we use it.", "self", ".", "_future", ".", "exception", "(", "timeout", ")", "except", "concurrent", ".", "futures", ".", "TimeoutError", ":", "self", ".", "_timeout", "(", ")", "self", ".", "_refresh_state", "(", ")", "else", ":", "# fall back to polling", "while", "not", "self", ".", "is_complete", ":", "if", "timeout", "is", "not", "None", ":", "if", "timeout", "<=", "0", ":", "self", ".", "_timeout", "(", ")", "timeout", "-=", "Job", ".", "_POLL_INTERVAL_SECONDS", "time", ".", "sleep", "(", "Job", ".", "_POLL_INTERVAL_SECONDS", ")", "return", "self" ]
Wait for the job to complete, or a timeout to happen.

Args:
  timeout: how long to wait before giving up (in seconds); default None which means no timeout.

Returns:
  The Job
[ "Wait", "for", "the", "job", "to", "complete", "or", "a", "timeout", "to", "happen", "." ]
python
train
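The method above switches between a future-based wait and plain polling. Stripped of pydatalab's internals, the pattern looks roughly like the sketch below; the function name and arguments are illustrative, not pydatalab's API.

# Generic wait-with-timeout sketch (illustration; not pydatalab's API).
import concurrent.futures
import time

def wait_for(future=None, is_complete=lambda: True, timeout=None, poll=1.0):
    if future is not None:
        try:
            # exception() blocks like result(), but returns rather than raises
            # whatever exception the job hit.
            future.exception(timeout)
        except concurrent.futures.TimeoutError:
            raise TimeoutError('job did not complete in time')
    else:
        while not is_complete():
            if timeout is not None:
                if timeout <= 0:
                    raise TimeoutError('job did not complete in time')
                timeout -= poll
            time.sleep(poll)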
blockstack/virtualchain
virtualchain/lib/indexer.py
https://github.com/blockstack/virtualchain/blob/fcfc970064ca7dfcab26ebd3ab955870a763ea39/virtualchain/lib/indexer.py#L1789-L1842
def state_engine_replay(consensus_impl, existing_working_dir, new_state_engine, target_block_height, start_block=None, initial_snapshots={}, expected_snapshots={}):
    """
    Given consensus rules, a target block height, and a path to an existing chainstate db,
    replay the chain state's virtual transactions through the consensus rules into a given
    directory (working_dir).

    Optionally check that the snapshots in @expected_snapshots match up as we verify.
    @expected_snapshots maps str(block_height) to str(consensus hash)

    Return the consensus hash calculated at the target block height
    Return None on verification failure (i.e. we got a different consensus hash than one for the same block in expected_snapshots)
    """
    assert hasattr(consensus_impl, 'get_opcodes')
    assert hasattr(consensus_impl, 'get_magic_bytes')
    assert hasattr(consensus_impl, 'get_opfields')
    assert hasattr(consensus_impl, 'get_first_block_id')

    consensus_opcodes = consensus_impl.get_opcodes()
    consensus_magic_bytes = consensus_impl.get_magic_bytes()
    consensus_opfields = consensus_impl.get_opfields()

    existing_state_engine = StateEngine(consensus_impl, existing_working_dir)

    # set up existing state engine
    rc = existing_state_engine.db_setup()
    if not rc:
        # do not touch the existing db
        raise Exception("Existing state in {} is unusable or corrupt".format(os.path.dirname(existing_working_dir)))

    if start_block is None:
        # maybe we're resuming?
        start_block = new_state_engine.get_lastblock(new_state_engine.impl, new_state_engine.working_dir)
        if start_block is None:
            # starting from scratch
            start_block = consensus_impl.get_first_block_id()

    log.debug("Rebuilding database from {} to {}".format(start_block, target_block_height))

    consensus_hashes = {}
    for block_height in range(start_block, target_block_height+1):
        # recover virtualchain transactions from the existing db and feed them into the new db
        consensus_hash = state_engine_replay_block(existing_state_engine, new_state_engine, block_height, expected_snapshots=expected_snapshots)
        log.debug("VERIFY CONSENSUS({}): {}".format(block_height, consensus_hash))

        consensus_hashes[block_height] = consensus_hash
        if block_height in expected_snapshots:
            if expected_snapshots[block_height] != consensus_hash:
                log.error("DATABASE IS NOT CONSISTENT AT {}: {} != {}".format(block_height, expected_snapshots[block_height], consensus_hash))
                return None

    # final consensus hash
    return consensus_hashes[target_block_height]
[ "def", "state_engine_replay", "(", "consensus_impl", ",", "existing_working_dir", ",", "new_state_engine", ",", "target_block_height", ",", "start_block", "=", "None", ",", "initial_snapshots", "=", "{", "}", ",", "expected_snapshots", "=", "{", "}", ")", ":", "assert", "hasattr", "(", "consensus_impl", ",", "'get_opcodes'", ")", "assert", "hasattr", "(", "consensus_impl", ",", "'get_magic_bytes'", ")", "assert", "hasattr", "(", "consensus_impl", ",", "'get_opfields'", ")", "assert", "hasattr", "(", "consensus_impl", ",", "'get_first_block_id'", ")", "consensus_opcodes", "=", "consensus_impl", ".", "get_opcodes", "(", ")", "consensus_magic_bytes", "=", "consensus_impl", ".", "get_magic_bytes", "(", ")", "consensus_opfields", "=", "consensus_impl", ".", "get_opfields", "(", ")", "existing_state_engine", "=", "StateEngine", "(", "consensus_impl", ",", "existing_working_dir", ")", "# set up existing state engine ", "rc", "=", "existing_state_engine", ".", "db_setup", "(", ")", "if", "not", "rc", ":", "# do not touch the existing db", "raise", "Exception", "(", "\"Existing state in {} is unusable or corrupt\"", ".", "format", "(", "os", ".", "path", ".", "dirname", "(", "existing_working_dir", ")", ")", ")", "if", "start_block", "is", "None", ":", "# maybe we're resuming?", "start_block", "=", "new_state_engine", ".", "get_lastblock", "(", "new_state_engine", ".", "impl", ",", "new_state_engine", ".", "working_dir", ")", "if", "start_block", "is", "None", ":", "# starting from scratch", "start_block", "=", "consensus_impl", ".", "get_first_block_id", "(", ")", "log", ".", "debug", "(", "\"Rebuilding database from {} to {}\"", ".", "format", "(", "start_block", ",", "target_block_height", ")", ")", "consensus_hashes", "=", "{", "}", "for", "block_height", "in", "range", "(", "start_block", ",", "target_block_height", "+", "1", ")", ":", "# recover virtualchain transactions from the existing db and feed them into the new db", "consensus_hash", "=", "state_engine_replay_block", "(", "existing_state_engine", ",", "new_state_engine", ",", "block_height", ",", "expected_snapshots", "=", "expected_snapshots", ")", "log", ".", "debug", "(", "\"VERIFY CONSENSUS({}): {}\"", ".", "format", "(", "block_height", ",", "consensus_hash", ")", ")", "consensus_hashes", "[", "block_height", "]", "=", "consensus_hash", "if", "block_height", "in", "expected_snapshots", ":", "if", "expected_snapshots", "[", "block_height", "]", "!=", "consensus_hash", ":", "log", ".", "error", "(", "\"DATABASE IS NOT CONSISTENT AT {}: {} != {}\"", ".", "format", "(", "block_height", ",", "expected_snapshots", "[", "block_height", "]", ",", "consensus_hash", ")", ")", "return", "None", "# final consensus hash", "return", "consensus_hashes", "[", "target_block_height", "]" ]
Given consensus rules, a target block height, and a path to an existing chainstate db,
replay the chain state's virtual transactions through the consensus rules into a given
directory (working_dir).

Optionally check that the snapshots in @expected_snapshots match up as we verify.
@expected_snapshots maps str(block_height) to str(consensus hash)

Return the consensus hash calculated at the target block height
Return None on verification failure (i.e. we got a different consensus hash than one for the same block in expected_snapshots)
[ "Given", "consensus", "rules", "a", "target", "block", "height", "and", "a", "path", "to", "an", "existing", "chainstate", "db", "replay", "the", "chain", "state", "s", "virtual", "transactions", "through", "the", "consensus", "rules", "into", "a", "given", "directory", "(", "working_dir", ")", "." ]
python
train
tijme/not-your-average-web-crawler
nyawc/http/Handler.py
https://github.com/tijme/not-your-average-web-crawler/blob/d77c14e1616c541bb3980f649a7e6f8ed02761fb/nyawc/http/Handler.py#L154-L176
def __content_type_matches(self, content_type, available_content_types):
    """Check if the given content type matches one of the available content types.

    Args:
        content_type (str): The given content type.
        available_content_types list(str): All the available content types.

    Returns:
        bool: True if a match was found, False otherwise.

    """
    if content_type is None:
        return False

    if content_type in available_content_types:
        return True

    for available_content_type in available_content_types:
        if available_content_type in content_type:
            return True

    return False
[ "def", "__content_type_matches", "(", "self", ",", "content_type", ",", "available_content_types", ")", ":", "if", "content_type", "is", "None", ":", "return", "False", "if", "content_type", "in", "available_content_types", ":", "return", "True", "for", "available_content_type", "in", "available_content_types", ":", "if", "available_content_type", "in", "content_type", ":", "return", "True", "return", "False" ]
Check if the given content type matches one of the available content types.

Args:
    content_type (str): The given content type.
    available_content_types list(str): All the available content types.

Returns:
    bool: True if a match was found, False otherwise.
[ "Check", "if", "the", "given", "content", "type", "matches", "one", "of", "the", "available", "content", "types", "." ]
python
train
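The matching rule above is exact-or-substring, which is why a content type carrying a charset suffix still matches. A tiny standalone re-statement of that rule:

# Standalone re-statement of the content-type matching rule above.
def content_type_matches(content_type, available):
    if content_type is None:
        return False
    if content_type in available:
        return True
    return any(a in content_type for a in available)

print(content_type_matches('application/json; charset=utf-8', ['application/json']))  # True
print(content_type_matches('text/html', ['application/json']))                        # False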
MacHu-GWU/angora-project
angora/bot/macro.py
https://github.com/MacHu-GWU/angora-project/blob/689a60da51cd88680ddbe26e28dbe81e6b01d275/angora/bot/macro.py#L175-L179
def Up(self, n = 1, dl = 0):
    """Press the up arrow key n times (上方向键n次).
    """
    self.Delay(dl)
    self.keyboard.tap_key(self.keyboard.up_key, n)
[ "def", "Up", "(", "self", ",", "n", "=", "1", ",", "dl", "=", "0", ")", ":", "self", ".", "Delay", "(", "dl", ")", "self", ".", "keyboard", ".", "tap_key", "(", "self", ".", "keyboard", ".", "up_key", ",", "n", ")" ]
Press the up arrow key n times (上方向键n次).
[ "上方向键n次" ]
python
train
fprimex/zdesk
zdesk/zdesk_api.py
https://github.com/fprimex/zdesk/blob/851611c13b4d530e9df31390b3ec709baf0a0188/zdesk/zdesk_api.py#L425-L444
def bans_list(self, limit=None, max_id=None, since_id=None, **kwargs):
    "https://developer.zendesk.com/rest_api/docs/chat/bans#get-all-bans"
    api_path = "/api/v2/bans"
    api_query = {}
    if "query" in kwargs.keys():
        api_query.update(kwargs["query"])
        del kwargs["query"]
    if limit:
        api_query.update({
            "limit": limit,
        })
    if max_id:
        api_query.update({
            "max_id": max_id,
        })
    if since_id:
        api_query.update({
            "since_id": since_id,
        })
    return self.call(api_path, query=api_query, **kwargs)
[ "def", "bans_list", "(", "self", ",", "limit", "=", "None", ",", "max_id", "=", "None", ",", "since_id", "=", "None", ",", "*", "*", "kwargs", ")", ":", "api_path", "=", "\"/api/v2/bans\"", "api_query", "=", "{", "}", "if", "\"query\"", "in", "kwargs", ".", "keys", "(", ")", ":", "api_query", ".", "update", "(", "kwargs", "[", "\"query\"", "]", ")", "del", "kwargs", "[", "\"query\"", "]", "if", "limit", ":", "api_query", ".", "update", "(", "{", "\"limit\"", ":", "limit", ",", "}", ")", "if", "max_id", ":", "api_query", ".", "update", "(", "{", "\"max_id\"", ":", "max_id", ",", "}", ")", "if", "since_id", ":", "api_query", ".", "update", "(", "{", "\"since_id\"", ":", "since_id", ",", "}", ")", "return", "self", ".", "call", "(", "api_path", ",", "query", "=", "api_query", ",", "*", "*", "kwargs", ")" ]
https://developer.zendesk.com/rest_api/docs/chat/bans#get-all-bans
[ "https", ":", "//", "developer", ".", "zendesk", ".", "com", "/", "rest_api", "/", "docs", "/", "chat", "/", "bans#get", "-", "all", "-", "bans" ]
python
train
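Each optional argument above simply becomes a query parameter on the request. A hedged illustration of how such a call maps onto a URL; urlencode stands in for whatever the zdesk client's call() does internally, and the subdomain is a placeholder.

# Illustration of how bans_list's arguments map onto a query string.
from urllib.parse import urlencode

def build_bans_url(base, limit=None, max_id=None, since_id=None):
    query = {k: v for k, v in
             (('limit', limit), ('max_id', max_id), ('since_id', since_id))
             if v is not None}
    return base + '/api/v2/bans' + ('?' + urlencode(query) if query else '')

print(build_bans_url('https://example.zendesk.com', limit=50, since_id=12345))
# https://example.zendesk.com/api/v2/bans?limit=50&since_id=12345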
pysal/esda
esda/smoothing.py
https://github.com/pysal/esda/blob/2fafc6ec505e153152a86601d3e0fba080610c20/esda/smoothing.py#L216-L298
def direct_age_standardization(e, b, s, n, alpha=0.05):
    """A utility function to compute rate through direct age standardization

    Parameters
    ----------
    e          : array
                 (n*h, 1), event variable measured for each age group across n spatial units
    b          : array
                 (n*h, 1), population at risk variable measured for each age group across n spatial units
    s          : array
                 (n*h, 1), standard population for each age group across n spatial units
    n          : integer
                 the number of spatial units
    alpha      : float
                 significance level for confidence interval

    Notes
    -----
    e, b, and s are arranged in the same order

    Returns
    -------
    list
                 a list of n tuples; a tuple has a rate and its lower and upper limits
                 age standardized rates and confidence intervals

    Examples
    --------

    Creating an array of an event variable (e.g., the number of cancer patients)
    for 2 regions in each of which 4 age groups are available.
    The first 4 values are event values for 4 age groups in the region 1,
    and the next 4 values are for 4 age groups in the region 2.

    >>> e = np.array([30, 25, 25, 15, 33, 21, 30, 20])

    Creating another array of a population-at-risk variable (e.g., total population)
    for the same two regions.
    The order for entering values is the same as the case of e.

    >>> b = np.array([1000, 1000, 1100, 900, 1000, 900, 1100, 900])

    For direct age standardization, we also need the data for standard population.
    Standard population is a reference population-at-risk (e.g., population distribution
    for the U.S.) whose age distribution can be used as a benchmarking point for comparing
    age distributions across regions (e.g., population distribution for Arizona and California).
    Another array including standard population is created.

    >>> s = np.array([1000, 900, 1000, 900, 1000, 900, 1000, 900])

    Specifying the number of regions.

    >>> n = 2

    Applying direct_age_standardization function to e and b

    >>> a, b = [i[0] for i in direct_age_standardization(e, b, s, n)]
    >>> round(a, 4)
    0.0237
    >>> round(b, 4)
    0.0267

    """
    age_weight = (1.0 / b) * (s * 1.0 / sum_by_n(s, 1.0, n).repeat(len(s) // n))
    adjusted_r = sum_by_n(e, age_weight, n)
    var_estimate = sum_by_n(e, np.square(age_weight), n)
    g_a = np.square(adjusted_r) / var_estimate
    g_b = var_estimate / adjusted_r
    k = [age_weight[i:i + len(b) // n].max() for i in range(0, len(b), len(b) // n)]
    g_a_k = np.square(adjusted_r + k) / (var_estimate + np.square(k))
    g_b_k = (var_estimate + np.square(k)) / (adjusted_r + k)
    summed_b = sum_by_n(b, 1.0, n)
    res = []
    for i in range(len(adjusted_r)):
        if adjusted_r[i] == 0:
            upper = 0.5 * chi2.ppf(1 - 0.5 * alpha)
            lower = 0.0
        else:
            lower = gamma.ppf(0.5 * alpha, g_a[i], scale=g_b[i])
            upper = gamma.ppf(1 - 0.5 * alpha, g_a_k[i], scale=g_b_k[i])
        res.append((adjusted_r[i], lower, upper))
    return res
[ "def", "direct_age_standardization", "(", "e", ",", "b", ",", "s", ",", "n", ",", "alpha", "=", "0.05", ")", ":", "age_weight", "=", "(", "1.0", "/", "b", ")", "*", "(", "s", "*", "1.0", "/", "sum_by_n", "(", "s", ",", "1.0", ",", "n", ")", ".", "repeat", "(", "len", "(", "s", ")", "//", "n", ")", ")", "adjusted_r", "=", "sum_by_n", "(", "e", ",", "age_weight", ",", "n", ")", "var_estimate", "=", "sum_by_n", "(", "e", ",", "np", ".", "square", "(", "age_weight", ")", ",", "n", ")", "g_a", "=", "np", ".", "square", "(", "adjusted_r", ")", "/", "var_estimate", "g_b", "=", "var_estimate", "/", "adjusted_r", "k", "=", "[", "age_weight", "[", "i", ":", "i", "+", "len", "(", "b", ")", "//", "n", "]", ".", "max", "(", ")", "for", "i", "in", "range", "(", "0", ",", "len", "(", "b", ")", ",", "len", "(", "b", ")", "//", "n", ")", "]", "g_a_k", "=", "np", ".", "square", "(", "adjusted_r", "+", "k", ")", "/", "(", "var_estimate", "+", "np", ".", "square", "(", "k", ")", ")", "g_b_k", "=", "(", "var_estimate", "+", "np", ".", "square", "(", "k", ")", ")", "/", "(", "adjusted_r", "+", "k", ")", "summed_b", "=", "sum_by_n", "(", "b", ",", "1.0", ",", "n", ")", "res", "=", "[", "]", "for", "i", "in", "range", "(", "len", "(", "adjusted_r", ")", ")", ":", "if", "adjusted_r", "[", "i", "]", "==", "0", ":", "upper", "=", "0.5", "*", "chi2", ".", "ppf", "(", "1", "-", "0.5", "*", "alpha", ")", "lower", "=", "0.0", "else", ":", "lower", "=", "gamma", ".", "ppf", "(", "0.5", "*", "alpha", ",", "g_a", "[", "i", "]", ",", "scale", "=", "g_b", "[", "i", "]", ")", "upper", "=", "gamma", ".", "ppf", "(", "1", "-", "0.5", "*", "alpha", ",", "g_a_k", "[", "i", "]", ",", "scale", "=", "g_b_k", "[", "i", "]", ")", "res", ".", "append", "(", "(", "adjusted_r", "[", "i", "]", ",", "lower", ",", "upper", ")", ")", "return", "res" ]
A utility function to compute rate through direct age standardization

Parameters
----------
e          : array
             (n*h, 1), event variable measured for each age group across n spatial units
b          : array
             (n*h, 1), population at risk variable measured for each age group across n spatial units
s          : array
             (n*h, 1), standard population for each age group across n spatial units
n          : integer
             the number of spatial units
alpha      : float
             significance level for confidence interval

Notes
-----
e, b, and s are arranged in the same order

Returns
-------
list
             a list of n tuples; a tuple has a rate and its lower and upper limits
             age standardized rates and confidence intervals

Examples
--------

Creating an array of an event variable (e.g., the number of cancer patients)
for 2 regions in each of which 4 age groups are available.
The first 4 values are event values for 4 age groups in the region 1,
and the next 4 values are for 4 age groups in the region 2.

>>> e = np.array([30, 25, 25, 15, 33, 21, 30, 20])

Creating another array of a population-at-risk variable (e.g., total population)
for the same two regions.
The order for entering values is the same as the case of e.

>>> b = np.array([1000, 1000, 1100, 900, 1000, 900, 1100, 900])

For direct age standardization, we also need the data for standard population.
Standard population is a reference population-at-risk (e.g., population distribution
for the U.S.) whose age distribution can be used as a benchmarking point for comparing
age distributions across regions (e.g., population distribution for Arizona and California).
Another array including standard population is created.

>>> s = np.array([1000, 900, 1000, 900, 1000, 900, 1000, 900])

Specifying the number of regions.

>>> n = 2

Applying direct_age_standardization function to e and b

>>> a, b = [i[0] for i in direct_age_standardization(e, b, s, n)]
>>> round(a, 4)
0.0237
>>> round(b, 4)
0.0267
[ "A", "utility", "function", "to", "compute", "rate", "through", "direct", "age", "standardization" ]
python
train
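The point estimate in the function above is a weighted sum of age-specific rates, with weights taken from the standard population. A short numpy check of just that part (confidence limits omitted), reusing the docstring's example data:

# Point estimate only: rate_region = sum_a (e_a / b_a) * (s_a / sum_a s_a).
import numpy as np

e = np.array([30, 25, 25, 15, 33, 21, 30, 20], dtype=float)
b = np.array([1000, 1000, 1100, 900, 1000, 900, 1100, 900], dtype=float)
s = np.array([1000, 900, 1000, 900, 1000, 900, 1000, 900], dtype=float)
n = 2
h = len(e) // n   # number of age groups per region

for region in range(n):
    sl = slice(region * h, (region + 1) * h)
    weights = s[sl] / s[sl].sum()          # standard-population weights
    rate = ((e[sl] / b[sl]) * weights).sum()
    print(round(rate, 4))                  # 0.0237 for region 1, 0.0267 for region 2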
btel/svg_utils
src/svgutils/transform.py
https://github.com/btel/svg_utils/blob/ee00726ebed1bd97fd496b15b6a8e7f233ebb5e3/src/svgutils/transform.py#L315-L334
def fromstring(text):
    """Create a SVG figure from a string.

    Parameters
    ----------
    text : str
       string representing the SVG content. Must be valid SVG.

    Returns
    -------
    SVGFigure
       newly created :py:class:`SVGFigure` initialised with the string content.

    """
    fig = SVGFigure()
    svg = etree.fromstring(text.encode())
    fig.root = svg

    return fig
[ "def", "fromstring", "(", "text", ")", ":", "fig", "=", "SVGFigure", "(", ")", "svg", "=", "etree", ".", "fromstring", "(", "text", ".", "encode", "(", ")", ")", "fig", ".", "root", "=", "svg", "return", "fig" ]
Create a SVG figure from a string.

Parameters
----------
text : str
   string representing the SVG content. Must be valid SVG.

Returns
-------
SVGFigure
   newly created :py:class:`SVGFigure` initialised with the string content.
[ "Create", "a", "SVG", "figure", "from", "a", "string", "." ]
python
train
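A quick, hedged usage sketch, assuming the function is exposed as svgutils.transform.fromstring as the path above suggests; the SVG snippet is an arbitrary example.

# Parse an inline SVG snippet into an SVGFigure and inspect its root element.
from svgutils.transform import fromstring

svg_text = ('<svg xmlns="http://www.w3.org/2000/svg" width="20" height="20">'
            '<rect width="20" height="20" fill="black"/></svg>')
fig = fromstring(svg_text)
print(fig.root.tag)   # parsed lxml root element, e.g. '{http://www.w3.org/2000/svg}svg'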
naphatkrit/easyci
easyci/results.py
https://github.com/naphatkrit/easyci/blob/7aee8d7694fe4e2da42ce35b0f700bc840c8b95f/easyci/results.py#L64-L76
def remove_results(vcs, signature):
    """Removed saved results for this signature

    Args:
        vcs (easyci.vcs.base.Vcs)
        signature (str)

    Raises:
        ResultsNotFoundError
    """
    results_directory = _get_results_directory(vcs, signature)
    if not os.path.exists(results_directory):
        raise ResultsNotFoundError
    shutil.rmtree(results_directory)
[ "def", "remove_results", "(", "vcs", ",", "signature", ")", ":", "results_directory", "=", "_get_results_directory", "(", "vcs", ",", "signature", ")", "if", "not", "os", ".", "path", ".", "exists", "(", "results_directory", ")", ":", "raise", "ResultsNotFoundError", "shutil", ".", "rmtree", "(", "results_directory", ")" ]
Removed saved results for this signature

Args:
    vcs (easyci.vcs.base.Vcs)
    signature (str)

Raises:
    ResultsNotFoundError
[ "Removed", "saved", "results", "for", "this", "signature" ]
python
train
materialsproject/pymatgen
pymatgen/core/xcfunc.py
https://github.com/materialsproject/pymatgen/blob/4ca558cf72f8d5f8a1f21dfdfc0181a971c186da/pymatgen/core/xcfunc.py#L219-L227
def type(self):
    """The type of the functional."""
    if self.xc in self.defined_aliases:
        return self.defined_aliases[self.xc].type
    xc = (self.x, self.c)
    if xc in self.defined_aliases:
        return self.defined_aliases[xc].type
    # If self is not in defined_aliases, use LibxcFunc family
    if self.xc is not None:
        return self.xc.family
    return "+".join([self.x.family, self.c.family])
[ "def", "type", "(", "self", ")", ":", "if", "self", ".", "xc", "in", "self", ".", "defined_aliases", ":", "return", "self", ".", "defined_aliases", "[", "self", ".", "xc", "]", ".", "type", "xc", "=", "(", "self", ".", "x", ",", "self", ".", "c", ")", "if", "xc", "in", "self", ".", "defined_aliases", ":", "return", "self", ".", "defined_aliases", "[", "xc", "]", ".", "type", "# If self is not in defined_aliases, use LibxcFunc family", "if", "self", ".", "xc", "is", "not", "None", ":", "return", "self", ".", "xc", ".", "family", "return", "\"+\"", ".", "join", "(", "[", "self", ".", "x", ".", "family", ",", "self", ".", "c", ".", "family", "]", ")" ]
The type of the functional.
[ "The", "type", "of", "the", "functional", "." ]
python
train
intuition-io/intuition
intuition/finance.py
https://github.com/intuition-io/intuition/blob/cd517e6b3b315a743eb4d0d0dc294e264ab913ce/intuition/finance.py#L151-L184
def returns(ts, **kwargs):
    '''
    Compute returns on the given period
    @param ts : time series to process
    @param kwargs.type: gross or simple returns
    @param delta : period between two computed returns
    @param start : with end, will return the return between this elapsed time
    @param period : delta is the number of lines/periods provided
    @param end : so said
    @param cumulative: compute cumulative returns
    '''
    returns_type = kwargs.get('type', 'net')
    cumulative = kwargs.get('cumulative', False)
    if returns_type == 'net':
        relative = 0
    else:
        relative = 1  # gross

    start = kwargs.get('start', None)
    end = kwargs.get('end', dt.datetime.today())
    #delta = kwargs.get('delta', None)
    period = kwargs.get('period', 1)

    if isinstance(start, dt.datetime):
        log.debug('{} / {} -1'.format(ts[end], ts[start]))
        return ts[end] / ts[start] - 1 + relative

    #elif isinstance(delta, pd.DateOffset) or isinstance(delta, dt.timedelta):
        #FIXME timezone problem
        #FIXME reIndexDF is deprecated
        #ts = reIndexDF(ts, delta=delta)
        #period = 1

    rets_df = ts / ts.shift(period) - 1 + relative

    if cumulative:
        return rets_df.cumprod()
    return rets_df[1:]
[ "def", "returns", "(", "ts", ",", "*", "*", "kwargs", ")", ":", "returns_type", "=", "kwargs", ".", "get", "(", "'type'", ",", "'net'", ")", "cumulative", "=", "kwargs", ".", "get", "(", "'cumulative'", ",", "False", ")", "if", "returns_type", "==", "'net'", ":", "relative", "=", "0", "else", ":", "relative", "=", "1", "# gross", "start", "=", "kwargs", ".", "get", "(", "'start'", ",", "None", ")", "end", "=", "kwargs", ".", "get", "(", "'end'", ",", "dt", ".", "datetime", ".", "today", "(", ")", ")", "#delta = kwargs.get('delta', None)", "period", "=", "kwargs", ".", "get", "(", "'period'", ",", "1", ")", "if", "isinstance", "(", "start", ",", "dt", ".", "datetime", ")", ":", "log", ".", "debug", "(", "'{} / {} -1'", ".", "format", "(", "ts", "[", "end", "]", ",", "ts", "[", "start", "]", ")", ")", "return", "ts", "[", "end", "]", "/", "ts", "[", "start", "]", "-", "1", "+", "relative", "#elif isinstance(delta, pd.DateOffset) or isinstance(delta, dt.timedelta):", "#FIXME timezone problem", "#FIXME reIndexDF is deprecated", "#ts = reIndexDF(ts, delta=delta)", "#period = 1", "rets_df", "=", "ts", "/", "ts", ".", "shift", "(", "period", ")", "-", "1", "+", "relative", "if", "cumulative", ":", "return", "rets_df", ".", "cumprod", "(", ")", "return", "rets_df", "[", "1", ":", "]" ]
Compute returns on the given period
@param ts : time series to process
@param kwargs.type: gross or simple returns
@param delta : period between two computed returns
@param start : with end, will return the return between this elapsed time
@param period : delta is the number of lines/periods provided
@param end : so said
@param cumulative: compute cumulative returns
[ "Compute", "returns", "on", "the", "given", "period" ]
python
train
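The core of the function above is a one-line pandas expression: ts / ts.shift(period) - 1 for net returns, with +1 (the 'relative' offset) giving gross returns whose cumulative product compounds the series. A minimal check of those formulas on made-up prices:

# Minimal pandas check of the net / gross return formulas used above.
import pandas as pd

prices = pd.Series([100.0, 110.0, 99.0, 108.9])

net = prices / prices.shift(1) - 1        # net returns: 0.10, -0.10, 0.10
gross = prices / prices.shift(1)          # gross returns: 1.10, 0.90, 1.10
cumulative = gross.cumprod()              # compounding the gross returns

print(net.iloc[1:].round(2).tolist())     # [0.1, -0.1, 0.1]
print(round(cumulative.iloc[-1], 4))      # 1.089 == 108.9 / 100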