Dataset schema (one record per function): repo (string, 7-54 chars), path (string, 4-192 chars), url (string, 87-284 chars), code (string, 78-104k chars), code_tokens (sequence), docstring (string, 1-46.9k chars), docstring_tokens (sequence), language (1 value), partition (3 values).
SBRG/ssbio
ssbio/protein/structure/utils/foldx.py
https://github.com/SBRG/ssbio/blob/e9449e64ffc1a1f5ad07e5849aa12a650095f8a2/ssbio/protein/structure/utils/foldx.py#L107-L131
def run_repair_pdb(self, silent=False, force_rerun=False):
    """Run FoldX RepairPDB on this PDB file.

    Original command::

        foldx --command=RepairPDB --pdb=4bxi.pdb

    Args:
        silent (bool): If FoldX output should be silenced from printing to the shell.
        force_rerun (bool): If FoldX RepairPDB should be rerun even if a repaired file exists.

    """
    # Create RepairPDB command
    foldx_repair_pdb = 'foldx --command=RepairPDB --pdb={}'.format(self.pdb_file)

    # Repaired PDB output file name
    foldx_repair_outfile = '{}_Repair.pdb'.format(op.splitext(self.pdb_file)[0])

    # Run RepairPDB
    ssbio.utils.command_runner(shell_command=foldx_repair_pdb,
                               force_rerun_flag=force_rerun,
                               silent=silent,
                               outfile_checker=foldx_repair_outfile,
                               cwd=self.foldx_dir)

    # TODO: write stdout/stderr to log file somewhere!
    self.repaired_pdb_outfile = foldx_repair_outfile
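A hedged usage sketch; the wrapper class name and constructor below are assumptions, and only run_repair_pdb plus the pdb_file/foldx_dir/repaired_pdb_outfile attributes come from the source:

# Hypothetical wrapper setup; ssbio's actual class name and constructor may differ.
from ssbio.protein.structure.utils.foldx import FoldX  # assumed class name in this module

fx = FoldX(pdb_file='4bxi.pdb', foldx_dir='/tmp/foldx_run')  # assumed constructor arguments
fx.run_repair_pdb(silent=True, force_rerun=False)
print(fx.repaired_pdb_outfile)  # '4bxi_Repair.pdb'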
Run FoldX RepairPDB on this PDB file. Original command:: foldx --command=RepairPDB --pdb=4bxi.pdb Args: silent (bool): If FoldX output should be silenced from printing to the shell. force_rerun (bool): If FoldX RepairPDB should be rerun even if a repaired file exists.
python
train
adafruit/Adafruit_Python_GPIO
Adafruit_GPIO/FT232H.py
https://github.com/adafruit/Adafruit_Python_GPIO/blob/a92a23d6b5869663b2bc1ccf78bb11585076a9c4/Adafruit_GPIO/FT232H.py#L700-L716
def _i2c_read_bytes(self, length=1):
    """Read the specified number of bytes from the I2C bus.  Length is the
    number of bytes to read (must be 1 or more).
    """
    for i in range(length-1):
        # Read a byte and send ACK.
        self._command.append('\x20\x00\x00\x13\x00\x00')
        # Make sure pins are back in idle state with clock low and data high.
        self._ft232h.output_pins({0: GPIO.LOW, 1: GPIO.HIGH}, write=False)
        self._command.append(self._ft232h.mpsse_gpio())
    # Read last byte and send NAK.
    self._command.append('\x20\x00\x00\x13\x00\xFF')
    # Make sure pins are back in idle state with clock low and data high.
    self._ft232h.output_pins({0: GPIO.LOW, 1: GPIO.HIGH}, write=False)
    self._command.append(self._ft232h.mpsse_gpio())
    # Increase expected number of bytes.
    self._expected += length
Read the specified number of bytes from the I2C bus. Length is the number of bytes to read (must be 1 or more).
python
valid
SBRG/ssbio
ssbio/databases/swissmodel.py
https://github.com/SBRG/ssbio/blob/e9449e64ffc1a1f5ad07e5849aa12a650095f8a2/ssbio/databases/swissmodel.py#L168-L201
def get_oligomeric_state(swiss_model_path):
    """Parse the oligomeric prediction in a SWISS-MODEL repository file

    As of 2018-02-26, works on all E. coli models. Untested on other pre-made
    organism models.

    Args:
        swiss_model_path (str): Path to SWISS-MODEL PDB file

    Returns:
        dict: Information parsed about the oligomeric state

    """
    oligo_info = {}
    with open(swiss_model_path, 'r') as f:
        for line in f:
            if line.startswith('REMARK 3 MODEL INFORMATION'):
                break
        for i in range(10):
            line = f.readline()
            if 'ENGIN' in line:
                oligo_info['ENGIN'] = line.rstrip().split(' ')[-1]
            elif 'OSTAT' in line:
                oligo_info['OSTAT'] = line.rstrip().split(' ')[-1]
            elif 'OSRSN' in line:
                oligo_info['OSRSN'] = line.rstrip().split(' ')[-1]
            elif 'QSPRD' in line:
                oligo_info['QSPRD'] = line.rstrip().split(' ')[-1]
            elif 'GMQE' in line:
                oligo_info['GMQE'] = line.rstrip().split(' ')[-1]
            elif 'QMN4' in line:
                oligo_info['QMN4'] = line.rstrip().split(' ')[-1]
            elif 'MODT' in line:
                oligo_info['MODT'] = line.rstrip().split(' ')[-1]
    return oligo_info
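A minimal usage sketch, assuming a SWISS-MODEL PDB file has already been downloaded (the file path below is hypothetical):

from ssbio.databases.swissmodel import get_oligomeric_state  # module path taken from the record above

info = get_oligomeric_state('swissmodel_models/P0A817_1.pdb')  # hypothetical file path
print(info.get('OSTAT'), info.get('QSPRD'))  # e.g. the predicted oligomeric state and its QSPRD score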
Parse the oligomeric prediction in a SWISS-MODEL repository file As of 2018-02-26, works on all E. coli models. Untested on other pre-made organism models. Args: swiss_model_path (str): Path to SWISS-MODEL PDB file Returns: dict: Information parsed about the oligomeric state
python
train
ericmjl/nxviz
nxviz/plots.py
https://github.com/ericmjl/nxviz/blob/6ea5823a8030a686f165fbe37d7a04d0f037ecc9/nxviz/plots.py#L671-L714
def store_node_label_meta(self, x, y, tx, ty, rot):
    """
    This function stores coordinates-related metadata for a node.

    This function should not be called by the user.

    :param x: x location of node label or number
    :type x: np.float64
    :param y: y location of node label or number
    :type y: np.float64
    :param tx: text location x of node label (numbers)
    :type tx: np.float64
    :param ty: text location y of node label (numbers)
    :type ty: np.float64
    :param rot: rotation angle of the text (rotation)
    :type rot: float
    """
    # Store computed values
    self.node_label_coords["x"].append(x)
    self.node_label_coords["y"].append(y)
    self.node_label_coords["tx"].append(tx)
    self.node_label_coords["ty"].append(ty)

    # Computes the text alignment for x
    if x == 0:
        self.node_label_aligns["has"].append("center")
    elif x > 0:
        self.node_label_aligns["has"].append("left")
    else:
        self.node_label_aligns["has"].append("right")

    # Computes the text alignment for y
    if self.node_label_layout == "rotate" or y == 0:
        self.node_label_aligns["vas"].append("center")
    elif y > 0:
        self.node_label_aligns["vas"].append("bottom")
    else:
        self.node_label_aligns["vas"].append("top")

    self.node_label_rotation.append(rot)
This function stores coordinates-related metadata for a node. This function should not be called by the user. :param x: x location of node label or number :type x: np.float64 :param y: y location of node label or number :type y: np.float64 :param tx: text location x of node label (numbers) :type tx: np.float64 :param ty: text location y of node label (numbers) :type ty: np.float64 :param rot: rotation angle of the text (rotation) :type rot: float
python
train
SUNCAT-Center/CatHub
cathub/reaction_networks.py
https://github.com/SUNCAT-Center/CatHub/blob/324625d1d8e740673f139658b2de4c9e1059739e/cathub/reaction_networks.py#L749-L848
def reaction_scheme(self, df, temperature, pressure, pH, potential):
    """Returns a dataframe with Gibbs free reaction energies.

    Parameters
    ----------
    df : Pandas DataFrame
        generated by db_to_df
    temperature : numeric
        temperature in K
    pressure : numeric
        pressure in mbar
    pH : PH in bulk solution
    potential : Electric potential vs. SHE in eV

    Returns
    -------
    df : DataFrame suitable for plotting.
    """
    # set reaction scheme
    reactions = self.intermediates
    df_param = self.df_reaction_parameters.fillna(0)

    # set reaction labels for plotting
    reaction_labels = [w.replace('gas', '$_g$') for w in reactions]
    reaction_labels = [w.replace('star', '*') for w in reaction_labels]
    reaction_scheme = reactions[1:]
    reaction_number = np.arange(len(reactions))

    # get unique substrates
    labels = list(df.labels)

    # Gather and summarize data from data base.
    unique_system_label = []
    surface = []
    facet = []
    intermediate_label = []
    reaction_coordinate = []
    energies = []
    transition_states = []

    # compute proton-electron energetics
    G_PE = 0.0
    if potential is not None and pH is not None:
        ne = 1.0
        G_PE = -ne * potential + \
            proton_hydroxide_free_energy(temperature=temperature,
                                         pressure=pressure,
                                         pH=pH)[0]

    for lab in list(set(labels)):
        energies_system = [0.0]
        reaction_coordinate_system = [[x, x + 0.5] for x in reaction_number]
        reaction_coordinate.append(reaction_coordinate_system)
        for step, products in enumerate(reaction_scheme):
            df_tmp = df[df['labels'].apply(lambda x: lab in x)]
            df_tmp = df_tmp[df_tmp['products'].apply(lambda x: products in x)]

            # Set initial reactants as reference energy = 0 eV.
            if step == 0:
                unique_system_label.append(df_tmp.iloc[0]['labels'])
                surface.append(df_tmp.iloc[0]['system'])
                facet.append(df_tmp.iloc[0]['facet'])
                intermediate_label.append(reaction_labels)
                previous_reaction_energy = 0
                ts = [False]

            # Get free energy corrections (FEC).
            all_reactants = list(df_tmp['reactants'])[0]
            all_products = list(df_tmp['products'])[0]
            reactant_FEC = get_FEC(all_reactants, temperature, pressure)
            product_FEC = get_FEC(all_products, temperature, pressure)
            FEC = product_FEC - reactant_FEC

            # Get betas, and corrections.
            intermediate_name = reactions[step + 1]
            beta = df_param[df_param['intermediate'] == intermediate_name].betas.values[0]
            net_correction = df_param[df_param['intermediate'] == intermediate_name].net_corrections.values[0]
            ts.append(df_param[df_param['intermediate'] == intermediate_name].transition_state.values[0])

            # Calculate the energy with respect to the initial reactants.
            reaction_energy = previous_reaction_energy + df_tmp.iloc[0]['reaction_energy'] \
                + FEC - beta * G_PE + net_correction
            # print('FEC : '+str(FEC))
            previous_reaction_energy = reaction_energy
            energies_system.append(reaction_energy)
        energies.append(energies_system)
        transition_states.append(ts)

    # Dump data into data frame.
    df_new = pd.DataFrame()
    df_new['facet'] = facet
    df_new['intermediate_labels'] = intermediate_label
    df_new['reaction_coordinate'] = reaction_coordinate
    df_new['reaction_energy'] = energies
    df_new['system_label'] = unique_system_label
    df_new['system'] = surface
    df_new['transition_states'] = transition_states
    df_new = df_new.sort_values(by=['facet', 'system'])
    df_new = df_new.reset_index(drop=True)
    self.df_react = df_new
    return(True)
Returns a dataframe with Gibbs free reaction energies. Parameters ---------- df : Pandas DataFrame generated by db_to_df temperature : numeric temperature in K pressure : numeric pressure in mbar pH : PH in bulk solution potential : Electric potential vs. SHE in eV Returns ------- df : DataFrame suitable for plotting.
python
train
annoviko/pyclustering
pyclustering/cluster/fcm.py
https://github.com/annoviko/pyclustering/blob/98aa0dd89fd36f701668fb1eb29c8fb5662bf7d0/pyclustering/cluster/fcm.py#L240-L256
def __calculate_centers(self):
    """!
    @brief Calculate center using membership of each cluster.

    @return (list) Updated clusters as list of clusters. Each cluster contains indexes of objects from data.
    @return (numpy.array) Updated centers.

    """
    dimension = self.__data.shape[1]
    centers = numpy.zeros((len(self.__centers), dimension))

    for i in range(len(self.__centers)):
        # multiplication '@' requires python version 3.5
        centers[i] = numpy.divide(self.__membership[:, i] @ self.__data, numpy.sum(self.__membership[:, i]))

    return centers
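For illustration, the same membership-weighted center update as a self-contained NumPy sketch (the toy data and membership values below are made up):

import numpy

# Toy data: 4 points in 2 dimensions, and a 4 x 2 fuzzy membership matrix.
data = numpy.array([[0.0, 0.0], [0.1, 0.2], [2.0, 2.1], [2.2, 1.9]])
membership = numpy.array([[0.9, 0.1], [0.8, 0.2], [0.2, 0.8], [0.1, 0.9]])

centers = numpy.zeros((membership.shape[1], data.shape[1]))
for i in range(membership.shape[1]):
    # '@' gives the membership-weighted sum of the points; dividing by the total
    # membership of cluster i yields the weighted average, i.e. the new center.
    centers[i] = numpy.divide(membership[:, i] @ data, numpy.sum(membership[:, i]))
print(centers)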
! @brief Calculate center using membership of each cluster. @return (list) Updated clusters as list of clusters. Each cluster contains indexes of objects from data. @return (numpy.array) Updated centers.
python
valid
DataDog/integrations-core
postgres/datadog_checks/postgres/postgres.py
https://github.com/DataDog/integrations-core/blob/ebd41c873cf9f97a8c51bf9459bc6a7536af8acd/postgres/datadog_checks/postgres/postgres.py#L759-L834
def _collect_stats(
    self,
    key,
    db,
    instance_tags,
    relations,
    custom_metrics,
    collect_function_metrics,
    collect_count_metrics,
    collect_activity_metrics,
    collect_database_size_metrics,
    collect_default_db,
    interface_error,
    programming_error,
):
    """Query pg_stat_* for various metrics
    If relations is not an empty list, gather per-relation metrics
    on top of that.
    If custom_metrics is not an empty list, gather custom metrics defined in postgres.yaml
    """
    db_instance_metrics = self._get_instance_metrics(key, db, collect_database_size_metrics, collect_default_db)
    bgw_instance_metrics = self._get_bgw_metrics(key, db)
    archiver_instance_metrics = self._get_archiver_metrics(key, db)

    metric_scope = [self.CONNECTION_METRICS, self.LOCK_METRICS]

    if collect_function_metrics:
        metric_scope.append(self.FUNCTION_METRICS)
    if collect_count_metrics:
        metric_scope.append(self.COUNT_METRICS)

    # Do we need relation-specific metrics?
    relations_config = {}
    if relations:
        metric_scope += [self.REL_METRICS, self.IDX_METRICS, self.SIZE_METRICS, self.STATIO_METRICS]
        relations_config = self._build_relations_config(relations)

    replication_metrics = self._get_replication_metrics(key, db)
    if replication_metrics is not None:
        # FIXME: constants shouldn't be modified
        self.REPLICATION_METRICS['metrics'] = replication_metrics
        metric_scope.append(self.REPLICATION_METRICS)

    try:
        cursor = db.cursor()
        results_len = self._query_scope(
            cursor, db_instance_metrics, key, db, instance_tags, False, programming_error, relations_config
        )
        if results_len is not None:
            self.gauge(
                "postgresql.db.count", results_len, tags=[t for t in instance_tags if not t.startswith("db:")]
            )

        self._query_scope(
            cursor, bgw_instance_metrics, key, db, instance_tags, False, programming_error, relations_config
        )
        self._query_scope(
            cursor, archiver_instance_metrics, key, db, instance_tags, False, programming_error, relations_config
        )

        if collect_activity_metrics:
            activity_metrics = self._get_activity_metrics(key, db)
            self._query_scope(
                cursor, activity_metrics, key, db, instance_tags, False, programming_error, relations_config
            )

        for scope in list(metric_scope) + custom_metrics:
            self._query_scope(
                cursor, scope, key, db, instance_tags, scope in custom_metrics, programming_error, relations_config
            )

        cursor.close()
    except (interface_error, socket.error) as e:
        self.log.error("Connection error: %s" % str(e))
        raise ShouldRestartException
Query pg_stat_* for various metrics If relations is not an empty list, gather per-relation metrics on top of that. If custom_metrics is not an empty list, gather custom metrics defined in postgres.yaml
python
train
consbio/gis-metadata-parser
gis_metadata/utils.py
https://github.com/consbio/gis-metadata-parser/blob/59eefb2e51cd4d8cc3e94623a2167499ca9ef70f/gis_metadata/utils.py#L201-L208
def get_xpath_branch(xroot, xpath):
    """ :return: the relative part of an XPATH: that which extends past the root provided """

    if xroot and xpath and xpath.startswith(xroot):
        xpath = xpath[len(xroot):]
        xpath = xpath.lstrip(XPATH_DELIM)

    return xpath
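A short example of the helper's behaviour, assuming the module's XPATH_DELIM is the '/' separator (the paths are made up):

from gis_metadata.utils import get_xpath_branch  # module path taken from the record above

print(get_xpath_branch('idinfo/citation', 'idinfo/citation/citeinfo/title'))
# -> 'citeinfo/title'
print(get_xpath_branch('idinfo', 'metainfo/metd'))
# -> 'metainfo/metd' (unchanged, since xpath does not start with xroot)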
:return: the relative part of an XPATH: that which extends past the root provided
python
train
saltstack/salt
salt/cloud/clouds/virtualbox.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/cloud/clouds/virtualbox.py#L92-L113
def map_clonemode(vm_info):
    """
    Convert the virtualbox config file values for clone_mode into the integers the API requires
    """
    mode_map = {
        'state': 0,
        'child': 1,
        'all': 2
    }

    if not vm_info:
        return DEFAULT_CLONE_MODE
    if 'clonemode' not in vm_info:
        return DEFAULT_CLONE_MODE
    if vm_info['clonemode'] in mode_map:
        return mode_map[vm_info['clonemode']]
    else:
        raise SaltCloudSystemExit(
            "Illegal clonemode for virtualbox profile. Legal values are: {}".format(','.join(mode_map.keys()))
        )
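For reference, given the mode_map above: {'clonemode': 'state'} maps to 0, 'child' to 1 and 'all' to 2; an empty profile or one without a 'clonemode' key returns DEFAULT_CLONE_MODE, and any other value raises SaltCloudSystemExit listing the legal values.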
Convert the virtualbox config file values for clone_mode into the integers the API requires
python
train
what-studio/profiling
profiling/__main__.py
https://github.com/what-studio/profiling/blob/49666ba3ea295eb73782ae6c18a4ec7929d7d8b7/profiling/__main__.py#L235-L242
def import_(module_name, name):
    """Imports an object by a relative module path::

       Profiler = import_('profiling.profiler', 'Profiler')

    """
    module = importlib.import_module(module_name, __package__)
    return getattr(module, name)
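A minimal standalone equivalent for illustration (the module and attribute used below are only examples):

import importlib

def import_(module_name, name, package=None):
    # Resolve the module (relative names are resolved against 'package') and
    # return the requested attribute from it.
    module = importlib.import_module(module_name, package)
    return getattr(module, name)

OrderedDict = import_('collections', 'OrderedDict')
print(OrderedDict)  # <class 'collections.OrderedDict'>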
Imports an object by a relative module path:: Profiler = import_('profiling.profiler', 'Profiler')
python
train
ladybug-tools/ladybug
ladybug/skymodel.py
https://github.com/ladybug-tools/ladybug/blob/c08b7308077a48d5612f644943f92d5b5dade583/ladybug/skymodel.py#L164-L224
def zhang_huang_solar_split(altitudes, doys, cloud_cover, relative_humidity,
                            dry_bulb_present, dry_bulb_t3_hrs, wind_speed,
                            atm_pressure, use_disc=False):
    """Calculate direct and diffuse solar irradiance using the Zhang-Huang model.

    By default, this function uses the DIRINT method (aka. Perez split) to split
    global irradiance into direct and diffuse.  This is the same method used by
    EnergyPlus.

    Args:
        altitudes: A list of solar altitudes in degrees.
        doys: A list of days of the year that correspond to the altitudes.
        cloud_cover: A list of float values between 0 and 10 that represents cloud cover
            in tenths (0 = clear; 10 = completely overcast)
        relative_humidity: A list of float values between 0 and 100 that represents
            the relative humidity in percent.
        dry_bulb_present: A list of float values that represents the dry bulb
            temperature at the time of interest (in degrees C).
        dry_bulb_t3_hrs: A list of float values that represents the dry bulb
            temperature at three hours before the time of interest (in degrees C).
        wind_speed: A list of float values that represents the wind speed in m/s.
        atm_pressure: A list of float values that represent the atmospheric
            pressure in Pa.
        use_disc: Set to True to use the original DISC model as opposed to the
            newer and more accurate DIRINT model. Default is False.

    Returns:
        dir_norm_rad: A list of direct normal radiation values for each
            of the connected altitudes in W/m2.
        dif_horiz_rad: A list of diffuse horizontal radiation values for each
            of the connected altitudes in W/m2.
    """
    # Calculate global horizontal irradiance using the original zhang-huang model
    glob_ir = []
    for i in range(len(altitudes)):
        ghi = zhang_huang_solar(altitudes[i], cloud_cover[i], relative_humidity[i],
                                dry_bulb_present[i], dry_bulb_t3_hrs[i], wind_speed[i])
        glob_ir.append(ghi)

    if use_disc is False:
        # Calculate dew point temperature to improve the splitting of direct + diffuse
        temp_dew = [dew_point_from_db_rh(dry_bulb_present[i], relative_humidity[i])
                    for i in range(len(glob_ir))]

        # Split global rad into direct + diffuse using dirint method (aka. Perez split)
        dir_norm_rad = dirint(glob_ir, altitudes, doys, atm_pressure,
                              use_delta_kt_prime=True, temp_dew=temp_dew)

        # Calculate diffuse horizontal from dni and ghi.
        dif_horiz_rad = [glob_ir[i] -
                         (dir_norm_rad[i] * math.sin(math.radians(altitudes[i])))
                         for i in range(len(glob_ir))]
    else:
        dir_norm_rad = []
        dif_horiz_rad = []
        for i in range(len(glob_ir)):
            dni, kt, am = disc(glob_ir[i], altitudes[i], doys[i], atm_pressure[i])
            dhi = glob_ir[i] - (dni * math.sin(math.radians(altitudes[i])))
            dir_norm_rad.append(dni)
            dif_horiz_rad.append(dhi)

    return dir_norm_rad, dif_horiz_rad
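A hedged usage sketch with illustrative single-hour inputs (the numbers below are made up, not measured data):

from ladybug.skymodel import zhang_huang_solar_split  # module path taken from the record above

dni, dhi = zhang_huang_solar_split(
    altitudes=[45.0], doys=[172], cloud_cover=[3.0], relative_humidity=[50.0],
    dry_bulb_present=[28.0], dry_bulb_t3_hrs=[26.0], wind_speed=[2.0],
    atm_pressure=[101325.0])
print(dni[0], dhi[0])  # direct normal and diffuse horizontal irradiance in W/m2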
Calculate direct and diffuse solar irradiance using the Zhang-Huang model. By default, this function uses the DIRINT method (aka. Perez split) to split global irradiance into direct and diffuse. This is the same method used by EnergyPlus. Args: altitudes: A list of solar altitudes in degrees. doys: A list of days of the year that correspond to the altitudes. cloud_cover: A list of float values between 0 and 10 that represents cloud cover in tenths (0 = clear; 10 = completely overcast) relative_humidity: A list of float values between 0 and 100 that represents the relative humidity in percent. dry_bulb_present: A list of float values that represents the dry bulb temperature at the time of interest (in degrees C). dry_bulb_t3_hrs: A list of float values that represents the dry bulb temperature at three hours before the time of interest (in degrees C). wind_speed: A list of float values that represents the wind speed in m/s. atm_pressure: A list of float values that represent the atmospheric pressure in Pa. use_disc: Set to True to use the original DISC model as opposed to the newer and more accurate DIRINT model. Default is False. Returns: dir_norm_rad: A list of direct normal radiation values for each of the connected altitudes in W/m2. dif_horiz_rad: A list of diffuse horizontal radiation values for each of the connected altitudes in W/m2.
python
train
lepture/python-livereload
livereload/handlers.py
https://github.com/lepture/python-livereload/blob/f80cb3ae0f8f2cdf38203a712fe25ef7f1899c34/livereload/handlers.py#L116-L136
def on_message(self, message):
    """Handshake with livereload.js

    1. client send 'hello'
    2. server reply 'hello'
    3. client send 'info'
    """
    message = ObjectDict(escape.json_decode(message))
    if message.command == 'hello':
        handshake = {
            'command': 'hello',
            'protocols': [
                'http://livereload.com/protocols/official-7',
            ],
            'serverName': 'livereload-tornado',
        }
        self.send_message(handshake)

    if message.command == 'info' and 'url' in message:
        logger.info('Browser Connected: %s' % message.url)
        LiveReloadHandler.waiters.add(self)
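For orientation, the handshake traffic this handler deals with looks roughly as follows (the page URL is hypothetical, and the client's exact 'hello' payload may vary by livereload.js version):

# Client -> server:
client_hello = {"command": "hello",
                "protocols": ["http://livereload.com/protocols/official-7"]}
# Server -> client (the reply built in on_message above):
server_hello = {"command": "hello",
                "protocols": ["http://livereload.com/protocols/official-7"],
                "serverName": "livereload-tornado"}
# Client -> server (registers the page with the server):
client_info = {"command": "info", "url": "http://127.0.0.1:5500/index.html"}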
Handshake with livereload.js 1. client send 'hello' 2. server reply 'hello' 3. client send 'info'
python
train
chrisdev/django-pandas
django_pandas/managers.py
https://github.com/chrisdev/django-pandas/blob/8276d699f25dca7da58e6c3fcebbd46e1c3e35e9/django_pandas/managers.py#L84-L133
def to_pivot_table(self, fieldnames=(), verbose=True,
                   values=None, rows=None, cols=None,
                   aggfunc='mean', fill_value=None, margins=False,
                   dropna=True, coerce_float=True):
    """
    A convenience method for creating a spreadsheet style pivot table as a
    DataFrame

    Parameters
    ----------
    fieldnames: The model field names (columns) to utilise in creating the
        DataFrame. You can span relationships in the usual Django ORM way by
        using the foreign key field name separated by double underscores and
        refer to a field in a related model.

    values: The field to use to calculate the values to aggregate.

    rows: The list of field names to group on.
        Keys to group on the x-axis of the pivot table.

    cols: The list of column names or arrays to group on.
        Keys to group on the y-axis of the pivot table.

    aggfunc: How to aggregate the values. By default this would be
        ``numpy.mean``. A list of aggregate functions can be passed.
        In this case the resulting pivot table will have hierarchical columns
        whose top level are the function names (inferred from the function
        objects themselves).

    fill_value: A scalar value to replace the missing values with.

    margins: Boolean, default False.
        Add all row / columns (e.g. for subtotal / grand totals).

    dropna: Boolean, default True.
        Do not include columns whose entries are all NaN.

    verbose: If this is ``True`` then populate the DataFrame with the human
        readable versions for foreign key fields else use the actual values
        set in the model.

    coerce_float: Attempt to convert values to non-string, non-numeric
        objects (like decimal.Decimal) to floating point.
    """
    df = self.to_dataframe(fieldnames, verbose=verbose, coerce_float=coerce_float)

    return df.pivot_table(values=values, fill_value=fill_value, index=rows,
                          columns=cols, aggfunc=aggfunc, margins=margins,
                          dropna=dropna)
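A hedged usage sketch; the manager method comes from django-pandas, but the model and its fields are invented for illustration:

# Hypothetical model whose default manager is django_pandas.managers.DataFrameManager:
#   class Sale(models.Model): region = CharField(...); product = ForeignKey(Product, ...); amount = FloatField()
pivot = Sale.objects.to_pivot_table(
    fieldnames=('region', 'product__name', 'amount'),
    values='amount',
    rows=['region'],
    cols=['product__name'],
    aggfunc='sum',
    fill_value=0)
print(pivot)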
A convenience method for creating a spreadsheet style pivot table as a DataFrame Parameters ---------- fieldnames: The model field names (columns) to utilise in creating the DataFrame. You can span relationships in the usual Django ORM way by using the foreign key field name separated by double underscores and refer to a field in a related model. values: The field to use to calculate the values to aggregate. rows: The list of field names to group on Keys to group on the x-axis of the pivot table cols: The list of column names or arrays to group on Keys to group on the y-axis of the pivot table aggfunc: How to aggregate the values. By default this would be ``numpy.mean``. A list of aggregate functions can be passed. In this case the resulting pivot table will have hierarchical columns whose top level are the function names (inferred from the function objects themselves) fill_value: A scalar value to replace the missing values with margins: Boolean, default False Add all row / columns (e.g. for subtotal / grand totals) dropna: Boolean, default True. Do not include columns whose entries are all NaN verbose: If this is ``True`` then populate the DataFrame with the human readable versions for foreign key fields else use the actual values set in the model coerce_float: Attempt to convert values to non-string, non-numeric objects (like decimal.Decimal) to floating point.
python
train
senaite/senaite.core
bika/lims/content/analysisrequest.py
https://github.com/senaite/senaite.core/blob/7602ce2ea2f9e81eb34e20ce17b98a3e70713f85/bika/lims/content/analysisrequest.py#L2215-L2224
def getAncestors(self, all_ancestors=True):
    """Returns the ancestor(s) of this Analysis Request
    param all_ancestors: include all ancestors, not only the parent
    """
    parent = self.getParentAnalysisRequest()
    if not parent:
        return list()
    if not all_ancestors:
        return [parent]
    return [parent] + parent.getAncestors(all_ancestors=True)
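As an illustration (hypothetical chain): if AR-3 is a partition of AR-2, which is in turn a partition of AR-1, then getAncestors(all_ancestors=False) on AR-3 returns [AR-2], getAncestors() returns [AR-2, AR-1], and on AR-1 either call returns an empty list.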
Returns the ancestor(s) of this Analysis Request param all_ancestors: include all ancestors, not only the parent
python
train
SpamScope/mail-parser
mailparser/mailparser.py
https://github.com/SpamScope/mail-parser/blob/814b56d0b803feab9dea04f054b802ce138097e2/mailparser/mailparser.py#L529-L536
def headers(self):
    """
    Return only the headers as Python object
    """
    d = {}
    for k, v in self.message.items():
        d[k] = decode_header_part(v)
    return d
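A hedged usage sketch (the .eml path is hypothetical; in the released mail-parser API, headers is exposed as a property on the parsed message):

import mailparser

msg = mailparser.parse_from_file('/tmp/example.eml')  # hypothetical path
hdrs = msg.headers  # dict mapping header name to its decoded value
print(hdrs.get('Subject'), hdrs.get('From'))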
Return only the headers as Python object
python
train
nicferrier/md
src/mdlib/cli.py
https://github.com/nicferrier/md/blob/302ca8882dae060fb15bd5ae470d8e661fb67ec4/src/mdlib/cli.py#L317-L355
def do_storecheck(self, subcmd, opts):
    """${cmd_name}: checks the store for files that may not be in the maildirs.
    """
    from os.path import basename
    from os.path import dirname
    from os.path import exists as existspath
    from os.path import islink
    from os.path import join as joinpath

    maildir = self.maildir
    cur = joinpath(maildir, "cur")
    new = joinpath(maildir, "new")
    store = joinpath(maildir, "store")

    found_list = []

    # Loop through the folders checking that everything maps back to the store
    for scandir in [cur, new]:
        for f in os.listdir(scandir):
            filename = joinpath(scandir, f)
            try:
                assert islink(filename)
                store_location = os.readlink(filename)
                assert existspath(store_location) and dirname(store_location) == store
            except AssertionError:
                print("%s was not a link into the store" % (
                    "/".join([
                        filename.split("/")[-2],
                        filename.split("/")[-1]
                    ])
                ), file=self.stdout)
            else:
                found_list.append(basename(store_location))

    for storefile in os.listdir(store):
        if storefile not in found_list:
            print(
                "%s found in store but not folders" % joinpath("store", storefile),
                file=self.stdout
            )
${cmd_name}: checks the store for files that may not be in the maildirs.
python
train
rosenbrockc/fortpy
fortpy/elements.py
https://github.com/rosenbrockc/fortpy/blob/1ed0757c52d549e41d9d44bdea68cb89529293a5/fortpy/elements.py#L687-L698
def search_dependencies(self):
    """Returns a list of modules that this executable needs in order to run
    properly. This includes special kind declarations for precision or derived
    types, but not dependency executable calls.
    """
    #It is understood that this executable's module is obviously required. Just
    #add any additional modules from the parameters.
    result = [p.dependency() for p in self.ordered_parameters]
    result.extend([v.dependency() for k, v in list(self.members.items())])
    for ekey, anexec in list(self.executables.items()):
        result.extend(anexec.search_dependencies())

    return [m for m in result if m is not None and m != self.module.name]
Returns a list of modules that this executable needs in order to run properly. This includes special kind declarations for precision or derived types, but not dependency executable calls.
python
train
belbio/bel
bel/lang/completion.py
https://github.com/belbio/bel/blob/60333e8815625b942b4836903f3b618cf44b3771/bel/lang/completion.py#L369-L491
def arg_completions(
    completion_text: str,
    parent_function: str,
    args: list,
    arg_idx: int,
    bel_spec: BELSpec,
    bel_fmt: str,
    species_id: str,
    namespace: str,
    size: int,
):
    """Function argument completion

    Only allow legal options for completion given function name, arguments
    and index of argument to replace.

    Args:
        completion_text: text to use for completion - used for creating highlight
        parent_function: BEL function containing these args
        args: arguments of BEL function
        arg_idx: completing on this argument identified by this index
        bel_spec: BEL Specification
        bel_fmt: short, medium, long BEL function/relation formats
        species_id: filter on this species id, e.g. TAX:9606 if available
        namespace: filter on this namespace if available
        size: number of completions to return

    Return:
        list of replacements
    """
    function_long = bel_spec["functions"]["to_long"].get(parent_function)
    if not function_long:
        return []

    signatures = bel_spec["functions"]["signatures"][function_long]["signatures"]

    # Position based argument ###################################
    function_list = []
    entity_types = []
    fn_replace_list, ns_arg_replace_list = [], []
    position_flag = False  # Signature matches position-based argument

    # Check for position based argument
    for signature in signatures:
        sig_arg = signature["arguments"][arg_idx]
        sig_type = sig_arg["type"]

        if sig_arg.get("position", False) and arg_idx == sig_arg["position"] - 1:
            position_flag = True
            if sig_type in ["Function", "Modifier"]:
                function_list.extend(sig_arg["values"])
            elif sig_type in ["NSArg", "StrArgNSArg"]:
                entity_types.extend(sig_arg["values"])

    if not position_flag:
        # Collect optional and multiple signature arguments for completion
        opt_fn_sig_args = []
        opt_nsarg_sig_args = []
        mult_fn_sig_args = []
        mult_nsarg_sig_args = []
        for signature in signatures:
            signature_opt_fn_sig_args = []
            signature_opt_nsarg_sig_args = []
            signature_mult_fn_sig_args = []
            signature_mult_nsarg_sig_args = []
            max_position = -1
            for sig_arg in signature["arguments"]:
                if "position" in sig_arg:
                    max_position = sig_arg["position"]
                    continue  # Skip position based signature arguments

                if (
                    sig_arg.get("optional", False) is True
                    and sig_arg.get("multiple", False) is False
                ):
                    if sig_arg["type"] in ["Function", "Modifier"]:
                        signature_opt_fn_sig_args.extend(sig_arg["values"])
                    elif sig_arg["type"] in ["NSArg", "StrArgNSArg"]:
                        signature_opt_nsarg_sig_args.extend(sig_arg["values"])
                elif sig_arg.get("multiple", False) is True:
                    if sig_arg["type"] in ["Function", "Modifier"]:
                        signature_mult_fn_sig_args.extend(sig_arg["values"])
                    elif sig_arg["type"] in ["NSArg", "StrArgNSArg"]:
                        signature_mult_nsarg_sig_args.extend(sig_arg["values"])

            # Remove signature non-multiple, optional arguments that are already in args list
            for idx, arg in enumerate(args):
                if idx <= max_position - 1:  # Skip positional arguments
                    continue
                if idx == arg_idx:  # Skip argument to be completed
                    continue
                log.debug(f"Remove Optional Args {arg} {signature_opt_fn_sig_args}")

            opt_fn_sig_args.extend(signature_opt_fn_sig_args)
            opt_nsarg_sig_args.extend(signature_opt_nsarg_sig_args)
            mult_fn_sig_args.extend(signature_mult_fn_sig_args)
            mult_nsarg_sig_args.extend(signature_mult_nsarg_sig_args)

        function_list.extend(list(set(opt_fn_sig_args + mult_fn_sig_args)))
        entity_types.extend(list(set(opt_nsarg_sig_args + mult_nsarg_sig_args)))

    if function_list:
        log.debug(f"ArgComp - position-based Function list: {function_list}")
        fn_replace_list = function_completions(
            completion_text, bel_spec, function_list, bel_fmt, size
        )

    if entity_types:
        log.debug(f"ArgComp - position-based Entity types: {entity_types}")
        ns_arg_replace_list = nsarg_completions(
            completion_text,
            entity_types,
            bel_spec,
            namespace,
            species_id,
            bel_fmt,
            size,
        )

    replace_list = fn_replace_list + ns_arg_replace_list
    return replace_list
Function argument completion Only allow legal options for completion given function name, arguments and index of argument to replace. Args: completion_text: text to use for completion - used for creating highlight parent_function: BEL function containing these args args: arguments of BEL function arg_idx: completing on this argument identified by this index bel_spec: BEL Specification bel_fmt: short, medium, long BEL function/relation formats species_id: filter on this species id, e.g. TAX:9606 if available namespace: filter on this namespace if available size: number of completions to return Return: list of replacements
[ "Function", "argument", "completion" ]
python
train
daler/metaseq
metaseq/results_table.py
https://github.com/daler/metaseq/blob/fa875d1f72317aa7ef95cb128b739956b16eef9f/metaseq/results_table.py#L108-L129
def features(self, ignore_unknown=False): """ Generator of features. If a gffutils.FeatureDB is attached, returns a pybedtools.Interval for every feature in the dataframe's index. Parameters ---------- ignore_unknown : bool If True, silently ignores features that are not found in the db. """ if not self.db: raise ValueError("Please attach a gffutils.FeatureDB") for i in self.data.index: try: yield gffutils.helpers.asinterval(self.db[i]) except gffutils.FeatureNotFoundError: if ignore_unknown: continue else: raise gffutils.FeatureNotFoundError('%s not found' % i)
[ "def", "features", "(", "self", ",", "ignore_unknown", "=", "False", ")", ":", "if", "not", "self", ".", "db", ":", "raise", "ValueError", "(", "\"Please attach a gffutils.FeatureDB\"", ")", "for", "i", "in", "self", ".", "data", ".", "index", ":", "try", ":", "yield", "gffutils", ".", "helpers", ".", "asinterval", "(", "self", ".", "db", "[", "i", "]", ")", "except", "gffutils", ".", "FeatureNotFoundError", ":", "if", "ignore_unknown", ":", "continue", "else", ":", "raise", "gffutils", ".", "FeatureNotFoundError", "(", "'%s not found'", "%", "i", ")" ]
Generator of features. If a gffutils.FeatureDB is attached, returns a pybedtools.Interval for every feature in the dataframe's index. Parameters ---------- ignore_unknown : bool If True, silently ignores features that are not found in the db.
[ "Generator", "of", "features", "." ]
python
train
mnick/scikit-tensor
sktensor/core.py
https://github.com/mnick/scikit-tensor/blob/fe517e9661a08164b8d30d2dddf7c96aeeabcf36/sktensor/core.py#L333-L378
def khatrirao(A, reverse=False): """ Compute the columnwise Khatri-Rao product. Parameters ---------- A : tuple of ndarrays Matrices for which the columnwise Khatri-Rao product should be computed reverse : boolean Compute Khatri-Rao product in reverse order Examples -------- >>> A = np.random.randn(5, 2) >>> B = np.random.randn(4, 2) >>> C = khatrirao((A, B)) >>> C.shape (20, 2) >>> (C[:, 0] == np.kron(A[:, 0], B[:, 0])).all() true >>> (C[:, 1] == np.kron(A[:, 1], B[:, 1])).all() true """ if not isinstance(A, tuple): raise ValueError('A must be a tuple of array likes') N = A[0].shape[1] M = 1 for i in range(len(A)): if A[i].ndim != 2: raise ValueError('A must be a tuple of matrices (A[%d].ndim = %d)' % (i, A[i].ndim)) elif N != A[i].shape[1]: raise ValueError('All matrices must have same number of columns') M *= A[i].shape[0] matorder = arange(len(A)) if reverse: matorder = matorder[::-1] # preallocate P = np.zeros((M, N), dtype=A[0].dtype) for n in range(N): ab = A[matorder[0]][:, n] for j in range(1, len(matorder)): ab = np.kron(ab, A[matorder[j]][:, n]) P[:, n] = ab return P
[ "def", "khatrirao", "(", "A", ",", "reverse", "=", "False", ")", ":", "if", "not", "isinstance", "(", "A", ",", "tuple", ")", ":", "raise", "ValueError", "(", "'A must be a tuple of array likes'", ")", "N", "=", "A", "[", "0", "]", ".", "shape", "[", "1", "]", "M", "=", "1", "for", "i", "in", "range", "(", "len", "(", "A", ")", ")", ":", "if", "A", "[", "i", "]", ".", "ndim", "!=", "2", ":", "raise", "ValueError", "(", "'A must be a tuple of matrices (A[%d].ndim = %d)'", "%", "(", "i", ",", "A", "[", "i", "]", ".", "ndim", ")", ")", "elif", "N", "!=", "A", "[", "i", "]", ".", "shape", "[", "1", "]", ":", "raise", "ValueError", "(", "'All matrices must have same number of columns'", ")", "M", "*=", "A", "[", "i", "]", ".", "shape", "[", "0", "]", "matorder", "=", "arange", "(", "len", "(", "A", ")", ")", "if", "reverse", ":", "matorder", "=", "matorder", "[", ":", ":", "-", "1", "]", "# preallocate", "P", "=", "np", ".", "zeros", "(", "(", "M", ",", "N", ")", ",", "dtype", "=", "A", "[", "0", "]", ".", "dtype", ")", "for", "n", "in", "range", "(", "N", ")", ":", "ab", "=", "A", "[", "matorder", "[", "0", "]", "]", "[", ":", ",", "n", "]", "for", "j", "in", "range", "(", "1", ",", "len", "(", "matorder", ")", ")", ":", "ab", "=", "np", ".", "kron", "(", "ab", ",", "A", "[", "matorder", "[", "j", "]", "]", "[", ":", ",", "n", "]", ")", "P", "[", ":", ",", "n", "]", "=", "ab", "return", "P" ]
Compute the columnwise Khatri-Rao product. Parameters ---------- A : tuple of ndarrays Matrices for which the columnwise Khatri-Rao product should be computed reverse : boolean Compute Khatri-Rao product in reverse order Examples -------- >>> A = np.random.randn(5, 2) >>> B = np.random.randn(4, 2) >>> C = khatrirao((A, B)) >>> C.shape (20, 2) >>> (C[:, 0] == np.kron(A[:, 0], B[:, 0])).all() true >>> (C[:, 1] == np.kron(A[:, 1], B[:, 1])).all() true
[ "Compute", "the", "columnwise", "Khatri", "-", "Rao", "product", "." ]
python
train
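A quick usage sketch for the khatrirao record above — not part of the dataset, and it assumes scikit-tensor is importable as `sktensor`; the check against `np.kron` mirrors the doctest in the docstring.

    import numpy as np
    from sktensor.core import khatrirao   # module path matches the record above

    A = np.arange(6, dtype=float).reshape(3, 2)
    B = np.arange(8, dtype=float).reshape(4, 2)
    C = khatrirao((A, B))                 # column-wise Khatri-Rao, shape (12, 2)

    # Each column of C is the Kronecker product of the matching columns of A and B.
    assert np.allclose(C[:, 0], np.kron(A[:, 0], B[:, 0]))
    assert np.allclose(C[:, 1], np.kron(A[:, 1], B[:, 1]))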
cltrudeau/django-awl
awl/utils.py
https://github.com/cltrudeau/django-awl/blob/70d469ef9a161c1170b53aa017cf02d7c15eb90c/awl/utils.py#L46-L56
def refetch_for_update(obj): """Queries the database for the same object that is passed in, refetching its contents and runs ``select_for_update()`` to lock the corresponding row until the next commit. :param obj: Object to refetch :returns: Refreshed version of the object """ return obj.__class__.objects.select_for_update().get(id=obj.id)
[ "def", "refetch_for_update", "(", "obj", ")", ":", "return", "obj", ".", "__class__", ".", "objects", ".", "select_for_update", "(", ")", ".", "get", "(", "id", "=", "obj", ".", "id", ")" ]
Queries the database for the same object that is passed in, refetching its contents and runs ``select_for_update()`` to lock the corresponding row until the next commit. :param obj: Object to refetch :returns: Refreshed version of the object
[ "Queries", "the", "database", "for", "the", "same", "object", "that", "is", "passed", "in", "refetching", "its", "contents", "and", "runs", "select_for_update", "()", "to", "lock", "the", "corresponding", "row", "until", "the", "next", "commit", "." ]
python
valid
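A hedged sketch of how `refetch_for_update` is typically used inside a transaction; `Account` and its `balance` field are hypothetical models, not part of django-awl.

    from django.db import transaction
    from awl.utils import refetch_for_update   # module path matches the record above

    def withdraw(account, amount):
        # Lock the row for the duration of the transaction before mutating it.
        with transaction.atomic():
            account = refetch_for_update(account)
            account.balance -= amount
            account.save()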
flask-restful/flask-restful
flask_restful/reqparse.py
https://github.com/flask-restful/flask-restful/blob/25544d697c1f82bafbd1320960df459f58a58e03/flask_restful/reqparse.py#L348-L356
def replace_argument(self, name, *args, **kwargs): """ Replace the argument matching the given name with a new version. """ new_arg = self.argument_class(name, *args, **kwargs) for index, arg in enumerate(self.args[:]): if new_arg.name == arg.name: del self.args[index] self.args.append(new_arg) break return self
[ "def", "replace_argument", "(", "self", ",", "name", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "new_arg", "=", "self", ".", "argument_class", "(", "name", ",", "*", "args", ",", "*", "*", "kwargs", ")", "for", "index", ",", "arg", "in", "enumerate", "(", "self", ".", "args", "[", ":", "]", ")", ":", "if", "new_arg", ".", "name", "==", "arg", ".", "name", ":", "del", "self", ".", "args", "[", "index", "]", "self", ".", "args", ".", "append", "(", "new_arg", ")", "break", "return", "self" ]
Replace the argument matching the given name with a new version.
[ "Replace", "the", "argument", "matching", "the", "given", "name", "with", "a", "new", "version", "." ]
python
train
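A hedged usage sketch for `replace_argument`: start from a shared RequestParser, copy it, then swap in a stricter definition of one argument; the argument name is illustrative only.

    from flask_restful import reqparse

    parser = reqparse.RequestParser()
    parser.add_argument('rate', type=int, help='Rate to charge for this resource')

    # Per-endpoint variant: same argument name, stricter definition.
    strict_parser = parser.copy()
    strict_parser.replace_argument('rate', type=int, required=True)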
cameronbwhite/Flask-CAS
flask_cas/cas_urls.py
https://github.com/cameronbwhite/Flask-CAS/blob/f85173938654cb9b9316a5c869000b74b008422e/flask_cas/cas_urls.py#L50-L74
def create_cas_login_url(cas_url, cas_route, service, renew=None, gateway=None): """ Create a CAS login URL . Keyword arguments: cas_url -- The url to the CAS (ex. http://sso.pdx.edu) cas_route -- The route where the CAS lives on server (ex. /cas) service -- (ex. http://localhost:5000/login) renew -- "true" or "false" gateway -- "true" or "false" Example usage: >>> create_cas_login_url( ... 'http://sso.pdx.edu', ... '/cas', ... 'http://localhost:5000', ... ) 'http://sso.pdx.edu/cas?service=http%3A%2F%2Flocalhost%3A5000' """ return create_url( cas_url, cas_route, ('service', service), ('renew', renew), ('gateway', gateway), )
[ "def", "create_cas_login_url", "(", "cas_url", ",", "cas_route", ",", "service", ",", "renew", "=", "None", ",", "gateway", "=", "None", ")", ":", "return", "create_url", "(", "cas_url", ",", "cas_route", ",", "(", "'service'", ",", "service", ")", ",", "(", "'renew'", ",", "renew", ")", ",", "(", "'gateway'", ",", "gateway", ")", ",", ")" ]
Create a CAS login URL . Keyword arguments: cas_url -- The url to the CAS (ex. http://sso.pdx.edu) cas_route -- The route where the CAS lives on server (ex. /cas) service -- (ex. http://localhost:5000/login) renew -- "true" or "false" gateway -- "true" or "false" Example usage: >>> create_cas_login_url( ... 'http://sso.pdx.edu', ... '/cas', ... 'http://localhost:5000', ... ) 'http://sso.pdx.edu/cas?service=http%3A%2F%2Flocalhost%3A5000'
[ "Create", "a", "CAS", "login", "URL", "." ]
python
train
Contraz/demosys-py
demosys/effects/registry.py
https://github.com/Contraz/demosys-py/blob/6466128a3029c4d09631420ccce73024025bd5b6/demosys/effects/registry.py#L76-L94
def add_package(self, name): """ Registers a single package :param name: (str) The effect package to add """ name, cls_name = parse_package_string(name) if name in self.package_map: return package = EffectPackage(name) package.load() self.packages.append(package) self.package_map[package.name] = package # Load effect package dependencies self.polulate(package.effect_packages)
[ "def", "add_package", "(", "self", ",", "name", ")", ":", "name", ",", "cls_name", "=", "parse_package_string", "(", "name", ")", "if", "name", "in", "self", ".", "package_map", ":", "return", "package", "=", "EffectPackage", "(", "name", ")", "package", ".", "load", "(", ")", "self", ".", "packages", ".", "append", "(", "package", ")", "self", ".", "package_map", "[", "package", ".", "name", "]", "=", "package", "# Load effect package dependencies", "self", ".", "polulate", "(", "package", ".", "effect_packages", ")" ]
Registers a single package :param name: (str) The effect package to add
[ "Registers", "a", "single", "package" ]
python
valid
PGower/PyCanvas
pycanvas/apis/base.py
https://github.com/PGower/PyCanvas/blob/68520005382b440a1e462f9df369f54d364e21e8/pycanvas/apis/base.py#L102-L152
def generic_request(self, method, uri, all_pages=False, data_key=None, no_data=False, do_not_process=False, force_urlencode_data=False, data=None, params=None, files=None, single_item=False): """Generic Canvas Request Method.""" if not uri.startswith('http'): uri = self.uri_for(uri) if force_urlencode_data is True: uri += '?' + urllib.urlencode(data) assert method in ['GET', 'POST', 'PUT', 'DELETE', 'HEAD', 'OPTIONS'] if method == 'GET': response = self.session.get(uri, params=params) elif method == 'POST': response = self.session.post(uri, data=data, files=files) elif method == 'PUT': response = self.session.put(uri, data=data) elif method == 'DELETE': response = self.session.delete(uri, params=params) elif method == 'HEAD': response = self.session.head(uri, params=params) elif method == 'OPTIONS': response = self.session.options(uri, params=params) response.raise_for_status() if do_not_process is True: return response if no_data: return response.status_code if all_pages: return self.depaginate(response, data_key) if single_item: r = response.json() if data_key: return r[data_key] else: return r return response.json()
[ "def", "generic_request", "(", "self", ",", "method", ",", "uri", ",", "all_pages", "=", "False", ",", "data_key", "=", "None", ",", "no_data", "=", "False", ",", "do_not_process", "=", "False", ",", "force_urlencode_data", "=", "False", ",", "data", "=", "None", ",", "params", "=", "None", ",", "files", "=", "None", ",", "single_item", "=", "False", ")", ":", "if", "not", "uri", ".", "startswith", "(", "'http'", ")", ":", "uri", "=", "self", ".", "uri_for", "(", "uri", ")", "if", "force_urlencode_data", "is", "True", ":", "uri", "+=", "'?'", "+", "urllib", ".", "urlencode", "(", "data", ")", "assert", "method", "in", "[", "'GET'", ",", "'POST'", ",", "'PUT'", ",", "'DELETE'", ",", "'HEAD'", ",", "'OPTIONS'", "]", "if", "method", "==", "'GET'", ":", "response", "=", "self", ".", "session", ".", "get", "(", "uri", ",", "params", "=", "params", ")", "elif", "method", "==", "'POST'", ":", "response", "=", "self", ".", "session", ".", "post", "(", "uri", ",", "data", "=", "data", ",", "files", "=", "files", ")", "elif", "method", "==", "'PUT'", ":", "response", "=", "self", ".", "session", ".", "put", "(", "uri", ",", "data", "=", "data", ")", "elif", "method", "==", "'DELETE'", ":", "response", "=", "self", ".", "session", ".", "delete", "(", "uri", ",", "params", "=", "params", ")", "elif", "method", "==", "'HEAD'", ":", "response", "=", "self", ".", "session", ".", "head", "(", "uri", ",", "params", "=", "params", ")", "elif", "method", "==", "'OPTIONS'", ":", "response", "=", "self", ".", "session", ".", "options", "(", "uri", ",", "params", "=", "params", ")", "response", ".", "raise_for_status", "(", ")", "if", "do_not_process", "is", "True", ":", "return", "response", "if", "no_data", ":", "return", "response", ".", "status_code", "if", "all_pages", ":", "return", "self", ".", "depaginate", "(", "response", ",", "data_key", ")", "if", "single_item", ":", "r", "=", "response", ".", "json", "(", ")", "if", "data_key", ":", "return", "r", "[", "data_key", "]", "else", ":", "return", "r", "return", "response", ".", "json", "(", ")" ]
Generic Canvas Request Method.
[ "Generic", "Canvas", "Request", "Method", "." ]
python
train
tensorflow/probability
tensorflow_probability/python/distributions/half_normal.py
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/distributions/half_normal.py#L180-L196
def _kl_half_normal_half_normal(a, b, name=None): """Calculate the batched KL divergence KL(a || b) with a and b `HalfNormal`. Args: a: Instance of a `HalfNormal` distribution object. b: Instance of a `HalfNormal` distribution object. name: (optional) Name to use for created operations. default is "kl_half_normal_half_normal". Returns: Batchwise KL(a || b) """ with tf.name_scope(name or "kl_half_normal_half_normal"): # Consistent with # http://www.mast.queensu.ca/~communications/Papers/gil-msc11.pdf, page 119 return (tf.math.log(b.scale) - tf.math.log(a.scale) + (a.scale**2 - b.scale**2) / (2 * b.scale**2))
[ "def", "_kl_half_normal_half_normal", "(", "a", ",", "b", ",", "name", "=", "None", ")", ":", "with", "tf", ".", "name_scope", "(", "name", "or", "\"kl_half_normal_half_normal\"", ")", ":", "# Consistent with", "# http://www.mast.queensu.ca/~communications/Papers/gil-msc11.pdf, page 119", "return", "(", "tf", ".", "math", ".", "log", "(", "b", ".", "scale", ")", "-", "tf", ".", "math", ".", "log", "(", "a", ".", "scale", ")", "+", "(", "a", ".", "scale", "**", "2", "-", "b", ".", "scale", "**", "2", ")", "/", "(", "2", "*", "b", ".", "scale", "**", "2", ")", ")" ]
Calculate the batched KL divergence KL(a || b) with a and b `HalfNormal`. Args: a: Instance of a `HalfNormal` distribution object. b: Instance of a `HalfNormal` distribution object. name: (optional) Name to use for created operations. default is "kl_half_normal_half_normal". Returns: Batchwise KL(a || b)
[ "Calculate", "the", "batched", "KL", "divergence", "KL", "(", "a", "||", "b", ")", "with", "a", "and", "b", "HalfNormal", "." ]
python
test
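A NumPy-only Monte Carlo sanity check of the closed-form KL used above; this is a verification sketch, not TensorFlow Probability code.

    import numpy as np

    rng = np.random.default_rng(0)
    scale_a, scale_b = 1.0, 2.0

    def half_normal_logpdf(x, scale):
        # log of sqrt(2 / (pi * scale**2)) * exp(-x**2 / (2 * scale**2))
        return 0.5 * np.log(2.0 / np.pi) - np.log(scale) - x**2 / (2.0 * scale**2)

    x = np.abs(rng.normal(0.0, scale_a, size=1_000_000))      # HalfNormal(scale_a) samples
    mc_kl = np.mean(half_normal_logpdf(x, scale_a) - half_normal_logpdf(x, scale_b))

    closed_form = np.log(scale_b) - np.log(scale_a) + (scale_a**2 - scale_b**2) / (2 * scale_b**2)
    assert abs(mc_kl - closed_form) < 1e-2                     # ~0.318 for these scales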
tanghaibao/jcvi
jcvi/formats/agp.py
https://github.com/tanghaibao/jcvi/blob/d2e31a77b6ade7f41f3b321febc2b4744d1cdeca/jcvi/formats/agp.py#L887-L930
def infer(args): """ %prog infer scaffolds.fasta genome.fasta Infer where the components are in the genome. This function is rarely used, but can be useful when distributor does not ship an AGP file. """ from jcvi.apps.grid import WriteJobs from jcvi.formats.bed import sort p = OptionParser(infer.__doc__) p.set_cpus() opts, args = p.parse_args(args) if len(args) != 2: sys.exit(not p.print_help()) scaffoldsf, genomef = args inferbed = "infer-components.bed" if need_update((scaffoldsf, genomef), inferbed): scaffolds = Fasta(scaffoldsf, lazy=True) genome = Fasta(genomef) genome = genome.tostring() args = [(scaffold_name, scaffold, genome) \ for scaffold_name, scaffold in scaffolds.iteritems_ordered()] pool = WriteJobs(map_one_scaffold, args, inferbed, cpus=opts.cpus) pool.run() sort([inferbed, "-i"]) bed = Bed(inferbed) inferagpbed = "infer.bed" fw = open(inferagpbed, "w") seen = [] for b in bed: r = (b.seqid, b.start, b.end) if check_seen(r, seen): continue print("\t".join(str(x) for x in \ (b.accn, 0, b.span, b.seqid, b.score, b.strand)), file=fw) seen.append(r) fw.close() frombed([inferagpbed])
[ "def", "infer", "(", "args", ")", ":", "from", "jcvi", ".", "apps", ".", "grid", "import", "WriteJobs", "from", "jcvi", ".", "formats", ".", "bed", "import", "sort", "p", "=", "OptionParser", "(", "infer", ".", "__doc__", ")", "p", ".", "set_cpus", "(", ")", "opts", ",", "args", "=", "p", ".", "parse_args", "(", "args", ")", "if", "len", "(", "args", ")", "!=", "2", ":", "sys", ".", "exit", "(", "not", "p", ".", "print_help", "(", ")", ")", "scaffoldsf", ",", "genomef", "=", "args", "inferbed", "=", "\"infer-components.bed\"", "if", "need_update", "(", "(", "scaffoldsf", ",", "genomef", ")", ",", "inferbed", ")", ":", "scaffolds", "=", "Fasta", "(", "scaffoldsf", ",", "lazy", "=", "True", ")", "genome", "=", "Fasta", "(", "genomef", ")", "genome", "=", "genome", ".", "tostring", "(", ")", "args", "=", "[", "(", "scaffold_name", ",", "scaffold", ",", "genome", ")", "for", "scaffold_name", ",", "scaffold", "in", "scaffolds", ".", "iteritems_ordered", "(", ")", "]", "pool", "=", "WriteJobs", "(", "map_one_scaffold", ",", "args", ",", "inferbed", ",", "cpus", "=", "opts", ".", "cpus", ")", "pool", ".", "run", "(", ")", "sort", "(", "[", "inferbed", ",", "\"-i\"", "]", ")", "bed", "=", "Bed", "(", "inferbed", ")", "inferagpbed", "=", "\"infer.bed\"", "fw", "=", "open", "(", "inferagpbed", ",", "\"w\"", ")", "seen", "=", "[", "]", "for", "b", "in", "bed", ":", "r", "=", "(", "b", ".", "seqid", ",", "b", ".", "start", ",", "b", ".", "end", ")", "if", "check_seen", "(", "r", ",", "seen", ")", ":", "continue", "print", "(", "\"\\t\"", ".", "join", "(", "str", "(", "x", ")", "for", "x", "in", "(", "b", ".", "accn", ",", "0", ",", "b", ".", "span", ",", "b", ".", "seqid", ",", "b", ".", "score", ",", "b", ".", "strand", ")", ")", ",", "file", "=", "fw", ")", "seen", ".", "append", "(", "r", ")", "fw", ".", "close", "(", ")", "frombed", "(", "[", "inferagpbed", "]", ")" ]
%prog infer scaffolds.fasta genome.fasta Infer where the components are in the genome. This function is rarely used, but can be useful when distributor does not ship an AGP file.
[ "%prog", "infer", "scaffolds", ".", "fasta", "genome", ".", "fasta" ]
python
train
SurveyMonkey/pyramid_autodoc
pyramid_autodoc/__init__.py
https://github.com/SurveyMonkey/pyramid_autodoc/blob/8d669c7165de73cba5268bba97617c552d6b2185/pyramid_autodoc/__init__.py#L333-L338
def setup(app): """Hook the directives when Sphinx ask for it.""" if 'http' not in app.domains: httpdomain.setup(app) app.add_directive('autopyramid', RouteDirective)
[ "def", "setup", "(", "app", ")", ":", "if", "'http'", "not", "in", "app", ".", "domains", ":", "httpdomain", ".", "setup", "(", "app", ")", "app", ".", "add_directive", "(", "'autopyramid'", ",", "RouteDirective", ")" ]
Hook the directives when Sphinx ask for it.
[ "Hook", "the", "directives", "when", "Sphinx", "ask", "for", "it", "." ]
python
valid
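A hedged sketch of how a Sphinx project might wire up this extension; only the `autopyramid` directive name comes from the record above, while the extension module name and the .ini path are assumptions.

    # conf.py (extension module name assumed to be 'pyramid_autodoc')
    extensions = ['pyramid_autodoc']

    # some_page.rst -- the argument below is an assumed example path to a Pyramid ini file:
    #
    #   .. autopyramid:: ../development.ini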
MultipedRobotics/pyxl320
bin/servo_ping.py
https://github.com/MultipedRobotics/pyxl320/blob/1a56540e208b028ee47d5fa0a7c7babcee0d9214/bin/servo_ping.py#L23-L42
def packetToDict(pkt): """ Given a packet, this turns it into a dictionary ... is this useful? in: packet, array of numbers out: dictionary (key, value) """ d = { 'id': pkt[4], # 'instruction': xl320.InstrToStr[pkt[7]], # 'length': (pkt[6] << 8) + pkt[5], # 'params': pkt[8:-2], 'Model Number': (pkt[10] << 8) + pkt[9], 'Firmware Ver': pkt[11], 'Error': ErrorStatusMsg[pkt[8]], # 'crc': pkt[-2:] } return d
[ "def", "packetToDict", "(", "pkt", ")", ":", "d", "=", "{", "'id'", ":", "pkt", "[", "4", "]", ",", "# 'instruction': xl320.InstrToStr[pkt[7]],", "# 'length': (pkt[6] << 8) + pkt[5],", "# 'params': pkt[8:-2],", "'Model Number'", ":", "(", "pkt", "[", "10", "]", "<<", "8", ")", "+", "pkt", "[", "9", "]", ",", "'Firmware Ver'", ":", "pkt", "[", "11", "]", ",", "'Error'", ":", "ErrorStatusMsg", "[", "pkt", "[", "8", "]", "]", ",", "# 'crc': pkt[-2:]", "}", "return", "d" ]
Given a packet, this turns it into a dictionary ... is this useful? in: packet, array of numbers out: dictionary (key, value)
[ "Given", "a", "packet", "this", "turns", "it", "into", "a", "dictionary", "...", "is", "this", "useful?" ]
python
train
BlackEarth/bl
bl/progress.py
https://github.com/BlackEarth/bl/blob/edf6f37dac718987260b90ad0e7f7fe084a7c1a3/bl/progress.py#L19-L24
def start(self, key=None, **params): """initialize process timing for the current stack""" self.params.update(**params) key = key or self.stack_key if key is not None: self.current_times[key] = time()
[ "def", "start", "(", "self", ",", "key", "=", "None", ",", "*", "*", "params", ")", ":", "self", ".", "params", ".", "update", "(", "*", "*", "params", ")", "key", "=", "key", "or", "self", ".", "stack_key", "if", "key", "is", "not", "None", ":", "self", ".", "current_times", "[", "key", "]", "=", "time", "(", ")" ]
initialize process timing for the current stack
[ "initialize", "process", "timing", "for", "the", "current", "stack" ]
python
train
python-xlib/python-xlib
Xlib/display.py
https://github.com/python-xlib/python-xlib/blob/8901e831737e79fe5645f48089d70e1d1046d2f2/Xlib/display.py#L710-L713
def list_extensions(self): """Return a list of all the extensions provided by the server.""" r = request.ListExtensions(display = self.display) return r.names
[ "def", "list_extensions", "(", "self", ")", ":", "r", "=", "request", ".", "ListExtensions", "(", "display", "=", "self", ".", "display", ")", "return", "r", ".", "names" ]
Return a list of all the extensions provided by the server.
[ "Return", "a", "list", "of", "all", "the", "extensions", "provided", "by", "the", "server", "." ]
python
train
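A short usage sketch for `list_extensions`; it needs a reachable X server (a valid DISPLAY), and the printed names are just typical examples.

    from Xlib import display

    d = display.Display()            # connects to $DISPLAY
    for name in d.list_extensions():
        print(name)                  # e.g. 'RANDR', 'XInputExtension', ...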
KelSolaar/Umbra
umbra/components/addons/trace_ui/trace_ui.py
https://github.com/KelSolaar/Umbra/blob/66f45f08d9d723787f1191989f8b0dda84b412ce/umbra/components/addons/trace_ui/trace_ui.py#L407-L426
def uninitialize_ui(self): """ Uninitializes the Component ui. :return: Method success. :rtype: bool """ LOGGER.debug("> Uninitializing '{0}' Component ui.".format(self.__class__.__name__)) # Signals / Slots. self.refresh_nodes.disconnect(self.__model__refresh_nodes) self.__view_remove_actions() self.__model = None self.__view = None self.initialized_ui = False return True
[ "def", "uninitialize_ui", "(", "self", ")", ":", "LOGGER", ".", "debug", "(", "\"> Uninitializing '{0}' Component ui.\"", ".", "format", "(", "self", ".", "__class__", ".", "__name__", ")", ")", "# Signals / Slots.", "self", ".", "refresh_nodes", ".", "disconnect", "(", "self", ".", "__model__refresh_nodes", ")", "self", ".", "__view_remove_actions", "(", ")", "self", ".", "__model", "=", "None", "self", ".", "__view", "=", "None", "self", ".", "initialized_ui", "=", "False", "return", "True" ]
Uninitializes the Component ui. :return: Method success. :rtype: bool
[ "Uninitializes", "the", "Component", "ui", "." ]
python
train
googleapis/google-cloud-python
bigquery/google/cloud/bigquery/job.py
https://github.com/googleapis/google-cloud-python/blob/85e80125a59cb10f8cb105f25ecc099e4b940b50/bigquery/google/cloud/bigquery/job.py#L427-L430
def _job_statistics(self): """Helper for job-type specific statistics-based properties.""" statistics = self._properties.get("statistics", {}) return statistics.get(self._JOB_TYPE, {})
[ "def", "_job_statistics", "(", "self", ")", ":", "statistics", "=", "self", ".", "_properties", ".", "get", "(", "\"statistics\"", ",", "{", "}", ")", "return", "statistics", ".", "get", "(", "self", ".", "_JOB_TYPE", ",", "{", "}", ")" ]
Helper for job-type specific statistics-based properties.
[ "Helper", "for", "job", "-", "type", "specific", "statistics", "-", "based", "properties", "." ]
python
train
spotify/luigi
luigi/contrib/postgres.py
https://github.com/spotify/luigi/blob/c5eca1c3c3ee2a7eb612486192a0da146710a1e9/luigi/contrib/postgres.py#L293-L349
def run(self): """ Inserts data generated by rows() into target table. If the target table doesn't exist, self.create_table will be called to attempt to create the table. Normally you don't want to override this. """ if not (self.table and self.columns): raise Exception("table and columns need to be specified") connection = self.output().connect() # transform all data generated by rows() using map_column and write data # to a temporary file for import using postgres COPY tmp_dir = luigi.configuration.get_config().get('postgres', 'local-tmp-dir', None) tmp_file = tempfile.TemporaryFile(dir=tmp_dir) n = 0 for row in self.rows(): n += 1 if n % 100000 == 0: logger.info("Wrote %d lines", n) rowstr = self.column_separator.join(self.map_column(val) for val in row) rowstr += "\n" tmp_file.write(rowstr.encode('utf-8')) logger.info("Done writing, importing at %s", datetime.datetime.now()) tmp_file.seek(0) # attempt to copy the data into postgres # if it fails because the target table doesn't exist # try to create it by running self.create_table for attempt in range(2): try: cursor = connection.cursor() self.init_copy(connection) self.copy(cursor, tmp_file) self.post_copy(connection) if self.enable_metadata_columns: self.post_copy_metacolumns(cursor) except psycopg2.ProgrammingError as e: if e.pgcode == psycopg2.errorcodes.UNDEFINED_TABLE and attempt == 0: # if first attempt fails with "relation not found", try creating table logger.info("Creating table %s", self.table) connection.reset() self.create_table(connection) else: raise else: break # mark as complete in same transaction self.output().touch(connection) # commit and clean up connection.commit() connection.close() tmp_file.close()
[ "def", "run", "(", "self", ")", ":", "if", "not", "(", "self", ".", "table", "and", "self", ".", "columns", ")", ":", "raise", "Exception", "(", "\"table and columns need to be specified\"", ")", "connection", "=", "self", ".", "output", "(", ")", ".", "connect", "(", ")", "# transform all data generated by rows() using map_column and write data", "# to a temporary file for import using postgres COPY", "tmp_dir", "=", "luigi", ".", "configuration", ".", "get_config", "(", ")", ".", "get", "(", "'postgres'", ",", "'local-tmp-dir'", ",", "None", ")", "tmp_file", "=", "tempfile", ".", "TemporaryFile", "(", "dir", "=", "tmp_dir", ")", "n", "=", "0", "for", "row", "in", "self", ".", "rows", "(", ")", ":", "n", "+=", "1", "if", "n", "%", "100000", "==", "0", ":", "logger", ".", "info", "(", "\"Wrote %d lines\"", ",", "n", ")", "rowstr", "=", "self", ".", "column_separator", ".", "join", "(", "self", ".", "map_column", "(", "val", ")", "for", "val", "in", "row", ")", "rowstr", "+=", "\"\\n\"", "tmp_file", ".", "write", "(", "rowstr", ".", "encode", "(", "'utf-8'", ")", ")", "logger", ".", "info", "(", "\"Done writing, importing at %s\"", ",", "datetime", ".", "datetime", ".", "now", "(", ")", ")", "tmp_file", ".", "seek", "(", "0", ")", "# attempt to copy the data into postgres", "# if it fails because the target table doesn't exist", "# try to create it by running self.create_table", "for", "attempt", "in", "range", "(", "2", ")", ":", "try", ":", "cursor", "=", "connection", ".", "cursor", "(", ")", "self", ".", "init_copy", "(", "connection", ")", "self", ".", "copy", "(", "cursor", ",", "tmp_file", ")", "self", ".", "post_copy", "(", "connection", ")", "if", "self", ".", "enable_metadata_columns", ":", "self", ".", "post_copy_metacolumns", "(", "cursor", ")", "except", "psycopg2", ".", "ProgrammingError", "as", "e", ":", "if", "e", ".", "pgcode", "==", "psycopg2", ".", "errorcodes", ".", "UNDEFINED_TABLE", "and", "attempt", "==", "0", ":", "# if first attempt fails with \"relation not found\", try creating table", "logger", ".", "info", "(", "\"Creating table %s\"", ",", "self", ".", "table", ")", "connection", ".", "reset", "(", ")", "self", ".", "create_table", "(", "connection", ")", "else", ":", "raise", "else", ":", "break", "# mark as complete in same transaction", "self", ".", "output", "(", ")", ".", "touch", "(", "connection", ")", "# commit and clean up", "connection", ".", "commit", "(", ")", "connection", ".", "close", "(", ")", "tmp_file", ".", "close", "(", ")" ]
Inserts data generated by rows() into target table. If the target table doesn't exist, self.create_table will be called to attempt to create the table. Normally you don't want to override this.
[ "Inserts", "data", "generated", "by", "rows", "()", "into", "target", "table", "." ]
python
train
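A hedged sketch of a minimal CopyToTable subclass that exercises the run() logic above; host, credentials, table and rows are all placeholders.

    import luigi
    from luigi.contrib import postgres

    class LoadUsers(postgres.CopyToTable):
        host = 'localhost'
        database = 'analytics'
        user = 'etl'
        password = 'secret'           # placeholder credentials
        table = 'users'
        columns = [('id', 'INT'), ('name', 'TEXT')]

        def rows(self):
            # run() streams whatever this yields into the table via COPY.
            yield (1, 'alice')
            yield (2, 'bob')

    # luigi.build([LoadUsers()], local_scheduler=True)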
oasis-open/cti-stix-validator
stix2validator/v21/shoulds.py
https://github.com/oasis-open/cti-stix-validator/blob/a607014e3fa500a7678f8b61b278456ca581f9d0/stix2validator/v21/shoulds.py#L874-L893
def pdf_doc_info(instance): """Ensure the keys of the 'document_info_dict' property of the pdf-ext extension of file objects are only valid PDF Document Information Dictionary Keys. """ for key, obj in instance['objects'].items(): if ('type' in obj and obj['type'] == 'file'): try: did = obj['extensions']['pdf-ext']['document_info_dict'] except KeyError: continue for elem in did: if elem not in enums.PDF_DID: yield JSONError("The 'document_info_dict' property of " "object '%s' contains a key ('%s') that is" " not a valid PDF Document Information " "Dictionary key." % (key, elem), instance['id'], 'pdf-doc-info')
[ "def", "pdf_doc_info", "(", "instance", ")", ":", "for", "key", ",", "obj", "in", "instance", "[", "'objects'", "]", ".", "items", "(", ")", ":", "if", "(", "'type'", "in", "obj", "and", "obj", "[", "'type'", "]", "==", "'file'", ")", ":", "try", ":", "did", "=", "obj", "[", "'extensions'", "]", "[", "'pdf-ext'", "]", "[", "'document_info_dict'", "]", "except", "KeyError", ":", "continue", "for", "elem", "in", "did", ":", "if", "elem", "not", "in", "enums", ".", "PDF_DID", ":", "yield", "JSONError", "(", "\"The 'document_info_dict' property of \"", "\"object '%s' contains a key ('%s') that is\"", "\" not a valid PDF Document Information \"", "\"Dictionary key.\"", "%", "(", "key", ",", "elem", ")", ",", "instance", "[", "'id'", "]", ",", "'pdf-doc-info'", ")" ]
Ensure the keys of the 'document_info_dict' property of the pdf-ext extension of file objects are only valid PDF Document Information Dictionary Keys.
[ "Ensure", "the", "keys", "of", "the", "document_info_dict", "property", "of", "the", "pdf", "-", "ext", "extension", "of", "file", "objects", "are", "only", "valid", "PDF", "Document", "Information", "Dictionary", "Keys", "." ]
python
train
Erotemic/utool
utool/util_alg.py
https://github.com/Erotemic/utool/blob/3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a/utool/util_alg.py#L514-L544
def diagonalized_iter(size): r""" TODO: generalize to more than 2 dimensions to be more like itertools.product. CommandLine: python -m utool.util_alg --exec-diagonalized_iter python -m utool.util_alg --exec-diagonalized_iter --size=5 Example: >>> # ENABLE_DOCTEST >>> from utool.util_alg import * # NOQA >>> import utool as ut >>> size = ut.get_argval('--size', default=4) >>> iter_ = diagonalized_iter(size) >>> mat = [[None] * size for _ in range(size)] >>> for count, (r, c) in enumerate(iter_): >>> mat[r][c] = count >>> result = ut.repr2(mat, nl=1, packed=True) >>> print(result) [[0, 2, 5, 9], [1, 4, 8, 12], [3, 7, 11, 14], [6, 10, 13, 15],] """ for i in range(0, size + 1): for r, c in zip(reversed(range(i)), (range(i))): yield (r, c) for i in range(1, size): for r, c in zip(reversed(range(i, size)), (range(i, size))): yield (r, c)
[ "def", "diagonalized_iter", "(", "size", ")", ":", "for", "i", "in", "range", "(", "0", ",", "size", "+", "1", ")", ":", "for", "r", ",", "c", "in", "zip", "(", "reversed", "(", "range", "(", "i", ")", ")", ",", "(", "range", "(", "i", ")", ")", ")", ":", "yield", "(", "r", ",", "c", ")", "for", "i", "in", "range", "(", "1", ",", "size", ")", ":", "for", "r", ",", "c", "in", "zip", "(", "reversed", "(", "range", "(", "i", ",", "size", ")", ")", ",", "(", "range", "(", "i", ",", "size", ")", ")", ")", ":", "yield", "(", "r", ",", "c", ")" ]
r""" TODO: generalize to more than 2 dimensions to be more like itertools.product. CommandLine: python -m utool.util_alg --exec-diagonalized_iter python -m utool.util_alg --exec-diagonalized_iter --size=5 Example: >>> # ENABLE_DOCTEST >>> from utool.util_alg import * # NOQA >>> import utool as ut >>> size = ut.get_argval('--size', default=4) >>> iter_ = diagonalized_iter(size) >>> mat = [[None] * size for _ in range(size)] >>> for count, (r, c) in enumerate(iter_): >>> mat[r][c] = count >>> result = ut.repr2(mat, nl=1, packed=True) >>> print(result) [[0, 2, 5, 9], [1, 4, 8, 12], [3, 7, 11, 14], [6, 10, 13, 15],]
[ "r", "TODO", ":", "generalize", "to", "more", "than", "2", "dimensions", "to", "be", "more", "like", "itertools", ".", "product", "." ]
python
train
lesscpy/lesscpy
lesscpy/lessc/lexer.py
https://github.com/lesscpy/lesscpy/blob/51e392fb4a3cd4ccfb6175e0e42ce7d2f6b78126/lesscpy/lessc/lexer.py#L370-L373
def t_css_string(self, t): r'"[^"@]*"|\'[^\'@]*\'' t.lexer.lineno += t.value.count('\n') return t
[ "def", "t_css_string", "(", "self", ",", "t", ")", ":", "t", ".", "lexer", ".", "lineno", "+=", "t", ".", "value", ".", "count", "(", "'\\n'", ")", "return", "t" ]
r'"[^"@]*"|\'[^\'@]*\
[ "r", "[", "^" ]
python
valid
mottosso/be
be/vendor/click/core.py
https://github.com/mottosso/be/blob/0f3d4f3597c71223f616d78c6d9b2c8dffcd8a71/be/vendor/click/core.py#L790-L795
def format_help_text(self, ctx, formatter): """Writes the help text to the formatter if it exists.""" if self.help: formatter.write_paragraph() with formatter.indentation(): formatter.write_text(self.help)
[ "def", "format_help_text", "(", "self", ",", "ctx", ",", "formatter", ")", ":", "if", "self", ".", "help", ":", "formatter", ".", "write_paragraph", "(", ")", "with", "formatter", ".", "indentation", "(", ")", ":", "formatter", ".", "write_text", "(", "self", ".", "help", ")" ]
Writes the help text to the formatter if it exists.
[ "Writes", "the", "help", "text", "to", "the", "formatter", "if", "it", "exists", "." ]
python
train
saltstack/salt
salt/states/onyx.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/states/onyx.py#L153-L197
def user_absent(name): ''' Ensure a user is not present name username to remove if it exists Examples: .. code-block:: yaml delete: onyx.user_absent: - name: daniel ''' ret = {'name': name, 'result': False, 'changes': {}, 'comment': ''} old_user = __salt__['onyx.cmd']('get_user', username=name) if not old_user: ret['result'] = True ret['comment'] = 'User does not exist' return ret if __opts__['test'] is True and old_user: ret['result'] = None ret['comment'] = 'User will be removed' ret['changes']['old'] = old_user ret['changes']['new'] = '' return ret __salt__['onyx.cmd']('remove_user', username=name) if __salt__['onyx.cmd']('get_user', username=name): ret['comment'] = 'Failed to remove user' else: ret['result'] = True ret['comment'] = 'User removed' ret['changes']['old'] = old_user ret['changes']['new'] = '' return ret
[ "def", "user_absent", "(", "name", ")", ":", "ret", "=", "{", "'name'", ":", "name", ",", "'result'", ":", "False", ",", "'changes'", ":", "{", "}", ",", "'comment'", ":", "''", "}", "old_user", "=", "__salt__", "[", "'onyx.cmd'", "]", "(", "'get_user'", ",", "username", "=", "name", ")", "if", "not", "old_user", ":", "ret", "[", "'result'", "]", "=", "True", "ret", "[", "'comment'", "]", "=", "'User does not exist'", "return", "ret", "if", "__opts__", "[", "'test'", "]", "is", "True", "and", "old_user", ":", "ret", "[", "'result'", "]", "=", "None", "ret", "[", "'comment'", "]", "=", "'User will be removed'", "ret", "[", "'changes'", "]", "[", "'old'", "]", "=", "old_user", "ret", "[", "'changes'", "]", "[", "'new'", "]", "=", "''", "return", "ret", "__salt__", "[", "'onyx.cmd'", "]", "(", "'remove_user'", ",", "username", "=", "name", ")", "if", "__salt__", "[", "'onyx.cmd'", "]", "(", "'get_user'", ",", "username", "=", "name", ")", ":", "ret", "[", "'comment'", "]", "=", "'Failed to remove user'", "else", ":", "ret", "[", "'result'", "]", "=", "True", "ret", "[", "'comment'", "]", "=", "'User removed'", "ret", "[", "'changes'", "]", "[", "'old'", "]", "=", "old_user", "ret", "[", "'changes'", "]", "[", "'new'", "]", "=", "''", "return", "ret" ]
Ensure a user is not present name username to remove if it exists Examples: .. code-block:: yaml delete: onyx.user_absent: - name: daniel
[ "Ensure", "a", "user", "is", "not", "present" ]
python
train
uber/doubles
doubles/expectation.py
https://github.com/uber/doubles/blob/15e68dcf98f709b19a581915fa6af5ef49ebdd8a/doubles/expectation.py#L34-L47
def satisfy_exact_match(self, args, kwargs): """ Returns a boolean indicating whether or not the mock will accept the provided arguments. :return: Whether or not the mock accepts the provided arguments. :rtype: bool """ is_match = super(Expectation, self).satisfy_exact_match(args, kwargs) if is_match: self._satisfy() return is_match
[ "def", "satisfy_exact_match", "(", "self", ",", "args", ",", "kwargs", ")", ":", "is_match", "=", "super", "(", "Expectation", ",", "self", ")", ".", "satisfy_exact_match", "(", "args", ",", "kwargs", ")", "if", "is_match", ":", "self", ".", "_satisfy", "(", ")", "return", "is_match" ]
Returns a boolean indicating whether or not the mock will accept the provided arguments. :return: Whether or not the mock accepts the provided arguments. :rtype: bool
[ "Returns", "a", "boolean", "indicating", "whether", "or", "not", "the", "mock", "will", "accept", "the", "provided", "arguments", "." ]
python
train
skorch-dev/skorch
skorch/utils.py
https://github.com/skorch-dev/skorch/blob/5b9b8b7b7712cb6e5aaa759d9608ea6269d5bcd3/skorch/utils.py#L374-L381
def is_skorch_dataset(ds): """Checks if the supplied dataset is an instance of ``skorch.dataset.Dataset`` even when it is nested inside ``torch.util.data.Subset``.""" from skorch.dataset import Dataset if isinstance(ds, Subset): return is_skorch_dataset(ds.dataset) return isinstance(ds, Dataset)
[ "def", "is_skorch_dataset", "(", "ds", ")", ":", "from", "skorch", ".", "dataset", "import", "Dataset", "if", "isinstance", "(", "ds", ",", "Subset", ")", ":", "return", "is_skorch_dataset", "(", "ds", ".", "dataset", ")", "return", "isinstance", "(", "ds", ",", "Dataset", ")" ]
Checks if the supplied dataset is an instance of ``skorch.dataset.Dataset`` even when it is nested inside ``torch.util.data.Subset``.
[ "Checks", "if", "the", "supplied", "dataset", "is", "an", "instance", "of", "skorch", ".", "dataset", ".", "Dataset", "even", "when", "it", "is", "nested", "inside", "torch", ".", "util", ".", "data", ".", "Subset", "." ]
python
train
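A small sketch showing what `is_skorch_dataset` accepts, including the Subset unwrapping described in the docstring; it assumes torch and skorch are installed.

    import numpy as np
    from torch.utils.data import Subset
    from skorch.dataset import Dataset
    from skorch.utils import is_skorch_dataset

    X = np.random.rand(10, 3).astype('float32')
    y = np.zeros(10, dtype='int64')
    ds = Dataset(X, y)

    print(is_skorch_dataset(ds))                    # True
    print(is_skorch_dataset(Subset(ds, [0, 1])))    # True -- the Subset is unwrapped
    print(is_skorch_dataset([(xi, 0) for xi in X])) # False -- plain Python list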
vaexio/vaex
packages/vaex-core/vaex/dataframe.py
https://github.com/vaexio/vaex/blob/a45b672f8287afca2ada8e36b74b604b9b28dd85/packages/vaex-core/vaex/dataframe.py#L4202-L4226
def select_circle(self, x, y, xc, yc, r, mode="replace", name="default", inclusive=True): """ Select a circular region centred on xc, yc, with a radius of r. Example: >>> df.select_circle('x','y',2,3,1) :param x: expression for the x space :param y: expression for the y space :param xc: location of the centre of the circle in x :param yc: location of the centre of the circle in y :param r: the radius of the circle :param name: name of the selection :param mode: :return: """ # expr = "({x}-{xc})**2 + ({y}-{yc})**2 <={r}**2".format(**locals()) if inclusive: expr = (self[x] - xc)**2 + (self[y] - yc)**2 <= r**2 else: expr = (self[x] - xc)**2 + (self[y] - yc)**2 < r**2 self.select(boolean_expression=expr, mode=mode, name=name)
[ "def", "select_circle", "(", "self", ",", "x", ",", "y", ",", "xc", ",", "yc", ",", "r", ",", "mode", "=", "\"replace\"", ",", "name", "=", "\"default\"", ",", "inclusive", "=", "True", ")", ":", "# expr = \"({x}-{xc})**2 + ({y}-{yc})**2 <={r}**2\".format(**locals())", "if", "inclusive", ":", "expr", "=", "(", "self", "[", "x", "]", "-", "xc", ")", "**", "2", "+", "(", "self", "[", "y", "]", "-", "yc", ")", "**", "2", "<=", "r", "**", "2", "else", ":", "expr", "=", "(", "self", "[", "x", "]", "-", "xc", ")", "**", "2", "+", "(", "self", "[", "y", "]", "-", "yc", ")", "**", "2", "<", "r", "**", "2", "self", ".", "select", "(", "boolean_expression", "=", "expr", ",", "mode", "=", "mode", ",", "name", "=", "name", ")" ]
Select a circular region centred on xc, yc, with a radius of r. Example: >>> df.select_circle('x','y',2,3,1) :param x: expression for the x space :param y: expression for the y space :param xc: location of the centre of the circle in x :param yc: location of the centre of the circle in y :param r: the radius of the circle :param name: name of the selection :param mode: :return:
[ "Select", "a", "circular", "region", "centred", "on", "xc", "yc", "with", "a", "radius", "of", "r", "." ]
python
test
kontron/python-ipmi
pyipmi/sel.py
https://github.com/kontron/python-ipmi/blob/ce46da47a37dd683615f32d04a10eda069aa569a/pyipmi/sel.py#L49-L91
def sel_entries(self): """Generator which returns all SEL entries.""" ENTIRE_RECORD = 0xff rsp = self.send_message_with_name('GetSelInfo') if rsp.entries == 0: return reservation_id = self.get_sel_reservation_id() next_record_id = 0 while True: req = create_request_by_name('GetSelEntry') req.reservation_id = reservation_id req.record_id = next_record_id req.offset = 0 self.max_req_len = ENTIRE_RECORD record_data = ByteBuffer() while True: req.length = self.max_req_len if (self.max_req_len != 0xff and (req.offset + req.length) > 16): req.length = 16 - req.offset rsp = self.send_message(req) if rsp.completion_code == constants.CC_CANT_RET_NUM_REQ_BYTES: if self.max_req_len == 0xff: self.max_req_len = 16 else: self.max_req_len -= 1 continue else: check_completion_code(rsp.completion_code) record_data.extend(rsp.record_data) req.offset = len(record_data) if len(record_data) >= 16: break next_record_id = rsp.next_record_id yield SelEntry(record_data) if next_record_id == 0xffff: break
[ "def", "sel_entries", "(", "self", ")", ":", "ENTIRE_RECORD", "=", "0xff", "rsp", "=", "self", ".", "send_message_with_name", "(", "'GetSelInfo'", ")", "if", "rsp", ".", "entries", "==", "0", ":", "return", "reservation_id", "=", "self", ".", "get_sel_reservation_id", "(", ")", "next_record_id", "=", "0", "while", "True", ":", "req", "=", "create_request_by_name", "(", "'GetSelEntry'", ")", "req", ".", "reservation_id", "=", "reservation_id", "req", ".", "record_id", "=", "next_record_id", "req", ".", "offset", "=", "0", "self", ".", "max_req_len", "=", "ENTIRE_RECORD", "record_data", "=", "ByteBuffer", "(", ")", "while", "True", ":", "req", ".", "length", "=", "self", ".", "max_req_len", "if", "(", "self", ".", "max_req_len", "!=", "0xff", "and", "(", "req", ".", "offset", "+", "req", ".", "length", ")", ">", "16", ")", ":", "req", ".", "length", "=", "16", "-", "req", ".", "offset", "rsp", "=", "self", ".", "send_message", "(", "req", ")", "if", "rsp", ".", "completion_code", "==", "constants", ".", "CC_CANT_RET_NUM_REQ_BYTES", ":", "if", "self", ".", "max_req_len", "==", "0xff", ":", "self", ".", "max_req_len", "=", "16", "else", ":", "self", ".", "max_req_len", "-=", "1", "continue", "else", ":", "check_completion_code", "(", "rsp", ".", "completion_code", ")", "record_data", ".", "extend", "(", "rsp", ".", "record_data", ")", "req", ".", "offset", "=", "len", "(", "record_data", ")", "if", "len", "(", "record_data", ")", ">=", "16", ":", "break", "next_record_id", "=", "rsp", ".", "next_record_id", "yield", "SelEntry", "(", "record_data", ")", "if", "next_record_id", "==", "0xffff", ":", "break" ]
Generator which returns all SEL entries.
[ "Generator", "which", "returns", "all", "SEL", "entries", "." ]
python
train
saltstack/salt
salt/modules/azurearm_network.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/azurearm_network.py#L1326-L1356
def load_balancer_delete(name, resource_group, **kwargs): ''' .. versionadded:: 2019.2.0 Delete a load balancer. :param name: The name of the load balancer to delete. :param resource_group: The resource group name assigned to the load balancer. CLI Example: .. code-block:: bash salt-call azurearm_network.load_balancer_delete testlb testgroup ''' result = False netconn = __utils__['azurearm.get_client']('network', **kwargs) try: load_balancer = netconn.load_balancers.delete( load_balancer_name=name, resource_group_name=resource_group ) load_balancer.wait() result = True except CloudError as exc: __utils__['azurearm.log_cloud_error']('network', str(exc), **kwargs) return result
[ "def", "load_balancer_delete", "(", "name", ",", "resource_group", ",", "*", "*", "kwargs", ")", ":", "result", "=", "False", "netconn", "=", "__utils__", "[", "'azurearm.get_client'", "]", "(", "'network'", ",", "*", "*", "kwargs", ")", "try", ":", "load_balancer", "=", "netconn", ".", "load_balancers", ".", "delete", "(", "load_balancer_name", "=", "name", ",", "resource_group_name", "=", "resource_group", ")", "load_balancer", ".", "wait", "(", ")", "result", "=", "True", "except", "CloudError", "as", "exc", ":", "__utils__", "[", "'azurearm.log_cloud_error'", "]", "(", "'network'", ",", "str", "(", "exc", ")", ",", "*", "*", "kwargs", ")", "return", "result" ]
.. versionadded:: 2019.2.0 Delete a load balancer. :param name: The name of the load balancer to delete. :param resource_group: The resource group name assigned to the load balancer. CLI Example: .. code-block:: bash salt-call azurearm_network.load_balancer_delete testlb testgroup
[ "..", "versionadded", "::", "2019", ".", "2", ".", "0" ]
python
train
pysathq/pysat
pysat/solvers.py
https://github.com/pysathq/pysat/blob/522742e8f2d4c6ac50ecd9087f7a346206774c67/pysat/solvers.py#L3301-L3307
def prop_budget(self, budget): """ Set limit on the number of propagations. """ if self.minisat: pysolvers.minisatgh_pbudget(self.minisat, budget)
[ "def", "prop_budget", "(", "self", ",", "budget", ")", ":", "if", "self", ".", "minisat", ":", "pysolvers", ".", "minisatgh_pbudget", "(", "self", ".", "minisat", ",", "budget", ")" ]
Set limit on the number of propagations.
[ "Set", "limit", "on", "the", "number", "of", "propagations", "." ]
python
train
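A hedged sketch of how a propagation budget is normally used; the budget only takes effect through the limited solving interface, and the clause set here is arbitrary.

    from pysat.solvers import Solver

    clauses = [[-1, 2], [-2, 3], [1, 3]]
    with Solver(name='mgh', bootstrap_with=clauses) as s:   # 'mgh' = MinisatGH
        s.prop_budget(100000)        # cap the number of propagations
        print(s.solve_limited())     # True / False, or None if the budget ran out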
inveniosoftware/invenio-records-files
invenio_records_files/utils.py
https://github.com/inveniosoftware/invenio-records-files/blob/c410eba986ea43be7e97082d5dcbbdc19ccec39c/invenio_records_files/utils.py#L34-L51
def record_file_factory(pid, record, filename): """Get file from a record. :param pid: Not used. It keeps the function signature. :param record: Record which contains the files. :param filename: Name of the file to be returned. :returns: File object or ``None`` if not found. """ try: if not (hasattr(record, 'files') and record.files): return None except MissingModelError: return None try: return record.files[filename] except KeyError: return None
[ "def", "record_file_factory", "(", "pid", ",", "record", ",", "filename", ")", ":", "try", ":", "if", "not", "(", "hasattr", "(", "record", ",", "'files'", ")", "and", "record", ".", "files", ")", ":", "return", "None", "except", "MissingModelError", ":", "return", "None", "try", ":", "return", "record", ".", "files", "[", "filename", "]", "except", "KeyError", ":", "return", "None" ]
Get file from a record. :param pid: Not used. It keeps the function signature. :param record: Record which contains the files. :param filename: Name of the file to be returned. :returns: File object or ``None`` if not found.
[ "Get", "file", "from", "a", "record", "." ]
python
train
Caramel/treacle
treacle/treacle.py
https://github.com/Caramel/treacle/blob/70f85a505c0f345659850aec1715c46c687d0e48/treacle/treacle.py#L202-L230
def in_hours(self, office=None, when=None): """ Finds if it is business hours in the given office. :param office: Office ID to look up, or None to check if any office is in business hours. :type office: str or None :param datetime.datetime when: When to check the office is open, or None for now. :returns: True if it is business hours, False otherwise. :rtype: bool :raises KeyError: If the office is unknown. """ if when == None: when = datetime.now(tz=utc) if office == None: for office in self.offices.itervalues(): if office.in_hours(when): return True return False else: # check specific office return self.offices[office].in_hours(when)
[ "def", "in_hours", "(", "self", ",", "office", "=", "None", ",", "when", "=", "None", ")", ":", "if", "when", "==", "None", ":", "when", "=", "datetime", ".", "now", "(", "tz", "=", "utc", ")", "if", "office", "==", "None", ":", "for", "office", "in", "self", ".", "offices", ".", "itervalues", "(", ")", ":", "if", "office", ".", "in_hours", "(", "when", ")", ":", "return", "True", "return", "False", "else", ":", "# check specific office", "return", "self", ".", "offices", "[", "office", "]", ".", "in_hours", "(", "when", ")" ]
Finds if it is business hours in the given office. :param office: Office ID to look up, or None to check if any office is in business hours. :type office: str or None :param datetime.datetime when: When to check the office is open, or None for now. :returns: True if it is business hours, False otherwise. :rtype: bool :raises KeyError: If the office is unknown.
[ "Finds", "if", "it", "is", "business", "hours", "in", "the", "given", "office", "." ]
python
train
annayqho/TheCannon
code/aaomega/aaomega_munge_data.py
https://github.com/annayqho/TheCannon/blob/8010a0a5dc9a3f9bb91efa79d7756f79b3c7ba9a/code/aaomega/aaomega_munge_data.py#L160-L183
def make_full_ivar(): """ take the scatters and skylines and make final ivars """ # skylines come as an ivar # don't use them for now, because I don't really trust them... # skylines = np.load("%s/skylines.npz" %DATA_DIR)['arr_0'] ref_flux = np.load("%s/ref_flux_all.npz" %DATA_DIR)['arr_0'] ref_scat = np.load("%s/ref_spec_scat_all.npz" %DATA_DIR)['arr_0'] test_flux = np.load("%s/test_flux.npz" %DATA_DIR)['arr_0'] test_scat = np.load("%s/test_spec_scat.npz" %DATA_DIR)['arr_0'] ref_ivar = np.ones(ref_flux.shape) / ref_scat[:,None]**2 test_ivar = np.ones(test_flux.shape) / test_scat[:,None]**2 # ref_ivar = (ref_ivar_temp * skylines[None,:]) / (ref_ivar_temp + skylines) # test_ivar = (test_ivar_temp * skylines[None,:]) / (test_ivar_temp + skylines) ref_bad = np.logical_or(ref_flux <= 0, ref_flux > 1.1) test_bad = np.logical_or(test_flux <= 0, test_flux > 1.1) SMALL = 1.0 / 1000000000.0 ref_ivar[ref_bad] = SMALL test_ivar[test_bad] = SMALL np.savez("%s/ref_ivar_corr.npz" %DATA_DIR, ref_ivar) np.savez("%s/test_ivar_corr.npz" %DATA_DIR, test_ivar)
[ "def", "make_full_ivar", "(", ")", ":", "# skylines come as an ivar", "# don't use them for now, because I don't really trust them...", "# skylines = np.load(\"%s/skylines.npz\" %DATA_DIR)['arr_0']", "ref_flux", "=", "np", ".", "load", "(", "\"%s/ref_flux_all.npz\"", "%", "DATA_DIR", ")", "[", "'arr_0'", "]", "ref_scat", "=", "np", ".", "load", "(", "\"%s/ref_spec_scat_all.npz\"", "%", "DATA_DIR", ")", "[", "'arr_0'", "]", "test_flux", "=", "np", ".", "load", "(", "\"%s/test_flux.npz\"", "%", "DATA_DIR", ")", "[", "'arr_0'", "]", "test_scat", "=", "np", ".", "load", "(", "\"%s/test_spec_scat.npz\"", "%", "DATA_DIR", ")", "[", "'arr_0'", "]", "ref_ivar", "=", "np", ".", "ones", "(", "ref_flux", ".", "shape", ")", "/", "ref_scat", "[", ":", ",", "None", "]", "**", "2", "test_ivar", "=", "np", ".", "ones", "(", "test_flux", ".", "shape", ")", "/", "test_scat", "[", ":", ",", "None", "]", "**", "2", "# ref_ivar = (ref_ivar_temp * skylines[None,:]) / (ref_ivar_temp + skylines)", "# test_ivar = (test_ivar_temp * skylines[None,:]) / (test_ivar_temp + skylines)", "ref_bad", "=", "np", ".", "logical_or", "(", "ref_flux", "<=", "0", ",", "ref_flux", ">", "1.1", ")", "test_bad", "=", "np", ".", "logical_or", "(", "test_flux", "<=", "0", ",", "test_flux", ">", "1.1", ")", "SMALL", "=", "1.0", "/", "1000000000.0", "ref_ivar", "[", "ref_bad", "]", "=", "SMALL", "test_ivar", "[", "test_bad", "]", "=", "SMALL", "np", ".", "savez", "(", "\"%s/ref_ivar_corr.npz\"", "%", "DATA_DIR", ",", "ref_ivar", ")", "np", ".", "savez", "(", "\"%s/test_ivar_corr.npz\"", "%", "DATA_DIR", ",", "test_ivar", ")" ]
take the scatters and skylines and make final ivars
[ "take", "the", "scatters", "and", "skylines", "and", "make", "final", "ivars" ]
python
train
thisfred/val
val/tp.py
https://github.com/thisfred/val/blob/ba022e0c6c47acb3b8a45e7c44c84cc0f495c41c/val/tp.py#L131-L148
def _dict_to_teleport(dict_value): """Convert a val schema dictionary to teleport.""" if len(dict_value) == 1: for key, value in dict_value.items(): if key is str: return {"Map": from_val(value)} optional = {} required = {} for key, value in dict_value.items(): if isinstance(key, Optional): optional[key.value] = from_val(value) else: required[key] = from_val(value) return {"Struct": { "required": required, "optional": optional}}
[ "def", "_dict_to_teleport", "(", "dict_value", ")", ":", "if", "len", "(", "dict_value", ")", "==", "1", ":", "for", "key", ",", "value", "in", "dict_value", ".", "items", "(", ")", ":", "if", "key", "is", "str", ":", "return", "{", "\"Map\"", ":", "from_val", "(", "value", ")", "}", "optional", "=", "{", "}", "required", "=", "{", "}", "for", "key", ",", "value", "in", "dict_value", ".", "items", "(", ")", ":", "if", "isinstance", "(", "key", ",", "Optional", ")", ":", "optional", "[", "key", ".", "value", "]", "=", "from_val", "(", "value", ")", "else", ":", "required", "[", "key", "]", "=", "from_val", "(", "value", ")", "return", "{", "\"Struct\"", ":", "{", "\"required\"", ":", "required", ",", "\"optional\"", ":", "optional", "}", "}" ]
Convert a val schema dictionary to teleport.
[ "Convert", "a", "val", "schema", "dictionary", "to", "teleport", "." ]
python
train
googleapis/google-cloud-python
dlp/google/cloud/dlp_v2/gapic/dlp_service_client.py
https://github.com/googleapis/google-cloud-python/blob/85e80125a59cb10f8cb105f25ecc099e4b940b50/dlp/google/cloud/dlp_v2/gapic/dlp_service_client.py#L128-L134
def project_job_trigger_path(cls, project, job_trigger): """Return a fully-qualified project_job_trigger string.""" return google.api_core.path_template.expand( "projects/{project}/jobTriggers/{job_trigger}", project=project, job_trigger=job_trigger, )
[ "def", "project_job_trigger_path", "(", "cls", ",", "project", ",", "job_trigger", ")", ":", "return", "google", ".", "api_core", ".", "path_template", ".", "expand", "(", "\"projects/{project}/jobTriggers/{job_trigger}\"", ",", "project", "=", "project", ",", "job_trigger", "=", "job_trigger", ",", ")" ]
Return a fully-qualified project_job_trigger string.
[ "Return", "a", "fully", "-", "qualified", "project_job_trigger", "string", "." ]
python
train
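For illustration, the same expansion can be reproduced with plain string formatting; the project and trigger IDs below are placeholders, and the real client method should be preferred since it goes through google.api_core.path_template:

    # hedged sketch: placeholder IDs, plain formatting instead of path_template
    def project_job_trigger_path(project, job_trigger):
        return "projects/{project}/jobTriggers/{job_trigger}".format(
            project=project, job_trigger=job_trigger)

    print(project_job_trigger_path("my-project", "my-trigger"))
    # projects/my-project/jobTriggers/my-trigger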
jadolg/rocketchat_API
rocketchat_API/rocketchat.py
https://github.com/jadolg/rocketchat_API/blob/f220d094434991cb9892418245f054ea06f28aad/rocketchat_API/rocketchat.py#L324-L326
def channels_close(self, room_id, **kwargs): """Removes the channel from the user’s list of channels.""" return self.__call_api_post('channels.close', roomId=room_id, kwargs=kwargs)
[ "def", "channels_close", "(", "self", ",", "room_id", ",", "*", "*", "kwargs", ")", ":", "return", "self", ".", "__call_api_post", "(", "'channels.close'", ",", "roomId", "=", "room_id", ",", "kwargs", "=", "kwargs", ")" ]
Removes the channel from the user’s list of channels.
[ "Removes", "the", "channel", "from", "the", "user’s", "list", "of", "channels", "." ]
python
train
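A hedged usage sketch for the method above; the server URL, credentials, and room id are placeholders, and the call is assumed to return a requests-style response like the other rocketchat_API methods:

    from rocketchat_API.rocketchat import RocketChat

    rocket = RocketChat('myuser', 'mypassword',
                        server_url='https://demo.rocket.chat')
    # roomId would normally come from e.g. channels_list() or channels_info()
    response = rocket.channels_close('ByehQjC44FwMeiLbX')
    print(response.json())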
rootpy/rootpy
rootpy/plotting/hist.py
https://github.com/rootpy/rootpy/blob/3926935e1f2100d8ba68070c2ab44055d4800f73/rootpy/plotting/hist.py#L749-L775
def uniform_binned(self, name=None): """ Return a new histogram with constant width bins along all axes by using the bin indices as the bin edges of the new histogram. """ if self.GetDimension() == 1: new_hist = Hist( self.GetNbinsX(), 0, self.GetNbinsX(), name=name, type=self.TYPE) elif self.GetDimension() == 2: new_hist = Hist2D( self.GetNbinsX(), 0, self.GetNbinsX(), self.GetNbinsY(), 0, self.GetNbinsY(), name=name, type=self.TYPE) else: new_hist = Hist3D( self.GetNbinsX(), 0, self.GetNbinsX(), self.GetNbinsY(), 0, self.GetNbinsY(), self.GetNbinsZ(), 0, self.GetNbinsZ(), name=name, type=self.TYPE) # copy over the bin contents and errors for outbin, inbin in zip(new_hist.bins(), self.bins()): outbin.value = inbin.value outbin.error = inbin.error new_hist.decorate(self) new_hist.entries = self.entries return new_hist
[ "def", "uniform_binned", "(", "self", ",", "name", "=", "None", ")", ":", "if", "self", ".", "GetDimension", "(", ")", "==", "1", ":", "new_hist", "=", "Hist", "(", "self", ".", "GetNbinsX", "(", ")", ",", "0", ",", "self", ".", "GetNbinsX", "(", ")", ",", "name", "=", "name", ",", "type", "=", "self", ".", "TYPE", ")", "elif", "self", ".", "GetDimension", "(", ")", "==", "2", ":", "new_hist", "=", "Hist2D", "(", "self", ".", "GetNbinsX", "(", ")", ",", "0", ",", "self", ".", "GetNbinsX", "(", ")", ",", "self", ".", "GetNbinsY", "(", ")", ",", "0", ",", "self", ".", "GetNbinsY", "(", ")", ",", "name", "=", "name", ",", "type", "=", "self", ".", "TYPE", ")", "else", ":", "new_hist", "=", "Hist3D", "(", "self", ".", "GetNbinsX", "(", ")", ",", "0", ",", "self", ".", "GetNbinsX", "(", ")", ",", "self", ".", "GetNbinsY", "(", ")", ",", "0", ",", "self", ".", "GetNbinsY", "(", ")", ",", "self", ".", "GetNbinsZ", "(", ")", ",", "0", ",", "self", ".", "GetNbinsZ", "(", ")", ",", "name", "=", "name", ",", "type", "=", "self", ".", "TYPE", ")", "# copy over the bin contents and errors", "for", "outbin", ",", "inbin", "in", "zip", "(", "new_hist", ".", "bins", "(", ")", ",", "self", ".", "bins", "(", ")", ")", ":", "outbin", ".", "value", "=", "inbin", ".", "value", "outbin", ".", "error", "=", "inbin", ".", "error", "new_hist", ".", "decorate", "(", "self", ")", "new_hist", ".", "entries", "=", "self", ".", "entries", "return", "new_hist" ]
Return a new histogram with constant width bins along all axes by using the bin indices as the bin edges of the new histogram.
[ "Return", "a", "new", "histogram", "with", "constant", "width", "bins", "along", "all", "axes", "by", "using", "the", "bin", "indices", "as", "the", "bin", "edges", "of", "the", "new", "histogram", "." ]
python
train
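A usage sketch for uniform_binned, assuming a working ROOT/rootpy installation; the variable-width binning and fill values are invented for illustration:

    from rootpy.plotting import Hist

    h = Hist([0, 1, 3, 10], name='h_variable')   # variable-width bin edges
    for x in (0.5, 2.0, 7.0):
        h.Fill(x)

    # same contents re-indexed onto constant-width (bin-index) axes
    h_uniform = h.uniform_binned(name='h_uniform')
    print([b.value for b in h_uniform.bins()])   # [1.0, 1.0, 1.0]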
sprockets/sprockets-influxdb
sprockets_influxdb.py
https://github.com/sprockets/sprockets-influxdb/blob/cce73481b8f26b02e65e3f9914a9a22eceff3063/sprockets_influxdb.py#L858-L869
def marshall(self): """Return the measurement in the line protocol format. :rtype: str """ return '{},{} {} {}'.format( self._escape(self.name), ','.join(['{}={}'.format(self._escape(k), self._escape(v)) for k, v in self.tags.items()]), self._marshall_fields(), int(self.timestamp * 1000))
[ "def", "marshall", "(", "self", ")", ":", "return", "'{},{} {} {}'", ".", "format", "(", "self", ".", "_escape", "(", "self", ".", "name", ")", ",", "','", ".", "join", "(", "[", "'{}={}'", ".", "format", "(", "self", ".", "_escape", "(", "k", ")", ",", "self", ".", "_escape", "(", "v", ")", ")", "for", "k", ",", "v", "in", "self", ".", "tags", ".", "items", "(", ")", "]", ")", ",", "self", ".", "_marshall_fields", "(", ")", ",", "int", "(", "self", ".", "timestamp", "*", "1000", ")", ")" ]
Return the measurement in the line protocol format. :rtype: str
[ "Return", "the", "measurement", "in", "the", "line", "protocol", "format", "." ]
python
train
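A standalone sketch of the line-protocol string the marshall method assembles; the measurement name, tags, and field here are invented, and the tag/field escaping done by the library is omitted:

    name = 'cpu'
    tags = {'host': 'web01', 'region': 'us-east'}
    fields = {'value': '0.64'}
    timestamp = 1514764800.0   # seconds since the epoch

    line = '{},{} {} {}'.format(
        name,
        ','.join('{}={}'.format(k, v) for k, v in sorted(tags.items())),
        ','.join('{}={}'.format(k, v) for k, v in fields.items()),
        int(timestamp * 1000))
    print(line)   # cpu,host=web01,region=us-east value=0.64 1514764800000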
limodou/uliweb
uliweb/utils/generic.py
https://github.com/limodou/uliweb/blob/34472f25e4bc0b954a35346672f94e84ef18b076/uliweb/utils/generic.py#L1980-L2016
def download(self, filename, timeout=3600, action=None, query=None, fields_convert_map=None, type=None, domain=None, template_filename='', sheet_name='', **kwargs): """ Default domain option is PARA/DOMAIN :param template_filename: Excel template filename, it'll xltools to writer it, only can be used in xlsx """ from uliweb import settings fields_convert_map = fields_convert_map or self.fields_convert_map t_filename = self.get_real_file(filename) if os.path.exists(t_filename): if timeout and os.path.getmtime(t_filename) + timeout > time.time(): return self.downloader.download(filename, action) if query is None: query = self.query_all() if not type: type = os.path.splitext(filename)[1] if type: type = type[1:] else: type = 'csv' if type in ('xlt', 'xls'): if not domain: domain = settings.get_var('PARA/DOMAIN') return self.download_xlt(filename, query, action, fields_convert_map, domain, not_tempfile=bool(timeout), **kwargs) if type in ('xlsx',): if not domain: domain = settings.get_var('PARA/DOMAIN') return self.download_xlsx(filename, query, action, fields_convert_map, domain, not_tempfile=bool(timeout), template_filename=template_filename, sheet_name=sheet_name, **kwargs) else: return self.download_csv(filename, query, action, fields_convert_map, not_tempfile=bool(timeout), **kwargs)
[ "def", "download", "(", "self", ",", "filename", ",", "timeout", "=", "3600", ",", "action", "=", "None", ",", "query", "=", "None", ",", "fields_convert_map", "=", "None", ",", "type", "=", "None", ",", "domain", "=", "None", ",", "template_filename", "=", "''", ",", "sheet_name", "=", "''", ",", "*", "*", "kwargs", ")", ":", "from", "uliweb", "import", "settings", "fields_convert_map", "=", "fields_convert_map", "or", "self", ".", "fields_convert_map", "t_filename", "=", "self", ".", "get_real_file", "(", "filename", ")", "if", "os", ".", "path", ".", "exists", "(", "t_filename", ")", ":", "if", "timeout", "and", "os", ".", "path", ".", "getmtime", "(", "t_filename", ")", "+", "timeout", ">", "time", ".", "time", "(", ")", ":", "return", "self", ".", "downloader", ".", "download", "(", "filename", ",", "action", ")", "if", "query", "is", "None", ":", "query", "=", "self", ".", "query_all", "(", ")", "if", "not", "type", ":", "type", "=", "os", ".", "path", ".", "splitext", "(", "filename", ")", "[", "1", "]", "if", "type", ":", "type", "=", "type", "[", "1", ":", "]", "else", ":", "type", "=", "'csv'", "if", "type", "in", "(", "'xlt'", ",", "'xls'", ")", ":", "if", "not", "domain", ":", "domain", "=", "settings", ".", "get_var", "(", "'PARA/DOMAIN'", ")", "return", "self", ".", "download_xlt", "(", "filename", ",", "query", ",", "action", ",", "fields_convert_map", ",", "domain", ",", "not_tempfile", "=", "bool", "(", "timeout", ")", ",", "*", "*", "kwargs", ")", "if", "type", "in", "(", "'xlsx'", ",", ")", ":", "if", "not", "domain", ":", "domain", "=", "settings", ".", "get_var", "(", "'PARA/DOMAIN'", ")", "return", "self", ".", "download_xlsx", "(", "filename", ",", "query", ",", "action", ",", "fields_convert_map", ",", "domain", ",", "not_tempfile", "=", "bool", "(", "timeout", ")", ",", "template_filename", "=", "template_filename", ",", "sheet_name", "=", "sheet_name", ",", "*", "*", "kwargs", ")", "else", ":", "return", "self", ".", "download_csv", "(", "filename", ",", "query", ",", "action", ",", "fields_convert_map", ",", "not_tempfile", "=", "bool", "(", "timeout", ")", ",", "*", "*", "kwargs", ")" ]
Default domain option is PARA/DOMAIN :param template_filename: Excel template filename, it'll xltools to writer it, only can be used in xlsx
[ "Default", "domain", "option", "is", "PARA", "/", "DOMAIN", ":", "param", "template_filename", ":", "Excel", "template", "filename", "it", "ll", "xltools", "to", "writer", "it", "only", "can", "be", "used", "in", "xlsx" ]
python
train
vrtsystems/hszinc
hszinc/pintutil.py
https://github.com/vrtsystems/hszinc/blob/d52a7c6b5bc466f3c1a77b71814c8c0776aba995/hszinc/pintutil.py#L171-L236
def define_haystack_units(): """ Missing units found in project-haystack Added to the registry """ ureg = UnitRegistry() ureg.define('% = [] = percent') ureg.define('pixel = [] = px = dot = picture_element = pel') ureg.define('decibel = [] = dB') ureg.define('ppu = [] = parts_per_unit') ureg.define('ppm = [] = parts_per_million') ureg.define('ppb = [] = parts_per_billion') ureg.define('%RH = [] = percent_relative_humidity = percentRH') ureg.define('cubic_feet = ft ** 3 = cu_ft') ureg.define('cfm = cu_ft * minute = liter_per_second / 0.4719475') ureg.define('cfh = cu_ft * hour') ureg.define('cfs = cu_ft * second') ureg.define('VAR = volt * ampere') ureg.define('kVAR = 1000 * volt * ampere') ureg.define('MVAR = 1000000 * volt * ampere') ureg.define('inH2O = in_H2O') ureg.define('dry_air = []') ureg.define('gas = []') ureg.define('energy_efficiency_ratio = [] = EER') ureg.define('coefficient_of_performance = [] = COP') ureg.define('data_center_infrastructure_efficiency = [] = DCIE') ureg.define('power_usage_effectiveness = [] = PUE') ureg.define('formazin_nephelometric_unit = [] = fnu') ureg.define('nephelometric_turbidity_units = [] = ntu') ureg.define('power_factor = [] = PF') ureg.define('degree_day_celsius = [] = degdaysC') ureg.define('degree_day_farenheit = degree_day_celsius * 9 / 5 = degdaysF') ureg.define('footcandle = lumen / sq_ft = ftcd') ureg.define('Nm = newton * meter') ureg.define('%obsc = [] = percent_obscuration = percentobsc') ureg.define('cycle = []') ureg.define('cph = cycle / hour') ureg.define('cpm = cycle / minute') ureg.define('cps = cycle / second') ureg.define('hecto_cubic_foot = 100 * cubic_foot') ureg.define('tenths_second = second / 10') ureg.define('hundredths_second = second / 100') #ureg.define('irradiance = W / sq_meter = irr') # In the definition of project haystack, there's a redundancy as irr = W/m^2 # no need to use : watts_per_square_meter_irradiance # CURRENCY # I know...we won'T be able to convert right now ! ureg.define('australian_dollar = [] = AUD') ureg.define('british_pound = [] = GBP = £') ureg.define('canadian_dollar = [] = CAD') ureg.define('chinese_yuan = [] = CNY = 元') ureg.define('emerati_dirham = [] = AED') ureg.define('euro = [] = EUR = €') ureg.define('indian_rupee = [] = INR = ₹') ureg.define('japanese_yen = [] = JPY = ¥') ureg.define('russian_ruble = [] = RUB = руб') ureg.define('south_korean_won = [] = KRW = ₩') ureg.define('swedish_krona = [] = SEK = kr') ureg.define('swiss_franc = [] = CHF = Fr') ureg.define('taiwan_dollar = [] = TWD') ureg.define('us_dollar = [] = USD = $') ureg.define('new_israeli_shekel = [] = NIS') return ureg
[ "def", "define_haystack_units", "(", ")", ":", "ureg", "=", "UnitRegistry", "(", ")", "ureg", ".", "define", "(", "'% = [] = percent'", ")", "ureg", ".", "define", "(", "'pixel = [] = px = dot = picture_element = pel'", ")", "ureg", ".", "define", "(", "'decibel = [] = dB'", ")", "ureg", ".", "define", "(", "'ppu = [] = parts_per_unit'", ")", "ureg", ".", "define", "(", "'ppm = [] = parts_per_million'", ")", "ureg", ".", "define", "(", "'ppb = [] = parts_per_billion'", ")", "ureg", ".", "define", "(", "'%RH = [] = percent_relative_humidity = percentRH'", ")", "ureg", ".", "define", "(", "'cubic_feet = ft ** 3 = cu_ft'", ")", "ureg", ".", "define", "(", "'cfm = cu_ft * minute = liter_per_second / 0.4719475'", ")", "ureg", ".", "define", "(", "'cfh = cu_ft * hour'", ")", "ureg", ".", "define", "(", "'cfs = cu_ft * second'", ")", "ureg", ".", "define", "(", "'VAR = volt * ampere'", ")", "ureg", ".", "define", "(", "'kVAR = 1000 * volt * ampere'", ")", "ureg", ".", "define", "(", "'MVAR = 1000000 * volt * ampere'", ")", "ureg", ".", "define", "(", "'inH2O = in_H2O'", ")", "ureg", ".", "define", "(", "'dry_air = []'", ")", "ureg", ".", "define", "(", "'gas = []'", ")", "ureg", ".", "define", "(", "'energy_efficiency_ratio = [] = EER'", ")", "ureg", ".", "define", "(", "'coefficient_of_performance = [] = COP'", ")", "ureg", ".", "define", "(", "'data_center_infrastructure_efficiency = [] = DCIE'", ")", "ureg", ".", "define", "(", "'power_usage_effectiveness = [] = PUE'", ")", "ureg", ".", "define", "(", "'formazin_nephelometric_unit = [] = fnu'", ")", "ureg", ".", "define", "(", "'nephelometric_turbidity_units = [] = ntu'", ")", "ureg", ".", "define", "(", "'power_factor = [] = PF'", ")", "ureg", ".", "define", "(", "'degree_day_celsius = [] = degdaysC'", ")", "ureg", ".", "define", "(", "'degree_day_farenheit = degree_day_celsius * 9 / 5 = degdaysF'", ")", "ureg", ".", "define", "(", "'footcandle = lumen / sq_ft = ftcd'", ")", "ureg", ".", "define", "(", "'Nm = newton * meter'", ")", "ureg", ".", "define", "(", "'%obsc = [] = percent_obscuration = percentobsc'", ")", "ureg", ".", "define", "(", "'cycle = []'", ")", "ureg", ".", "define", "(", "'cph = cycle / hour'", ")", "ureg", ".", "define", "(", "'cpm = cycle / minute'", ")", "ureg", ".", "define", "(", "'cps = cycle / second'", ")", "ureg", ".", "define", "(", "'hecto_cubic_foot = 100 * cubic_foot'", ")", "ureg", ".", "define", "(", "'tenths_second = second / 10'", ")", "ureg", ".", "define", "(", "'hundredths_second = second / 100'", ")", "#ureg.define('irradiance = W / sq_meter = irr')", "# In the definition of project haystack, there's a redundancy as irr = W/m^2", "# no need to use : watts_per_square_meter_irradiance", "# CURRENCY", "# I know...we won'T be able to convert right now !", "ureg", ".", "define", "(", "'australian_dollar = [] = AUD'", ")", "ureg", ".", "define", "(", "'british_pound = [] = GBP = £')", "", "ureg", ".", "define", "(", "'canadian_dollar = [] = CAD'", ")", "ureg", ".", "define", "(", "'chinese_yuan = [] = CNY = 元')", "", "ureg", ".", "define", "(", "'emerati_dirham = [] = AED'", ")", "ureg", ".", "define", "(", "'euro = [] = EUR = €')", "", "ureg", ".", "define", "(", "'indian_rupee = [] = INR = ₹')", "", "ureg", ".", "define", "(", "'japanese_yen = [] = JPY = ¥')", "", "ureg", ".", "define", "(", "'russian_ruble = [] = RUB = руб')", "", "ureg", ".", "define", "(", "'south_korean_won = [] = KRW = ₩')", "", "ureg", ".", "define", "(", "'swedish_krona = [] = SEK = kr'", ")", "ureg", ".", "define", "(", 
"'swiss_franc = [] = CHF = Fr'", ")", "ureg", ".", "define", "(", "'taiwan_dollar = [] = TWD'", ")", "ureg", ".", "define", "(", "'us_dollar = [] = USD = $'", ")", "ureg", ".", "define", "(", "'new_israeli_shekel = [] = NIS'", ")", "return", "ureg" ]
Missing units found in project-haystack Added to the registry
[ "Missing", "units", "found", "in", "project", "-", "haystack", "Added", "to", "the", "registry" ]
python
valid
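A usage sketch: the returned registry behaves like any pint UnitRegistry, so the custom Haystack units defined above can be converted among compatible units; this assumes the registry builds successfully with the installed pint version, and the quantities are illustrative:

    ureg = define_haystack_units()

    reactive = 2 * ureg.kVAR        # kVAR = 1000 * volt * ampere above
    print(reactive.to('VAR'))       # 2000.0 VAR

    torque = 3 * ureg.Nm            # Nm = newton * meter above
    print(torque.to('joule'))       # 3.0 joule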
google-research/batch-ppo
agents/tools/loop.py
https://github.com/google-research/batch-ppo/blob/3d09705977bae4e7c3eb20339a3b384d2a5531e4/agents/tools/loop.py#L189-L217
def _define_step(self, done, score, summary): """Combine operations of a phase. Keeps track of the mean score and when to report it. Args: done: Tensor indicating whether current score can be used. score: Tensor holding the current, possibly intermediate, score. summary: Tensor holding summary string to write if not an empty string. Returns: Tuple of summary tensor, mean score, and new global step. The mean score is zero for non reporting steps. """ if done.shape.ndims == 0: done = done[None] if score.shape.ndims == 0: score = score[None] score_mean = streaming_mean.StreamingMean((), tf.float32) with tf.control_dependencies([done, score, summary]): done_score = tf.gather(score, tf.where(done)[:, 0]) submit_score = tf.cond( tf.reduce_any(done), lambda: score_mean.submit(done_score), tf.no_op) with tf.control_dependencies([submit_score]): mean_score = tf.cond(self._report, score_mean.clear, float) steps_made = tf.shape(score)[0] next_step = self._step.assign_add(steps_made) with tf.control_dependencies([mean_score, next_step]): return tf.identity(summary), mean_score, next_step, steps_made
[ "def", "_define_step", "(", "self", ",", "done", ",", "score", ",", "summary", ")", ":", "if", "done", ".", "shape", ".", "ndims", "==", "0", ":", "done", "=", "done", "[", "None", "]", "if", "score", ".", "shape", ".", "ndims", "==", "0", ":", "score", "=", "score", "[", "None", "]", "score_mean", "=", "streaming_mean", ".", "StreamingMean", "(", "(", ")", ",", "tf", ".", "float32", ")", "with", "tf", ".", "control_dependencies", "(", "[", "done", ",", "score", ",", "summary", "]", ")", ":", "done_score", "=", "tf", ".", "gather", "(", "score", ",", "tf", ".", "where", "(", "done", ")", "[", ":", ",", "0", "]", ")", "submit_score", "=", "tf", ".", "cond", "(", "tf", ".", "reduce_any", "(", "done", ")", ",", "lambda", ":", "score_mean", ".", "submit", "(", "done_score", ")", ",", "tf", ".", "no_op", ")", "with", "tf", ".", "control_dependencies", "(", "[", "submit_score", "]", ")", ":", "mean_score", "=", "tf", ".", "cond", "(", "self", ".", "_report", ",", "score_mean", ".", "clear", ",", "float", ")", "steps_made", "=", "tf", ".", "shape", "(", "score", ")", "[", "0", "]", "next_step", "=", "self", ".", "_step", ".", "assign_add", "(", "steps_made", ")", "with", "tf", ".", "control_dependencies", "(", "[", "mean_score", ",", "next_step", "]", ")", ":", "return", "tf", ".", "identity", "(", "summary", ")", ",", "mean_score", ",", "next_step", ",", "steps_made" ]
Combine operations of a phase. Keeps track of the mean score and when to report it. Args: done: Tensor indicating whether current score can be used. score: Tensor holding the current, possibly intermediate, score. summary: Tensor holding summary string to write if not an empty string. Returns: Tuple of summary tensor, mean score, and new global step. The mean score is zero for non reporting steps.
[ "Combine", "operations", "of", "a", "phase", "." ]
python
train
erdc/RAPIDpy
RAPIDpy/helper_functions.py
https://github.com/erdc/RAPIDpy/blob/50e14e130554b254a00ff23b226cd7e4c6cfe91a/RAPIDpy/helper_functions.py#L21-L33
def open_csv(csv_file, mode='r'): """ Get mode depending on Python version Based on: http://stackoverflow.com/questions/29840849/writing-a-csv-file-in-python-that-works-for-both-python-2-7-and-python-3-3-in """ # noqa if version_info[0] == 2: # Not named on 2.6 access = '{0}b'.format(mode) kwargs = {} else: access = '{0}t'.format(mode) kwargs = {'newline': ''} return open(csv_file, access, **kwargs)
[ "def", "open_csv", "(", "csv_file", ",", "mode", "=", "'r'", ")", ":", "# noqa", "if", "version_info", "[", "0", "]", "==", "2", ":", "# Not named on 2.6", "access", "=", "'{0}b'", ".", "format", "(", "mode", ")", "kwargs", "=", "{", "}", "else", ":", "access", "=", "'{0}t'", ".", "format", "(", "mode", ")", "kwargs", "=", "{", "'newline'", ":", "''", "}", "return", "open", "(", "csv_file", ",", "access", ",", "*", "*", "kwargs", ")" ]
Get mode depending on Python version Based on: http://stackoverflow.com/questions/29840849/writing-a-csv-file-in-python-that-works-for-both-python-2-7-and-python-3-3-in
[ "Get", "mode", "depending", "on", "Python", "version", "Based", "on", ":", "http", ":", "//", "stackoverflow", ".", "com", "/", "questions", "/", "29840849", "/", "writing", "-", "a", "-", "csv", "-", "file", "-", "in", "-", "python", "-", "that", "-", "works", "-", "for", "-", "both", "-", "python", "-", "2", "-", "7", "-", "and", "-", "python", "-", "3", "-", "3", "-", "in" ]
python
train
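A hedged usage sketch: the helper returns an ordinary file object opened with the version-appropriate mode and newline handling, so it plugs straight into the csv module; the file name and rows are placeholders:

    import csv

    with open_csv('streamflow.csv', mode='w') as f:
        writer = csv.writer(f)
        writer.writerow(['river_id', 'flow_cms'])
        writer.writerow([75224, 12.5])

    with open_csv('streamflow.csv') as f:
        for row in csv.reader(f):
            print(row)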
spookylukey/django-paypal
paypal/standard/models.py
https://github.com/spookylukey/django-paypal/blob/b07d0a3ad91b5c5fe7bb27be3e5d70aabcdef76f/paypal/standard/models.py#L385-L393
def initialize(self, request): """Store the data we'll need to make the postback from the request object.""" if request.method == 'GET': # PDT only - this data is currently unused self.query = request.META.get('QUERY_STRING', '') elif request.method == 'POST': # The following works if paypal sends an ASCII bytestring, which it does. self.query = request.body.decode('ascii') self.ipaddress = request.META.get('REMOTE_ADDR', '')
[ "def", "initialize", "(", "self", ",", "request", ")", ":", "if", "request", ".", "method", "==", "'GET'", ":", "# PDT only - this data is currently unused", "self", ".", "query", "=", "request", ".", "META", ".", "get", "(", "'QUERY_STRING'", ",", "''", ")", "elif", "request", ".", "method", "==", "'POST'", ":", "# The following works if paypal sends an ASCII bytestring, which it does.", "self", ".", "query", "=", "request", ".", "body", ".", "decode", "(", "'ascii'", ")", "self", ".", "ipaddress", "=", "request", ".", "META", ".", "get", "(", "'REMOTE_ADDR'", ",", "''", ")" ]
Store the data we'll need to make the postback from the request object.
[ "Store", "the", "data", "we", "ll", "need", "to", "make", "the", "postback", "from", "the", "request", "object", "." ]
python
train
deepmind/sonnet
sonnet/python/modules/conv.py
https://github.com/deepmind/sonnet/blob/00612ca3178964d86b556e062694d808ff81fcca/sonnet/python/modules/conv.py#L2631-L2677
def _construct_w(self, inputs): """Connects the module into the graph, with input Tensor `inputs`. Args: inputs: A 4D Tensor of shape: [batch_size, input_height, input_width, input_channels] and of type `tf.float16`, `tf.bfloat16` or `tf.float32`. Returns: A tuple of two 4D Tensors, each with the same dtype as `inputs`: 1. w_dw, the depthwise weight matrix, of shape: [kernel_size, input_channels, channel_multiplier] 2. w_pw, the pointwise weight matrix, of shape: [1, 1, channel_multiplier * input_channels, output_channels]. """ depthwise_weight_shape = self._kernel_shape + (self._input_channels, self._channel_multiplier) pointwise_input_size = self._channel_multiplier * self._input_channels pointwise_weight_shape = (1, 1, pointwise_input_size, self._output_channels) if "w_dw" not in self._initializers: fan_in_shape = depthwise_weight_shape[:2] self._initializers["w_dw"] = create_weight_initializer(fan_in_shape, dtype=inputs.dtype) if "w_pw" not in self._initializers: fan_in_shape = pointwise_weight_shape[:3] self._initializers["w_pw"] = create_weight_initializer(fan_in_shape, dtype=inputs.dtype) w_dw = tf.get_variable( "w_dw", shape=depthwise_weight_shape, dtype=inputs.dtype, initializer=self._initializers["w_dw"], partitioner=self._partitioners.get("w_dw", None), regularizer=self._regularizers.get("w_dw", None)) w_pw = tf.get_variable( "w_pw", shape=pointwise_weight_shape, dtype=inputs.dtype, initializer=self._initializers["w_pw"], partitioner=self._partitioners.get("w_pw", None), regularizer=self._regularizers.get("w_pw", None)) return w_dw, w_pw
[ "def", "_construct_w", "(", "self", ",", "inputs", ")", ":", "depthwise_weight_shape", "=", "self", ".", "_kernel_shape", "+", "(", "self", ".", "_input_channels", ",", "self", ".", "_channel_multiplier", ")", "pointwise_input_size", "=", "self", ".", "_channel_multiplier", "*", "self", ".", "_input_channels", "pointwise_weight_shape", "=", "(", "1", ",", "1", ",", "pointwise_input_size", ",", "self", ".", "_output_channels", ")", "if", "\"w_dw\"", "not", "in", "self", ".", "_initializers", ":", "fan_in_shape", "=", "depthwise_weight_shape", "[", ":", "2", "]", "self", ".", "_initializers", "[", "\"w_dw\"", "]", "=", "create_weight_initializer", "(", "fan_in_shape", ",", "dtype", "=", "inputs", ".", "dtype", ")", "if", "\"w_pw\"", "not", "in", "self", ".", "_initializers", ":", "fan_in_shape", "=", "pointwise_weight_shape", "[", ":", "3", "]", "self", ".", "_initializers", "[", "\"w_pw\"", "]", "=", "create_weight_initializer", "(", "fan_in_shape", ",", "dtype", "=", "inputs", ".", "dtype", ")", "w_dw", "=", "tf", ".", "get_variable", "(", "\"w_dw\"", ",", "shape", "=", "depthwise_weight_shape", ",", "dtype", "=", "inputs", ".", "dtype", ",", "initializer", "=", "self", ".", "_initializers", "[", "\"w_dw\"", "]", ",", "partitioner", "=", "self", ".", "_partitioners", ".", "get", "(", "\"w_dw\"", ",", "None", ")", ",", "regularizer", "=", "self", ".", "_regularizers", ".", "get", "(", "\"w_dw\"", ",", "None", ")", ")", "w_pw", "=", "tf", ".", "get_variable", "(", "\"w_pw\"", ",", "shape", "=", "pointwise_weight_shape", ",", "dtype", "=", "inputs", ".", "dtype", ",", "initializer", "=", "self", ".", "_initializers", "[", "\"w_pw\"", "]", ",", "partitioner", "=", "self", ".", "_partitioners", ".", "get", "(", "\"w_pw\"", ",", "None", ")", ",", "regularizer", "=", "self", ".", "_regularizers", ".", "get", "(", "\"w_pw\"", ",", "None", ")", ")", "return", "w_dw", ",", "w_pw" ]
Connects the module into the graph, with input Tensor `inputs`. Args: inputs: A 4D Tensor of shape: [batch_size, input_height, input_width, input_channels] and of type `tf.float16`, `tf.bfloat16` or `tf.float32`. Returns: A tuple of two 4D Tensors, each with the same dtype as `inputs`: 1. w_dw, the depthwise weight matrix, of shape: [kernel_size, input_channels, channel_multiplier] 2. w_pw, the pointwise weight matrix, of shape: [1, 1, channel_multiplier * input_channels, output_channels].
[ "Connects", "the", "module", "into", "the", "graph", "with", "input", "Tensor", "inputs", "." ]
python
train
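A shape-only sketch of how the two weight tensors relate, using invented hyperparameters (3x3 kernel, 16 input channels, channel multiplier 4, 32 output channels); it mirrors the shape arithmetic in _construct_w without building any TensorFlow variables:

    kernel_shape = (3, 3)
    input_channels, channel_multiplier, output_channels = 16, 4, 32

    w_dw_shape = kernel_shape + (input_channels, channel_multiplier)
    w_pw_shape = (1, 1, channel_multiplier * input_channels, output_channels)

    print(w_dw_shape)   # (3, 3, 16, 4)  -> depthwise filter
    print(w_pw_shape)   # (1, 1, 64, 32) -> pointwise 1x1 filter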
inasafe/inasafe
safe/report/extractors/infographic_elements/svg_charts.py
https://github.com/inasafe/inasafe/blob/831d60abba919f6d481dc94a8d988cc205130724/safe/report/extractors/infographic_elements/svg_charts.py#L52-L69
def _convert_tuple_color_to_hex(cls, color): """Convert tuple of color element (r, g, b) to hexa. :param color: A color tuple :type color: (int, int, int) | str :return: Hexa representation of the color :rtype: str """ if isinstance(color, tuple): return '#{:02x}{:02x}{:02x}'.format(*color) elif isinstance(color, str): if not color.startswith('#'): return '#{color_hex}'.format(color_hex=color) else: return color else: return '#000'
[ "def", "_convert_tuple_color_to_hex", "(", "cls", ",", "color", ")", ":", "if", "isinstance", "(", "color", ",", "tuple", ")", ":", "return", "'#{:02x}{:02x}{:02x}'", ".", "format", "(", "*", "color", ")", "elif", "isinstance", "(", "color", ",", "str", ")", ":", "if", "not", "color", ".", "startswith", "(", "'#'", ")", ":", "return", "'#{color_hex}'", ".", "format", "(", "color_hex", "=", "color", ")", "else", ":", "return", "color", "else", ":", "return", "'#000'" ]
Convert tuple of color element (r, g, b) to hexa. :param color: A color tuple :type color: (int, int, int) | str :return: Hexa representation of the color :rtype: str
[ "Convert", "tuple", "of", "color", "element", "(", "r", "g", "b", ")", "to", "hexa", "." ]
python
train
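A standalone replica of the conversion branches for illustration (the original is a classmethod on the chart class); the colours are examples:

    def tuple_color_to_hex(color):
        if isinstance(color, tuple):
            return '#{:02x}{:02x}{:02x}'.format(*color)
        elif isinstance(color, str):
            return color if color.startswith('#') else '#{}'.format(color)
        return '#000'

    print(tuple_color_to_hex((255, 0, 0)))   # #ff0000
    print(tuple_color_to_hex('00ff00'))      # #00ff00
    print(tuple_color_to_hex(None))          # #000 fallback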
QUANTAXIS/QUANTAXIS
QUANTAXIS/QAData/QABlockStruct.py
https://github.com/QUANTAXIS/QUANTAXIS/blob/bb1fe424e4108b62a1f712b81a05cf829297a5c0/QUANTAXIS/QAData/QABlockStruct.py#L140-L153
def get_block(self, block_name): """getblock 获取板块, block_name是list或者是单个str Arguments: block_name {[type]} -- [description] Returns: [type] -- [description] """ # block_name = [block_name] if isinstance( # block_name, str) else block_name # return QA_DataStruct_Stock_block(self.data[self.data.blockname.apply(lambda x: x in block_name)]) return self.new(self.data.loc[(block_name, slice(None)), :])
[ "def", "get_block", "(", "self", ",", "block_name", ")", ":", "# block_name = [block_name] if isinstance(", "# block_name, str) else block_name", "# return QA_DataStruct_Stock_block(self.data[self.data.blockname.apply(lambda x: x in block_name)])", "return", "self", ".", "new", "(", "self", ".", "data", ".", "loc", "[", "(", "block_name", ",", "slice", "(", "None", ")", ")", ",", ":", "]", ")" ]
getblock: get the given block (sector); block_name is a list or a single str Arguments: block_name {[type]} -- [description] Returns: [type] -- [description]
[ "getblock", "get", "the", "given", "block", "(", "sector", ")", "block_name", "is", "a", "list", "or", "a", "single", "str" ]
python
train
bcbio/bcbio-nextgen
bcbio/utils.py
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/utils.py#L351-L374
def symlink_plus(orig, new): """Create relative symlinks and handle associated biological index files. """ orig = os.path.abspath(orig) if not os.path.exists(orig): raise RuntimeError("File not found: %s" % orig) for ext in ["", ".idx", ".gbi", ".tbi", ".bai", ".fai"]: if os.path.exists(orig + ext) and (not os.path.lexists(new + ext) or not os.path.exists(new + ext)): with chdir(os.path.dirname(new)): remove_safe(new + ext) # Work around symlink issues on some filesystems. Randomly # fail to symlink. try: os.symlink(os.path.relpath(orig + ext), os.path.basename(new + ext)) except OSError: if not os.path.exists(new + ext) or not os.path.lexists(new + ext): remove_safe(new + ext) shutil.copyfile(orig + ext, new + ext) orig_noext = splitext_plus(orig)[0] new_noext = splitext_plus(new)[0] for sub_ext in [".bai", ".dict"]: if os.path.exists(orig_noext + sub_ext) and not os.path.lexists(new_noext + sub_ext): with chdir(os.path.dirname(new_noext)): os.symlink(os.path.relpath(orig_noext + sub_ext), os.path.basename(new_noext + sub_ext))
[ "def", "symlink_plus", "(", "orig", ",", "new", ")", ":", "orig", "=", "os", ".", "path", ".", "abspath", "(", "orig", ")", "if", "not", "os", ".", "path", ".", "exists", "(", "orig", ")", ":", "raise", "RuntimeError", "(", "\"File not found: %s\"", "%", "orig", ")", "for", "ext", "in", "[", "\"\"", ",", "\".idx\"", ",", "\".gbi\"", ",", "\".tbi\"", ",", "\".bai\"", ",", "\".fai\"", "]", ":", "if", "os", ".", "path", ".", "exists", "(", "orig", "+", "ext", ")", "and", "(", "not", "os", ".", "path", ".", "lexists", "(", "new", "+", "ext", ")", "or", "not", "os", ".", "path", ".", "exists", "(", "new", "+", "ext", ")", ")", ":", "with", "chdir", "(", "os", ".", "path", ".", "dirname", "(", "new", ")", ")", ":", "remove_safe", "(", "new", "+", "ext", ")", "# Work around symlink issues on some filesystems. Randomly", "# fail to symlink.", "try", ":", "os", ".", "symlink", "(", "os", ".", "path", ".", "relpath", "(", "orig", "+", "ext", ")", ",", "os", ".", "path", ".", "basename", "(", "new", "+", "ext", ")", ")", "except", "OSError", ":", "if", "not", "os", ".", "path", ".", "exists", "(", "new", "+", "ext", ")", "or", "not", "os", ".", "path", ".", "lexists", "(", "new", "+", "ext", ")", ":", "remove_safe", "(", "new", "+", "ext", ")", "shutil", ".", "copyfile", "(", "orig", "+", "ext", ",", "new", "+", "ext", ")", "orig_noext", "=", "splitext_plus", "(", "orig", ")", "[", "0", "]", "new_noext", "=", "splitext_plus", "(", "new", ")", "[", "0", "]", "for", "sub_ext", "in", "[", "\".bai\"", ",", "\".dict\"", "]", ":", "if", "os", ".", "path", ".", "exists", "(", "orig_noext", "+", "sub_ext", ")", "and", "not", "os", ".", "path", ".", "lexists", "(", "new_noext", "+", "sub_ext", ")", ":", "with", "chdir", "(", "os", ".", "path", ".", "dirname", "(", "new_noext", ")", ")", ":", "os", ".", "symlink", "(", "os", ".", "path", ".", "relpath", "(", "orig_noext", "+", "sub_ext", ")", ",", "os", ".", "path", ".", "basename", "(", "new_noext", "+", "sub_ext", ")", ")" ]
Create relative symlinks and handle associated biological index files.
[ "Create", "relative", "symlinks", "and", "handle", "associated", "biological", "index", "files", "." ]
python
train
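A hedged usage sketch; the paths are placeholders and the source BAM (plus any .bai/.fai/.dict neighbours) is assumed to exist, since the helper raises RuntimeError otherwise:

    symlink_plus('/data/input/sample1.bam', '/work/align/sample1.bam')
    # creates a relative symlink /work/align/sample1.bam, plus matching links
    # for whichever index files (.bai, .fai, .dict, ...) sit next to the original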
brandon-rhodes/python-adventure
adventure/game.py
https://github.com/brandon-rhodes/python-adventure/blob/e503b68e394fbccb05fe381901c7009fb1bda3d9/adventure/game.py#L118-L132
def start(self): """Start the game.""" # For old-fashioned players, accept five-letter truncations like # "inven" instead of insisting on full words like "inventory". for key, value in list(self.vocabulary.items()): if isinstance(key, str) and len(key) > 5: self.vocabulary[key[:5]] = value # Set things going. self.chest_room = self.rooms[114] self.bottle.contents = self.water self.yesno(self.messages[65], self.start2)
[ "def", "start", "(", "self", ")", ":", "# For old-fashioned players, accept five-letter truncations like", "# \"inven\" instead of insisting on full words like \"inventory\".", "for", "key", ",", "value", "in", "list", "(", "self", ".", "vocabulary", ".", "items", "(", ")", ")", ":", "if", "isinstance", "(", "key", ",", "str", ")", "and", "len", "(", "key", ")", ">", "5", ":", "self", ".", "vocabulary", "[", "key", "[", ":", "5", "]", "]", "=", "value", "# Set things going.", "self", ".", "chest_room", "=", "self", ".", "rooms", "[", "114", "]", "self", ".", "bottle", ".", "contents", "=", "self", ".", "water", "self", ".", "yesno", "(", "self", ".", "messages", "[", "65", "]", ",", "self", ".", "start2", ")" ]
Start the game.
[ "Start", "the", "game", "." ]
python
train
vecnet/vecnet.openmalaria
vecnet/openmalaria/scenario/entomology.py
https://github.com/vecnet/vecnet.openmalaria/blob/795bc9d1b81a6c664f14879edda7a7c41188e95a/vecnet/openmalaria/scenario/entomology.py#L197-L222
def add(self, vector, InterventionAnophelesParams=None): """ Add a vector to entomology section. vector is either ElementTree or xml snippet InterventionAnophelesParams is an anophelesParams section for every GVI, ITN and IRS intervention already defined in the scenario.xml """ # TODO # 1. If there are GVI interventions, for every GVI, add anophelesParams section. # (gvi_anophelesParams field in AnophelesSnippets models) # 2. If there are ITN interventions, for every ITN, add anophelesParams section # (itn_anophelesParams field in AnophelesSnippets models) # 3. If there are IRS interventions, for every IRS section add anophelesParams section # (irs_anophelesParams field in AnophelesSnippets models) assert isinstance(vector, six.string_types) et = ElementTree.fromstring(vector) # check if it is valid vector mosquito = Vector(et) assert isinstance(mosquito.mosquito, str) assert isinstance(mosquito.propInfected, float) assert len(mosquito.seasonality.monthlyValues) == 12 index = len(self.et.findall("anopheles")) self.et.insert(index, et)
[ "def", "add", "(", "self", ",", "vector", ",", "InterventionAnophelesParams", "=", "None", ")", ":", "# TODO", "# 1. If there are GVI interventions, for every GVI, add anophelesParams section.", "# (gvi_anophelesParams field in AnophelesSnippets models)", "# 2. If there are ITN interventions, for every ITN, add anophelesParams section", "# (itn_anophelesParams field in AnophelesSnippets models)", "# 3. If there are IRS interventions, for every IRS section add anophelesParams section", "# (irs_anophelesParams field in AnophelesSnippets models)", "assert", "isinstance", "(", "vector", ",", "six", ".", "string_types", ")", "et", "=", "ElementTree", ".", "fromstring", "(", "vector", ")", "# check if it is valid vector", "mosquito", "=", "Vector", "(", "et", ")", "assert", "isinstance", "(", "mosquito", ".", "mosquito", ",", "str", ")", "assert", "isinstance", "(", "mosquito", ".", "propInfected", ",", "float", ")", "assert", "len", "(", "mosquito", ".", "seasonality", ".", "monthlyValues", ")", "==", "12", "index", "=", "len", "(", "self", ".", "et", ".", "findall", "(", "\"anopheles\"", ")", ")", "self", ".", "et", ".", "insert", "(", "index", ",", "et", ")" ]
Add a vector to entomology section. vector is either ElementTree or xml snippet InterventionAnophelesParams is an anophelesParams section for every GVI, ITN and IRS intervention already defined in the scenario.xml
[ "Add", "a", "vector", "to", "entomology", "section", ".", "vector", "is", "either", "ElementTree", "or", "xml", "snippet" ]
python
train
rackerlabs/rackspace-python-neutronclient
neutronclient/v2_0/client.py
https://github.com/rackerlabs/rackspace-python-neutronclient/blob/5a5009a8fe078e3aa1d582176669f1b28ab26bef/neutronclient/v2_0/client.py#L621-L623
def show_extension(self, ext_alias, **_params): """Fetches information of a certain extension.""" return self.get(self.extension_path % ext_alias, params=_params)
[ "def", "show_extension", "(", "self", ",", "ext_alias", ",", "*", "*", "_params", ")", ":", "return", "self", ".", "get", "(", "self", ".", "extension_path", "%", "ext_alias", ",", "params", "=", "_params", ")" ]
Fetches information of a certain extension.
[ "Fetches", "information", "of", "a", "certain", "extension", "." ]
python
train
dancsalo/TensorBase
tensorbase/base.py
https://github.com/dancsalo/TensorBase/blob/3d42a326452bd03427034916ff2fb90730020204/tensorbase/base.py#L912-L919
def const_variable(name, shape, value, trainable): """ :param name: string :param shape: 1D array :param value: float :return: tf variable """ return tf.get_variable(name, shape, initializer=tf.constant_initializer(value), trainable=trainable)
[ "def", "const_variable", "(", "name", ",", "shape", ",", "value", ",", "trainable", ")", ":", "return", "tf", ".", "get_variable", "(", "name", ",", "shape", ",", "initializer", "=", "tf", ".", "constant_initializer", "(", "value", ")", ",", "trainable", "=", "trainable", ")" ]
:param name: string :param shape: 1D array :param value: float :return: tf variable
[ ":", "param", "name", ":", "string", ":", "param", "shape", ":", "1D", "array", ":", "param", "value", ":", "float", ":", "return", ":", "tf", "variable" ]
python
train
PyCQA/astroid
astroid/transforms.py
https://github.com/PyCQA/astroid/blob/e0a298df55b15abcb77c2a93253f5ab7be52d0fb/astroid/transforms.py#L83-L90
def visit(self, module): """Walk the given astroid *tree* and transform each encountered node Only the nodes which have transforms registered will actually be replaced or changed. """ module.body = [self._visit(child) for child in module.body] return self._transform(module)
[ "def", "visit", "(", "self", ",", "module", ")", ":", "module", ".", "body", "=", "[", "self", ".", "_visit", "(", "child", ")", "for", "child", "in", "module", ".", "body", "]", "return", "self", ".", "_transform", "(", "module", ")" ]
Walk the given astroid *tree* and transform each encountered node Only the nodes which have transforms registered will actually be replaced or changed.
[ "Walk", "the", "given", "astroid", "*", "tree", "*", "and", "transform", "each", "encountered", "node" ]
python
train
klahnakoski/pyLibrary
jx_python/meta.py
https://github.com/klahnakoski/pyLibrary/blob/fa2dcbc48fda8d26999baef400e9a98149e0b982/jx_python/meta.py#L546-L615
def _get_schema_from_list(frum, table_name, parent, nested_path, columns): """ :param frum: The list :param table_name: Name of the table this list holds records for :param parent: parent path :param nested_path: each nested array, in reverse order :param columns: map from full name to column definition :return: """ for d in frum: row_type = python_type_to_json_type[d.__class__] if row_type != "object": # EXPECTING PRIMITIVE VALUE full_name = parent column = columns[full_name] if not column: column = Column( name=concat_field(table_name, full_name), es_column=full_name, es_index=".", es_type=d.__class__.__name__, jx_type=None, # WILL BE SET BELOW last_updated=Date.now(), nested_path=nested_path, ) columns.add(column) column.es_type = _merge_python_type(column.es_type, d.__class__) column.jx_type = python_type_to_json_type[column.es_type] else: for name, value in d.items(): full_name = concat_field(parent, name) column = columns[full_name] if not column: column = Column( name=concat_field(table_name, full_name), es_column=full_name, es_index=".", es_type=value.__class__.__name__, jx_type=None, # WILL BE SET BELOW last_updated=Date.now(), nested_path=nested_path, ) columns.add(column) if is_container(value): # GET TYPE OF MULTIVALUE v = list(value) if len(v) == 0: this_type = none_type.__name__ elif len(v) == 1: this_type = v[0].__class__.__name__ else: this_type = reduce( _merge_python_type, (vi.__class__.__name__ for vi in value) ) else: this_type = value.__class__.__name__ column.es_type = _merge_python_type(column.es_type, this_type) column.jx_type = python_type_to_json_type[column.es_type] if this_type in {"object", "dict", "Mapping", "Data"}: _get_schema_from_list( [value], table_name, full_name, nested_path, columns ) elif this_type in {"list", "FlatList"}: np = listwrap(nested_path) newpath = unwraplist([join_field(split_field(np[0]) + [name])] + np) _get_schema_from_list( value, table_name, full_name, newpath, columns )
[ "def", "_get_schema_from_list", "(", "frum", ",", "table_name", ",", "parent", ",", "nested_path", ",", "columns", ")", ":", "for", "d", "in", "frum", ":", "row_type", "=", "python_type_to_json_type", "[", "d", ".", "__class__", "]", "if", "row_type", "!=", "\"object\"", ":", "# EXPECTING PRIMITIVE VALUE", "full_name", "=", "parent", "column", "=", "columns", "[", "full_name", "]", "if", "not", "column", ":", "column", "=", "Column", "(", "name", "=", "concat_field", "(", "table_name", ",", "full_name", ")", ",", "es_column", "=", "full_name", ",", "es_index", "=", "\".\"", ",", "es_type", "=", "d", ".", "__class__", ".", "__name__", ",", "jx_type", "=", "None", ",", "# WILL BE SET BELOW", "last_updated", "=", "Date", ".", "now", "(", ")", ",", "nested_path", "=", "nested_path", ",", ")", "columns", ".", "add", "(", "column", ")", "column", ".", "es_type", "=", "_merge_python_type", "(", "column", ".", "es_type", ",", "d", ".", "__class__", ")", "column", ".", "jx_type", "=", "python_type_to_json_type", "[", "column", ".", "es_type", "]", "else", ":", "for", "name", ",", "value", "in", "d", ".", "items", "(", ")", ":", "full_name", "=", "concat_field", "(", "parent", ",", "name", ")", "column", "=", "columns", "[", "full_name", "]", "if", "not", "column", ":", "column", "=", "Column", "(", "name", "=", "concat_field", "(", "table_name", ",", "full_name", ")", ",", "es_column", "=", "full_name", ",", "es_index", "=", "\".\"", ",", "es_type", "=", "value", ".", "__class__", ".", "__name__", ",", "jx_type", "=", "None", ",", "# WILL BE SET BELOW", "last_updated", "=", "Date", ".", "now", "(", ")", ",", "nested_path", "=", "nested_path", ",", ")", "columns", ".", "add", "(", "column", ")", "if", "is_container", "(", "value", ")", ":", "# GET TYPE OF MULTIVALUE", "v", "=", "list", "(", "value", ")", "if", "len", "(", "v", ")", "==", "0", ":", "this_type", "=", "none_type", ".", "__name__", "elif", "len", "(", "v", ")", "==", "1", ":", "this_type", "=", "v", "[", "0", "]", ".", "__class__", ".", "__name__", "else", ":", "this_type", "=", "reduce", "(", "_merge_python_type", ",", "(", "vi", ".", "__class__", ".", "__name__", "for", "vi", "in", "value", ")", ")", "else", ":", "this_type", "=", "value", ".", "__class__", ".", "__name__", "column", ".", "es_type", "=", "_merge_python_type", "(", "column", ".", "es_type", ",", "this_type", ")", "column", ".", "jx_type", "=", "python_type_to_json_type", "[", "column", ".", "es_type", "]", "if", "this_type", "in", "{", "\"object\"", ",", "\"dict\"", ",", "\"Mapping\"", ",", "\"Data\"", "}", ":", "_get_schema_from_list", "(", "[", "value", "]", ",", "table_name", ",", "full_name", ",", "nested_path", ",", "columns", ")", "elif", "this_type", "in", "{", "\"list\"", ",", "\"FlatList\"", "}", ":", "np", "=", "listwrap", "(", "nested_path", ")", "newpath", "=", "unwraplist", "(", "[", "join_field", "(", "split_field", "(", "np", "[", "0", "]", ")", "+", "[", "name", "]", ")", "]", "+", "np", ")", "_get_schema_from_list", "(", "value", ",", "table_name", ",", "full_name", ",", "newpath", ",", "columns", ")" ]
:param frum: The list :param table_name: Name of the table this list holds records for :param parent: parent path :param nested_path: each nested array, in reverse order :param columns: map from full name to column definition :return:
[ ":", "param", "frum", ":", "The", "list", ":", "param", "table_name", ":", "Name", "of", "the", "table", "this", "list", "holds", "records", "for", ":", "param", "parent", ":", "parent", "path", ":", "param", "nested_path", ":", "each", "nested", "array", "in", "reverse", "order", ":", "param", "columns", ":", "map", "from", "full", "name", "to", "column", "definition", ":", "return", ":" ]
python
train
CivicSpleen/ambry
ambry/library/search_backends/base.py
https://github.com/CivicSpleen/ambry/blob/d7f2be4bf1f7ffd086f3fadd4fcae60c32473e42/ambry/library/search_backends/base.py#L705-L832
def parse(self, s, term_join=None): """ Parses search term to Args: s (str): string with search term. or_join (callable): function to join 'OR' terms. Returns: dict: all of the terms grouped by marker. Key is a marker, value is a term. Example: >>> SearchTermParser().parse('table2 from 1978 to 1979 in california') {'to': 1979, 'about': 'table2', 'from': 1978, 'in': 'california'} """ if not term_join: term_join = lambda x: '(' + ' OR '.join(x) + ')' toks = self.scan(s) # Examples: starting with this query: # diabetes from 2014 to 2016 source healthindicators.gov # Assume the first term is ABOUT, if it is not marked with a marker. if toks and toks[0] and (toks[0][0] == self.TERM or toks[0][0] == self.QUOTEDTERM): toks = [(self.MARKER, 'about')] + toks # The example query produces this list of tokens: #[(3, 'about'), # (0, 'diabetes'), # (3, 'from'), # (4, 2014), # (3, 'to'), # (4, 2016), # (3, 'source'), # (0, 'healthindicators.gov')] # Group the terms by their marker. bymarker = [] for t in toks: if t[0] == self.MARKER: bymarker.append((t[1], [])) else: bymarker[-1][1].append(t) # After grouping tokens by their markers # [('about', [(0, 'diabetes')]), # ('from', [(4, 2014)]), # ('to', [(4, 2016)]), # ('source', [(0, 'healthindicators.gov')]) # ] # Convert some of the markers based on their contents. This just changes the marker type for keywords # we'll do more adjustments later. comps = [] for t in bymarker: t = list(t) if t[0] == 'in' and len(t[1]) == 1 and isinstance(t[1][0][1], string_types) and self.stem( t[1][0][1]) in self.geograins.keys(): t[0] = 'by' # If the from term isn't an integer, then it is really a source. if t[0] == 'from' and len(t[1]) == 1 and t[1][0][0] != self.YEAR: t[0] = 'source' comps.append(t) # After conversions # [['about', [(0, 'diabetes')]], # ['from', [(4, 2014)]], # ['to', [(4, 2016)]], # ['source', [(0, 'healthindicators.gov')]]] # Join all of the terms into single marker groups groups = {marker: [] for marker, _ in comps} for marker, terms in comps: groups[marker] += [term for marker, term in terms] # At this point, the groups dict is formed, but it will have a list # for each marker that has multiple terms. # Only a few of the markers should have more than one term, so move # extras to the about group for marker, group in groups.items(): if marker == 'about': continue if len(group) > 1 and marker not in self.multiterms: groups[marker], extras = [group[0]], group[1:] if not 'about' in groups: groups['about'] = extras else: groups['about'] += extras if marker == 'by': groups['by'] = [ self.geograins.get(self.stem(e)) for e in group] for marker, terms in iteritems(groups): if len(terms) > 1: if marker in 'in': groups[marker] = ' '.join(terms) else: groups[marker] = term_join(terms) elif len(terms) == 1: groups[marker] = terms[0] else: pass # After grouping: # {'to': 2016, # 'about': 'diabetes', # 'from': 2014, # 'source': 'healthindicators.gov'} # If there were any markers with multiple terms, they would be cast in the or_join form. return groups
[ "def", "parse", "(", "self", ",", "s", ",", "term_join", "=", "None", ")", ":", "if", "not", "term_join", ":", "term_join", "=", "lambda", "x", ":", "'('", "+", "' OR '", ".", "join", "(", "x", ")", "+", "')'", "toks", "=", "self", ".", "scan", "(", "s", ")", "# Examples: starting with this query:", "# diabetes from 2014 to 2016 source healthindicators.gov", "# Assume the first term is ABOUT, if it is not marked with a marker.", "if", "toks", "and", "toks", "[", "0", "]", "and", "(", "toks", "[", "0", "]", "[", "0", "]", "==", "self", ".", "TERM", "or", "toks", "[", "0", "]", "[", "0", "]", "==", "self", ".", "QUOTEDTERM", ")", ":", "toks", "=", "[", "(", "self", ".", "MARKER", ",", "'about'", ")", "]", "+", "toks", "# The example query produces this list of tokens:", "#[(3, 'about'),", "# (0, 'diabetes'),", "# (3, 'from'),", "# (4, 2014),", "# (3, 'to'),", "# (4, 2016),", "# (3, 'source'),", "# (0, 'healthindicators.gov')]", "# Group the terms by their marker.", "bymarker", "=", "[", "]", "for", "t", "in", "toks", ":", "if", "t", "[", "0", "]", "==", "self", ".", "MARKER", ":", "bymarker", ".", "append", "(", "(", "t", "[", "1", "]", ",", "[", "]", ")", ")", "else", ":", "bymarker", "[", "-", "1", "]", "[", "1", "]", ".", "append", "(", "t", ")", "# After grouping tokens by their markers", "# [('about', [(0, 'diabetes')]),", "# ('from', [(4, 2014)]),", "# ('to', [(4, 2016)]),", "# ('source', [(0, 'healthindicators.gov')])", "# ]", "# Convert some of the markers based on their contents. This just changes the marker type for keywords", "# we'll do more adjustments later.", "comps", "=", "[", "]", "for", "t", "in", "bymarker", ":", "t", "=", "list", "(", "t", ")", "if", "t", "[", "0", "]", "==", "'in'", "and", "len", "(", "t", "[", "1", "]", ")", "==", "1", "and", "isinstance", "(", "t", "[", "1", "]", "[", "0", "]", "[", "1", "]", ",", "string_types", ")", "and", "self", ".", "stem", "(", "t", "[", "1", "]", "[", "0", "]", "[", "1", "]", ")", "in", "self", ".", "geograins", ".", "keys", "(", ")", ":", "t", "[", "0", "]", "=", "'by'", "# If the from term isn't an integer, then it is really a source.", "if", "t", "[", "0", "]", "==", "'from'", "and", "len", "(", "t", "[", "1", "]", ")", "==", "1", "and", "t", "[", "1", "]", "[", "0", "]", "[", "0", "]", "!=", "self", ".", "YEAR", ":", "t", "[", "0", "]", "=", "'source'", "comps", ".", "append", "(", "t", ")", "# After conversions", "# [['about', [(0, 'diabetes')]],", "# ['from', [(4, 2014)]],", "# ['to', [(4, 2016)]],", "# ['source', [(0, 'healthindicators.gov')]]]", "# Join all of the terms into single marker groups", "groups", "=", "{", "marker", ":", "[", "]", "for", "marker", ",", "_", "in", "comps", "}", "for", "marker", ",", "terms", "in", "comps", ":", "groups", "[", "marker", "]", "+=", "[", "term", "for", "marker", ",", "term", "in", "terms", "]", "# At this point, the groups dict is formed, but it will have a list", "# for each marker that has multiple terms.", "# Only a few of the markers should have more than one term, so move", "# extras to the about group", "for", "marker", ",", "group", "in", "groups", ".", "items", "(", ")", ":", "if", "marker", "==", "'about'", ":", "continue", "if", "len", "(", "group", ")", ">", "1", "and", "marker", "not", "in", "self", ".", "multiterms", ":", "groups", "[", "marker", "]", ",", "extras", "=", "[", "group", "[", "0", "]", "]", ",", "group", "[", "1", ":", "]", "if", "not", "'about'", "in", "groups", ":", "groups", "[", "'about'", "]", "=", "extras", "else", ":", "groups", "[", "'about'", 
"]", "+=", "extras", "if", "marker", "==", "'by'", ":", "groups", "[", "'by'", "]", "=", "[", "self", ".", "geograins", ".", "get", "(", "self", ".", "stem", "(", "e", ")", ")", "for", "e", "in", "group", "]", "for", "marker", ",", "terms", "in", "iteritems", "(", "groups", ")", ":", "if", "len", "(", "terms", ")", ">", "1", ":", "if", "marker", "in", "'in'", ":", "groups", "[", "marker", "]", "=", "' '", ".", "join", "(", "terms", ")", "else", ":", "groups", "[", "marker", "]", "=", "term_join", "(", "terms", ")", "elif", "len", "(", "terms", ")", "==", "1", ":", "groups", "[", "marker", "]", "=", "terms", "[", "0", "]", "else", ":", "pass", "# After grouping:", "# {'to': 2016,", "# 'about': 'diabetes',", "# 'from': 2014,", "# 'source': 'healthindicators.gov'}", "# If there were any markers with multiple terms, they would be cast in the or_join form.", "return", "groups" ]
Parses search term to Args: s (str): string with search term. or_join (callable): function to join 'OR' terms. Returns: dict: all of the terms grouped by marker. Key is a marker, value is a term. Example: >>> SearchTermParser().parse('table2 from 1978 to 1979 in california') {'to': 1979, 'about': 'table2', 'from': 1978, 'in': 'california'}
[ "Parses", "search", "term", "to" ]
python
train
jmoiron/speedparser
speedparser/speedparser.py
https://github.com/jmoiron/speedparser/blob/e7e8d79daf73b35c9259695ad1e379476e1dfc77/speedparser/speedparser.py#L255-L298
def parse_entry(self, entry): """An attempt to parse pieces of an entry out w/o xpath, by looping over the entry root's children and slotting them into the right places. This is going to be way messier than SpeedParserEntries, and maybe less cleanly usable, but it should be faster.""" e = feedparser.FeedParserDict() tag_map = self.tag_map nslookup = self.nslookup for child in entry.getchildren(): if isinstance(child, etree._Comment): continue ns, tag = clean_ns(child.tag) mapping = tag_map.get(tag, None) if mapping: getattr(self, 'parse_%s' % mapping)(child, e, nslookup.get(ns, ns)) if not ns: continue fulltag = '%s:%s' % (nslookup.get(ns, ''), tag) mapping = tag_map.get(fulltag, None) if mapping: getattr(self, 'parse_%s' % mapping)(child, e, nslookup[ns]) lacks_summary = 'summary' not in e or e['summary'] is None lacks_content = 'content' not in e or not bool(e.get('content', None)) if not lacks_summary and lacks_content: e['content'] = [{'value': e.summary}] # feedparser sometimes copies the first content value into the # summary field when summary was completely missing; we want # to do that as well, but avoid the case where summary was given as '' if lacks_summary and not lacks_content: e['summary'] = e['content'][0]['value'] if e.get('summary', False) is None: e['summary'] = u'' # support feed entries that have a guid but no link if 'guid' in e and 'link' not in e: e['link'] = full_href(e['guid'], self.baseurl) return e
[ "def", "parse_entry", "(", "self", ",", "entry", ")", ":", "e", "=", "feedparser", ".", "FeedParserDict", "(", ")", "tag_map", "=", "self", ".", "tag_map", "nslookup", "=", "self", ".", "nslookup", "for", "child", "in", "entry", ".", "getchildren", "(", ")", ":", "if", "isinstance", "(", "child", ",", "etree", ".", "_Comment", ")", ":", "continue", "ns", ",", "tag", "=", "clean_ns", "(", "child", ".", "tag", ")", "mapping", "=", "tag_map", ".", "get", "(", "tag", ",", "None", ")", "if", "mapping", ":", "getattr", "(", "self", ",", "'parse_%s'", "%", "mapping", ")", "(", "child", ",", "e", ",", "nslookup", ".", "get", "(", "ns", ",", "ns", ")", ")", "if", "not", "ns", ":", "continue", "fulltag", "=", "'%s:%s'", "%", "(", "nslookup", ".", "get", "(", "ns", ",", "''", ")", ",", "tag", ")", "mapping", "=", "tag_map", ".", "get", "(", "fulltag", ",", "None", ")", "if", "mapping", ":", "getattr", "(", "self", ",", "'parse_%s'", "%", "mapping", ")", "(", "child", ",", "e", ",", "nslookup", "[", "ns", "]", ")", "lacks_summary", "=", "'summary'", "not", "in", "e", "or", "e", "[", "'summary'", "]", "is", "None", "lacks_content", "=", "'content'", "not", "in", "e", "or", "not", "bool", "(", "e", ".", "get", "(", "'content'", ",", "None", ")", ")", "if", "not", "lacks_summary", "and", "lacks_content", ":", "e", "[", "'content'", "]", "=", "[", "{", "'value'", ":", "e", ".", "summary", "}", "]", "# feedparser sometimes copies the first content value into the", "# summary field when summary was completely missing; we want", "# to do that as well, but avoid the case where summary was given as ''", "if", "lacks_summary", "and", "not", "lacks_content", ":", "e", "[", "'summary'", "]", "=", "e", "[", "'content'", "]", "[", "0", "]", "[", "'value'", "]", "if", "e", ".", "get", "(", "'summary'", ",", "False", ")", "is", "None", ":", "e", "[", "'summary'", "]", "=", "u''", "# support feed entries that have a guid but no link", "if", "'guid'", "in", "e", "and", "'link'", "not", "in", "e", ":", "e", "[", "'link'", "]", "=", "full_href", "(", "e", "[", "'guid'", "]", ",", "self", ".", "baseurl", ")", "return", "e" ]
An attempt to parse pieces of an entry out w/o xpath, by looping over the entry root's children and slotting them into the right places. This is going to be way messier than SpeedParserEntries, and maybe less cleanly usable, but it should be faster.
[ "An", "attempt", "to", "parse", "pieces", "of", "an", "entry", "out", "w", "/", "o", "xpath", "by", "looping", "over", "the", "entry", "root", "s", "children", "and", "slotting", "them", "into", "the", "right", "places", ".", "This", "is", "going", "to", "be", "way", "messier", "than", "SpeedParserEntries", "and", "maybe", "less", "cleanly", "usable", "but", "it", "should", "be", "faster", "." ]
python
train
pydsigner/pygu
pygu/pygw.py
https://github.com/pydsigner/pygu/blob/09fe71534900933908ab83db12f5659b7827e31c/pygu/pygw.py#L550-L559
def copy(self):
    '''
    Copy the text in the Entry() and place it on the clipboard.
    '''
    try:
        pygame.scrap.put(SCRAP_TEXT, self.get())
        return True
    except:
        # pygame.scrap is experimental, allow for changes
        return False
[ "def", "copy", "(", "self", ")", ":", "try", ":", "pygame", ".", "scrap", ".", "put", "(", "SCRAP_TEXT", ",", "self", ".", "get", "(", ")", ")", "return", "True", "except", ":", "# pygame.scrap is experimental, allow for changes", "return", "False" ]
Copy the text in the Entry() and place it on the clipboard.
[ "Copy", "the", "text", "in", "the", "Entry", "()", "and", "place", "it", "on", "the", "clipboard", "." ]
python
train
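For the `copy()` entry above (pydsigner/pygu), a minimal usage sketch, assuming a hypothetical `entry` widget instance; `pygame.scrap` only works after the display has been created, and the initialisation shown here is not part of the quoted source.

import pygame
from pygame.locals import SCRAP_TEXT

pygame.init()
screen = pygame.display.set_mode((320, 240))
pygame.scrap.init()  # scrap clipboard support requires an initialised display

# `entry` is a hypothetical pygw Entry widget with some text in it
if entry.copy():
    print("Copied:", pygame.scrap.get(SCRAP_TEXT))
else:
    print("Clipboard not available on this platform")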
rbuffat/pyepw
pyepw/epw.py
https://github.com/rbuffat/pyepw/blob/373d4d3c8386c8d35789f086ac5f6018c2711745/pyepw/epw.py#L5100-L5132
def read(self, vals):
    """Read values.

    Args:
        vals (list): list of strings representing values
    """
    i = 0
    if len(vals[i]) == 0:
        self.number_of_records_per_hour = None
    else:
        self.number_of_records_per_hour = vals[i]
    i += 1
    if len(vals[i]) == 0:
        self.data_period_name_or_description = None
    else:
        self.data_period_name_or_description = vals[i]
    i += 1
    if len(vals[i]) == 0:
        self.data_period_start_day_of_week = None
    else:
        self.data_period_start_day_of_week = vals[i]
    i += 1
    if len(vals[i]) == 0:
        self.data_period_start_day = None
    else:
        self.data_period_start_day = vals[i]
    i += 1
    if len(vals[i]) == 0:
        self.data_period_end_day = None
    else:
        self.data_period_end_day = vals[i]
    i += 1
[ "def", "read", "(", "self", ",", "vals", ")", ":", "i", "=", "0", "if", "len", "(", "vals", "[", "i", "]", ")", "==", "0", ":", "self", ".", "number_of_records_per_hour", "=", "None", "else", ":", "self", ".", "number_of_records_per_hour", "=", "vals", "[", "i", "]", "i", "+=", "1", "if", "len", "(", "vals", "[", "i", "]", ")", "==", "0", ":", "self", ".", "data_period_name_or_description", "=", "None", "else", ":", "self", ".", "data_period_name_or_description", "=", "vals", "[", "i", "]", "i", "+=", "1", "if", "len", "(", "vals", "[", "i", "]", ")", "==", "0", ":", "self", ".", "data_period_start_day_of_week", "=", "None", "else", ":", "self", ".", "data_period_start_day_of_week", "=", "vals", "[", "i", "]", "i", "+=", "1", "if", "len", "(", "vals", "[", "i", "]", ")", "==", "0", ":", "self", ".", "data_period_start_day", "=", "None", "else", ":", "self", ".", "data_period_start_day", "=", "vals", "[", "i", "]", "i", "+=", "1", "if", "len", "(", "vals", "[", "i", "]", ")", "==", "0", ":", "self", ".", "data_period_end_day", "=", "None", "else", ":", "self", ".", "data_period_end_day", "=", "vals", "[", "i", "]", "i", "+=", "1" ]
Read values. Args: vals (list): list of strings representing values
[ "Read", "values", "." ]
python
train
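A small illustration of how the `read()` method above consumes its `vals` list: empty strings become None, anything else is stored verbatim. The `DataPeriods()` instance name is an assumption for illustration, not taken from the source.

period = DataPeriods()  # hypothetical instance of the class defining read()

period.read(["1", "Data", "Sunday", " 1/ 1", "12/31"])
print(period.number_of_records_per_hour)      # "1"
print(period.data_period_start_day_of_week)   # "Sunday"

period.read(["", "", "", "", ""])
print(period.number_of_records_per_hour)      # None (empty string input)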
Roastero/freshroastsr700
freshroastsr700/pid.py
https://github.com/Roastero/freshroastsr700/blob/49cf4961444c0f56d051d5ac5088ace480b54f02/freshroastsr700/pid.py#L61-L65
def setPoint(self, targetTemp):
    """Initialize the setpoint of PID."""
    self.targetTemp = targetTemp
    self.Integrator = 0
    self.Derivator = 0
[ "def", "setPoint", "(", "self", ",", "targetTemp", ")", ":", "self", ".", "targetTemp", "=", "targetTemp", "self", ".", "Integrator", "=", "0", "self", ".", "Derivator", "=", "0" ]
Initialize the setpoint of PID.
[ "Initilize", "the", "setpoint", "of", "PID", "." ]
python
train
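A minimal sketch of how a setpoint change like `setPoint()` above is typically used in a control loop. The `PID()` constructor, `update()` method, and the sensor/actuator helpers are assumptions for illustration only; the quoted entry shows only the setpoint reset.

pid = PID()                 # assumed constructor
pid.setPoint(218.0)         # new target temperature; integrator/derivator reset

while roasting:                              # hypothetical loop condition
    current_temp = read_temperature()        # assumed sensor helper
    output = pid.update(current_temp)        # assumed update() method
    set_heater(output)                       # assumed actuator helper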
openstack/proliantutils
proliantutils/hpssa/manager.py
https://github.com/openstack/proliantutils/blob/86ef3b47b4eca97c221577e3570b0240d6a25f22/proliantutils/hpssa/manager.py#L362-L402
def erase_devices(): """Erase all the drives on this server. This method performs sanitize erase on all the supported physical drives in this server. This erase cannot be performed on logical drives. :returns: a dictionary of controllers with drives and the erase status. :raises exception.HPSSAException, if none of the drives support sanitize erase. """ server = objects.Server() for controller in server.controllers: drives = [x for x in controller.unassigned_physical_drives if (x.get_physical_drive_dict().get('erase_status', '') == 'OK')] if drives: controller.erase_devices(drives) while not has_erase_completed(): time.sleep(300) server.refresh() status = {} for controller in server.controllers: drive_status = {x.id: x.erase_status for x in controller.unassigned_physical_drives} sanitize_supported = controller.properties.get( 'Sanitize Erase Supported', 'False') if sanitize_supported == 'False': msg = ("Drives overwritten with zeros because sanitize erase " "is not supported on the controller.") else: msg = ("Sanitize Erase performed on the disks attached to " "the controller.") drive_status.update({'Summary': msg}) status[controller.id] = drive_status return status
[ "def", "erase_devices", "(", ")", ":", "server", "=", "objects", ".", "Server", "(", ")", "for", "controller", "in", "server", ".", "controllers", ":", "drives", "=", "[", "x", "for", "x", "in", "controller", ".", "unassigned_physical_drives", "if", "(", "x", ".", "get_physical_drive_dict", "(", ")", ".", "get", "(", "'erase_status'", ",", "''", ")", "==", "'OK'", ")", "]", "if", "drives", ":", "controller", ".", "erase_devices", "(", "drives", ")", "while", "not", "has_erase_completed", "(", ")", ":", "time", ".", "sleep", "(", "300", ")", "server", ".", "refresh", "(", ")", "status", "=", "{", "}", "for", "controller", "in", "server", ".", "controllers", ":", "drive_status", "=", "{", "x", ".", "id", ":", "x", ".", "erase_status", "for", "x", "in", "controller", ".", "unassigned_physical_drives", "}", "sanitize_supported", "=", "controller", ".", "properties", ".", "get", "(", "'Sanitize Erase Supported'", ",", "'False'", ")", "if", "sanitize_supported", "==", "'False'", ":", "msg", "=", "(", "\"Drives overwritten with zeros because sanitize erase \"", "\"is not supported on the controller.\"", ")", "else", ":", "msg", "=", "(", "\"Sanitize Erase performed on the disks attached to \"", "\"the controller.\"", ")", "drive_status", ".", "update", "(", "{", "'Summary'", ":", "msg", "}", ")", "status", "[", "controller", ".", "id", "]", "=", "drive_status", "return", "status" ]
Erase all the drives on this server. This method performs sanitize erase on all the supported physical drives in this server. This erase cannot be performed on logical drives. :returns: a dictionary of controllers with drives and the erase status. :raises exception.HPSSAException, if none of the drives support sanitize erase.
[ "Erase", "all", "the", "drives", "on", "this", "server", "." ]
python
train
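A hedged usage sketch for the `erase_devices()` entry above, assuming it is importable from `proliantutils.hpssa.manager` as the path suggests; the printed layout is illustrative only.

from proliantutils.hpssa import manager

# Sanitize-erase every supported unassigned physical drive, then report.
status = manager.erase_devices()
for controller_id, drives in status.items():
    print("Controller:", controller_id)
    print("  Summary:", drives.pop('Summary'))
    for drive_id, erase_status in drives.items():
        print("  %s -> %s" % (drive_id, erase_status))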
cherrypy/cheroot
cheroot/server.py
https://github.com/cherrypy/cheroot/blob/2af3b1798d66da697957480d3a8b4831a405770b/cheroot/server.py#L541-L582
def readline(self, size=None):
    """Read a single line from rfile buffer and return it.

    Args:
        size (int): minimum amount of data to read

    Returns:
        bytes: One line from rfile.
    """
    data = EMPTY
    if size == 0:
        return data

    while True:
        if size and len(data) >= size:
            return data

        if not self.buffer:
            self._fetch()
            if not self.buffer:
                # EOF
                return data

        newline_pos = self.buffer.find(LF)
        if size:
            if newline_pos == -1:
                remaining = size - len(data)
                data += self.buffer[:remaining]
                self.buffer = self.buffer[remaining:]
            else:
                remaining = min(size - len(data), newline_pos)
                data += self.buffer[:remaining]
                self.buffer = self.buffer[remaining:]
        else:
            if newline_pos == -1:
                data += self.buffer
                self.buffer = EMPTY
            else:
                data += self.buffer[:newline_pos]
                self.buffer = self.buffer[newline_pos:]
[ "def", "readline", "(", "self", ",", "size", "=", "None", ")", ":", "data", "=", "EMPTY", "if", "size", "==", "0", ":", "return", "data", "while", "True", ":", "if", "size", "and", "len", "(", "data", ")", ">=", "size", ":", "return", "data", "if", "not", "self", ".", "buffer", ":", "self", ".", "_fetch", "(", ")", "if", "not", "self", ".", "buffer", ":", "# EOF", "return", "data", "newline_pos", "=", "self", ".", "buffer", ".", "find", "(", "LF", ")", "if", "size", ":", "if", "newline_pos", "==", "-", "1", ":", "remaining", "=", "size", "-", "len", "(", "data", ")", "data", "+=", "self", ".", "buffer", "[", ":", "remaining", "]", "self", ".", "buffer", "=", "self", ".", "buffer", "[", "remaining", ":", "]", "else", ":", "remaining", "=", "min", "(", "size", "-", "len", "(", "data", ")", ",", "newline_pos", ")", "data", "+=", "self", ".", "buffer", "[", ":", "remaining", "]", "self", ".", "buffer", "=", "self", ".", "buffer", "[", "remaining", ":", "]", "else", ":", "if", "newline_pos", "==", "-", "1", ":", "data", "+=", "self", ".", "buffer", "self", ".", "buffer", "=", "EMPTY", "else", ":", "data", "+=", "self", ".", "buffer", "[", ":", "newline_pos", "]", "self", ".", "buffer", "=", "self", ".", "buffer", "[", "newline_pos", ":", "]" ]
Read a single line from rfile buffer and return it. Args: size (int): minimum amount of data to read Returns: bytes: One line from rfile.
[ "Read", "a", "single", "line", "from", "rfile", "buffer", "and", "return", "it", "." ]
python
train
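The `readline()` entry above is a buffered line read over an underlying stream. Below is a simplified, standalone sketch of the same buffer/`_fetch()` pattern over an in-memory byte stream; the class name and chunked reading are assumptions for illustration, not cheroot's actual wrapper.

import io

LF = b'\n'
EMPTY = b''

class BufferedLineReader:
    """Simplified stand-in for a buffered rfile wrapper (not cheroot's class)."""
    def __init__(self, raw, chunk=8):
        self.raw = io.BytesIO(raw)
        self.buffer = EMPTY
        self.chunk = chunk

    def _fetch(self):
        # Pull the next chunk from the underlying stream into the buffer.
        self.buffer += self.raw.read(self.chunk)

    def readline(self):
        data = EMPTY
        while True:
            if not self.buffer:
                self._fetch()
                if not self.buffer:
                    return data  # EOF
            newline_pos = self.buffer.find(LF)
            if newline_pos == -1:
                data += self.buffer
                self.buffer = EMPTY
            else:
                data += self.buffer[:newline_pos + 1]
                self.buffer = self.buffer[newline_pos + 1:]
                return data

reader = BufferedLineReader(b'GET / HTTP/1.1\r\nHost: x\r\n\r\n')
print(reader.readline())  # b'GET / HTTP/1.1\r\n'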
iotile/coretools
transport_plugins/awsiot/iotile_transport_awsiot/gateway_agent.py
https://github.com/iotile/coretools/blob/2d794f5f1346b841b0dcd16c9d284e9bf2f3c6ec/transport_plugins/awsiot/iotile_transport_awsiot/gateway_agent.py#L631-L655
def _send_accum_trace(self, device_uuid):
    """Send whatever accumulated tracing data we have for the device."""
    if device_uuid not in self._connections:
        self._logger.debug("Dropping trace data for device without an active connection, uuid=0x%X", device_uuid)
        return

    conn_data = self._connections[device_uuid]
    trace = conn_data['trace_accum']

    if len(trace) > 0:
        slug = self._build_device_slug(device_uuid)
        tracing_topic = self.topics.prefix + 'devices/{}/data/tracing'.format(slug)
        data = {'type': 'notification', 'operation': 'trace'}
        data['trace'] = binascii.hexlify(trace)
        data['trace_origin'] = device_uuid
        self._logger.debug('Publishing trace: (topic=%s)', tracing_topic)
        self.client.publish(tracing_topic, data)

    conn_data['trace_scheduled'] = False
    conn_data['last_trace'] = monotonic()
    conn_data['trace_accum'] = bytes()
[ "def", "_send_accum_trace", "(", "self", ",", "device_uuid", ")", ":", "if", "device_uuid", "not", "in", "self", ".", "_connections", ":", "self", ".", "_logger", ".", "debug", "(", "\"Dropping trace data for device without an active connection, uuid=0x%X\"", ",", "device_uuid", ")", "return", "conn_data", "=", "self", ".", "_connections", "[", "device_uuid", "]", "trace", "=", "conn_data", "[", "'trace_accum'", "]", "if", "len", "(", "trace", ")", ">", "0", ":", "slug", "=", "self", ".", "_build_device_slug", "(", "device_uuid", ")", "tracing_topic", "=", "self", ".", "topics", ".", "prefix", "+", "'devices/{}/data/tracing'", ".", "format", "(", "slug", ")", "data", "=", "{", "'type'", ":", "'notification'", ",", "'operation'", ":", "'trace'", "}", "data", "[", "'trace'", "]", "=", "binascii", ".", "hexlify", "(", "trace", ")", "data", "[", "'trace_origin'", "]", "=", "device_uuid", "self", ".", "_logger", ".", "debug", "(", "'Publishing trace: (topic=%s)'", ",", "tracing_topic", ")", "self", ".", "client", ".", "publish", "(", "tracing_topic", ",", "data", ")", "conn_data", "[", "'trace_scheduled'", "]", "=", "False", "conn_data", "[", "'last_trace'", "]", "=", "monotonic", "(", ")", "conn_data", "[", "'trace_accum'", "]", "=", "bytes", "(", ")" ]
Send whatever accumulated tracing data we have for the device.
[ "Send", "whatever", "accumulated", "tracing", "data", "we", "have", "for", "the", "device", "." ]
python
train
fabioz/PyDev.Debugger
third_party/pep8/pycodestyle.py
https://github.com/fabioz/PyDev.Debugger/blob/ed9c4307662a5593b8a7f1f3389ecd0e79b8c503/third_party/pep8/pycodestyle.py#L1422-L1437
def normalize_paths(value, parent=os.curdir):
    """Parse a comma-separated list of paths.

    Return a list of absolute paths.
    """
    if not value:
        return []
    if isinstance(value, list):
        return value
    paths = []
    for path in value.split(','):
        path = path.strip()
        if '/' in path:
            path = os.path.abspath(os.path.join(parent, path))
        paths.append(path.rstrip('/'))
    return paths
[ "def", "normalize_paths", "(", "value", ",", "parent", "=", "os", ".", "curdir", ")", ":", "if", "not", "value", ":", "return", "[", "]", "if", "isinstance", "(", "value", ",", "list", ")", ":", "return", "value", "paths", "=", "[", "]", "for", "path", "in", "value", ".", "split", "(", "','", ")", ":", "path", "=", "path", ".", "strip", "(", ")", "if", "'/'", "in", "path", ":", "path", "=", "os", ".", "path", ".", "abspath", "(", "os", ".", "path", ".", "join", "(", "parent", ",", "path", ")", ")", "paths", ".", "append", "(", "path", ".", "rstrip", "(", "'/'", ")", ")", "return", "paths" ]
Parse a comma-separated list of paths. Return a list of absolute paths.
[ "Parse", "a", "comma", "-", "separated", "list", "of", "paths", "." ]
python
train
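A short worked example of the `normalize_paths()` entry above, assuming the function is importable; the input string is made up, but the behaviour (absolute-izing entries containing a slash, trimming trailing slashes) follows directly from the code.

import os

# Entries containing '/' are resolved relative to `parent`; bare names pass through.
print(normalize_paths("./src/, setup.py, /tmp/build/", parent="/project"))
# ['/project/src', 'setup.py', '/tmp/build']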
softlayer/softlayer-python
SoftLayer/utils.py
https://github.com/softlayer/softlayer-python/blob/9f181be08cc3668353b05a6de0cb324f52cff6fa/SoftLayer/utils.py#L150-L163
def event_log_filter_between_date(start, end, utc):
    """betweenDate Query filter that SoftLayer_EventLog likes

    :param string start: lower bound date in mm/dd/yyyy format
    :param string end: upper bound date in mm/dd/yyyy format
    :param string utc: utc offset. Defaults to '+0000'
    """
    return {
        'operation': 'betweenDate',
        'options': [
            {'name': 'startDate', 'value': [format_event_log_date(start, utc)]},
            {'name': 'endDate', 'value': [format_event_log_date(end, utc)]}
        ]
    }
[ "def", "event_log_filter_between_date", "(", "start", ",", "end", ",", "utc", ")", ":", "return", "{", "'operation'", ":", "'betweenDate'", ",", "'options'", ":", "[", "{", "'name'", ":", "'startDate'", ",", "'value'", ":", "[", "format_event_log_date", "(", "start", ",", "utc", ")", "]", "}", ",", "{", "'name'", ":", "'endDate'", ",", "'value'", ":", "[", "format_event_log_date", "(", "end", ",", "utc", ")", "]", "}", "]", "}" ]
betweenDate Query filter that SoftLayer_EventLog likes :param string start: lower bound date in mm/dd/yyyy format :param string end: upper bound date in mm/dd/yyyy format :param string utc: utc offset. Defaults to '+0000'
[ "betweenDate", "Query", "filter", "that", "SoftLayer_EventLog", "likes" ]
python
train
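An illustrative call to `event_log_filter_between_date()` above. The exact strings produced by `format_event_log_date()` are not shown in this entry, so the formatted values in the comment are placeholders.

query_filter = event_log_filter_between_date('04/01/2019', '04/30/2019', '+0000')
# {
#     'operation': 'betweenDate',
#     'options': [
#         {'name': 'startDate', 'value': ['<formatted 04/01/2019 +0000>']},
#         {'name': 'endDate', 'value': ['<formatted 04/30/2019 +0000>']},
#     ]
# }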
barryp/py-amqplib
amqplib/client_0_8/channel.py
https://github.com/barryp/py-amqplib/blob/2b3a47de34b4712c111d0a55d7ff109dffc2a7b2/amqplib/client_0_8/channel.py#L135-L195
def close(self, reply_code=0, reply_text='', method_sig=(0, 0)): """ request a channel close This method indicates that the sender wants to close the channel. This may be due to internal conditions (e.g. a forced shut-down) or due to an error handling a specific method, i.e. an exception. When a close is due to an exception, the sender provides the class and method id of the method which caused the exception. RULE: After sending this method any received method except Channel.Close-OK MUST be discarded. RULE: The peer sending this method MAY use a counter or timeout to detect failure of the other peer to respond correctly with Channel.Close-OK.. PARAMETERS: reply_code: short The reply code. The AMQ reply codes are defined in AMQ RFC 011. reply_text: shortstr The localised reply text. This text can be logged as an aid to resolving issues. class_id: short failing method class When the close is provoked by a method exception, this is the class of the method. method_id: short failing method ID When the close is provoked by a method exception, this is the ID of the method. """ if not self.is_open: # already closed return args = AMQPWriter() args.write_short(reply_code) args.write_shortstr(reply_text) args.write_short(method_sig[0]) # class_id args.write_short(method_sig[1]) # method_id self._send_method((20, 40), args) return self.wait(allowed_methods=[ (20, 41), # Channel.close_ok ])
[ "def", "close", "(", "self", ",", "reply_code", "=", "0", ",", "reply_text", "=", "''", ",", "method_sig", "=", "(", "0", ",", "0", ")", ")", ":", "if", "not", "self", ".", "is_open", ":", "# already closed", "return", "args", "=", "AMQPWriter", "(", ")", "args", ".", "write_short", "(", "reply_code", ")", "args", ".", "write_shortstr", "(", "reply_text", ")", "args", ".", "write_short", "(", "method_sig", "[", "0", "]", ")", "# class_id", "args", ".", "write_short", "(", "method_sig", "[", "1", "]", ")", "# method_id", "self", ".", "_send_method", "(", "(", "20", ",", "40", ")", ",", "args", ")", "return", "self", ".", "wait", "(", "allowed_methods", "=", "[", "(", "20", ",", "41", ")", ",", "# Channel.close_ok", "]", ")" ]
request a channel close This method indicates that the sender wants to close the channel. This may be due to internal conditions (e.g. a forced shut-down) or due to an error handling a specific method, i.e. an exception. When a close is due to an exception, the sender provides the class and method id of the method which caused the exception. RULE: After sending this method any received method except Channel.Close-OK MUST be discarded. RULE: The peer sending this method MAY use a counter or timeout to detect failure of the other peer to respond correctly with Channel.Close-OK.. PARAMETERS: reply_code: short The reply code. The AMQ reply codes are defined in AMQ RFC 011. reply_text: shortstr The localised reply text. This text can be logged as an aid to resolving issues. class_id: short failing method class When the close is provoked by a method exception, this is the class of the method. method_id: short failing method ID When the close is provoked by a method exception, this is the ID of the method.
[ "request", "a", "channel", "close" ]
python
train
gregreen/dustmaps
dustmaps/fetch_utils.py
https://github.com/gregreen/dustmaps/blob/c8f571a71da0d951bf8ea865621bee14492bdfd9/dustmaps/fetch_utils.py#L94-L150
def h5_file_exists(fname, size_guess=None, rtol=0.1, atol=1., dsets={}): """ Returns ``True`` if an HDF5 file exists, has the expected file size, and contains (at least) the given datasets, with the correct shapes. Args: fname (str): Filename to check. size_guess (Optional[int]): Expected size (in Bytes) of the file. If ``None`` (the default), then filesize is not checked. rtol (Optional[float]): Relative tolerance for filesize. atol (Optional[float]): Absolute tolerance (in Bytes) for filesize. dsets (Optional[dict]): Dictionary specifying expected datasets. Each key is the name of a dataset, while each value is the expected shape of the dataset. Defaults to ``{}``, meaning that no datasets are checked. Returns: ``True`` if the file matches by all given criteria. """ # Check if the file exists if not os.path.isfile(fname): # print('File does not exist.') return False # Check file size, withe the given tolerances if size_guess is not None: size = os.path.getsize(fname) tol = atol + rtol * size_guess if abs(size - size_guess) > tol: # print('File size is wrong:') # print(' expected: {: >16d}'.format(size_guess)) # print(' found: {: >16d}'.format(size)) return False # Check the datasets in the file if len(dsets): import h5py try: with h5py.File(fname, 'r') as f: for key in dsets: # Check that dataset is in file if key not in f: # print('Dataset "{}" not in file.'.format(key)) return False # Check that the shape of the dataset is correct if dsets[key] is not None: if f[key].shape != dsets[key]: # print('Dataset "{}" has wrong shape:'.format(key)) # print(' expected: {}'.format(dsets[key])) # print(' found: {}'.format(f[key].shape)) return False except IOError: # print('Problem reading file.') return False return True
[ "def", "h5_file_exists", "(", "fname", ",", "size_guess", "=", "None", ",", "rtol", "=", "0.1", ",", "atol", "=", "1.", ",", "dsets", "=", "{", "}", ")", ":", "# Check if the file exists", "if", "not", "os", ".", "path", ".", "isfile", "(", "fname", ")", ":", "# print('File does not exist.')", "return", "False", "# Check file size, withe the given tolerances", "if", "size_guess", "is", "not", "None", ":", "size", "=", "os", ".", "path", ".", "getsize", "(", "fname", ")", "tol", "=", "atol", "+", "rtol", "*", "size_guess", "if", "abs", "(", "size", "-", "size_guess", ")", ">", "tol", ":", "# print('File size is wrong:')", "# print(' expected: {: >16d}'.format(size_guess))", "# print(' found: {: >16d}'.format(size))", "return", "False", "# Check the datasets in the file", "if", "len", "(", "dsets", ")", ":", "import", "h5py", "try", ":", "with", "h5py", ".", "File", "(", "fname", ",", "'r'", ")", "as", "f", ":", "for", "key", "in", "dsets", ":", "# Check that dataset is in file", "if", "key", "not", "in", "f", ":", "# print('Dataset \"{}\" not in file.'.format(key))", "return", "False", "# Check that the shape of the dataset is correct", "if", "dsets", "[", "key", "]", "is", "not", "None", ":", "if", "f", "[", "key", "]", ".", "shape", "!=", "dsets", "[", "key", "]", ":", "# print('Dataset \"{}\" has wrong shape:'.format(key))", "# print(' expected: {}'.format(dsets[key]))", "# print(' found: {}'.format(f[key].shape))", "return", "False", "except", "IOError", ":", "# print('Problem reading file.')", "return", "False", "return", "True" ]
Returns ``True`` if an HDF5 file exists, has the expected file size, and contains (at least) the given datasets, with the correct shapes. Args: fname (str): Filename to check. size_guess (Optional[int]): Expected size (in Bytes) of the file. If ``None`` (the default), then filesize is not checked. rtol (Optional[float]): Relative tolerance for filesize. atol (Optional[float]): Absolute tolerance (in Bytes) for filesize. dsets (Optional[dict]): Dictionary specifying expected datasets. Each key is the name of a dataset, while each value is the expected shape of the dataset. Defaults to ``{}``, meaning that no datasets are checked. Returns: ``True`` if the file matches by all given criteria.
[ "Returns", "True", "if", "an", "HDF5", "file", "exists", "has", "the", "expected", "file", "size", "and", "contains", "(", "at", "least", ")", "the", "given", "datasets", "with", "the", "correct", "shapes", "." ]
python
train
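A usage sketch for the `h5_file_exists()` entry above; the file name, expected size, and dataset shapes are invented for illustration, and only the keyword arguments documented in the entry are used.

ok = h5_file_exists(
    'bayestar2017.h5',          # hypothetical file name
    size_guess=100 * 1024**2,   # ~100 MB expected
    rtol=0.1, atol=1.,
    dsets={'/pixel_info': None, '/samples': (1000, 31)},  # shapes assumed
)
if not ok:
    print('File missing, wrong size, or malformed; re-download needed.')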
gem/oq-engine
openquake/commands/purge.py
https://github.com/gem/oq-engine/blob/8294553a0b8aba33fd96437a35065d03547d0040/openquake/commands/purge.py#L42-L56
def purge_all(user=None, fast=False):
    """
    Remove all calculations of the given user
    """
    user = user or getpass.getuser()
    if os.path.exists(datadir):
        if fast:
            shutil.rmtree(datadir)
            print('Removed %s' % datadir)
        else:
            for fname in os.listdir(datadir):
                mo = re.match('calc_(\d+)\.hdf5', fname)
                if mo is not None:
                    calc_id = int(mo.group(1))
                    purge_one(calc_id, user)
[ "def", "purge_all", "(", "user", "=", "None", ",", "fast", "=", "False", ")", ":", "user", "=", "user", "or", "getpass", ".", "getuser", "(", ")", "if", "os", ".", "path", ".", "exists", "(", "datadir", ")", ":", "if", "fast", ":", "shutil", ".", "rmtree", "(", "datadir", ")", "print", "(", "'Removed %s'", "%", "datadir", ")", "else", ":", "for", "fname", "in", "os", ".", "listdir", "(", "datadir", ")", ":", "mo", "=", "re", ".", "match", "(", "'calc_(\\d+)\\.hdf5'", ",", "fname", ")", "if", "mo", "is", "not", "None", ":", "calc_id", "=", "int", "(", "mo", ".", "group", "(", "1", ")", ")", "purge_one", "(", "calc_id", ",", "user", ")" ]
Remove all calculations of the given user
[ "Remove", "all", "calculations", "of", "the", "given", "user" ]
python
train
ic-labs/django-icekit
icekit/publishing/models.py
https://github.com/ic-labs/django-icekit/blob/c507ea5b1864303732c53ad7c5800571fca5fa94/icekit/publishing/models.py#L740-L768
def handle_publishable_m2m_changed( sender, instance, action, reverse, model, pk_set, **kwargs): """ Cache related published objects in `pre_clear` so they can be restored in `post_clear`. """ # Do nothing if the target model is not publishable. if not issubclass(model, PublishingModel): return # Get the right `ManyRelatedManager`. Iterate M2Ms and compare `sender` # (the through model), in case there are multiple M2Ms to the same model. if reverse: for rel_obj in instance._meta.get_all_related_many_to_many_objects(): if rel_obj.field.rel.through == sender: m2m = getattr(instance, rel_obj.get_accessor_name()) break else: for field in instance._meta.many_to_many: if field.rel.through == sender: m2m = getattr(instance, field.attname) break # Cache published PKs on the instance. if action == 'pre_clear': instance._published_m2m_cache = set( m2m.filter(publishing_is_draft=False).values_list('pk', flat=True)) # Add published PKs from the cache. if action == 'post_clear': m2m.add(*instance._published_m2m_cache) del instance._published_m2m_cache
[ "def", "handle_publishable_m2m_changed", "(", "sender", ",", "instance", ",", "action", ",", "reverse", ",", "model", ",", "pk_set", ",", "*", "*", "kwargs", ")", ":", "# Do nothing if the target model is not publishable.", "if", "not", "issubclass", "(", "model", ",", "PublishingModel", ")", ":", "return", "# Get the right `ManyRelatedManager`. Iterate M2Ms and compare `sender`", "# (the through model), in case there are multiple M2Ms to the same model.", "if", "reverse", ":", "for", "rel_obj", "in", "instance", ".", "_meta", ".", "get_all_related_many_to_many_objects", "(", ")", ":", "if", "rel_obj", ".", "field", ".", "rel", ".", "through", "==", "sender", ":", "m2m", "=", "getattr", "(", "instance", ",", "rel_obj", ".", "get_accessor_name", "(", ")", ")", "break", "else", ":", "for", "field", "in", "instance", ".", "_meta", ".", "many_to_many", ":", "if", "field", ".", "rel", ".", "through", "==", "sender", ":", "m2m", "=", "getattr", "(", "instance", ",", "field", ".", "attname", ")", "break", "# Cache published PKs on the instance.", "if", "action", "==", "'pre_clear'", ":", "instance", ".", "_published_m2m_cache", "=", "set", "(", "m2m", ".", "filter", "(", "publishing_is_draft", "=", "False", ")", ".", "values_list", "(", "'pk'", ",", "flat", "=", "True", ")", ")", "# Add published PKs from the cache.", "if", "action", "==", "'post_clear'", ":", "m2m", ".", "add", "(", "*", "instance", ".", "_published_m2m_cache", ")", "del", "instance", ".", "_published_m2m_cache" ]
Cache related published objects in `pre_clear` so they can be restored in `post_clear`.
[ "Cache", "related", "published", "objects", "in", "pre_clear", "so", "they", "can", "be", "restored", "in", "post_clear", "." ]
python
train
mila-iqia/fuel
fuel/utils/lock.py
https://github.com/mila-iqia/fuel/blob/1d6292dc25e3a115544237e392e61bff6631d23c/fuel/utils/lock.py#L392-L403
def release_readlock(lockdir_name):
    """Release a previously obtained readlock.

    Parameters
    ----------
    lockdir_name : str
        Name of the previously obtained readlock

    """
    # Make sure the lock still exists before deleting it
    if os.path.exists(lockdir_name) and os.path.isdir(lockdir_name):
        os.rmdir(lockdir_name)
[ "def", "release_readlock", "(", "lockdir_name", ")", ":", "# Make sure the lock still exists before deleting it", "if", "os", ".", "path", ".", "exists", "(", "lockdir_name", ")", "and", "os", ".", "path", ".", "isdir", "(", "lockdir_name", ")", ":", "os", ".", "rmdir", "(", "lockdir_name", ")" ]
Release a previously obtained readlock. Parameters ---------- lockdir_name : str Name of the previously obtained readlock
[ "Release", "a", "previously", "obtained", "readlock", "." ]
python
train
saltstack/salt
salt/log/setup.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/log/setup.py#L1060-L1066
def set_logger_level(logger_name, log_level='error'):
    '''
    Tweak a specific logger's logging level
    '''
    logging.getLogger(logger_name).setLevel(
        LOG_LEVELS.get(log_level.lower(), logging.ERROR)
    )
[ "def", "set_logger_level", "(", "logger_name", ",", "log_level", "=", "'error'", ")", ":", "logging", ".", "getLogger", "(", "logger_name", ")", ".", "setLevel", "(", "LOG_LEVELS", ".", "get", "(", "log_level", ".", "lower", "(", ")", ",", "logging", ".", "ERROR", ")", ")" ]
Tweak a specific logger's logging level
[ "Tweak", "a", "specific", "logger", "s", "logging", "level" ]
python
train
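A quick example of `set_logger_level()` above; the logger names are arbitrary, and the accepted level strings are assumed to be the keys of the module's LOG_LEVELS mapping.

# Silence a chatty third-party logger while leaving other logging intact.
set_logger_level('boto3', 'critical')

# Unknown level strings fall back to ERROR via LOG_LEVELS.get(..., logging.ERROR).
set_logger_level('requests', 'not-a-level')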
jantman/pypi-download-stats
pypi_download_stats/projectstats.py
https://github.com/jantman/pypi-download-stats/blob/44a7a6bbcd61a9e7f02bd02c52584a98183f80c5/pypi_download_stats/projectstats.py#L266-L287
def per_implementation_data(self):
    """
    Return download data by python implementation name and version.

    :return: dict of cache data; keys are datetime objects, values are dict
      of implementation name/version (str) to count (int).
    :rtype: dict
    """
    ret = {}
    for cache_date in self.cache_dates:
        data = self._cache_get(cache_date)
        ret[cache_date] = {}
        for impl_name, impl_data in data['by_implementation'].items():
            for impl_ver, count in impl_data.items():
                k = self._compound_column_value(
                    impl_name,
                    self._shorten_version(impl_ver)
                )
                ret[cache_date][k] = count
        if len(ret[cache_date]) == 0:
            ret[cache_date]['unknown'] = 0
    return ret
[ "def", "per_implementation_data", "(", "self", ")", ":", "ret", "=", "{", "}", "for", "cache_date", "in", "self", ".", "cache_dates", ":", "data", "=", "self", ".", "_cache_get", "(", "cache_date", ")", "ret", "[", "cache_date", "]", "=", "{", "}", "for", "impl_name", ",", "impl_data", "in", "data", "[", "'by_implementation'", "]", ".", "items", "(", ")", ":", "for", "impl_ver", ",", "count", "in", "impl_data", ".", "items", "(", ")", ":", "k", "=", "self", ".", "_compound_column_value", "(", "impl_name", ",", "self", ".", "_shorten_version", "(", "impl_ver", ")", ")", "ret", "[", "cache_date", "]", "[", "k", "]", "=", "count", "if", "len", "(", "ret", "[", "cache_date", "]", ")", "==", "0", ":", "ret", "[", "cache_date", "]", "[", "'unknown'", "]", "=", "0", "return", "ret" ]
Return download data by python implementation name and version. :return: dict of cache data; keys are datetime objects, values are dict of implementation name/version (str) to count (int). :rtype: dict
[ "Return", "download", "data", "by", "python", "impelementation", "name", "and", "version", "." ]
python
train
CI-WATER/gsshapy
gsshapy/orm/prj.py
https://github.com/CI-WATER/gsshapy/blob/00fd4af0fd65f1614d75a52fe950a04fb0867f4c/gsshapy/orm/prj.py#L1642-L1669
def _writeXput(self, session, directory, fileCards, name=None, replaceParamFile=None): """ GSSHA Project Write Files to File Method """ for card in self.projectCards: if (card.name in fileCards) and self._noneOrNumValue(card.value) \ and fileCards[card.name]: fileIO = fileCards[card.name] filename = card.value.strip('"') # Check for replacement variables if '[' in filename or ']' in filename: log.info('The file for project card {0} cannot be ' 'written, because the path has been replaced ' 'with replacement variable {1}.'.format(card.name, filename)) return # Determine new filename filename = self._replaceNewFilename(filename=filename, name=name) # Invoke write method on each file self._invokeWrite(fileIO=fileIO, session=session, directory=directory, filename=filename, replaceParamFile=replaceParamFile)
[ "def", "_writeXput", "(", "self", ",", "session", ",", "directory", ",", "fileCards", ",", "name", "=", "None", ",", "replaceParamFile", "=", "None", ")", ":", "for", "card", "in", "self", ".", "projectCards", ":", "if", "(", "card", ".", "name", "in", "fileCards", ")", "and", "self", ".", "_noneOrNumValue", "(", "card", ".", "value", ")", "and", "fileCards", "[", "card", ".", "name", "]", ":", "fileIO", "=", "fileCards", "[", "card", ".", "name", "]", "filename", "=", "card", ".", "value", ".", "strip", "(", "'\"'", ")", "# Check for replacement variables", "if", "'['", "in", "filename", "or", "']'", "in", "filename", ":", "log", ".", "info", "(", "'The file for project card {0} cannot be '", "'written, because the path has been replaced '", "'with replacement variable {1}.'", ".", "format", "(", "card", ".", "name", ",", "filename", ")", ")", "return", "# Determine new filename", "filename", "=", "self", ".", "_replaceNewFilename", "(", "filename", "=", "filename", ",", "name", "=", "name", ")", "# Invoke write method on each file", "self", ".", "_invokeWrite", "(", "fileIO", "=", "fileIO", ",", "session", "=", "session", ",", "directory", "=", "directory", ",", "filename", "=", "filename", ",", "replaceParamFile", "=", "replaceParamFile", ")" ]
GSSHA Project Write Files to File Method
[ "GSSHA", "Project", "Write", "Files", "to", "File", "Method" ]
python
train
LonamiWebs/Telethon
telethon/tl/custom/draft.py
https://github.com/LonamiWebs/Telethon/blob/1ead9757d366b58c1e0567cddb0196e20f1a445f/telethon/tl/custom/draft.py#L117-L163
async def set_message( self, text=None, reply_to=0, parse_mode=(), link_preview=None): """ Changes the draft message on the Telegram servers. The changes are reflected in this object. :param str text: New text of the draft. Preserved if left as None. :param int reply_to: Message ID to reply to. Preserved if left as 0, erased if set to None. :param bool link_preview: Whether to attach a web page preview. Preserved if left as None. :param str parse_mode: The parse mode to be used for the text. :return bool: ``True`` on success. """ if text is None: text = self._text if reply_to == 0: reply_to = self.reply_to_msg_id if link_preview is None: link_preview = self.link_preview raw_text, entities =\ await self._client._parse_message_text(text, parse_mode) result = await self._client(SaveDraftRequest( peer=self._peer, message=raw_text, no_webpage=not link_preview, reply_to_msg_id=reply_to, entities=entities )) if result: self._text = text self._raw_text = raw_text self.link_preview = link_preview self.reply_to_msg_id = reply_to self.date = datetime.datetime.now(tz=datetime.timezone.utc) return result
[ "async", "def", "set_message", "(", "self", ",", "text", "=", "None", ",", "reply_to", "=", "0", ",", "parse_mode", "=", "(", ")", ",", "link_preview", "=", "None", ")", ":", "if", "text", "is", "None", ":", "text", "=", "self", ".", "_text", "if", "reply_to", "==", "0", ":", "reply_to", "=", "self", ".", "reply_to_msg_id", "if", "link_preview", "is", "None", ":", "link_preview", "=", "self", ".", "link_preview", "raw_text", ",", "entities", "=", "await", "self", ".", "_client", ".", "_parse_message_text", "(", "text", ",", "parse_mode", ")", "result", "=", "await", "self", ".", "_client", "(", "SaveDraftRequest", "(", "peer", "=", "self", ".", "_peer", ",", "message", "=", "raw_text", ",", "no_webpage", "=", "not", "link_preview", ",", "reply_to_msg_id", "=", "reply_to", ",", "entities", "=", "entities", ")", ")", "if", "result", ":", "self", ".", "_text", "=", "text", "self", ".", "_raw_text", "=", "raw_text", "self", ".", "link_preview", "=", "link_preview", "self", ".", "reply_to_msg_id", "=", "reply_to", "self", ".", "date", "=", "datetime", ".", "datetime", ".", "now", "(", "tz", "=", "datetime", ".", "timezone", ".", "utc", ")", "return", "result" ]
Changes the draft message on the Telegram servers. The changes are reflected in this object. :param str text: New text of the draft. Preserved if left as None. :param int reply_to: Message ID to reply to. Preserved if left as 0, erased if set to None. :param bool link_preview: Whether to attach a web page preview. Preserved if left as None. :param str parse_mode: The parse mode to be used for the text. :return bool: ``True`` on success.
[ "Changes", "the", "draft", "message", "on", "the", "Telegram", "servers", ".", "The", "changes", "are", "reflected", "in", "this", "object", "." ]
python
train
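A hedged usage sketch for the `Draft.set_message()` entry above, following its documented parameters. Obtaining the draft through `client.iter_drafts()` is an assumption about the surrounding Telethon client API; only the keyword arguments shown in the entry's docstring are used.

# Inside an async context with a connected TelegramClient (setup assumed).
async def update_first_draft(client):
    async for draft in client.iter_drafts():   # assumed accessor
        ok = await draft.set_message(
            'Updated draft text',
            link_preview=False,  # keep the draft without a web page preview
        )
        return ok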
kobinpy/kobin
kobin/routes.py
https://github.com/kobinpy/kobin/blob/e6caff5af05db8a6e511d3de275d262466ab36a6/kobin/routes.py#L137-L178
def match(self, path, method): """ Get callback and url_vars. >>> from kobin import Response >>> r = Router() >>> def view(user_id: int) -> Response: ... return Response(f'You are {user_id}') ... >>> r.add('/users/{user_id}', 'GET', 'user-detail', view) >>> callback, url_vars = r.match('/users/1', 'GET') >>> url_vars {'user_id': 1} >>> response = callback(**url_vars) >>> response.body [b'You are 1'] >>> callback, url_vars = r.match('/notfound', 'GET') Traceback (most recent call last): ... kobin.responses.HTTPError """ if path != '/': path = path.rstrip('/') method = method.upper() status = 404 for p, n, m in self.endpoints: matched, url_vars = match_path(p, path) if not matched: # path: not matched continue if method not in m: # path: matched, method: not matched status = 405 raise HTTPError(status=status, body=f'Method not found: {path} {method}') # it has security issue?? callback, type_hints = m[method] type_matched, typed_url_vars = match_url_vars_type(url_vars, type_hints) if not type_matched: continue # path: not matched (types are different) return callback, typed_url_vars raise HTTPError(status=status, body=f'Not found: {path}')
[ "def", "match", "(", "self", ",", "path", ",", "method", ")", ":", "if", "path", "!=", "'/'", ":", "path", "=", "path", ".", "rstrip", "(", "'/'", ")", "method", "=", "method", ".", "upper", "(", ")", "status", "=", "404", "for", "p", ",", "n", ",", "m", "in", "self", ".", "endpoints", ":", "matched", ",", "url_vars", "=", "match_path", "(", "p", ",", "path", ")", "if", "not", "matched", ":", "# path: not matched", "continue", "if", "method", "not", "in", "m", ":", "# path: matched, method: not matched", "status", "=", "405", "raise", "HTTPError", "(", "status", "=", "status", ",", "body", "=", "f'Method not found: {path} {method}'", ")", "# it has security issue??", "callback", ",", "type_hints", "=", "m", "[", "method", "]", "type_matched", ",", "typed_url_vars", "=", "match_url_vars_type", "(", "url_vars", ",", "type_hints", ")", "if", "not", "type_matched", ":", "continue", "# path: not matched (types are different)", "return", "callback", ",", "typed_url_vars", "raise", "HTTPError", "(", "status", "=", "status", ",", "body", "=", "f'Not found: {path}'", ")" ]
Get callback and url_vars. >>> from kobin import Response >>> r = Router() >>> def view(user_id: int) -> Response: ... return Response(f'You are {user_id}') ... >>> r.add('/users/{user_id}', 'GET', 'user-detail', view) >>> callback, url_vars = r.match('/users/1', 'GET') >>> url_vars {'user_id': 1} >>> response = callback(**url_vars) >>> response.body [b'You are 1'] >>> callback, url_vars = r.match('/notfound', 'GET') Traceback (most recent call last): ... kobin.responses.HTTPError
[ "Get", "callback", "and", "url_vars", "." ]
python
train
vberlier/nbtlib
nbtlib/literal/serializer.py
https://github.com/vberlier/nbtlib/blob/9c9d58b5c4a530b0f1ffd76dda176f00406c3547/nbtlib/literal/serializer.py#L83-L92
def should_expand(self, tag):
    """Return whether the specified tag should be expanded."""
    return self.indentation is not None and tag and (
        not self.previous_indent or (
            tag.serializer == 'list'
            and tag.subtype.serializer in ('array', 'list', 'compound')
        ) or (
            tag.serializer == 'compound'
        )
    )
[ "def", "should_expand", "(", "self", ",", "tag", ")", ":", "return", "self", ".", "indentation", "is", "not", "None", "and", "tag", "and", "(", "not", "self", ".", "previous_indent", "or", "(", "tag", ".", "serializer", "==", "'list'", "and", "tag", ".", "subtype", ".", "serializer", "in", "(", "'array'", ",", "'list'", ",", "'compound'", ")", ")", "or", "(", "tag", ".", "serializer", "==", "'compound'", ")", ")" ]
Return whether the specified tag should be expanded.
[ "Return", "whether", "the", "specified", "tag", "should", "be", "expanded", "." ]
python
train
dylanaraps/pywal
pywal/theme.py
https://github.com/dylanaraps/pywal/blob/c823e3c9dbd0100ca09caf824e77d296685a1c1e/pywal/theme.py#L91-L124
def file(input_file, light=False): """Import colorscheme from json file.""" util.create_dir(os.path.join(CONF_DIR, "colorschemes/light/")) util.create_dir(os.path.join(CONF_DIR, "colorschemes/dark/")) theme_name = ".".join((input_file, "json")) bri = "light" if light else "dark" user_theme_file = os.path.join(CONF_DIR, "colorschemes", bri, theme_name) theme_file = os.path.join(MODULE_DIR, "colorschemes", bri, theme_name) # Find the theme file. if input_file in ("random", "random_dark"): theme_file = get_random_theme() elif input_file == "random_light": theme_file = get_random_theme(light) elif os.path.isfile(user_theme_file): theme_file = user_theme_file elif os.path.isfile(input_file): theme_file = input_file # Parse the theme file. if os.path.isfile(theme_file): logging.info("Set theme to \033[1;37m%s\033[0m.", os.path.basename(theme_file)) return parse(theme_file) logging.error("No %s colorscheme file found.", bri) logging.error("Try adding '-l' to set light themes.") logging.error("Try removing '-l' to set dark themes.") sys.exit(1)
[ "def", "file", "(", "input_file", ",", "light", "=", "False", ")", ":", "util", ".", "create_dir", "(", "os", ".", "path", ".", "join", "(", "CONF_DIR", ",", "\"colorschemes/light/\"", ")", ")", "util", ".", "create_dir", "(", "os", ".", "path", ".", "join", "(", "CONF_DIR", ",", "\"colorschemes/dark/\"", ")", ")", "theme_name", "=", "\".\"", ".", "join", "(", "(", "input_file", ",", "\"json\"", ")", ")", "bri", "=", "\"light\"", "if", "light", "else", "\"dark\"", "user_theme_file", "=", "os", ".", "path", ".", "join", "(", "CONF_DIR", ",", "\"colorschemes\"", ",", "bri", ",", "theme_name", ")", "theme_file", "=", "os", ".", "path", ".", "join", "(", "MODULE_DIR", ",", "\"colorschemes\"", ",", "bri", ",", "theme_name", ")", "# Find the theme file.", "if", "input_file", "in", "(", "\"random\"", ",", "\"random_dark\"", ")", ":", "theme_file", "=", "get_random_theme", "(", ")", "elif", "input_file", "==", "\"random_light\"", ":", "theme_file", "=", "get_random_theme", "(", "light", ")", "elif", "os", ".", "path", ".", "isfile", "(", "user_theme_file", ")", ":", "theme_file", "=", "user_theme_file", "elif", "os", ".", "path", ".", "isfile", "(", "input_file", ")", ":", "theme_file", "=", "input_file", "# Parse the theme file.", "if", "os", ".", "path", ".", "isfile", "(", "theme_file", ")", ":", "logging", ".", "info", "(", "\"Set theme to \\033[1;37m%s\\033[0m.\"", ",", "os", ".", "path", ".", "basename", "(", "theme_file", ")", ")", "return", "parse", "(", "theme_file", ")", "logging", ".", "error", "(", "\"No %s colorscheme file found.\"", ",", "bri", ")", "logging", ".", "error", "(", "\"Try adding '-l' to set light themes.\"", ")", "logging", ".", "error", "(", "\"Try removing '-l' to set dark themes.\"", ")", "sys", ".", "exit", "(", "1", ")" ]
Import colorscheme from json file.
[ "Import", "colorscheme", "from", "json", "file", "." ]
python
train
galaxyproject/pulsar
pulsar/web/wsgi.py
https://github.com/galaxyproject/pulsar/blob/9ab6683802884324652da0a9f0808c7eb59d3ab4/pulsar/web/wsgi.py#L14-L20
def app_factory(global_conf, **local_conf):
    """
    Returns the Pulsar WSGI application.
    """
    configuration_file = global_conf.get("__file__", None)
    webapp = init_webapp(ini_path=configuration_file, local_conf=local_conf)
    return webapp
[ "def", "app_factory", "(", "global_conf", ",", "*", "*", "local_conf", ")", ":", "configuration_file", "=", "global_conf", ".", "get", "(", "\"__file__\"", ",", "None", ")", "webapp", "=", "init_webapp", "(", "ini_path", "=", "configuration_file", ",", "local_conf", "=", "local_conf", ")", "return", "webapp" ]
Returns the Pulsar WSGI application.
[ "Returns", "the", "Pulsar", "WSGI", "application", "." ]
python
train
kibitzr/kibitzr
kibitzr/storage.py
https://github.com/kibitzr/kibitzr/blob/749da312488f1dda1ed1093cf4c95aaac0a604f7/kibitzr/storage.py#L167-L181
def verbose(self):
    """Return changes in human-friendly format #14"""
    try:
        before = self.git.show('HEAD~1:content').strip()
    except sh.ErrorReturnCode_128:
        before = None
    after = self.git.show('HEAD:content').strip()
    if before is not None:
        return (u'{subject}\nNew value:\n{after}\n'
                u'Old value:\n{before}\n'
                .format(subject=self.subject, before=before, after=after))
    else:
        return u'\n'.join([self.subject, after])
[ "def", "verbose", "(", "self", ")", ":", "try", ":", "before", "=", "self", ".", "git", ".", "show", "(", "'HEAD~1:content'", ")", ".", "strip", "(", ")", "except", "sh", ".", "ErrorReturnCode_128", ":", "before", "=", "None", "after", "=", "self", ".", "git", ".", "show", "(", "'HEAD:content'", ")", ".", "strip", "(", ")", "if", "before", "is", "not", "None", ":", "return", "(", "u'{subject}\\nNew value:\\n{after}\\n'", "u'Old value:\\n{before}\\n'", ".", "format", "(", "subject", "=", "self", ".", "subject", ",", "before", "=", "before", ",", "after", "=", "after", ")", ")", "else", ":", "return", "u'\\n'", ".", "join", "(", "[", "self", ".", "subject", ",", "after", "]", ")" ]
Return changes in human-friendly format #14
[ "Return", "changes", "in", "human", "-", "friendly", "format", "#14" ]
python
train
mapnik/Cascadenik
cascadenik/compile.py
https://github.com/mapnik/Cascadenik/blob/82f66859340a31dfcb24af127274f262d4f3ad85/cascadenik/compile.py#L587-L608
def is_merc_projection(srs):
    """ Return true if the map projection matches that used by VEarth, Google, OSM, etc.

        Is currently necessary for zoom-level shorthand for scale-denominator.
    """
    if srs.lower() == '+init=epsg:900913':
        return True

    # observed
    srs = dict([p.split('=') for p in srs.split() if '=' in p])

    # expected
    # note, common optional modifiers like +no_defs, +over, and +wkt
    # are not pairs and should not prevent matching
    gym = '+proj=merc +a=6378137 +b=6378137 +lat_ts=0.0 +lon_0=0.0 +x_0=0.0 +y_0=0 +k=1.0 +units=m +nadgrids=@null'
    gym = dict([p.split('=') for p in gym.split() if '=' in p])

    for p in gym:
        if srs.get(p, None) != gym.get(p, None):
            return False

    return True
[ "def", "is_merc_projection", "(", "srs", ")", ":", "if", "srs", ".", "lower", "(", ")", "==", "'+init=epsg:900913'", ":", "return", "True", "# observed", "srs", "=", "dict", "(", "[", "p", ".", "split", "(", "'='", ")", "for", "p", "in", "srs", ".", "split", "(", ")", "if", "'='", "in", "p", "]", ")", "# expected", "# note, common optional modifiers like +no_defs, +over, and +wkt", "# are not pairs and should not prevent matching", "gym", "=", "'+proj=merc +a=6378137 +b=6378137 +lat_ts=0.0 +lon_0=0.0 +x_0=0.0 +y_0=0 +k=1.0 +units=m +nadgrids=@null'", "gym", "=", "dict", "(", "[", "p", ".", "split", "(", "'='", ")", "for", "p", "in", "gym", ".", "split", "(", ")", "if", "'='", "in", "p", "]", ")", "for", "p", "in", "gym", ":", "if", "srs", ".", "get", "(", "p", ",", "None", ")", "!=", "gym", ".", "get", "(", "p", ",", "None", ")", ":", "return", "False", "return", "True" ]
Return true if the map projection matches that used by VEarth, Google, OSM, etc. Is currently necessary for zoom-level shorthand for scale-denominator.
[ "Return", "true", "if", "the", "map", "projection", "matches", "that", "used", "by", "VEarth", "Google", "OSM", "etc", ".", "Is", "currently", "necessary", "for", "zoom", "-", "level", "shorthand", "for", "scale", "-", "denominator", "." ]
python
train
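Two illustrative calls to `is_merc_projection()` above: the first string is the exact shortcut the function special-cases, the second is a non-Mercator projection string.

print(is_merc_projection('+init=epsg:900913'))
# True  (explicit shortcut in the function)

print(is_merc_projection('+proj=longlat +datum=WGS84 +no_defs'))
# False (key/value pairs do not match the expected Mercator parameters)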
Alveo/pyalveo
pyalveo/pyalveo.py
https://github.com/Alveo/pyalveo/blob/1e9eec22bc031bc9a08066f9966565a546e6242e/pyalveo/pyalveo.py#L1395-L1412
def get_speakers(self, collection_name):
    """Get a list of speaker URLs for this collection

    :type collection_name: String
    :param collection_name: the name of the collection to search

    :rtype: List
    :returns: a list of URLs for the speakers associated with
        the given collection
    """
    speakers_url = "/speakers/"+collection_name
    resp = self.api_request(speakers_url)
    if 'speakers' in resp:
        return resp['speakers']
    else:
        return []
[ "def", "get_speakers", "(", "self", ",", "collection_name", ")", ":", "speakers_url", "=", "\"/speakers/\"", "+", "collection_name", "resp", "=", "self", ".", "api_request", "(", "speakers_url", ")", "if", "'speakers'", "in", "resp", ":", "return", "resp", "[", "'speakers'", "]", "else", ":", "return", "[", "]" ]
Get a list of speaker URLs for this collection :type collection_name: String :param collection_name: the name of the collection to search :rtype: List :returns: a list of URLs for the speakers associated with the given collection
[ "Get", "a", "list", "of", "speaker", "URLs", "for", "this", "collection" ]
python
train
Telefonica/toolium
toolium/config_driver.py
https://github.com/Telefonica/toolium/blob/56847c243b3a98876df74c184b75e43f8810e475/toolium/config_driver.py#L313-L332
def _create_chrome_options(self):
    """Create and configure a chrome options object

    :returns: chrome options object
    """
    # Create Chrome options
    options = webdriver.ChromeOptions()

    if self.config.getboolean_optional('Driver', 'headless'):
        self.logger.debug("Running Chrome in headless mode")
        options.add_argument('--headless')
        if os.name == 'nt':
            # Temporarily needed if running on Windows.
            options.add_argument('--disable-gpu')

    # Add Chrome preferences, mobile emulation options and chrome arguments
    self._add_chrome_options(options, 'prefs')
    self._add_chrome_options(options, 'mobileEmulation')
    self._add_chrome_arguments(options)
    return options
[ "def", "_create_chrome_options", "(", "self", ")", ":", "# Create Chrome options", "options", "=", "webdriver", ".", "ChromeOptions", "(", ")", "if", "self", ".", "config", ".", "getboolean_optional", "(", "'Driver'", ",", "'headless'", ")", ":", "self", ".", "logger", ".", "debug", "(", "\"Running Chrome in headless mode\"", ")", "options", ".", "add_argument", "(", "'--headless'", ")", "if", "os", ".", "name", "==", "'nt'", ":", "# Temporarily needed if running on Windows.", "options", ".", "add_argument", "(", "'--disable-gpu'", ")", "# Add Chrome preferences, mobile emulation options and chrome arguments", "self", ".", "_add_chrome_options", "(", "options", ",", "'prefs'", ")", "self", ".", "_add_chrome_options", "(", "options", ",", "'mobileEmulation'", ")", "self", ".", "_add_chrome_arguments", "(", "options", ")", "return", "options" ]
Create and configure a chrome options object :returns: chrome options object
[ "Create", "and", "configure", "a", "chrome", "options", "object" ]
python
train
maas/python-libmaas
maas/client/utils/__init__.py
https://github.com/maas/python-libmaas/blob/4092c68ef7fb1753efc843569848e2bcc3415002/maas/client/utils/__init__.py#L63-L79
def urlencode(data):
    """A version of `urllib.urlencode` that isn't insane.

    This only cares that `data` is an iterable of iterables. Each
    sub-iterable must be of overall length 2, i.e. a name/value pair.

    Unicode strings will be encoded to UTF-8. This is what Django expects;
    see `smart_text` in the Django documentation.
    """
    def dec(string):
        if isinstance(string, bytes):
            string = string.decode("utf-8")
        return quote_plus(string)

    return "&".join(
        "%s=%s" % (dec(name), dec(value))
        for name, value in data)
[ "def", "urlencode", "(", "data", ")", ":", "def", "dec", "(", "string", ")", ":", "if", "isinstance", "(", "string", ",", "bytes", ")", ":", "string", "=", "string", ".", "decode", "(", "\"utf-8\"", ")", "return", "quote_plus", "(", "string", ")", "return", "\"&\"", ".", "join", "(", "\"%s=%s\"", "%", "(", "dec", "(", "name", ")", ",", "dec", "(", "value", ")", ")", "for", "name", ",", "value", "in", "data", ")" ]
A version of `urllib.urlencode` that isn't insane. This only cares that `data` is an iterable of iterables. Each sub-iterable must be of overall length 2, i.e. a name/value pair. Unicode strings will be encoded to UTF-8. This is what Django expects; see `smart_text` in the Django documentation.
[ "A", "version", "of", "urllib", ".", "urlencode", "that", "isn", "t", "insane", "." ]
python
train
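A worked example of the `urlencode()` helper above; the parameter names are made up, and the output follows from `quote_plus` and the `&` join in the code.

params = [
    ("op", "deploy"),
    ("hostname", "maas node"),
    ("note", "café"),           # non-bytes strings are percent-encoded as UTF-8
]
print(urlencode(params))
# op=deploy&hostname=maas+node&note=caf%C3%A9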
hellock/icrawler
icrawler/crawler.py
https://github.com/hellock/icrawler/blob/38c925758fd3d3e568d3ecc993f77bc0acfa4788/icrawler/crawler.py#L154-L204
def crawl(self, feeder_kwargs=None, parser_kwargs=None, downloader_kwargs=None): """Start crawling This method will start feeder, parser and download and wait until all threads exit. Args: feeder_kwargs (dict, optional): Arguments to be passed to ``feeder.start()`` parser_kwargs (dict, optional): Arguments to be passed to ``parser.start()`` downloader_kwargs (dict, optional): Arguments to be passed to ``downloader.start()`` """ self.signal.reset() self.logger.info('start crawling...') feeder_kwargs = {} if feeder_kwargs is None else feeder_kwargs parser_kwargs = {} if parser_kwargs is None else parser_kwargs downloader_kwargs = {} if downloader_kwargs is None else downloader_kwargs self.logger.info('starting %d feeder threads...', self.feeder.thread_num) self.feeder.start(**feeder_kwargs) self.logger.info('starting %d parser threads...', self.parser.thread_num) self.parser.start(**parser_kwargs) self.logger.info('starting %d downloader threads...', self.downloader.thread_num) self.downloader.start(**downloader_kwargs) while True: if not self.feeder.is_alive(): self.signal.set(feeder_exited=True) if not self.parser.is_alive(): self.signal.set(parser_exited=True) if not self.downloader.is_alive(): break time.sleep(1) if not self.feeder.in_queue.empty(): self.feeder.clear_buffer() if not self.parser.in_queue.empty(): self.parser.clear_buffer() if not self.downloader.in_queue.empty(): self.downloader.clear_buffer(True) self.logger.info('Crawling task done!')
[ "def", "crawl", "(", "self", ",", "feeder_kwargs", "=", "None", ",", "parser_kwargs", "=", "None", ",", "downloader_kwargs", "=", "None", ")", ":", "self", ".", "signal", ".", "reset", "(", ")", "self", ".", "logger", ".", "info", "(", "'start crawling...'", ")", "feeder_kwargs", "=", "{", "}", "if", "feeder_kwargs", "is", "None", "else", "feeder_kwargs", "parser_kwargs", "=", "{", "}", "if", "parser_kwargs", "is", "None", "else", "parser_kwargs", "downloader_kwargs", "=", "{", "}", "if", "downloader_kwargs", "is", "None", "else", "downloader_kwargs", "self", ".", "logger", ".", "info", "(", "'starting %d feeder threads...'", ",", "self", ".", "feeder", ".", "thread_num", ")", "self", ".", "feeder", ".", "start", "(", "*", "*", "feeder_kwargs", ")", "self", ".", "logger", ".", "info", "(", "'starting %d parser threads...'", ",", "self", ".", "parser", ".", "thread_num", ")", "self", ".", "parser", ".", "start", "(", "*", "*", "parser_kwargs", ")", "self", ".", "logger", ".", "info", "(", "'starting %d downloader threads...'", ",", "self", ".", "downloader", ".", "thread_num", ")", "self", ".", "downloader", ".", "start", "(", "*", "*", "downloader_kwargs", ")", "while", "True", ":", "if", "not", "self", ".", "feeder", ".", "is_alive", "(", ")", ":", "self", ".", "signal", ".", "set", "(", "feeder_exited", "=", "True", ")", "if", "not", "self", ".", "parser", ".", "is_alive", "(", ")", ":", "self", ".", "signal", ".", "set", "(", "parser_exited", "=", "True", ")", "if", "not", "self", ".", "downloader", ".", "is_alive", "(", ")", ":", "break", "time", ".", "sleep", "(", "1", ")", "if", "not", "self", ".", "feeder", ".", "in_queue", ".", "empty", "(", ")", ":", "self", ".", "feeder", ".", "clear_buffer", "(", ")", "if", "not", "self", ".", "parser", ".", "in_queue", ".", "empty", "(", ")", ":", "self", ".", "parser", ".", "clear_buffer", "(", ")", "if", "not", "self", ".", "downloader", ".", "in_queue", ".", "empty", "(", ")", ":", "self", ".", "downloader", ".", "clear_buffer", "(", "True", ")", "self", ".", "logger", ".", "info", "(", "'Crawling task done!'", ")" ]
Start crawling This method will start feeder, parser and download and wait until all threads exit. Args: feeder_kwargs (dict, optional): Arguments to be passed to ``feeder.start()`` parser_kwargs (dict, optional): Arguments to be passed to ``parser.start()`` downloader_kwargs (dict, optional): Arguments to be passed to ``downloader.start()``
[ "Start", "crawling" ]
python
train
wummel/linkchecker
third_party/dnspython/dns/name.py
https://github.com/wummel/linkchecker/blob/c2ce810c3fb00b895a841a7be6b2e78c64e7b042/third_party/dnspython/dns/name.py#L104-L127
def _validate_labels(labels):
    """Check for empty labels in the middle of a label sequence,
    labels that are too long, and for too many labels.
    @raises NameTooLong: the name as a whole is too long
    @raises LabelTooLong: an individual label is too long
    @raises EmptyLabel: a label is empty (i.e. the root label) and appears
    in a position other than the end of the label sequence"""

    l = len(labels)
    total = 0
    i = -1
    j = 0
    for label in labels:
        ll = len(label)
        total += ll + 1
        if ll > 63:
            raise LabelTooLong
        if i < 0 and label == '':
            i = j
        j += 1
    if total > 255:
        raise NameTooLong
    if i >= 0 and i != l - 1:
        raise EmptyLabel
[ "def", "_validate_labels", "(", "labels", ")", ":", "l", "=", "len", "(", "labels", ")", "total", "=", "0", "i", "=", "-", "1", "j", "=", "0", "for", "label", "in", "labels", ":", "ll", "=", "len", "(", "label", ")", "total", "+=", "ll", "+", "1", "if", "ll", ">", "63", ":", "raise", "LabelTooLong", "if", "i", "<", "0", "and", "label", "==", "''", ":", "i", "=", "j", "j", "+=", "1", "if", "total", ">", "255", ":", "raise", "NameTooLong", "if", "i", ">=", "0", "and", "i", "!=", "l", "-", "1", ":", "raise", "EmptyLabel" ]
Check for empty labels in the middle of a label sequence, labels that are too long, and for too many labels. @raises NameTooLong: the name as a whole is too long @raises LabelTooLong: an individual label is too long @raises EmptyLabel: a label is empty (i.e. the root label) and appears in a position other than the end of the label sequence
[ "Check", "for", "empty", "labels", "in", "the", "middle", "of", "a", "label", "sequence", "labels", "that", "are", "too", "long", "and", "for", "too", "many", "labels", "." ]
python
train
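To make the limits enforced by _validate_labels concrete (at most 63 bytes per label, 255 bytes for the whole name counting one length octet per label, and an empty label only in the final position), here is a standalone restatement of the same rules — illustration only, not the dnspython API:

def check_labels(labels):
    # Standalone restatement of the rules above, for illustration only.
    total = 0
    for pos, label in enumerate(labels):
        total += len(label) + 1              # +1 for the length octet
        if len(label) > 63:
            raise ValueError('label too long: %r' % (label,))
        if label == '' and pos != len(labels) - 1:
            raise ValueError('empty label before the end of the sequence')
    if total > 255:
        raise ValueError('name too long')

check_labels(('www', 'example', 'com', ''))  # passes: root label at the end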
numenta/htmresearch
htmresearch/frameworks/layers/combined_sequence_experiment2.py
https://github.com/numenta/htmresearch/blob/70c096b09a577ea0432c3f3bfff4442d4871b7aa/htmresearch/frameworks/layers/combined_sequence_experiment2.py#L194-L237
def _updateInferenceStats(self, statistics, objectName=None):
    """
    Updates the inference statistics.

    Parameters:
    ----------------------------
    @param statistics (dict)
           Dictionary in which to write the statistics

    @param objectName (str)
           Name of the inferred object, if known. Otherwise, set to None.
    """
    L4Representations = self.getL4Representations()
    L4PredictedCells = self.getL4PredictedCells()
    L4PredictedActiveCells = self.getL4PredictedActiveCells()
    L2Representation = self.getL2Representations()

    for i in xrange(self.numColumns):
        statistics["L4 Representation C" + str(i)].append(
            len(L4Representations[i])
        )
        statistics["L4 Predicted C" + str(i)].append(
            len(L4PredictedCells[i])
        )
        statistics["L4 PredictedActive C" + str(i)].append(
            len(L4PredictedActiveCells[i])
        )
        statistics["L2 Representation C" + str(i)].append(
            len(L2Representation[i])
        )
        statistics["L4 Apical Segments C" + str(i)].append(
            len(self.L4Columns[i]._tm.getActiveApicalSegments())
        )
        statistics["L4 Basal Segments C" + str(i)].append(
            len(self.L4Columns[i]._tm.getActiveBasalSegments())
        )

        # add true overlap if objectName was provided
        if objectName in self.objectL2Representations:
            objectRepresentation = self.objectL2Representations[objectName]
            statistics["Overlap L2 with object C" + str(i)].append(
                len(objectRepresentation[i] & L2Representation[i])
            )
[ "def", "_updateInferenceStats", "(", "self", ",", "statistics", ",", "objectName", "=", "None", ")", ":", "L4Representations", "=", "self", ".", "getL4Representations", "(", ")", "L4PredictedCells", "=", "self", ".", "getL4PredictedCells", "(", ")", "L4PredictedActiveCells", "=", "self", ".", "getL4PredictedActiveCells", "(", ")", "L2Representation", "=", "self", ".", "getL2Representations", "(", ")", "for", "i", "in", "xrange", "(", "self", ".", "numColumns", ")", ":", "statistics", "[", "\"L4 Representation C\"", "+", "str", "(", "i", ")", "]", ".", "append", "(", "len", "(", "L4Representations", "[", "i", "]", ")", ")", "statistics", "[", "\"L4 Predicted C\"", "+", "str", "(", "i", ")", "]", ".", "append", "(", "len", "(", "L4PredictedCells", "[", "i", "]", ")", ")", "statistics", "[", "\"L4 PredictedActive C\"", "+", "str", "(", "i", ")", "]", ".", "append", "(", "len", "(", "L4PredictedActiveCells", "[", "i", "]", ")", ")", "statistics", "[", "\"L2 Representation C\"", "+", "str", "(", "i", ")", "]", ".", "append", "(", "len", "(", "L2Representation", "[", "i", "]", ")", ")", "statistics", "[", "\"L4 Apical Segments C\"", "+", "str", "(", "i", ")", "]", ".", "append", "(", "len", "(", "self", ".", "L4Columns", "[", "i", "]", ".", "_tm", ".", "getActiveApicalSegments", "(", ")", ")", ")", "statistics", "[", "\"L4 Basal Segments C\"", "+", "str", "(", "i", ")", "]", ".", "append", "(", "len", "(", "self", ".", "L4Columns", "[", "i", "]", ".", "_tm", ".", "getActiveBasalSegments", "(", ")", ")", ")", "# add true overlap if objectName was provided", "if", "objectName", "in", "self", ".", "objectL2Representations", ":", "objectRepresentation", "=", "self", ".", "objectL2Representations", "[", "objectName", "]", "statistics", "[", "\"Overlap L2 with object C\"", "+", "str", "(", "i", ")", "]", ".", "append", "(", "len", "(", "objectRepresentation", "[", "i", "]", "&", "L2Representation", "[", "i", "]", ")", ")" ]
Updates the inference statistics.

Parameters:
----------------------------
@param statistics (dict)
       Dictionary in which to write the statistics

@param objectName (str)
       Name of the inferred object, if known. Otherwise, set to None.
[ "Updates", "the", "inference", "statistics", "." ]
python
train
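The statistics argument in the entry above is keyed per metric and per column ("L4 Representation C0", "Overlap L2 with object C1", ...), with one list per key. A small sketch of that shape follows; using defaultdict is an assumption here, and the experiment framework may instead pre-populate a plain dict with empty lists.

from collections import defaultdict

statistics = defaultdict(list)      # every metric key starts as an empty list

# What one inference step might append for column 0 (numbers are made up):
statistics["L4 Representation C0"].append(40)
statistics["L4 Predicted C0"].append(38)
statistics["L2 Representation C0"].append(37)

print(dict(statistics))
# {'L4 Representation C0': [40], 'L4 Predicted C0': [38], 'L2 Representation C0': [37]}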
casacore/python-casacore
casacore/tables/table.py
https://github.com/casacore/python-casacore/blob/975510861ea005f7919dd9e438b5f98a1682eebe/casacore/tables/table.py#L1873-L1950
def view(self, wait=True, tempname="/tmp/seltable"):
    """View a table using casaviewer, casabrowser, or wxwidget based browser.

    The table is viewed depending on the type:

    MeasurementSet
      is viewed using casaviewer.
    Image
      is viewed using casaviewer.
    other
      are browsed using the :func:`browse` function.

    If the casaviewer cannot be found, all tables are browsed.

    The casaviewer can only display tables that are persistent on disk.
    This gives problems for tables resulting from a query because they
    are held in memory only (unless an output table name was given).
    To make viewing of such tables possible, the argument `tempname` can
    be used to specify a table name that will be used to form a
    persistent table that can be browsed. Note that such a table is very
    small as it does not contain data, but only references to rows in
    the original table. The default for `tempname` is '/tmp/seltable'.
    If needed, the table can be deleted using the :func:`tabledelete`
    function.

    If `wait=False`, the casaviewer is started in the background. In
    that case the user should delete a possibly created copy of a
    temporary table.

    """
    import os
    # Determine the table type.
    # Test if casaviewer can be found.
    # On OS-X 'which' always returns 0, so use test on top of it.
    viewed = False
    type = self.info()["type"]
    if type == "Measurement Set" or type == "Image":
        if os.system('test -x `which casaviewer` > /dev/null 2>&1') == 0:
            waitstr1 = ""
            waitstr2 = "foreground ..."
            if not wait:
                waitstr1 = " &"
                waitstr2 = "background ..."
            if self.iswritable():
                six.print_("Flushing data and starting casaviewer " +
                           "in the " + waitstr2)
            else:
                six.print_("Starting casaviewer in the " + waitstr2)
            self.flush()
            self.unlock()
            if os.system('test -e ' + self.name() + '/table.dat') == 0:
                os.system('casaviewer ' + self.name() + waitstr1)
                viewed = True
            elif len(tempname) > 0:
                six.print_("  making a persistent copy in table " + tempname)
                self.copy(tempname)
                os.system('casaviewer ' + tempname + waitstr1)
                viewed = True
                if wait:
                    from casacore.tables import tabledelete
                    six.print_("  finished viewing")
                    tabledelete(tempname)
                else:
                    six.print_("  after viewing use tabledelete('" +
                               tempname + "') to delete the copy")
            else:
                six.print_("Cannot browse because the table is " +
                           "in memory only.")
                six.print_("You can browse a (shallow) persistent " +
                           "copy of the table like:")
                six.print_("   t.view(True, '/tmp/tab1')")
    # Could not view the table, so browse it.
    if not viewed:
        self.browse(wait, tempname)
[ "def", "view", "(", "self", ",", "wait", "=", "True", ",", "tempname", "=", "\"/tmp/seltable\"", ")", ":", "import", "os", "# Determine the table type.", "# Test if casaviewer can be found.", "# On OS-X 'which' always returns 0, so use test on top of it.", "viewed", "=", "False", "type", "=", "self", ".", "info", "(", ")", "[", "\"type\"", "]", "if", "type", "==", "\"Measurement Set\"", "or", "type", "==", "\"Image\"", ":", "if", "os", ".", "system", "(", "'test -x `which casaviewer` > /dev/null 2>&1'", ")", "==", "0", ":", "waitstr1", "=", "\"\"", "waitstr2", "=", "\"foreground ...\"", "if", "not", "wait", ":", "waitstr1", "=", "\" &\"", "waitstr2", "=", "\"background ...\"", "if", "self", ".", "iswritable", "(", ")", ":", "six", ".", "print_", "(", "\"Flushing data and starting casaviewer \"", "+", "\"in the \"", "+", "waitstr2", ")", "else", ":", "six", ".", "print_", "(", "\"Starting casaviewer in the \"", "+", "waitstr2", ")", "self", ".", "flush", "(", ")", "self", ".", "unlock", "(", ")", "if", "os", ".", "system", "(", "'test -e '", "+", "self", ".", "name", "(", ")", "+", "'/table.dat'", ")", "==", "0", ":", "os", ".", "system", "(", "'casaviewer '", "+", "self", ".", "name", "(", ")", "+", "waitstr1", ")", "viewed", "=", "True", "elif", "len", "(", "tempname", ")", ">", "0", ":", "six", ".", "print_", "(", "\" making a persistent copy in table \"", "+", "tempname", ")", "self", ".", "copy", "(", "tempname", ")", "os", ".", "system", "(", "'casaviewer '", "+", "tempname", "+", "waitstr1", ")", "viewed", "=", "True", "if", "wait", ":", "from", "casacore", ".", "tables", "import", "tabledelete", "six", ".", "print_", "(", "\" finished viewing\"", ")", "tabledelete", "(", "tempname", ")", "else", ":", "six", ".", "print_", "(", "\" after viewing use tabledelete('\"", "+", "tempname", "+", "\"') to delete the copy\"", ")", "else", ":", "six", ".", "print_", "(", "\"Cannot browse because the table is \"", "+", "\"in memory only.\"", ")", "six", ".", "print_", "(", "\"You can browse a (shallow) persistent \"", "+", "\"copy of the table like:\"", ")", "six", ".", "print_", "(", "\" t.view(True, '/tmp/tab1')\"", ")", "# Could not view the table, so browse it.", "if", "not", "viewed", ":", "self", ".", "browse", "(", "wait", ",", "tempname", ")" ]
View a table using casaviewer, casabrowser, or wxwidget based browser.

The table is viewed depending on the type:

MeasurementSet
  is viewed using casaviewer.
Image
  is viewed using casaviewer.
other
  are browsed using the :func:`browse` function.

If the casaviewer cannot be found, all tables are browsed.

The casaviewer can only display tables that are persistent on disk.
This gives problems for tables resulting from a query because they are
held in memory only (unless an output table name was given).
To make viewing of such tables possible, the argument `tempname` can be
used to specify a table name that will be used to form a persistent
table that can be browsed. Note that such a table is very small as it
does not contain data, but only references to rows in the original
table. The default for `tempname` is '/tmp/seltable'.
If needed, the table can be deleted using the :func:`tabledelete`
function.

If `wait=False`, the casaviewer is started in the background. In that
case the user should delete a possibly created copy of a temporary
table.
[ "View", "a", "table", "using", "casaviewer", "casabrowser", "or", "wxwidget", "based", "browser", "." ]
python
train
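Following the hint in the docstring itself, a minimal usage sketch for view(): the MeasurementSet path and the selection are placeholders, and casaviewer has to be on the PATH for anything other than a plain browse to happen.

from casacore.tables import table

t = table('my.ms')                    # placeholder path to a MeasurementSet
sel = t.query('ANTENNA1 == 0')        # selection held in memory only
sel.view(wait=True, tempname='/tmp/tab1')   # persistent copy made so casaviewer can open it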