Dataset schema (field name, feature type, observed min/max):

  hexsha                  stringlengths   40 to 40
  repo                    stringlengths   5 to 121
  path                    stringlengths   4 to 227
  license                 sequence
  language                stringclasses   1 value
  identifier              stringlengths   1 to 107
  return_type             stringlengths   2 to 237
  original_string         stringlengths   75 to 13.4k
  original_docstring      stringlengths   13 to 12.9k
  docstring               stringlengths   13 to 2.57k
  docstring_tokens        sequence
  code                    stringlengths   23 to 1.88k
  code_tokens             sequence
  short_docstring         stringlengths   1 to 1.32k
  short_docstring_tokens  sequence
  comment                 sequence
  parameters              list
  docstring_params        dict
  code_with_imports       stringlengths   23 to 1.88k
  idxs                    int64           0 to 611k
  cluster                 int64           0 to 1.02k
9a7270ba3471ed29490a6d58cc3cb4089a67f14d
hohn/sarif-cli
sarif_cli/typegraph.py
[ "MIT" ]
Python
tagged_array_columns
<not_specific>
def tagged_array_columns(typegraph, array_id): """ Return a dict mapping the array column names to versions tagged with the id. Example: The original table headers are array_id value_index type_at_index id_or_value_at_index the tagged versions become t8754_array_id t8754_value_index t8754_type_at_index t8754_id_or_value_at_index """ array_id = str(array_id) colheader = ('array_id', 'value_index', 'type_at_index', 'id_or_value_at_index') return { header:"t{:s}_{:s}".format(array_id, header) for header in colheader}
Return a dict mapping the array column names to versions tagged with the id. Example: The original table headers are array_id value_index type_at_index id_or_value_at_index the tagged versions become t8754_array_id t8754_value_index t8754_type_at_index t8754_id_or_value_at_index
Return a dict mapping the array column names to versions tagged with the id.
[ "Return", "a", "dict", "mapping", "the", "array", "column", "names", "to", "versions", "tagged", "with", "the", "id", "." ]
def tagged_array_columns(typegraph, array_id): array_id = str(array_id) colheader = ('array_id', 'value_index', 'type_at_index', 'id_or_value_at_index') return { header:"t{:s}_{:s}".format(array_id, header) for header in colheader}
[ "def", "tagged_array_columns", "(", "typegraph", ",", "array_id", ")", ":", "array_id", "=", "str", "(", "array_id", ")", "colheader", "=", "(", "'array_id'", ",", "'value_index'", ",", "'type_at_index'", ",", "'id_or_value_at_index'", ")", "return", "{", "header", ":", "\"t{:s}_{:s}\"", ".", "format", "(", "array_id", ",", "header", ")", "for", "header", "in", "colheader", "}" ]
Return a dict mapping the array column names to versions tagged with the id.
[ "Return", "a", "dict", "mapping", "the", "array", "column", "names", "to", "versions", "tagged", "with", "the", "id", "." ]
[ "\"\"\" Return a dict mapping the array column names to versions tagged with the id.\n \n Example:\n The original table headers are \n\n array_id value_index type_at_index id_or_value_at_index\n\n the tagged versions become\n\n t8754_array_id t8754_value_index t8754_type_at_index t8754_id_or_value_at_index \n \"\"\"" ]
[ { "param": "typegraph", "type": null }, { "param": "array_id", "type": null } ]
{ "returns": [], "raises": [], "params": [ { "identifier": "typegraph", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null }, { "identifier": "array_id", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null } ], "outlier_params": [], "others": [ { "identifier": "examples", "docstring": null, "docstring_tokens": [ "None" ] } ] }
def tagged_array_columns(typegraph, array_id): array_id = str(array_id) colheader = ('array_id', 'value_index', 'type_at_index', 'id_or_value_at_index') return { header:"t{:s}_{:s}".format(array_id, header) for header in colheader}
563
381
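A quick usage sketch for tagged_array_columns above; the typegraph argument is unused by the function body, so None stands in here (illustrative call, not from the record):

    cols = tagged_array_columns(None, 8754)
    print(cols['array_id'])     # 't8754_array_id'
    print(cols['value_index'])  # 't8754_value_index'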
17248cc4fe790963693a27efe196284c98d337ff
tangkong/pcdsdevices
pcdsdevices/make_ophyd_device.py
[ "BSD-3-Clause-LBNL" ]
Python
make_signal_wrbv
null
def make_signal_wrbv(suffix, lines): """ Create a Cpt line with RBV pv and separate write_PV. This line is added to a supplied list, used for storing components within a particular class. """ s = (" {0} = Cpt(EpicsSignal, \':{1}_RBV\', write_pv=\':{1}\', " "kind=\'normal\')") lines.append(s.format(suffix.lower(), suffix))
Create a Cpt line with RBV pv and separate write_PV. This line is added to a supplied list, used for storing components within a particular class.
Create a Cpt line with RBV pv and separate write_PV. This line is added to a supplied list, used for storing components within a particular class.
[ "Create", "a", "Cpt", "line", "with", "RBV", "pv", "and", "separate", "write_PV", ".", "This", "line", "is", "added", "to", "a", "supplied", "list", "used", "for", "storing", "components", "within", "a", "particular", "class", "." ]
def make_signal_wrbv(suffix, lines): s = (" {0} = Cpt(EpicsSignal, \':{1}_RBV\', write_pv=\':{1}\', " "kind=\'normal\')") lines.append(s.format(suffix.lower(), suffix))
[ "def", "make_signal_wrbv", "(", "suffix", ",", "lines", ")", ":", "s", "=", "(", "\" {0} = Cpt(EpicsSignal, \\':{1}_RBV\\', write_pv=\\':{1}\\', \"", "\"kind=\\'normal\\')\"", ")", "lines", ".", "append", "(", "s", ".", "format", "(", "suffix", ".", "lower", "(", ")", ",", "suffix", ")", ")" ]
Create a Cpt line with RBV pv and separate write_PV.
[ "Create", "a", "Cpt", "line", "with", "RBV", "pv", "and", "separate", "write_PV", "." ]
[ "\"\"\"\n Create a Cpt line with RBV pv and separate write_PV. This line is added to\n a supplied list, used for storing components within a particular class.\n \"\"\"" ]
[ { "param": "suffix", "type": null }, { "param": "lines", "type": null } ]
{ "returns": [], "raises": [], "params": [ { "identifier": "suffix", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null }, { "identifier": "lines", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
def make_signal_wrbv(suffix, lines): s = (" {0} = Cpt(EpicsSignal, \':{1}_RBV\', write_pv=\':{1}\', " "kind=\'normal\')") lines.append(s.format(suffix.lower(), suffix))
564
295
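A hedged usage sketch for make_signal_wrbv with an illustrative suffix; the function mutates the caller's list rather than returning a value:

    lines = []
    make_signal_wrbv('VOLT', lines)
    print(lines[0])
    # prints:     volt = Cpt(EpicsSignal, ':VOLT_RBV', write_pv=':VOLT', kind='normal')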
a36f734b74b5245d898b138f53d82e20d305bf8c
gboluwaga/ProjectEuler
problem014.py
[ "MIT" ]
Python
collatz
<not_specific>
def collatz(n:int): """ Computes and returns the Collatz sequence for an integer n """ sequence = [] while True: sequence.append(n) if n == 1: return sequence elif n % 2 == 0: n = n // 2 else: n = 3 * n + 1
Computes and returns the Collatz sequence for an integer n
Computes and returns the Collatz sequence for an integer n
[ "Computes", "and", "returns", "the", "Collatz", "sequence", "for", "an", "integer", "n" ]
def collatz(n:int): sequence = [] while True: sequence.append(n) if n == 1: return sequence elif n % 2 == 0: n = n // 2 else: n = 3 * n + 1
[ "def", "collatz", "(", "n", ":", "int", ")", ":", "sequence", "=", "[", "]", "while", "True", ":", "sequence", ".", "append", "(", "n", ")", "if", "n", "==", "1", ":", "return", "sequence", "elif", "n", "%", "2", "==", "0", ":", "n", "=", "n", "//", "2", "else", ":", "n", "=", "3", "*", "n", "+", "1" ]
Computes and returns the Collatz sequence for an integer n
[ "Computes", "and", "returns", "the", "Collatz", "sequence", "for", "an", "integer", "n" ]
[ "\"\"\"\n Computes and returns the Collatz sequence for an integer n\n \"\"\"" ]
[ { "param": "n", "type": "int" } ]
{ "returns": [], "raises": [], "params": [ { "identifier": "n", "type": "int", "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
def collatz(n:int): sequence = [] while True: sequence.append(n) if n == 1: return sequence elif n % 2 == 0: n = n // 2 else: n = 3 * n + 1
565
23
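A quick check of collatz with an illustrative input:

    print(collatz(6))  # [6, 3, 10, 5, 16, 8, 4, 2, 1]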
9dc4f139bfafafba30ad7356837000d15cb3891a
jokersus/pywal
pywal/theme.py
[ "MIT" ]
Python
terminal_sexy_to_wal
<not_specific>
def terminal_sexy_to_wal(data): """Convert terminal.sexy json schema to wal.""" data["colors"] = {} data["special"] = { "foreground": data["foreground"], "background": data["background"], "cursor": data["color"][9] } for i, color in enumerate(data["color"]): data["colors"]["color%s" % i] = color return data
Convert terminal.sexy json schema to wal.
Convert terminal.sexy json schema to wal.
[ "Convert", "terminal", ".", "sexy", "json", "schema", "to", "wal", "." ]
def terminal_sexy_to_wal(data): data["colors"] = {} data["special"] = { "foreground": data["foreground"], "background": data["background"], "cursor": data["color"][9] } for i, color in enumerate(data["color"]): data["colors"]["color%s" % i] = color return data
[ "def", "terminal_sexy_to_wal", "(", "data", ")", ":", "data", "[", "\"colors\"", "]", "=", "{", "}", "data", "[", "\"special\"", "]", "=", "{", "\"foreground\"", ":", "data", "[", "\"foreground\"", "]", ",", "\"background\"", ":", "data", "[", "\"background\"", "]", ",", "\"cursor\"", ":", "data", "[", "\"color\"", "]", "[", "9", "]", "}", "for", "i", ",", "color", "in", "enumerate", "(", "data", "[", "\"color\"", "]", ")", ":", "data", "[", "\"colors\"", "]", "[", "\"color%s\"", "%", "i", "]", "=", "color", "return", "data" ]
Convert terminal.sexy json schema to wal.
[ "Convert", "terminal", ".", "sexy", "json", "schema", "to", "wal", "." ]
[ "\"\"\"Convert terminal.sexy json schema to wal.\"\"\"" ]
[ { "param": "data", "type": null } ]
{ "returns": [], "raises": [], "params": [ { "identifier": "data", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
def terminal_sexy_to_wal(data): data["colors"] = {} data["special"] = { "foreground": data["foreground"], "background": data["background"], "cursor": data["color"][9] } for i, color in enumerate(data["color"]): data["colors"]["color%s" % i] = color return data
566
673
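A hedged sketch of terminal_sexy_to_wal; the palette below is a made-up 16-color dict in the terminal.sexy shape (keys foreground, background, color), not data from the record:

    palette = {
        "foreground": "#c5c8c6",
        "background": "#1d1f21",
        "color": ["#1d1f21", "#cc6666", "#b5bd68", "#f0c674",
                  "#81a2be", "#b294bb", "#8abeb7", "#c5c8c6",
                  "#969896", "#de935f", "#b5bd68", "#f0c674",
                  "#81a2be", "#b294bb", "#8abeb7", "#ffffff"],
    }
    wal = terminal_sexy_to_wal(palette)
    print(wal["special"]["cursor"])  # palette["color"][9] -> '#de935f'
    print(wal["colors"]["color0"])   # '#1d1f21'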
7fd7d44edc76c15cd3db39993653323e8ba4fa54
DLunin/bayescraft
graphmodels/representation.py
[ "MIT" ]
Python
immoralities
<not_specific>
def immoralities(G): """ Iterate over all immoralities in a graph. :param G: target graph :return: iterator over immoralities in form (node, parent1, parent2) """ return filter(lambda v: (not G.has_edge(v[1], v[2])) and (not G.has_edge(v[2], v[1])), G.v_structures)
Iterate over all immoralities in a graph. :param G: target graph :return: iterator over immoralities in form (node, parent1, parent2)
Iterate over all immoralities in a graph.
[ "Iterate", "over", "all", "immoralities", "in", "a", "graph", "." ]
def immoralities(G): return filter(lambda v: (not G.has_edge(v[1], v[2])) and (not G.has_edge(v[2], v[1])), G.v_structures)
[ "def", "immoralities", "(", "G", ")", ":", "return", "filter", "(", "lambda", "v", ":", "(", "not", "G", ".", "has_edge", "(", "v", "[", "1", "]", ",", "v", "[", "2", "]", ")", ")", "and", "(", "not", "G", ".", "has_edge", "(", "v", "[", "2", "]", ",", "v", "[", "1", "]", ")", ")", ",", "G", ".", "v_structures", ")" ]
Iterate over all immoralities in a graph.
[ "Iterate", "over", "all", "immoralities", "in", "a", "graph", "." ]
[ "\"\"\"\n Iterate over all immoralities in a graph.\n :param G: target graph\n :return: iterator over immoralities in form (node, parent1, parent2)\n \"\"\"" ]
[ { "param": "G", "type": null } ]
{ "returns": [ { "docstring": "iterator over immoralities in form (node, parent1, parent2)", "docstring_tokens": [ "iterator", "over", "immoralities", "in", "form", "(", "node", "parent1", "parent2", ")" ], "type": null } ], "raises": [], "params": [ { "identifier": "G", "type": null, "docstring": null, "docstring_tokens": [ "None" ], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
def immoralities(G): return filter(lambda v: (not G.has_edge(v[1], v[2])) and (not G.has_edge(v[2], v[1])), G.v_structures)
567
886
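A minimal sketch for immoralities, assuming v_structures yields (node, parent1, parent2) triples for every node with two parents; the DGraph stand-in below is hypothetical, not the bayescraft graph type:

    import networkx as nx
    from itertools import combinations

    class DGraph(nx.DiGraph):
        @property
        def v_structures(self):
            # stand-in: yield every (child, parent1, parent2) triple
            for v in self.nodes():
                for p1, p2 in combinations(list(self.predecessors(v)), 2):
                    yield (v, p1, p2)

    G = DGraph([('a', 'c'), ('b', 'c')])
    print(list(immoralities(G)))  # [('c', 'a', 'b')], since 'a' and 'b' are not adjacent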
00263d7bc9c4cb68021565ad277e721edb09ce42
twdb/sonde3
tests/format_tests.py
[ "BSD-3-Clause" ]
Python
_convert_to_aware_datetime
<not_specific>
def _convert_to_aware_datetime(datetime_string): """ Convert a datetime string to a datetime object, taking tz into account if it is set """ date_format = "%m/%d/%Y %H:%M:%S" dt = datetime.strptime(datetime_string, date_format) #if tz: # dt = dt.replace(tzinfo=tz) return dt
Convert a datetime string to a datetime object, taking tz into account if it is set
Convert a datetime string to a datetime object, taking tz into account if it is set
[ "Convert", "a", "datetime", "string", "to", "a", "datetime", "object", "taking", "tz", "into", "account", "if", "it", "is", "set" ]
def _convert_to_aware_datetime(datetime_string): date_format = "%m/%d/%Y %H:%M:%S" dt = datetime.strptime(datetime_string, date_format) return dt
[ "def", "_convert_to_aware_datetime", "(", "datetime_string", ")", ":", "date_format", "=", "\"%m/%d/%Y %H:%M:%S\"", "dt", "=", "datetime", ".", "strptime", "(", "datetime_string", ",", "date_format", ")", "return", "dt" ]
Convert a datetime string to a datetime object, taking tz into account if it is set
[ "Convert", "a", "datetime", "string", "to", "a", "datetime", "object", "taking", "tz", "into", "account", "if", "it", "is", "set" ]
[ "\"\"\"\n Convert a datetime string to a datetime object, taking tz into\n account if it is set\n \"\"\"", "#if tz:", "# dt = dt.replace(tzinfo=tz)" ]
[ { "param": "datetime_string", "type": null } ]
{ "returns": [], "raises": [], "params": [ { "identifier": "datetime_string", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
from datetime import datetime def _convert_to_aware_datetime(datetime_string): date_format = "%m/%d/%Y %H:%M:%S" dt = datetime.strptime(datetime_string, date_format) return dt
568
769
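A quick check with an illustrative timestamp string (month/day/year, per the format):

    print(_convert_to_aware_datetime('03/15/2020 13:45:00'))  # 2020-03-15 13:45:00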
a8284cb9d1b7053c689887c876385f047d77a7ee
rtobar/askapsoft
Tests/hudson/Documentation/generate-docs.py
[ "BSL-1.0", "Apache-2.0", "OpenSSL" ]
Python
BuildTree
<not_specific>
def BuildTree(path, partial_paths): ''' Given a package directory path, return list of tuples containing indent level and directory name of partial paths. Need to keep track of partial paths already seen. ''' path_components = path.split('/') p = '' items = [] for comp in path_components[0:-1]: p += comp + '/' if not p in partial_paths: partial_paths.append(p) items.append((len(p.split('/')), "<li> %s </li>" % comp)) return items
Given a package directory path, return list of tuples containing indent level and directory name of partial paths. Need to keep track of partial paths already seen.
Given a package directory path, return list of tuples containing indent level and directory name of partial paths. Need to keep track of partial paths already seen.
[ "Given", "a", "package", "directory", "path", "return", "list", "of", "tuples", "containing", "indent", "level", "and", "directory", "name", "of", "partial", "paths", ".", "Need", "to", "keep", "track", "of", "partial", "paths", "already", "seen", "." ]
def BuildTree(path, partial_paths): path_components = path.split('/') p = '' items = [] for comp in path_components[0:-1]: p += comp + '/' if not p in partial_paths: partial_paths.append(p) items.append((len(p.split('/')), "<li> %s </li>" % comp)) return items
[ "def", "BuildTree", "(", "path", ",", "partial_paths", ")", ":", "path_components", "=", "path", ".", "split", "(", "'/'", ")", "p", "=", "''", "items", "=", "[", "]", "for", "comp", "in", "path_components", "[", "0", ":", "-", "1", "]", ":", "p", "+=", "comp", "+", "'/'", "if", "not", "p", "in", "partial_paths", ":", "partial_paths", ".", "append", "(", "p", ")", "items", ".", "append", "(", "(", "len", "(", "p", ".", "split", "(", "'/'", ")", ")", ",", "\"<li> %s </li>\"", "%", "comp", ")", ")", "return", "items" ]
Given a package directory path, return list of tuples containing indent level and directory name of partial paths.
[ "Given", "a", "package", "directory", "path", "return", "list", "of", "tuples", "containing", "indent", "level", "and", "directory", "name", "of", "partial", "paths", "." ]
[ "'''\n Given a package directory path, return list of tuples containing indent\n level and directory name of partial paths.\n Need to keep track of partial paths already seen.\n '''" ]
[ { "param": "path", "type": null }, { "param": "partial_paths", "type": null } ]
{ "returns": [], "raises": [], "params": [ { "identifier": "path", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null }, { "identifier": "partial_paths", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
def BuildTree(path, partial_paths): path_components = path.split('/') p = '' items = [] for comp in path_components[0:-1]: p += comp + '/' if not p in partial_paths: partial_paths.append(p) items.append((len(p.split('/')), "<li> %s </li>" % comp)) return items
569
153
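A hedged example of BuildTree with an illustrative package path; note that partial_paths is mutated, so repeated calls skip directories already seen:

    seen = []
    print(BuildTree('askap/cp/docs/index.html', seen))
    # [(2, '<li> askap </li>'), (3, '<li> cp </li>'), (4, '<li> docs </li>')]
    print(seen)  # ['askap/', 'askap/cp/', 'askap/cp/docs/']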
5b93f24b3679fbba5182098555bd485360282718
shohirose/pastools
pastools.py
[ "Unlicense" ]
Python
create_access_env_vars
<not_specific>
def create_access_env_vars(host, path): # type: (str, str) -> dict[str, str] """Create environment variables for Access. This function creates three environment variables for jobs in shared file systems: - ACCESS_INPUT_FILES - ACCESS_OUTPUT_FILES - ACCESS_RUNNING_FILES Args: host: Host of the primary file path: Path to the primary file Returns: Environment variables for Access """ filename = os.path.basename(path) dirname = os.path.dirname(path) return {'ACCESS_INPUT_FILES': filename + '@' + host + ':' + path, 'ACCESS_OUTPUT_FILES': '*@' + host + ':' + dirname, 'ACCESS_RUNNING_FILES': dirname}
Create environment variables for Access. This function creates three environment variables for jobs in shared file systems: - ACCESS_INPUT_FILES - ACCESS_OUTPUT_FILES - ACCESS_RUNNING_FILES Args: host: Host of the primary file path: Path to the primary file Returns: Environment variables for Access
Create environment variables for Access. This function creates three environment variables for jobs in shared file systems: ACCESS_INPUT_FILES ACCESS_OUTPUT_FILES ACCESS_RUNNING_FILES
[ "Create", "environment", "variables", "for", "Access", ".", "This", "function", "creates", "three", "environment", "variables", "for", "jobs", "in", "shared", "file", "systems", ":", "ACCESS_INPUT_FILES", "ACCESS_OUTPUT_FILES", "ACCESS_RUNNING_FILES" ]
def create_access_env_vars(host, path): filename = os.path.basename(path) dirname = os.path.dirname(path) return {'ACCESS_INPUT_FILES': filename + '@' + host + ':' + path, 'ACCESS_OUTPUT_FILES': '*@' + host + ':' + dirname, 'ACCESS_RUNNING_FILES': dirname}
[ "def", "create_access_env_vars", "(", "host", ",", "path", ")", ":", "filename", "=", "os", ".", "path", ".", "basename", "(", "path", ")", "dirname", "=", "os", ".", "path", ".", "dirname", "(", "path", ")", "return", "{", "'ACCESS_INPUT_FILES'", ":", "filename", "+", "'@'", "+", "host", "+", "':'", "+", "path", ",", "'ACCESS_OUTPUT_FILES'", ":", "'*@'", "+", "host", "+", "':'", "+", "dirname", ",", "'ACCESS_RUNNING_FILES'", ":", "dirname", "}" ]
Create environment variables for Access.
[ "Create", "environment", "variables", "for", "Access", "." ]
[ "# type: (str, str) -> dict[str, str]", "\"\"\"Create environment variables for Access.\n\n This function creates three environment variables for jobs in shared\n file systems:\n - ACCESS_INPUT_FILES\n - ACCESS_OUTPUT_FILES\n - ACCESS_RUNNING_FILES\n\n Args:\n host: Host of the primary file\n path: Path to the primary file\n\n Returns:\n Environment variables for Access\n \"\"\"" ]
[ { "param": "host", "type": null }, { "param": "path", "type": null } ]
{ "returns": [ { "docstring": "Environment variables for Access", "docstring_tokens": [ "Environment", "variables", "for", "Access" ], "type": null } ], "raises": [], "params": [ { "identifier": "host", "type": null, "docstring": "Host of the primary file", "docstring_tokens": [ "Host", "of", "the", "primary", "file" ], "default": null, "is_optional": null }, { "identifier": "path", "type": null, "docstring": "Path to the primary file", "docstring_tokens": [ "Path", "to", "the", "primary", "file" ], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
import os def create_access_env_vars(host, path): filename = os.path.basename(path) dirname = os.path.dirname(path) return {'ACCESS_INPUT_FILES': filename + '@' + host + ':' + path, 'ACCESS_OUTPUT_FILES': '*@' + host + ':' + dirname, 'ACCESS_RUNNING_FILES': dirname}
570
762
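A quick sketch with illustrative host and path values:

    env = create_access_env_vars('node01', '/scratch/job/input.dat')
    print(env['ACCESS_INPUT_FILES'])   # input.dat@node01:/scratch/job/input.dat
    print(env['ACCESS_OUTPUT_FILES'])  # *@node01:/scratch/job
    print(env['ACCESS_RUNNING_FILES']) # /scratch/job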
13fc469048ccf6701572ce121717a31be93c5f5a
Lab41/soft-boiled
src/algorithms/gmm.py
[ "Apache-2.0" ]
Python
save_model
null
def save_model(model, output_fname): """ Save the current model for future use Args: model (dict): A dictionary of the form {word: (mixture.GMM, error)} output_fname (str): Local file path to store GMM model """ if output_fname.endswith('.gz'): output_file = gzip.open(output_fname, 'w') else: output_file = open(output_fname, 'w') csv_writer = csv.writer(output_file) LAT = 0 LON = 1 for word in model: (gmm, error) = model[word] row = [word.encode('utf-8'), error, gmm.n_components, gmm.covariance_type] for mean, weight, covar in zip(gmm.means_, gmm.weights_, gmm.covars_): row.extend([mean[LAT], mean[LON], weight, covar[0][0], covar[0][1], covar[1][0], covar[1][1]]) csv_writer.writerow(row) output_file.close()
Save the current model for future use Args: model (dict): A dictionary of the form {word: (mixture.GMM, error)} output_fname (str): Local file path to store GMM model
Save the current model for future use
[ "Save", "the", "current", "model", "for", "future", "use" ]
def save_model(model, output_fname): if output_fname.endswith('.gz'): output_file = gzip.open(output_fname, 'w') else: output_file = open(output_fname, 'w') csv_writer = csv.writer(output_file) LAT = 0 LON = 1 for word in model: (gmm, error) = model[word] row = [word.encode('utf-8'), error, gmm.n_components, gmm.covariance_type] for mean, weight, covar in zip(gmm.means_, gmm.weights_, gmm.covars_): row.extend([mean[LAT], mean[LON], weight, covar[0][0], covar[0][1], covar[1][0], covar[1][1]]) csv_writer.writerow(row) output_file.close()
[ "def", "save_model", "(", "model", ",", "output_fname", ")", ":", "if", "output_fname", ".", "endswith", "(", "'.gz'", ")", ":", "output_file", "=", "gzip", ".", "open", "(", "output_fname", ",", "'w'", ")", "else", ":", "output_file", "=", "open", "(", "output_fname", ",", "'w'", ")", "csv_writer", "=", "csv", ".", "writer", "(", "output_file", ")", "LAT", "=", "0", "LON", "=", "1", "for", "word", "in", "model", ":", "(", "gmm", ",", "error", ")", "=", "model", "[", "word", "]", "row", "=", "[", "word", ".", "encode", "(", "'utf-8'", ")", ",", "error", ",", "gmm", ".", "n_components", ",", "gmm", ".", "covariance_type", "]", "for", "mean", ",", "weight", ",", "covar", "in", "zip", "(", "gmm", ".", "means_", ",", "gmm", ".", "weights_", ",", "gmm", ".", "covars_", ")", ":", "row", ".", "extend", "(", "[", "mean", "[", "LAT", "]", ",", "mean", "[", "LON", "]", ",", "weight", ",", "covar", "[", "0", "]", "[", "0", "]", ",", "covar", "[", "0", "]", "[", "1", "]", ",", "covar", "[", "1", "]", "[", "0", "]", ",", "covar", "[", "1", "]", "[", "1", "]", "]", ")", "csv_writer", ".", "writerow", "(", "row", ")", "output_file", ".", "close", "(", ")" ]
Save the current model for future use
[ "Save", "the", "current", "model", "for", "future", "use" ]
[ "\"\"\"\n Save the current model for future use\n\n Args:\n model (dict): A dictionary of the form {word: (mixture.GMM, error)}\n output_fname (str): Local file path to store GMM model\n \"\"\"" ]
[ { "param": "model", "type": null }, { "param": "output_fname", "type": null } ]
{ "returns": [], "raises": [], "params": [ { "identifier": "model", "type": null, "docstring": "A dictionary of the form {word: (mixture.GMM, error)}", "docstring_tokens": [ "A", "dictionary", "of", "the", "form", "{", "word", ":", "(", "mixture", ".", "GMM", "error", ")", "}" ], "default": null, "is_optional": false }, { "identifier": "output_fname", "type": null, "docstring": "Local file path to store GMM model", "docstring_tokens": [ "Local", "file", "path", "to", "store", "GMM", "model" ], "default": null, "is_optional": false } ], "outlier_params": [], "others": [] }
import gzip import csv def save_model(model, output_fname): if output_fname.endswith('.gz'): output_file = gzip.open(output_fname, 'w') else: output_file = open(output_fname, 'w') csv_writer = csv.writer(output_file) LAT = 0 LON = 1 for word in model: (gmm, error) = model[word] row = [word.encode('utf-8'), error, gmm.n_components, gmm.covariance_type] for mean, weight, covar in zip(gmm.means_, gmm.weights_, gmm.covars_): row.extend([mean[LAT], mean[LON], weight, covar[0][0], covar[0][1], covar[1][0], covar[1][1]]) csv_writer.writerow(row) output_file.close()
571
855
361613e94afaddc0b452defaa947d09eac0efc50
hernot/hickle
hickle/helpers.py
[ "MIT" ]
Python
check_iterable_item_type
<not_specific>
def check_iterable_item_type(iter_obj): """ Check if all items within an iterable are the same type. Args: iter_obj: iterable object Returns: iter_type: type of item contained within the iterable. If the iterable has many types, a boolean False is returned instead. References: http://stackoverflow.com/questions/13252333 """ iseq = iter(iter_obj) try: first_type = type(next(iseq)) except StopIteration: return False except Exception: # pragma: no cover return False else: if all([type(x) is first_type for x in iseq]): return(first_type) else: return(False)
Check if all items within an iterable are the same type. Args: iter_obj: iterable object Returns: iter_type: type of item contained within the iterable. If the iterable has many types, a boolean False is returned instead. References: http://stackoverflow.com/questions/13252333
Check if all items within an iterable are the same type.
[ "Check", "if", "all", "items", "within", "an", "iterable", "are", "the", "same", "type", "." ]
def check_iterable_item_type(iter_obj): iseq = iter(iter_obj) try: first_type = type(next(iseq)) except StopIteration: return False except Exception: return False else: if all([type(x) is first_type for x in iseq]): return(first_type) else: return(False)
[ "def", "check_iterable_item_type", "(", "iter_obj", ")", ":", "iseq", "=", "iter", "(", "iter_obj", ")", "try", ":", "first_type", "=", "type", "(", "next", "(", "iseq", ")", ")", "except", "StopIteration", ":", "return", "False", "except", "Exception", ":", "return", "False", "else", ":", "if", "all", "(", "[", "type", "(", "x", ")", "is", "first_type", "for", "x", "in", "iseq", "]", ")", ":", "return", "(", "first_type", ")", "else", ":", "return", "(", "False", ")" ]
Check if all items within an iterable are the same type.
[ "Check", "if", "all", "items", "within", "an", "iterable", "are", "the", "same", "type", "." ]
[ "\"\"\" Check if all items within an iterable are the same type.\n\n Args:\n iter_obj: iterable object\n\n Returns:\n iter_type: type of item contained within the iterable. If\n the iterable has many types, a boolean False is returned instead.\n\n References:\n http://stackoverflow.com/questions/13252333\n \"\"\"", "# pragma: no cover" ]
[ { "param": "iter_obj", "type": null } ]
{ "returns": [ { "docstring": "type of item contained within the iterable. If\nthe iterable has many types, a boolean False is returned instead.", "docstring_tokens": [ "type", "of", "item", "contained", "within", "the", "iterable", ".", "If", "the", "iterable", "has", "many", "types", "a", "boolean", "False", "is", "returned", "instead", "." ], "type": "iter_type" } ], "raises": [], "params": [ { "identifier": "iter_obj", "type": null, "docstring": null, "docstring_tokens": [ "None" ], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
def check_iterable_item_type(iter_obj): iseq = iter(iter_obj) try: first_type = type(next(iseq)) except StopIteration: return False except Exception: return False else: if all([type(x) is first_type for x in iseq]): return(first_type) else: return(False)
572
1,012
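Quick checks of check_iterable_item_type with illustrative inputs:

    print(check_iterable_item_type([1, 2, 3]))  # <class 'int'>
    print(check_iterable_item_type([1, 'a']))   # False (mixed types)
    print(check_iterable_item_type([]))         # False (empty iterable)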
49a65c1f6a2571fbeed4121c9759ce6f38a79628
gregbreese/vicedtools
src/vicedtools/acer/patresults.py
[ "Apache-2.0" ]
Python
locate_header_row
int
def locate_header_row(df: pd.DataFrame) -> int: '''Identifies the row in a group report containing the column headings. Args: df: A pandas DataFrame containing the data from a PAT group report xlsx. Returns: The row number containing the column headings. Raises: ValueError: DataFrame did not contain the expected header row. ''' row_id = 0 while row_id < len(df.index): if df[0][row_id] == "Unique ID": return row_id row_id += 1 raise ValueError("DataFrame did not contain the expected header row.")
Identifies the row in a group report containing the column headings. Args: df: A pandas DataFrame containing the data from a PAT group report xlsx. Returns: The row number containing the column headings. Raises: ValueError: DataFrame did not contain the expected header row.
Identifies the row in a group report containing the column headings.
[ "Identifies", "the", "row", "in", "a", "group", "report", "containing", "the", "column", "headings", "." ]
def locate_header_row(df: pd.DataFrame) -> int: row_id = 0 while row_id < len(df.index): if df[0][row_id] == "Unique ID": return row_id row_id += 1 raise ValueError("DataFrame did not contain the expected header row.")
[ "def", "locate_header_row", "(", "df", ":", "pd", ".", "DataFrame", ")", "->", "int", ":", "row_id", "=", "0", "while", "row_id", "<", "len", "(", "df", ".", "index", ")", ":", "if", "df", "[", "0", "]", "[", "row_id", "]", "==", "\"Unique ID\"", ":", "return", "row_id", "row_id", "+=", "1", "raise", "ValueError", "(", "\"DataFrame did not contain the expected header row.\"", ")" ]
Identifies the row in a group report containing the column headings.
[ "Identifies", "the", "row", "in", "a", "group", "report", "containing", "the", "column", "headings", "." ]
[ "'''Identifies the row in a group report containing the column headings.\n\n Args:\n df: A pandas DataFrame containing the data from a PAT group report xlsx.\n\n Returns:\n The row number containing the column headings.\n\n Raises:\n ValueError: DataFrame did not contain the expected header row.\n '''" ]
[ { "param": "df", "type": "pd.DataFrame" } ]
{ "returns": [ { "docstring": "The row number containing the column headings.", "docstring_tokens": [ "The", "row", "number", "containing", "the", "column", "headings", "." ], "type": null } ], "raises": [ { "docstring": "DataFrame did not contain the expected header row.", "docstring_tokens": [ "DataFrame", "did", "not", "contain", "the", "expected", "header", "row", "." ], "type": "ValueError" } ], "params": [ { "identifier": "df", "type": "pd.DataFrame", "docstring": "A pandas DataFrame containing the data from a PAT group report xlsx.", "docstring_tokens": [ "A", "pandas", "DataFrame", "containing", "the", "data", "from", "a", "PAT", "group", "report", "xlsx", "." ], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
def locate_header_row(df: pd.DataFrame) -> int: row_id = 0 while row_id < len(df.index): if df[0][row_id] == "Unique ID": return row_id row_id += 1 raise ValueError("DataFrame did not contain the expected header row.")
575
13
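A hedged sketch with a toy DataFrame standing in for a PAT group report; here the 'Unique ID' heading sits in row 1 of column 0:

    import pandas as pd

    df = pd.DataFrame({0: ['Report title', 'Unique ID', 'A123'],
                       1: [None, 'Name', 'Student']})
    print(locate_header_row(df))  # 1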
f850f064224cf1ecd1c1507b33267dd394409aad
juddc/BlenderUnrealWorkflow
addon_unreal_workflow.py
[ "MIT" ]
Python
parse_ucx
<not_specific>
def parse_ucx(name): """ Helper function that takes an object name and returns a 2-tuple consisting of the original object name (without 'UCX_' prefix) and UCX index suffix as an int. https://docs.unrealengine.com/latest/INT/Engine/Content/FBX/StaticMeshes/index.html#collision Will return (None, None) if the input name is not a UCX_ at all. Will return an index of -1 if the input is a UCX, but no index could be parsed. """ if not name.startswith("UCX_"): return (None, None) else: # strip UCX_ prefix name = name[4:] # index starting value idx = -1 # check if this has Blender's duplicated object naming scheme if len(name) > 4: if name[-1].isdigit() and name[-2].isdigit() and name[-3].isdigit() and name[-4] == ".": # add to the index whatever value is in the last 3 digits idx += int(name[-3:]) # strip the numbers and dot from the name name = name[:-4] # extract all characters from the end that are numeric last_digits = [] for i in range(1, len(name)): if name[-i].isdigit(): last_digits.insert(0, name[-i]) else: break # strip the digits off the end of the name name = name[:-len(last_digits)] # if there was a dot or underscore separating the digit, strip that too if name.endswith(".") or name.endswith("_"): name = name[:-1] # convert last digits (an array of digit characters) into an int try: idx += int("".join(last_digits)) except ValueError: # failed to get an index, but this is still a UCX return (name, idx) return (name, idx)
Helper function that takes an object name and returns a 2-tuple consisting of the original object name (without 'UCX_' prefix) and UCX index suffix as an int. https://docs.unrealengine.com/latest/INT/Engine/Content/FBX/StaticMeshes/index.html#collision Will return (None, None) if the input name is not a UCX_ at all. Will return an index of -1 if the input is a UCX, but no index could be parsed.
Helper function that takes an object name and returns a 2-tuple consisting of the original object name (without 'UCX_' prefix) and UCX index suffix as an int. Will return (None, None) if the input name is not a UCX_ at all. Will return an index of -1 if the input is a UCX, but no index could be parsed.
[ "Helper", "function", "that", "takes", "an", "object", "name", "and", "returns", "a", "2", "-", "tuple", "consisting", "of", "the", "original", "object", "name", "(", "without", "'", "UCX_", "'", "prefix", ")", "and", "UCX", "index", "suffix", "as", "an", "int", ".", "Will", "return", "(", "None", "None", ")", "if", "the", "input", "name", "is", "not", "a", "UCX_", "at", "all", ".", "Will", "return", "an", "index", "of", "-", "1", "if", "the", "input", "is", "a", "UCX", "but", "no", "index", "could", "be", "parsed", "." ]
def parse_ucx(name): if not name.startswith("UCX_"): return (None, None) else: name = name[4:] idx = -1 if len(name) > 4: if name[-1].isdigit() and name[-2].isdigit() and name[-3].isdigit() and name[-4] == ".": idx += int(name[-3:]) name = name[:-4] last_digits = [] for i in range(1, len(name)): if name[-i].isdigit(): last_digits.insert(0, name[-i]) else: break name = name[:-len(last_digits)] if name.endswith(".") or name.endswith("_"): name = name[:-1] try: idx += int("".join(last_digits)) except ValueError: return (name, idx) return (name, idx)
[ "def", "parse_ucx", "(", "name", ")", ":", "if", "not", "name", ".", "startswith", "(", "\"UCX_\"", ")", ":", "return", "(", "None", ",", "None", ")", "else", ":", "name", "=", "name", "[", "4", ":", "]", "idx", "=", "-", "1", "if", "len", "(", "name", ")", ">", "4", ":", "if", "name", "[", "-", "1", "]", ".", "isdigit", "(", ")", "and", "name", "[", "-", "2", "]", ".", "isdigit", "(", ")", "and", "name", "[", "-", "3", "]", ".", "isdigit", "(", ")", "and", "name", "[", "-", "4", "]", "==", "\".\"", ":", "idx", "+=", "int", "(", "name", "[", "-", "3", ":", "]", ")", "name", "=", "name", "[", ":", "-", "4", "]", "last_digits", "=", "[", "]", "for", "i", "in", "range", "(", "1", ",", "len", "(", "name", ")", ")", ":", "if", "name", "[", "-", "i", "]", ".", "isdigit", "(", ")", ":", "last_digits", ".", "insert", "(", "0", ",", "name", "[", "-", "i", "]", ")", "else", ":", "break", "name", "=", "name", "[", ":", "-", "len", "(", "last_digits", ")", "]", "if", "name", ".", "endswith", "(", "\".\"", ")", "or", "name", ".", "endswith", "(", "\"_\"", ")", ":", "name", "=", "name", "[", ":", "-", "1", "]", "try", ":", "idx", "+=", "int", "(", "\"\"", ".", "join", "(", "last_digits", ")", ")", "except", "ValueError", ":", "return", "(", "name", ",", "idx", ")", "return", "(", "name", ",", "idx", ")" ]
Helper function that takes an object name and returns a 2-tuple consisting of the original object name (without 'UCX_' prefix) and UCX index suffix as an int.
[ "Helper", "function", "that", "takes", "an", "object", "name", "and", "returns", "a", "2", "-", "tuple", "consisting", "of", "the", "original", "object", "name", "(", "without", "'", "UCX_", "'", "prefix", ")", "and", "UCX", "index", "suffix", "as", "an", "int", "." ]
[ "\"\"\"\r\n Helper function that takes an object name and returns a 2-tuple consisting of\r\n the original object name (without 'UCX_' prefix) and UCX index suffix as an int.\r\n\r\n https://docs.unrealengine.com/latest/INT/Engine/Content/FBX/StaticMeshes/index.html#collision\r\n\r\n Will return (None, None) if the input name is not a UCX_ at all.\r\n\r\n Will return an index of -1 if the input is a UCX, but no index could be parsed.\r\n \"\"\"", "# strip UCX_ prefix\r", "# index starting value\r", "# check if this has Blender's duplicated object naming scheme\r", "# add to the index whatever value is in the last 3 digits\r", "# strip the numbers and dot from the name\r", "# extract all characters from the end that are numeric\r", "# strip the digits off the end of the name\r", "# if there was a dot or underscore separating the digit, strip that too\r", "# convert last digits (an array of digit characters) into an int\r", "# failed to get an index, but this is still a UCX\r" ]
[ { "param": "name", "type": null } ]
{ "returns": [], "raises": [], "params": [ { "identifier": "name", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
def parse_ucx(name): if not name.startswith("UCX_"): return (None, None) else: name = name[4:] idx = -1 if len(name) > 4: if name[-1].isdigit() and name[-2].isdigit() and name[-3].isdigit() and name[-4] == ".": idx += int(name[-3:]) name = name[:-4] last_digits = [] for i in range(1, len(name)): if name[-i].isdigit(): last_digits.insert(0, name[-i]) else: break name = name[:-len(last_digits)] if name.endswith(".") or name.endswith("_"): name = name[:-1] try: idx += int("".join(last_digits)) except ValueError: return (name, idx) return (name, idx)
577
939
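Illustrative calls to parse_ucx (the object names are made up):

    print(parse_ucx('UCX_Crate_01'))      # ('Crate', 0)
    print(parse_ucx('UCX_Crate_01.002'))  # ('Crate', 2), via Blender's .00N duplicate suffix
    print(parse_ucx('Crate'))             # (None, None)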
b4ddd0fe0e7ada51cfd5dfa19e371d4ff5b86648
revanwabale/SPARK_WORKS
SPARK_ALGO_BIGQUERY_BIGTABLE/d_data_generator.py
[ "Apache-2.0" ]
Python
timestamp_conversion
<not_specific>
def timestamp_conversion(timestamp): ''' Converting UTC timestamp to unix epoch timestamp ''' import calendar ts=timestamp[0:19] t=time.strptime(ts,"%Y-%m-%d %H:%M:%S") return(calendar.timegm(t))
Converting UTC timestamp to unix epoch timestamp
Converting UTC timestamp to unix epoch timestamp
[ "Converting", "UTC", "timestamp", "to", "unix", "epoch", "timestamp" ]
def timestamp_conversion(timestamp): import calendar ts=timestamp[0:19] t=time.strptime(ts,"%Y-%m-%d %H:%M:%S") return(calendar.timegm(t))
[ "def", "timestamp_conversion", "(", "timestamp", ")", ":", "import", "calendar", "ts", "=", "timestamp", "[", "0", ":", "19", "]", "t", "=", "time", ".", "strptime", "(", "ts", ",", "\"%Y-%m-%d %H:%M:%S\"", ")", "return", "(", "calendar", ".", "timegm", "(", "t", ")", ")" ]
Converting UTC timestamp to unix epoch timestamp
[ "Converting", "UTC", "timestamp", "to", "unix", "epoch", "timestamp" ]
[ "'''\n Converting UTC timestamp to unix epoch timestamp\n '''" ]
[ { "param": "timestamp", "type": null } ]
{ "returns": [], "raises": [], "params": [ { "identifier": "timestamp", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
import time import calendar def timestamp_conversion(timestamp): import calendar ts=timestamp[0:19] t=time.strptime(ts,"%Y-%m-%d %H:%M:%S") return(calendar.timegm(t))
579
205
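A quick check with an illustrative timestamp string (time must be imported at module level, as in code_with_imports):

    print(timestamp_conversion('2021-01-01 00:00:00'))  # 1609459200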
1b6dab834b889709b6b5704c2b78999cb8da8a03
co-map-v/co-map-v.github.io
comapv/data/data_cleaning.py
[ "BSD-2-Clause" ]
Python
merge_data
<not_specific>
def merge_data(data_pos, data_death, pop): """ Merge data_pos, data_death, and pop together as one dataframe Args: dadta_pos (dataframe): pandas dataframe data_death (dataframe): pandas dataframe Returns: One pandas dataframe """ incorrect_dtypes = [] columns = ['fips_code','county','population_2010'] incorrect_columns = [] for column in columns: if column not in pop: incorrect_columns.append(column) else: pass if incorrect_columns != []: raise NameError('No column named ' + str(incorrect_columns)) for column in pop.columns: if column == 'fips_code' or column == 'population_2010': if pop[column].dtype != 'int64': incorrect_dtypes.append(column) else: if pop[column].dtype != 'object': incorrect_dtypes.append(column) if incorrect_dtypes != []: raise ValueError('The following columns have incorrect dtypes: ', incorrect_dtypes) data_flat = data_pos.merge (data_death, how='outer') data_flat_pop_fips = data_flat.merge (pop, how='outer') data_flat_pop_fips.fillna(value=0, inplace=True) #filling in the NaN with 0 return data_flat_pop_fips
Merge data_pos, data_death, and pop together as one dataframe Args: dadta_pos (dataframe): pandas dataframe data_death (dataframe): pandas dataframe Returns: One pandas dataframe
Merge data_pos, data_death, and pop together as one dataframe
[ "Merge", "data_pos", "data_death", "and", "pop", "together", "as", "one", "dataframe" ]
def merge_data(data_pos, data_death, pop): incorrect_dtypes = [] columns = ['fips_code','county','population_2010'] incorrect_columns = [] for column in columns: if column not in pop: incorrect_columns.append(column) else: pass if incorrect_columns != []: raise NameError('No column named ' + str(incorrect_columns)) for column in pop.columns: if column == 'fips_code' or column == 'population_2010': if pop[column].dtype != 'int64': incorrect_dtypes.append(column) else: if pop[column].dtype != 'object': incorrect_dtypes.append(column) if incorrect_dtypes != []: raise ValueError('The following columns have incorrect dtypes: ', incorrect_dtypes) data_flat = data_pos.merge (data_death, how='outer') data_flat_pop_fips = data_flat.merge (pop, how='outer') data_flat_pop_fips.fillna(value=0, inplace=True) return data_flat_pop_fips
[ "def", "merge_data", "(", "data_pos", ",", "data_death", ",", "pop", ")", ":", "incorrect_dtypes", "=", "[", "]", "columns", "=", "[", "'fips_code'", ",", "'county'", ",", "'population_2010'", "]", "incorrect_columns", "=", "[", "]", "for", "column", "in", "columns", ":", "if", "column", "not", "in", "pop", ":", "incorrect_columns", ".", "append", "(", "column", ")", "else", ":", "pass", "if", "incorrect_columns", "!=", "[", "]", ":", "raise", "NameError", "(", "'No column named '", "+", "str", "(", "incorrect_columns", ")", ")", "for", "column", "in", "pop", ".", "columns", ":", "if", "column", "==", "'fips_code'", "or", "column", "==", "'population_2010'", ":", "if", "pop", "[", "column", "]", ".", "dtype", "!=", "'int64'", ":", "incorrect_dtypes", ".", "append", "(", "column", ")", "else", ":", "if", "pop", "[", "column", "]", ".", "dtype", "!=", "'object'", ":", "incorrect_dtypes", ".", "append", "(", "column", ")", "if", "incorrect_dtypes", "!=", "[", "]", ":", "raise", "ValueError", "(", "'The following columns have incorrect dtypes: '", ",", "incorrect_dtypes", ")", "data_flat", "=", "data_pos", ".", "merge", "(", "data_death", ",", "how", "=", "'outer'", ")", "data_flat_pop_fips", "=", "data_flat", ".", "merge", "(", "pop", ",", "how", "=", "'outer'", ")", "data_flat_pop_fips", ".", "fillna", "(", "value", "=", "0", ",", "inplace", "=", "True", ")", "return", "data_flat_pop_fips" ]
Merge data_pos, data_death, and pop together as one dataframe
[ "Merge", "data_pos", "data_death", "and", "pop", "together", "as", "one", "dataframe" ]
[ "\"\"\"\n Merge data_pos, data_death, and pop together as one dataframe\n\n Args:\n dadta_pos (dataframe): pandas dataframe\n data_death (dataframe): pandas dataframe\n\n Returns:\n One pandas dataframe\n \"\"\"", "#filling in the NaN with 0" ]
[ { "param": "data_pos", "type": null }, { "param": "data_death", "type": null }, { "param": "pop", "type": null } ]
{ "returns": [ { "docstring": "One pandas dataframe", "docstring_tokens": [ "One", "pandas", "dataframe" ], "type": null } ], "raises": [], "params": [ { "identifier": "data_pos", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null }, { "identifier": "data_death", "type": null, "docstring": null, "docstring_tokens": [ "None" ], "default": null, "is_optional": false }, { "identifier": "pop", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null } ], "outlier_params": [ { "identifier": "dadta_pos", "type": null, "docstring": null, "docstring_tokens": [ "None" ], "default": null, "is_optional": false } ], "others": [] }
def merge_data(data_pos, data_death, pop): incorrect_dtypes = [] columns = ['fips_code','county','population_2010'] incorrect_columns = [] for column in columns: if column not in pop: incorrect_columns.append(column) else: pass if incorrect_columns != []: raise NameError('No column named ' + str(incorrect_columns)) for column in pop.columns: if column == 'fips_code' or column == 'population_2010': if pop[column].dtype != 'int64': incorrect_dtypes.append(column) else: if pop[column].dtype != 'object': incorrect_dtypes.append(column) if incorrect_dtypes != []: raise ValueError('The following columns have incorrect dtypes: ', incorrect_dtypes) data_flat = data_pos.merge (data_death, how='outer') data_flat_pop_fips = data_flat.merge (pop, how='outer') data_flat_pop_fips.fillna(value=0, inplace=True) return data_flat_pop_fips
581
468
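A hedged sketch of merge_data with tiny illustrative frames; pop must carry int64 fips_code and population_2010 columns plus an object-dtype county column to pass the validation loop:

    import pandas as pd

    data_pos = pd.DataFrame({'county': ['Denver'], 'positives': [100]})
    data_death = pd.DataFrame({'county': ['Denver'], 'deaths': [5]})
    pop = pd.DataFrame({'fips_code': [8031], 'county': ['Denver'],
                        'population_2010': [600158]})
    print(merge_data(data_pos, data_death, pop))
    #    county  positives  deaths  fips_code  population_2010
    # 0  Denver        100       5       8031           600158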
2c030a3c44ac35008d6db90f5c7413cf8239b795
wthamisupposedtowritethere/Simple-Backtest-Environment
src/btengine/financefunctions.py
[ "MIT" ]
Python
skewness
<not_specific>
def skewness(r): ''' ARGS: Series or Dataframe RETURNS: Float or a series data with the calculated skewness ''' # Calculate the demeaned returns demeaned_r = r - r.mean() # Use the population standard deviation, ddof=0 sigma_r = r.std(ddof=0) # Calculate the expectation of the demeaned returns raised to the third power exp = (demeaned_r**3).mean() # Calculate the skew if(sigma_r != 0): skew = exp/sigma_r**3 return skew return exp
ARGS: Series or Dataframe RETURNS: Float or a series data with the calculated skewness
Series or Dataframe RETURNS: Float or a series data with the calculated skewness
[ "Series", "or", "Dataframe", "RETURNS", ":", "Float", "or", "a", "series", "data", "with", "the", "calculated", "skewness" ]
def skewness(r): demeaned_r = r - r.mean() sigma_r = r.std(ddof=0) exp = (demeaned_r**3).mean() if(sigma_r != 0): skew = exp/sigma_r**3 return skew return exp
[ "def", "skewness", "(", "r", ")", ":", "demeaned_r", "=", "r", "-", "r", ".", "mean", "(", ")", "sigma_r", "=", "r", ".", "std", "(", "ddof", "=", "0", ")", "exp", "=", "(", "demeaned_r", "**", "3", ")", ".", "mean", "(", ")", "if", "(", "sigma_r", "!=", "0", ")", ":", "skew", "=", "exp", "/", "sigma_r", "**", "3", "return", "skew", "return", "exp" ]
ARGS: Series or Dataframe RETURNS: Float or a series data with the calculated skewness
[ "ARGS", ":", "Series", "or", "Dataframe", "RETURNS", ":", "Float", "or", "a", "series", "data", "with", "the", "calculated", "skewness" ]
[ "'''\r\n ARGS:\r\n Series or Dataframe\r\n RETURNS: \r\n Float or a series data with the calculated skewness\r\n '''", "# Calculate the demeaned returns \r", "# Use the population standard deviation, ddof=0\r", "# Calculate the expectation of the demeaned returns raised to the third power\r", "# Calculate the skew\r" ]
[ { "param": "r", "type": null } ]
{ "returns": [], "raises": [], "params": [ { "identifier": "r", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
def skewness(r): demeaned_r = r - r.mean() sigma_r = r.std(ddof=0) exp = (demeaned_r**3).mean() if(sigma_r != 0): skew = exp/sigma_r**3 return skew return exp
582
543
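A quick check of skewness with an illustrative return series:

    import pandas as pd

    r = pd.Series([0.01, -0.02, 0.03, 0.10])
    print(skewness(r))  # positive, since the 0.10 outlier skews the sample right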
976454db1a93b0cca814bce700ee51458b280b1d
shopuz/galaxy_docker
export_user_files.py
[ "MIT", "Unlicense" ]
Python
change_path
null
def change_path( src ): """ src will be copied to /export/`src` and a symlink will be placed in src pointing to /export/ """ if os.path.exists( src ): dest = os.path.join( '/export/', src.strip('/') ) # if destination is empty move all files into /export/ and symlink back to source if not os.path.exists( dest ): dest_dir = os.path.dirname(dest) if not os.path.exists( dest_dir ): os.makedirs(dest_dir) shutil.move( src, dest ) os.symlink( dest, src.rstrip('/') ) # if destination exists (e.g. continuing a previous session), remove source and symlink else: if os.path.isdir( src ): shutil.rmtree( src ) else: os.unlink( src ) os.symlink( dest, src.rstrip('/') )
src will be copied to /export/`src` and a symlink will be placed in src pointing to /export/
src will be copied to /export/`src` and a symlink will be placed in src pointing to /export
[ "src", "will", "be", "copied", "to", "/", "export", "/", "`", "src", "`", "and", "a", "symlink", "will", "be", "placed", "in", "src", "pointing", "to", "/", "export" ]
def change_path( src ): if os.path.exists( src ): dest = os.path.join( '/export/', src.strip('/') ) if not os.path.exists( dest ): dest_dir = os.path.dirname(dest) if not os.path.exists( dest_dir ): os.makedirs(dest_dir) shutil.move( src, dest ) os.symlink( dest, src.rstrip('/') ) else: if os.path.isdir( src ): shutil.rmtree( src ) else: os.unlink( src ) os.symlink( dest, src.rstrip('/') )
[ "def", "change_path", "(", "src", ")", ":", "if", "os", ".", "path", ".", "exists", "(", "src", ")", ":", "dest", "=", "os", ".", "path", ".", "join", "(", "'/export/'", ",", "src", ".", "strip", "(", "'/'", ")", ")", "if", "not", "os", ".", "path", ".", "exists", "(", "dest", ")", ":", "dest_dir", "=", "os", ".", "path", ".", "dirname", "(", "dest", ")", "if", "not", "os", ".", "path", ".", "exists", "(", "dest_dir", ")", ":", "os", ".", "makedirs", "(", "dest_dir", ")", "shutil", ".", "move", "(", "src", ",", "dest", ")", "os", ".", "symlink", "(", "dest", ",", "src", ".", "rstrip", "(", "'/'", ")", ")", "else", ":", "if", "os", ".", "path", ".", "isdir", "(", "src", ")", ":", "shutil", ".", "rmtree", "(", "src", ")", "else", ":", "os", ".", "unlink", "(", "src", ")", "os", ".", "symlink", "(", "dest", ",", "src", ".", "rstrip", "(", "'/'", ")", ")" ]
src will be copied to /export/`src` and a symlink will be placed in src pointing to /export
[ "src", "will", "be", "copied", "to", "/", "export", "/", "`", "src", "`", "and", "a", "symlink", "will", "be", "placed", "in", "src", "pointing", "to", "/", "export" ]
[ "\"\"\"\n src will be copied to /export/`src` and a symlink will be placed in src pointing to /export/\n \"\"\"", "# if destination is empty move all files into /export/ and symlink back to source", "# if destination exists (e.g. continuing a previous session), remove source and symlink" ]
[ { "param": "src", "type": null } ]
{ "returns": [], "raises": [], "params": [ { "identifier": "src", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
import shutil import os def change_path( src ): if os.path.exists( src ): dest = os.path.join( '/export/', src.strip('/') ) if not os.path.exists( dest ): dest_dir = os.path.dirname(dest) if not os.path.exists( dest_dir ): os.makedirs(dest_dir) shutil.move( src, dest ) os.symlink( dest, src.rstrip('/') ) else: if os.path.isdir( src ): shutil.rmtree( src ) else: os.unlink( src ) os.symlink( dest, src.rstrip('/') )
583
256
1c41a46a988da3d4e057e2e594df382646f14b9c
nearspacelabs/stac-client-python
nsl/stac/utils.py
[ "Apache-2.0" ]
Python
has_asset
<not_specific>
def has_asset(stac_item: StacItem, asset: Asset): """ check whether a stac_item has a perfect match to the provided asset :param stac_item: stac item whose assets we're checking against asset :param asset: asset we're looking for in stac_item :return: """ for test_asset in stac_item.assets.values(): b_matches = True for field in test_asset.DESCRIPTOR.fields: if getattr(test_asset, field.name) != getattr(asset, field.name): b_matches = False break if b_matches: return b_matches return False
check whether a stac_item has a perfect match to the provided asset :param stac_item: stac item whose assets we're checking against asset :param asset: asset we're looking for in stac_item :return:
check whether a stac_item has a perfect match to the provided asset
[ "check", "whether", "a", "stac_item", "has", "a", "perfect", "match", "to", "the", "provided", "asset" ]
def has_asset(stac_item: StacItem, asset: Asset): for test_asset in stac_item.assets.values(): b_matches = True for field in test_asset.DESCRIPTOR.fields: if getattr(test_asset, field.name) != getattr(asset, field.name): b_matches = False break if b_matches: return b_matches return False
[ "def", "has_asset", "(", "stac_item", ":", "StacItem", ",", "asset", ":", "Asset", ")", ":", "for", "test_asset", "in", "stac_item", ".", "assets", ".", "values", "(", ")", ":", "b_matches", "=", "True", "for", "field", "in", "test_asset", ".", "DESCRIPTOR", ".", "fields", ":", "if", "getattr", "(", "test_asset", ",", "field", ".", "name", ")", "!=", "getattr", "(", "asset", ",", "field", ".", "name", ")", ":", "b_matches", "=", "False", "break", "if", "b_matches", ":", "return", "b_matches", "return", "False" ]
check whether a stac_item has a perfect match to the provided asset
[ "check", "whether", "a", "stac_item", "has", "a", "perfect", "match", "to", "the", "provided", "asset" ]
[ "\"\"\"\n check whether a stac_item has a perfect match to the provided asset\n :param stac_item: stac item whose assets we're checking against asset\n :param asset: asset we're looking for in stac_item\n :return:\n \"\"\"" ]
[ { "param": "stac_item", "type": "StacItem" }, { "param": "asset", "type": "Asset" } ]
{ "returns": [ { "docstring": null, "docstring_tokens": [ "None" ], "type": null } ], "raises": [], "params": [ { "identifier": "stac_item", "type": "StacItem", "docstring": "stac item whose assets we're checking against asset", "docstring_tokens": [ "stac", "item", "whose", "assets", "we", "'", "re", "checking", "against", "asset" ], "default": null, "is_optional": null }, { "identifier": "asset", "type": "Asset", "docstring": "asset we're looking for in stac_item", "docstring_tokens": [ "asset", "we", "'", "re", "looking", "for", "in", "stac_item" ], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
def has_asset(stac_item: StacItem, asset: Asset): for test_asset in stac_item.assets.values(): b_matches = True for field in test_asset.DESCRIPTOR.fields: if getattr(test_asset, field.name) != getattr(asset, field.name): b_matches = False break if b_matches: return b_matches return False
584
779
234b6d0bde9a8028684f305dc233e3f06681d3e5
FabianWe/schulze_voting
schulze_voting/schulze_voting.py
[ "MIT" ]
Python
compute_p
<not_specific>
def compute_p(d, n): """Compute the matrix p given the matrix d. Args: d (list of list of int): The matrix d. n (int): Number of options in the vote. Returns: list of list of int: The matrix p. """ res = [[0 for _ in range(n)] for _ in range(n)] for i in range(n): for j in range(n): if i != j: if d[i][j] > d[j][i]: res[i][j] = d[i][j] for i in range(n): for j in range(n): if i == j: continue for k in range(n): if i == k or j == k: continue res[j][k] = max(res[j][k], min(res[j][i], res[i][k])) return res
Compute the matrix p given the matrix d. Args: d (list of list of int): The matrix d. n (int): Number of options in the vote. Returns: list of list of int: The matrix p.
Compute the matrix p given the matrix d.
[ "Compute", "the", "matrix", "p", "given", "the", "matrix", "d", "." ]
def compute_p(d, n): res = [[0 for _ in range(n)] for _ in range(n)] for i in range(n): for j in range(n): if i != j: if d[i][j] > d[j][i]: res[i][j] = d[i][j] for i in range(n): for j in range(n): if i == j: continue for k in range(n): if i == k or j == k: continue res[j][k] = max(res[j][k], min(res[j][i], res[i][k])) return res
[ "def", "compute_p", "(", "d", ",", "n", ")", ":", "res", "=", "[", "[", "0", "for", "_", "in", "range", "(", "n", ")", "]", "for", "_", "in", "range", "(", "n", ")", "]", "for", "i", "in", "range", "(", "n", ")", ":", "for", "j", "in", "range", "(", "n", ")", ":", "if", "i", "!=", "j", ":", "if", "d", "[", "i", "]", "[", "j", "]", ">", "d", "[", "j", "]", "[", "i", "]", ":", "res", "[", "i", "]", "[", "j", "]", "=", "d", "[", "i", "]", "[", "j", "]", "for", "i", "in", "range", "(", "n", ")", ":", "for", "j", "in", "range", "(", "n", ")", ":", "if", "i", "==", "j", ":", "continue", "for", "k", "in", "range", "(", "n", ")", ":", "if", "i", "==", "k", "or", "j", "==", "k", ":", "continue", "res", "[", "j", "]", "[", "k", "]", "=", "max", "(", "res", "[", "j", "]", "[", "k", "]", ",", "min", "(", "res", "[", "j", "]", "[", "i", "]", ",", "res", "[", "i", "]", "[", "k", "]", ")", ")", "return", "res" ]
Compute the matrix p given the matrix d.
[ "Compute", "the", "matrix", "p", "given", "the", "matrix", "d", "." ]
[ "\"\"\"Compute the matrix p given the matrix d.\n\n Args:\n d (list of list of int): The matrix d.\n n (int): Number of options in the vote.\n\n Returns:\n list of list of int: The matrix p.\n \"\"\"" ]
[ { "param": "d", "type": null }, { "param": "n", "type": null } ]
{ "returns": [ { "docstring": "list of list of int: The matrix p.", "docstring_tokens": [ "list", "of", "list", "of", "int", ":", "The", "matrix", "p", "." ], "type": null } ], "raises": [], "params": [ { "identifier": "d", "type": null, "docstring": "The matrix d.", "docstring_tokens": [ "The", "matrix", "d", "." ], "default": null, "is_optional": false }, { "identifier": "n", "type": null, "docstring": "Number of options in the vote.", "docstring_tokens": [ "Number", "of", "options", "in", "the", "vote", "." ], "default": null, "is_optional": false } ], "outlier_params": [], "others": [] }
def compute_p(d, n): res = [[0 for _ in range(n)] for _ in range(n)] for i in range(n): for j in range(n): if i != j: if d[i][j] > d[j][i]: res[i][j] = d[i][j] for i in range(n): for j in range(n): if i == j: continue for k in range(n): if i == k or j == k: continue res[j][k] = max(res[j][k], min(res[j][i], res[i][k])) return res
585
184
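Illustrative usage for the compute_p record above (a sketch, not part of the dataset; the 3-option pairwise-preference matrix d is hypothetical and compute_p is assumed to be in scope):

# d[i][j] = number of voters preferring option i over option j.
d = [
    [0, 5, 2],
    [3, 0, 6],
    [4, 1, 0],
]
p = compute_p(d, 3)
# Strongest-path strengths via the Floyd-Warshall-style loops above:
print(p)  # [[0, 5, 5], [4, 0, 6], [4, 4, 0]] -> option 0 is the Schulze winner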
7b9f2a15136ba20afdd3269f800c8dece1df6fff
Warchant/autosar
autosar/base.py
[ "MIT" ]
Python
splitRef
<not_specific>
def splitRef(ref): """splits an autosar url string into an array""" if isinstance(ref,str): if ref[0]=='/': return ref[1:].split('/') else: return ref.split('/') return None
splits an autosar url string into an array
splits an autosar url string into an array
[ "splits", "an", "autosar", "url", "string", "into", "an", "array" ]
def splitRef(ref): if isinstance(ref,str): if ref[0]=='/': return ref[1:].split('/') else: return ref.split('/') return None
[ "def", "splitRef", "(", "ref", ")", ":", "if", "isinstance", "(", "ref", ",", "str", ")", ":", "if", "ref", "[", "0", "]", "==", "'/'", ":", "return", "ref", "[", "1", ":", "]", ".", "split", "(", "'/'", ")", "else", ":", "return", "ref", ".", "split", "(", "'/'", ")", "return", "None" ]
splits an autosar url string into an array
[ "splits", "an", "autosar", "url", "string", "into", "an", "array" ]
[ "\"\"\"splits an autosar url string into an array\"\"\"" ]
[ { "param": "ref", "type": null } ]
{ "returns": [], "raises": [], "params": [ { "identifier": "ref", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
def splitRef(ref): if isinstance(ref,str): if ref[0]=='/': return ref[1:].split('/') else: return ref.split('/') return None
586
132
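Illustrative usage for the splitRef record above (not part of the dataset; assumes splitRef is in scope):

print(splitRef('/Package/Element'))  # ['Package', 'Element'] (leading '/' stripped)
print(splitRef('Package/Element'))   # ['Package', 'Element']
print(splitRef(42))                  # None for non-string input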
f97370351a5af1d3dacbc36614e7120d199e1e98
rivoric/adventofcode2021
9/main.py
[ "CC0-1.0" ]
Python
low_sum
int
def low_sum (heightmap: list) -> int: "Find the sum (+1) of all the low points" sum = 0 maxx = len(heightmap[0]) maxy = len(heightmap) for y in range(maxy): for x in range(maxx): low_point = True if x > 0 and heightmap[y][x] >= heightmap[y][x-1]: low_point = False if x < maxx - 1 and heightmap[y][x] >= heightmap[y][x+1]: low_point = False if y > 0 and heightmap[y][x] >= heightmap[y-1][x]: low_point = False if y < maxy - 1 and heightmap[y][x] >= heightmap[y+1][x]: low_point = False if low_point: sum += int(heightmap[y][x]) + 1 return sum
Find the sum (+1) of all the low points
Find the sum (+1) of all the low points
[ "Find", "the", "sum", "(", "+", "1", ")", "of", "all", "the", "low", "points" ]
def low_sum (heightmap: list) -> int: sum = 0 maxx = len(heightmap[0]) maxy = len(heightmap) for y in range(maxy): for x in range(maxx): low_point = True if x > 0 and heightmap[y][x] >= heightmap[y][x-1]: low_point = False if x < maxx - 1 and heightmap[y][x] >= heightmap[y][x+1]: low_point = False if y > 0 and heightmap[y][x] >= heightmap[y-1][x]: low_point = False if y < maxy - 1 and heightmap[y][x] >= heightmap[y+1][x]: low_point = False if low_point: sum += int(heightmap[y][x]) + 1 return sum
[ "def", "low_sum", "(", "heightmap", ":", "list", ")", "->", "int", ":", "sum", "=", "0", "maxx", "=", "len", "(", "heightmap", "[", "0", "]", ")", "maxy", "=", "len", "(", "heightmap", ")", "for", "y", "in", "range", "(", "maxy", ")", ":", "for", "x", "in", "range", "(", "maxx", ")", ":", "low_point", "=", "True", "if", "x", ">", "0", "and", "heightmap", "[", "y", "]", "[", "x", "]", ">=", "heightmap", "[", "y", "]", "[", "x", "-", "1", "]", ":", "low_point", "=", "False", "if", "x", "<", "maxx", "-", "1", "and", "heightmap", "[", "y", "]", "[", "x", "]", ">=", "heightmap", "[", "y", "]", "[", "x", "+", "1", "]", ":", "low_point", "=", "False", "if", "y", ">", "0", "and", "heightmap", "[", "y", "]", "[", "x", "]", ">=", "heightmap", "[", "y", "-", "1", "]", "[", "x", "]", ":", "low_point", "=", "False", "if", "y", "<", "maxy", "-", "1", "and", "heightmap", "[", "y", "]", "[", "x", "]", ">=", "heightmap", "[", "y", "+", "1", "]", "[", "x", "]", ":", "low_point", "=", "False", "if", "low_point", ":", "sum", "+=", "int", "(", "heightmap", "[", "y", "]", "[", "x", "]", ")", "+", "1", "return", "sum" ]
Find the sum (+1) of all the low points
[ "Find", "the", "sum", "(", "+", "1", ")", "of", "all", "the", "low", "points" ]
[ "\"Find the sum (+1) of all the low points\"" ]
[ { "param": "heightmap", "type": "list" } ]
{ "returns": [], "raises": [], "params": [ { "identifier": "heightmap", "type": "list", "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
def low_sum (heightmap: list) -> int: sum = 0 maxx = len(heightmap[0]) maxy = len(heightmap) for y in range(maxy): for x in range(maxx): low_point = True if x > 0 and heightmap[y][x] >= heightmap[y][x-1]: low_point = False if x < maxx - 1 and heightmap[y][x] >= heightmap[y][x+1]: low_point = False if y > 0 and heightmap[y][x] >= heightmap[y-1][x]: low_point = False if y < maxy - 1 and heightmap[y][x] >= heightmap[y+1][x]: low_point = False if low_point: sum += int(heightmap[y][x]) + 1 return sum
587
272
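Illustrative usage for the low_sum record above (not part of the dataset; the height map is hypothetical). Rows may be strings, since neighbours are compared character-wise and only the low points are cast to int:

heightmap = ["2199", "3987", "9856"]
print(low_sum(heightmap))  # 8: low points 1 and 5, summed as (1+1) + (5+1)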
190e0ce02de20d442313205e211bd2c9d127f875
EtiennePerot/submerger
submerger.py
[ "WTFPL" ]
Python
excludeAss
null
def excludeAss(doc, reg): """Remove all styles and events with the style name matching the given regex.""" badStyles = frozenset(s.name for s in doc.styles if reg.match(s.name)) for s in list(doc.styles): if s.name in badStyles: doc.styles.remove(s) for e in list(doc.events): if e.style in badStyles: doc.events.remove(e)
Remove all styles and events with the style name matching the given regex.
Remove all styles and events with the style name matching the given regex.
[ "Remove", "all", "styles", "and", "events", "with", "the", "style", "name", "matching", "the", "given", "regex", "." ]
def excludeAss(doc, reg): badStyles = frozenset(s.name for s in doc.styles if reg.match(s.name)) for s in list(doc.styles): if s.name in badStyles: doc.styles.remove(s) for e in list(doc.events): if e.style in badStyles: doc.events.remove(e)
[ "def", "excludeAss", "(", "doc", ",", "reg", ")", ":", "badStyles", "=", "frozenset", "(", "s", ".", "name", "for", "s", "in", "doc", ".", "styles", "if", "reg", ".", "match", "(", "s", ".", "name", ")", ")", "for", "s", "in", "list", "(", "doc", ".", "styles", ")", ":", "if", "s", ".", "name", "in", "badStyles", ":", "doc", ".", "styles", ".", "remove", "(", "s", ")", "for", "e", "in", "list", "(", "doc", ".", "events", ")", ":", "if", "e", ".", "style", "in", "badStyles", ":", "doc", ".", "events", ".", "remove", "(", "e", ")" ]
Remove all styles and events with the style name matching the given regex.
[ "Remove", "all", "styles", "and", "events", "with", "the", "style", "name", "matching", "the", "given", "regex", "." ]
[ "\"\"\"Remove all styles and events with the style name matching the given regex.\"\"\"" ]
[ { "param": "doc", "type": null }, { "param": "reg", "type": null } ]
{ "returns": [], "raises": [], "params": [ { "identifier": "doc", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null }, { "identifier": "reg", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
def excludeAss(doc, reg): badStyles = frozenset(s.name for s in doc.styles if reg.match(s.name)) for s in list(doc.styles): if s.name in badStyles: doc.styles.remove(s) for e in list(doc.events): if e.style in badStyles: doc.events.remove(e)
588
170
d5718ef6818f683b23b9723c9e4154dadde77a27
Serpinex3/rx-anon
anon/nlp/similarity_module.py
[ "BSD-3-Clause" ]
Python
compare_complete_match
<not_specific>
def compare_complete_match(span1, span2): """ Compare two spans and return true, if lower words in those spans are equal (stop words excluded) Parameters ---------- span1: Span First span to compare. span2: Span Second span to compare. Returns ------- bool True if spans match. """ if " ".join([t.lemma_.lower() for t in span1 if not t.is_stop]) == " ".join([a.lemma_.lower() for a in span2 if not a.is_stop]): return True return False
Compare two spans and return true, if lower words in those spans are equal (stop words excluded) Parameters ---------- span1: Span First span to compare. span2: Span Second span to compare. Returns ------- bool True if spans match.
Compare two spans and return true, if lower words in those spans are equal (stop words excluded) Parameters Span First span to compare. span2: Span Second span to compare. Returns bool True if spans match.
[ "Compare", "two", "spans", "and", "return", "true", "if", "lower", "words", "in", "those", "spans", "are", "equal", "(", "stop", "words", "excluded", ")", "Parameters", "Span", "First", "span", "to", "compare", ".", "span2", ":", "Span", "Second", "span", "to", "compare", ".", "Returns", "bool", "True", "if", "spans", "match", "." ]
def compare_complete_match(span1, span2): if " ".join([t.lemma_.lower() for t in span1 if not t.is_stop]) == " ".join([a.lemma_.lower() for a in span2 if not a.is_stop]): return True return False
[ "def", "compare_complete_match", "(", "span1", ",", "span2", ")", ":", "if", "\" \"", ".", "join", "(", "[", "t", ".", "lemma_", ".", "lower", "(", ")", "for", "t", "in", "span1", "if", "not", "t", ".", "is_stop", "]", ")", "==", "\" \"", ".", "join", "(", "[", "a", ".", "lemma_", ".", "lower", "(", ")", "for", "a", "in", "span2", "if", "not", "a", ".", "is_stop", "]", ")", ":", "return", "True", "return", "False" ]
Compare two spans and return true, if lower words in those spans are equal (stop words excluded) Parameters
[ "Compare", "two", "spans", "and", "return", "true", "if", "lower", "words", "in", "those", "spans", "are", "equal", "(", "stop", "words", "excluded", ")", "Parameters" ]
[ "\"\"\"\n Compare two spans and return true, if lower words in those spans are equal (stop words excluded)\n Parameters\n ----------\n span1: Span\n First span to compare.\n span2: Span\n Second span to compare.\n Returns\n -------\n bool\n True if spans match.\n \"\"\"" ]
[ { "param": "span1", "type": null }, { "param": "span2", "type": null } ]
{ "returns": [], "raises": [], "params": [ { "identifier": "span1", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null }, { "identifier": "span2", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
def compare_complete_match(span1, span2): if " ".join([t.lemma_.lower() for t in span1 if not t.is_stop]) == " ".join([a.lemma_.lower() for a in span2 if not a.is_stop]): return True return False
589
97
3cdc751ca62821f0c0ac49cf5e56eeef802a0f62
diego-9407/holbertonschool-higher_level_programming
0x0B-python-input_output/4-append_write.py
[ "FSFAP" ]
Python
append_write
<not_specific>
def append_write(filename="", text=""): """ Write a new file or append info if exists Args: filename: string containing the name or "" if not given. text: content of the file Return: number of chars written """ with open(filename, 'a', encoding="utf-8") as fl_opened: return fl_opened.write(text)
Write a new file or append info if exists Args: filename: string containing the name or "" if not given. text: content of the file Return: number of chars written
Write a new file or append info if exists Args: filename: string containing the name or "" if not given. text: content of the file Return: number of chars written
[ "Write", "a", "new", "file", "or", "append", "info", "if", "exists", "Args", ":", "filename", ":", "string", "containing", "the", "name", "or", "\"", "\"", "if", "not", "given", ".", "text", ":", "content", "of", "the", "file", "Return", ":", "number", "of", "chars", "written" ]
def append_write(filename="", text=""): with open(filename, 'a', encoding="utf-8") as fl_opened: return fl_opened.write(text)
[ "def", "append_write", "(", "filename", "=", "\"\"", ",", "text", "=", "\"\"", ")", ":", "with", "open", "(", "filename", ",", "'a'", ",", "encoding", "=", "\"utf-8\"", ")", "as", "fl_opened", ":", "return", "fl_opened", ".", "write", "(", "text", ")" ]
Write a new file or append info if exists Args: filename: string containing the name or "" if not given.
[ "Write", "a", "new", "file", "or", "append", "info", "if", "exists", "Args", ":", "filename", ":", "string", "containing", "the", "name", "or", "\"", "\"", "if", "not", "given", "." ]
[ "\"\"\" Write a new file or append info if exists\n Args:\n filename: string containing the name or \"\" if\n not given.\n text: content of the file\n Return: number of chars written\n \"\"\"" ]
[ { "param": "filename", "type": null }, { "param": "text", "type": null } ]
{ "returns": [], "raises": [], "params": [ { "identifier": "filename", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null }, { "identifier": "text", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
def append_write(filename="", text=""): with open(filename, 'a', encoding="utf-8") as fl_opened: return fl_opened.write(text)
590
389
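Illustrative usage for the append_write record above (not part of the dataset; the file name is hypothetical):

n = append_write("hello.txt", "Hello, world!\n")  # creates the file or appends to it
print(n)  # 14, the number of characters written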
e9a20bdc6e89fbebca97fc29b7984e1604ef2862
nickrobinson/paasta
paasta_tools/paasta_metastatus.py
[ "Apache-2.0" ]
Python
critical_events_in_outputs
<not_specific>
def critical_events_in_outputs(healthcheck_outputs): """Given a list of healthcheck pairs (output, healthy), return those which are unhealthy. """ return [healthcheck for healthcheck in healthcheck_outputs if healthcheck[-1] is False]
Given a list of healthcheck pairs (output, healthy), return those which are unhealthy.
Given a list of healthcheck pairs (output, healthy), return those which are unhealthy.
[ "Given", "a", "list", "of", "healthcheck", "pairs", "(", "output", "healthy", ")", "return", "those", "which", "are", "unhealthy", "." ]
def critical_events_in_outputs(healthcheck_outputs): return [healthcheck for healthcheck in healthcheck_outputs if healthcheck[-1] is False]
[ "def", "critical_events_in_outputs", "(", "healthcheck_outputs", ")", ":", "return", "[", "healthcheck", "for", "healthcheck", "in", "healthcheck_outputs", "if", "healthcheck", "[", "-", "1", "]", "is", "False", "]" ]
Given a list of healthcheck pairs (output, healthy), return those which are unhealthy.
[ "Given", "a", "list", "of", "healthcheck", "pairs", "(", "output", "healthy", ")", "return", "those", "which", "are", "unhealthy", "." ]
[ "\"\"\"Given a list of healthcheck pairs (output, healthy), return\n those which are unhealthy.\n \"\"\"" ]
[ { "param": "healthcheck_outputs", "type": null } ]
{ "returns": [], "raises": [], "params": [ { "identifier": "healthcheck_outputs", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
def critical_events_in_outputs(healthcheck_outputs): return [healthcheck for healthcheck in healthcheck_outputs if healthcheck[-1] is False]
591
996
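Illustrative usage for the critical_events_in_outputs record above (not part of the dataset; the healthcheck pairs are hypothetical):

outputs = [("disk ok", True), ("mesos down", False), ("marathon ok", True)]
print(critical_events_in_outputs(outputs))  # [('mesos down', False)]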
b69835a982e60cb930b34afb80f088665b089f0c
drvinceknight/Nashpy
src/nashpy/algorithms/support_enumeration.py
[ "MIT" ]
Python
obey_support
bool
def obey_support(strategy, support: npt.NDArray, tol: float = 10 ** -16) -> bool: """ Test if a strategy obeys its support Parameters ---------- strategy: array A given strategy vector support: array A strategy support tol : float A tolerance parameter for equality. Returns ------- bool whether or not that strategy does indeed have the given support """ if strategy is False: return False if not all( (i in support and value > tol) or (i not in support and value <= tol) for i, value in enumerate(strategy) ): return False return True
Test if a strategy obeys its support Parameters ---------- strategy: array A given strategy vector support: array A strategy support tol : float A tolerance parameter for equality. Returns ------- bool whether or not that strategy does indeed have the given support
Test if a strategy obeys its support Parameters array A given strategy vector support: array A strategy support tol : float A tolerance parameter for equality. Returns bool whether or not that strategy does indeed have the given support
[ "Test", "if", "a", "strategy", "obeys", "its", "support", "Parameters", "array", "A", "given", "strategy", "vector", "support", ":", "array", "A", "strategy", "support", "tol", ":", "float", "A", "tolerance", "parameter", "for", "equality", ".", "Returns", "bool", "whether", "or", "not", "that", "strategy", "does", "indeed", "have", "the", "given", "support" ]
def obey_support(strategy, support: npt.NDArray, tol: float = 10 ** -16) -> bool: if strategy is False: return False if not all( (i in support and value > tol) or (i not in support and value <= tol) for i, value in enumerate(strategy) ): return False return True
[ "def", "obey_support", "(", "strategy", ",", "support", ":", "npt", ".", "NDArray", ",", "tol", ":", "float", "=", "10", "**", "-", "16", ")", "->", "bool", ":", "if", "strategy", "is", "False", ":", "return", "False", "if", "not", "all", "(", "(", "i", "in", "support", "and", "value", ">", "tol", ")", "or", "(", "i", "not", "in", "support", "and", "value", "<=", "tol", ")", "for", "i", ",", "value", "in", "enumerate", "(", "strategy", ")", ")", ":", "return", "False", "return", "True" ]
Test if a strategy obeys its support Parameters
[ "Test", "if", "a", "strategy", "obeys", "its", "support", "Parameters" ]
[ "\"\"\"\n Test if a strategy obeys its support\n\n Parameters\n ----------\n strategy: array\n A given strategy vector\n support: array\n A strategy support\n tol : float\n A tolerance parameter for equality.\n\n Returns\n -------\n bool\n whether or not that strategy does indeed have the given support\n \"\"\"" ]
[ { "param": "strategy", "type": null }, { "param": "support", "type": "npt.NDArray" }, { "param": "tol", "type": "float" } ]
{ "returns": [], "raises": [], "params": [ { "identifier": "strategy", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null }, { "identifier": "support", "type": "npt.NDArray", "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null }, { "identifier": "tol", "type": "float", "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
def obey_support(strategy, support: npt.NDArray, tol: float = 10 ** -16) -> bool: if strategy is False: return False if not all( (i in support and value > tol) or (i not in support and value <= tol) for i, value in enumerate(strategy) ): return False return True
592
165
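Illustrative usage for the obey_support record above (not part of the dataset; the strategy vector and supports are hypothetical):

import numpy as np

strategy = np.array([0.5, 0.5, 0.0])
print(obey_support(strategy, np.array([0, 1])))  # True: all mass sits on the support
print(obey_support(strategy, np.array([0, 2])))  # False: index 1 carries mass but is not in the support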
d3babda2861d43d31c2fb867897245d4fe9e1ea6
chrhenning/posterior_replay_cl
probabilistic/ewc_args.py
[ "Apache-2.0" ]
Python
check_invalid_args_ewc
null
def check_invalid_args_ewc(config): """Sanity check for some EWC command-line arguments. Args: config: Parsed command-line arguments. """ if config.train_from_scratch and config.ewc_lambda > 0: raise ValueError('CL regularizer has to be turned off when training ' + 'from scratch.') if hasattr(config, 'multi_head') and not config.multi_head and \ config.val_sample_size > 1: warnings.warn('Setting "val_sample_size" to 1.') config.val_sample_size = 1 if hasattr(config, 'cl_scenario') and not (config.cl_scenario == 1 or \ config.cl_scenario == 3 and config.split_head_cl3) and \ config.val_sample_size > 1: warnings.warn('Setting "val_sample_size" to 1.') config.val_sample_size = 1 if hasattr(config, 'det_multi_head') and config.det_multi_head and \ config.val_sample_size > 1: warnings.warn('Dirac posterior used. Setting "val_sample_size" to 1.') config.val_sample_size = 1
Sanity check for some EWC command-line arguments. Args: config: Parsed command-line arguments.
Sanity check for some EWC command-line arguments.
[ "Sanity", "check", "for", "some", "EWC", "command", "-", "line", "arguments", "." ]
def check_invalid_args_ewc(config): if config.train_from_scratch and config.ewc_lambda > 0: raise ValueError('CL regularizer has to be turned off when training ' + 'from scratch.') if hasattr(config, 'multi_head') and not config.multi_head and \ config.val_sample_size > 1: warnings.warn('Setting "val_sample_size" to 1.') config.val_sample_size = 1 if hasattr(config, 'cl_scenario') and not (config.cl_scenario == 1 or \ config.cl_scenario == 3 and config.split_head_cl3) and \ config.val_sample_size > 1: warnings.warn('Setting "val_sample_size" to 1.') config.val_sample_size = 1 if hasattr(config, 'det_multi_head') and config.det_multi_head and \ config.val_sample_size > 1: warnings.warn('Dirac posterior used. Setting "val_sample_size" to 1.') config.val_sample_size = 1
[ "def", "check_invalid_args_ewc", "(", "config", ")", ":", "if", "config", ".", "train_from_scratch", "and", "config", ".", "ewc_lambda", ">", "0", ":", "raise", "ValueError", "(", "'CL regularizer has to be turned off when training '", "+", "'from scratch.'", ")", "if", "hasattr", "(", "config", ",", "'multi_head'", ")", "and", "not", "config", ".", "multi_head", "and", "config", ".", "val_sample_size", ">", "1", ":", "warnings", ".", "warn", "(", "'Setting \"val_sample_size\" to 1.'", ")", "config", ".", "val_sample_size", "=", "1", "if", "hasattr", "(", "config", ",", "'cl_scenario'", ")", "and", "not", "(", "config", ".", "cl_scenario", "==", "1", "or", "config", ".", "cl_scenario", "==", "3", "and", "config", ".", "split_head_cl3", ")", "and", "config", ".", "val_sample_size", ">", "1", ":", "warnings", ".", "warn", "(", "'Setting \"val_sample_size\" to 1.'", ")", "config", ".", "val_sample_size", "=", "1", "if", "hasattr", "(", "config", ",", "'det_multi_head'", ")", "and", "config", ".", "det_multi_head", "and", "config", ".", "val_sample_size", ">", "1", ":", "warnings", ".", "warn", "(", "'Dirac posterior used. Setting \"val_sample_size\" to 1.'", ")", "config", ".", "val_sample_size", "=", "1" ]
Sanity check for some EWC command-line arguments.
[ "Sanity", "check", "for", "some", "EWC", "command", "-", "line", "arguments", "." ]
[ "\"\"\"Sanity check for some EWC command-line arguments.\n\n Args:\n config: Parsed command-line arguments.\n \"\"\"" ]
[ { "param": "config", "type": null } ]
{ "returns": [], "raises": [], "params": [ { "identifier": "config", "type": null, "docstring": "Parsed command-line arguments.", "docstring_tokens": [ "Parsed", "command", "-", "line", "arguments", "." ], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
import warnings def check_invalid_args_ewc(config): if config.train_from_scratch and config.ewc_lambda > 0: raise ValueError('CL regularizer has to be turned off when training ' + 'from scratch.') if hasattr(config, 'multi_head') and not config.multi_head and \ config.val_sample_size > 1: warnings.warn('Setting "val_sample_size" to 1.') config.val_sample_size = 1 if hasattr(config, 'cl_scenario') and not (config.cl_scenario == 1 or \ config.cl_scenario == 3 and config.split_head_cl3) and \ config.val_sample_size > 1: warnings.warn('Setting "val_sample_size" to 1.') config.val_sample_size = 1 if hasattr(config, 'det_multi_head') and config.det_multi_head and \ config.val_sample_size > 1: warnings.warn('Dirac posterior used. Setting "val_sample_size" to 1.') config.val_sample_size = 1
593
789
196150c9a8d4f6f663fa893e2f3310a84943c0ca
osfclient/osf-cli
osfclient/utils.py
[ "BSD-3-Clause" ]
Python
checksum
<not_specific>
def checksum(file_path, hash_type='md5', block_size=65536): """Returns either the md5 or sha256 hash of a file at `file_path`. md5 is the default hash_type as it is faster than sha256 The default block size is 64 kb, which appears to be one of a few common choices according to https://stackoverflow.com/a/44873382/2680. The code below is an extension of the example presented in that post. """ if hash_type == 'md5': hash_ = hashlib.md5() elif hash_type == 'sha256': hash_ = hashlib.sha256() else: raise ValueError( "{} is an invalid hash_type. Expected 'md5' or 'sha256'." .format(hash_type) ) with open(file_path, 'rb') as f: for block in iter(lambda: f.read(block_size), b''): hash_.update(block) return hash_.hexdigest()
Returns either the md5 or sha256 hash of a file at `file_path`. md5 is the default hash_type as it is faster than sha256 The default block size is 64 kb, which appears to be one of a few common choices according to https://stackoverflow.com/a/44873382/2680. The code below is an extension of the example presented in that post.
Returns either the md5 or sha256 hash of a file at `file_path`. md5 is the default hash_type as it is faster than sha256
[ "Returns", "either", "the", "md5", "or", "sha256", "hash", "of", "a", "file", "at", "`", "file_path", "`", ".", "md5", "is", "the", "default", "hash_type", "as", "it", "is", "faster", "than", "sha256" ]
def checksum(file_path, hash_type='md5', block_size=65536): if hash_type == 'md5': hash_ = hashlib.md5() elif hash_type == 'sha256': hash_ = hashlib.sha256() else: raise ValueError( "{} is an invalid hash_type. Expected 'md5' or 'sha256'." .format(hash_type) ) with open(file_path, 'rb') as f: for block in iter(lambda: f.read(block_size), b''): hash_.update(block) return hash_.hexdigest()
[ "def", "checksum", "(", "file_path", ",", "hash_type", "=", "'md5'", ",", "block_size", "=", "65536", ")", ":", "if", "hash_type", "==", "'md5'", ":", "hash_", "=", "hashlib", ".", "md5", "(", ")", "elif", "hash_type", "==", "'sha256'", ":", "hash_", "=", "hashlib", ".", "sha256", "(", ")", "else", ":", "raise", "ValueError", "(", "\"{} is an invalid hash_type. Expected 'md5' or 'sha256'.\"", ".", "format", "(", "hash_type", ")", ")", "with", "open", "(", "file_path", ",", "'rb'", ")", "as", "f", ":", "for", "block", "in", "iter", "(", "lambda", ":", "f", ".", "read", "(", "block_size", ")", ",", "b''", ")", ":", "hash_", ".", "update", "(", "block", ")", "return", "hash_", ".", "hexdigest", "(", ")" ]
Returns either the md5 or sha256 hash of a file at `file_path`.
[ "Returns", "either", "the", "md5", "or", "sha256", "hash", "of", "a", "file", "at", "`", "file_path", "`", "." ]
[ "\"\"\"Returns either the md5 or sha256 hash of a file at `file_path`.\n \n md5 is the default hash_type as it is faster than sha256\n\n The default block size is 64 kb, which appears to be one of a few command\n choices according to https://stackoverflow.com/a/44873382/2680. The code\n below is an extension of the example presented in that post.\n \"\"\"" ]
[ { "param": "file_path", "type": null }, { "param": "hash_type", "type": null }, { "param": "block_size", "type": null } ]
{ "returns": [], "raises": [], "params": [ { "identifier": "file_path", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null }, { "identifier": "hash_type", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null }, { "identifier": "block_size", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
import hashlib def checksum(file_path, hash_type='md5', block_size=65536): if hash_type == 'md5': hash_ = hashlib.md5() elif hash_type == 'sha256': hash_ = hashlib.sha256() else: raise ValueError( "{} is an invalid hash_type. Expected 'md5' or 'sha256'." .format(hash_type) ) with open(file_path, 'rb') as f: for block in iter(lambda: f.read(block_size), b''): hash_.update(block) return hash_.hexdigest()
594
301
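Illustrative usage for the checksum record above (not part of the dataset; the file path is hypothetical):

with open("sample.bin", "wb") as f:
    f.write(b"hello")
print(checksum("sample.bin"))                      # md5: 5d41402abc4b2a76b9719d911017c592
print(checksum("sample.bin", hash_type="sha256"))  # sha256 hex digest of the same bytes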
100c2895fde78c178c6b6b2c406d2bb397fbbb55
aimakerspace/synergos_algorithm
synalgo/interfaces/arguments.py
[ "Apache-2.0" ]
Python
fate_lr_decay
<not_specific>
def fate_lr_decay(initial_lr, lr_decay, epochs): """ FATE's learning rate decay equation Args: initial_lr (float): Initial learning rate specified lr_decay (float): Scaling factor for Learning rate epochs (int): No. of epochs that have passed Returns: Scaled learning rate (float) """ lr = initial_lr / (1 + (lr_decay * epochs)) return lr
FATE's learning rate decay equation Args: initial_lr (float): Initial learning rate specified lr_decay (float): Scaling factor for Learning rate epochs (int): No. of epochs that have passed Returns: Scaled learning rate (float)
FATE's learning rate decay equation
[ "FATE", "'", "s", "learning", "rate", "decay", "equation" ]
def fate_lr_decay(initial_lr, lr_decay, epochs): lr = initial_lr / (1 + (lr_decay * epochs)) return lr
[ "def", "fate_lr_decay", "(", "initial_lr", ",", "lr_decay", ",", "epochs", ")", ":", "lr", "=", "initial_lr", "/", "(", "1", "+", "(", "lr_decay", "*", "epochs", ")", ")", "return", "lr" ]
FATE's learning rate decay equation
[ "FATE", "'", "s", "learning", "rate", "decay", "equation" ]
[ "\"\"\" FATE's learning rate decay equation \r\n \r\n Args:\r\n initial_lr (float): Initial learning rate specified\r\n lr_decay (float): Scaling factor for Learning rate \r\n epochs (int): No. of epochs that have passed\r\n Returns:\r\n Scaled learning rate (float)\r\n \"\"\"" ]
[ { "param": "initial_lr", "type": null }, { "param": "lr_decay", "type": null }, { "param": "epochs", "type": null } ]
{ "returns": [ { "docstring": "Scaled learning rate (float)", "docstring_tokens": [ "Scaled", "learning", "rate", "(", "float", ")" ], "type": null } ], "raises": [], "params": [ { "identifier": "initial_lr", "type": null, "docstring": "Initial learning rate specified", "docstring_tokens": [ "Initial", "learning", "rate", "specified" ], "default": null, "is_optional": false }, { "identifier": "lr_decay", "type": null, "docstring": "Scaling factor for Learning rate", "docstring_tokens": [ "Scaling", "factor", "for", "Learning", "rate" ], "default": null, "is_optional": false }, { "identifier": "epochs", "type": null, "docstring": "No. of epochs that have passed", "docstring_tokens": [ "No", ".", "of", "epochs", "that", "have", "passed" ], "default": null, "is_optional": false } ], "outlier_params": [], "others": [] }
def fate_lr_decay(initial_lr, lr_decay, epochs): lr = initial_lr / (1 + (lr_decay * epochs)) return lr
597
885
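Illustrative usage for the fate_lr_decay record above (not part of the dataset), showing the decay lr = initial_lr / (1 + lr_decay * epochs):

print(fate_lr_decay(0.1, 0.5, 0))  # 0.1 (no decay at epoch 0)
print(fate_lr_decay(0.1, 0.5, 2))  # 0.05, i.e. 0.1 / (1 + 0.5 * 2)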
ff3a71f5de4ed2539e47660309dc793b9b8c2611
WadeBarnes/tfrs
backend/api/migrations/0154_add_address_effective_dates.py
[ "Apache-2.0" ]
Python
add_effective_dates
null
def add_effective_dates(apps, schema_editor): """ Adds effective dates to the organization addresses """ db_alias = schema_editor.connection.alias organization_address = apps.get_model('api', 'OrganizationAddress') organization_address.objects.using(db_alias).update( effective_date="2017-01-01" )
Adds effective dates to the organization addresses
Adds effective dates to the organization addresses
[ "Adds", "effective", "dates", "to", "the", "organization", "addresses" ]
def add_effective_dates(apps, schema_editor): db_alias = schema_editor.connection.alias organization_address = apps.get_model('api', 'OrganizationAddress') organization_address.objects.using(db_alias).update( effective_date="2017-01-01" )
[ "def", "add_effective_dates", "(", "apps", ",", "schema_editor", ")", ":", "db_alias", "=", "schema_editor", ".", "connection", ".", "alias", "organization_address", "=", "apps", ".", "get_model", "(", "'api'", ",", "'OrganizationAddress'", ")", "organization_address", ".", "objects", ".", "using", "(", "db_alias", ")", ".", "update", "(", "effective_date", "=", "\"2017-01-01\"", ")" ]
Adds effective dates to the organization addresses
[ "Adds", "effective", "dates", "to", "the", "organization", "addresses" ]
[ "\"\"\"\n Adds effective dates to the organization addresses\n \"\"\"" ]
[ { "param": "apps", "type": null }, { "param": "schema_editor", "type": null } ]
{ "returns": [], "raises": [], "params": [ { "identifier": "apps", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null }, { "identifier": "schema_editor", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
def add_effective_dates(apps, schema_editor): db_alias = schema_editor.connection.alias organization_address = apps.get_model('api', 'OrganizationAddress') organization_address.objects.using(db_alias).update( effective_date="2017-01-01" )
598
729
53fc84dfc22af776bfd7e37253bb5a995364c5cf
chrisstpierre/storyscript
storyscript/compiler/semantics/functions/Mutation.py
[ "Apache-2.0" ]
Python
compute_arg_names_hash
<not_specific>
def compute_arg_names_hash(keys): """ Converts a list of argument names to a hashable key. """ return hash(tuple(sorted(keys)))
Converts a list of argument names to a hashable key.
Converts a list of argument names to a hashable key.
[ "Converts", "a", "list", "of", "argument", "names", "to", "a", "hashable", "key", "." ]
def compute_arg_names_hash(keys): return hash(tuple(sorted(keys)))
[ "def", "compute_arg_names_hash", "(", "keys", ")", ":", "return", "hash", "(", "tuple", "(", "sorted", "(", "keys", ")", ")", ")" ]
Converts a list of argument names to a hashable key.
[ "Converts", "a", "list", "of", "argument", "names", "to", "a", "hashable", "key", "." ]
[ "\"\"\"\n Converts a list of argument names to a hashable key.\n \"\"\"" ]
[ { "param": "keys", "type": null } ]
{ "returns": [], "raises": [], "params": [ { "identifier": "keys", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
def compute_arg_names_hash(keys): return hash(tuple(sorted(keys)))
600
562
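Illustrative usage for the compute_arg_names_hash record above (not part of the dataset):

a = compute_arg_names_hash(["x", "y"])
b = compute_arg_names_hash(["y", "x"])
print(a == b)  # True: sorting makes the hash depend only on the set of names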
5f109a46323a1d916779a9b7b1998f7db265a1c3
paulswartz/registered
registered/parser.py
[ "MIT" ]
Python
from_line
<not_specific>
def from_line(cls, parts): """ Convert a list of parts to a PatternStop """ [stop_id, timepoint_id, sign_code, revenue_type, _a] = parts return cls(stop_id, timepoint_id, sign_code, revenue_type)
Convert a list of parts to a PatternStop
Convert a list of parts to a PatternStop
[ "Convert", "a", "list", "of", "parts", "to", "a", "PatternStop" ]
def from_line(cls, parts): [stop_id, timepoint_id, sign_code, revenue_type, _a] = parts return cls(stop_id, timepoint_id, sign_code, revenue_type)
[ "def", "from_line", "(", "cls", ",", "parts", ")", ":", "[", "stop_id", ",", "timepoint_id", ",", "sign_code", ",", "revenue_type", ",", "_a", "]", "=", "parts", "return", "cls", "(", "stop_id", ",", "timepoint_id", ",", "sign_code", ",", "revenue_type", ")" ]
Convert a list of parts to a PatternStop
[ "Convert", "a", "list", "of", "parts", "to", "a", "PatternStop" ]
[ "\"\"\"\n Convert a list of parts to a PatternStop\n \"\"\"" ]
[ { "param": "cls", "type": null }, { "param": "parts", "type": null } ]
{ "returns": [], "raises": [], "params": [ { "identifier": "cls", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null }, { "identifier": "parts", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
def from_line(cls, parts): [stop_id, timepoint_id, sign_code, revenue_type, _a] = parts return cls(stop_id, timepoint_id, sign_code, revenue_type)
601
917
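Illustrative usage for the from_line record above (not part of the dataset). The method expects a 5-field line and drops the trailing field; PatternStop here is a hypothetical stand-in for the real class, and the field values are invented:

from collections import namedtuple

PatternStop = namedtuple("PatternStop", "stop_id timepoint_id sign_code revenue_type")
parts = ["stop-1", "tp-1", "sign-1", "revenue", "ignored"]
print(from_line(PatternStop, parts))  # PatternStop(stop_id='stop-1', timepoint_id='tp-1', sign_code='sign-1', revenue_type='revenue')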
e591ac2463279e3d3227d88b7b1484954d7b0ecf
YixiaoZhang/Accuracy-and-Robustness
evaluate.py
[ "BSD-2-Clause" ]
Python
show
<not_specific>
def show(img): """ Show MNIST digits in the console. """ remap = " .*#"+"#"*100 img = (img.flatten()+.5)*3 if len(img) != 784: return print("START") for i in range(28): print("".join([remap[int(round(x))] for x in img[i*28:i*28+28]]))
Show MNIST digits in the console.
Show MNIST digits in the console.
[ "Show", "MNIST", "digits", "in", "the", "console", "." ]
def show(img): remap = " .*#"+"#"*100 img = (img.flatten()+.5)*3 if len(img) != 784: return print("START") for i in range(28): print("".join([remap[int(round(x))] for x in img[i*28:i*28+28]]))
[ "def", "show", "(", "img", ")", ":", "remap", "=", "\" .*#\"", "+", "\"#\"", "*", "100", "img", "=", "(", "img", ".", "flatten", "(", ")", "+", ".5", ")", "*", "3", "if", "len", "(", "img", ")", "!=", "784", ":", "return", "print", "(", "\"START\"", ")", "for", "i", "in", "range", "(", "28", ")", ":", "print", "(", "\"\"", ".", "join", "(", "[", "remap", "[", "int", "(", "round", "(", "x", ")", ")", "]", "for", "x", "in", "img", "[", "i", "*", "28", ":", "i", "*", "28", "+", "28", "]", "]", ")", ")" ]
Show MNIST digits in the console.
[ "Show", "MNIST", "digits", "in", "the", "console", "." ]
[ "\"\"\"\n Show MNIST digits in the console.\n \"\"\"" ]
[ { "param": "img", "type": null } ]
{ "returns": [], "raises": [], "params": [ { "identifier": "img", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
def show(img): remap = " .*#"+"#"*100 img = (img.flatten()+.5)*3 if len(img) != 784: return print("START") for i in range(28): print("".join([remap[int(round(x))] for x in img[i*28:i*28+28]]))
602
185
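Illustrative usage for the show record above (not part of the dataset). A random 28x28 array stands in for an MNIST digit; the +.5 shift suggests inputs scaled to roughly [-0.5, 0.5]:

import numpy as np

img = np.random.rand(28, 28) - 0.5  # hypothetical image in [-0.5, 0.5)
show(img)  # prints START and 28 rows of ' .*#' shading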
cabc4ca40d8a45f2bbc574fb9a5358b2a61b9e41
MarcoFavorito/information-extraction-from-annotated-wikipedia
disambiguation/babelfy_man.py
[ "MIT" ]
Python
_normalize_annotation
<not_specific>
def _normalize_annotation(annotation, tag_index): """ Normalize the annotation anchorStart and anchorEnd, in the sense that we start to count the position from the beginning of the sentence and not from the beginning of the disambiguated page. :param annotation: Annotation object :param tag_index: start index (int) :return: a new Annotation object """ # norm_annotation = copy.deepcopy(annotation) norm_annotation = annotation norm_annotation.anchorStart = int(annotation.anchorStart) - tag_index norm_annotation.anchorEnd = int(annotation.anchorEnd) - tag_index return copy.copy(norm_annotation)
Normalize the annotation anchorStart and anchorEnd, in the sense that we start to count the position from the beginning of the sentence and not from the beginning of the disambiguated page. :param annotation: Annotation object :param tag_index: start index (int) :return: a new Annotation object
Normalize the annotation anchorStart and anchorEnd, in the sense that we start to count the position from the beginning of the sentence and not from the beginning of the disambiguated page.
[ "Normalize", "the", "annotation", "anchorStart", "and", "anchorEnd", "in", "the", "sense", "that", "we", "start", "to", "count", "the", "position", "from", "the", "beginning", "of", "the", "sentence", "and", "not", "from", "the", "beginning", "of", "the", "disambiguated", "page", "." ]
def _normalize_annotation(annotation, tag_index): norm_annotation = annotation norm_annotation.anchorStart = int(annotation.anchorStart) - tag_index norm_annotation.anchorEnd = int(annotation.anchorEnd) - tag_index return copy.copy(norm_annotation)
[ "def", "_normalize_annotation", "(", "annotation", ",", "tag_index", ")", ":", "norm_annotation", "=", "annotation", "norm_annotation", ".", "anchorStart", "=", "int", "(", "annotation", ".", "anchorStart", ")", "-", "tag_index", "norm_annotation", ".", "anchorEnd", "=", "int", "(", "annotation", ".", "anchorEnd", ")", "-", "tag_index", "return", "copy", ".", "copy", "(", "norm_annotation", ")" ]
Normalize the annotation anchorStart and anchorEnd, in the sense that we start to count the position from the beginning of the sentence and not from the beginning of the disambiguated page.
[ "Normalize", "the", "annotation", "anchorStart", "and", "anchorEnd", "in", "the", "sense", "that", "we", "start", "to", "count", "the", "position", "from", "the", "beginning", "of", "the", "sentence", "and", "not", "from", "the", "beginning", "of", "the", "disambiguated", "page", "." ]
[ "\"\"\"\n\tNormalize the annotation anchorStart and anchorEnd,\n\tin the sense that we start to count the position\n\tfrom the beginning of the sentence\n\tand not from the beginning of the disambiguated page.\n\t:param annotation: Annotation object\n\t:param tag_index: start index (int)\n\t:return: a new Annotation object\n\t\"\"\"", "# norm_annotation = copy.deepcopy(annotation)" ]
[ { "param": "annotation", "type": null }, { "param": "tag_index", "type": null } ]
{ "returns": [ { "docstring": "a new Annotation object", "docstring_tokens": [ "a", "new", "Annotation", "object" ], "type": null } ], "raises": [], "params": [ { "identifier": "annotation", "type": null, "docstring": null, "docstring_tokens": [ "None" ], "default": null, "is_optional": null }, { "identifier": "tag_index", "type": null, "docstring": "start index (int)", "docstring_tokens": [ "start", "index", "(", "int", ")" ], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
import copy def _normalize_annotation(annotation, tag_index): norm_annotation = annotation norm_annotation.anchorStart = int(annotation.anchorStart) - tag_index norm_annotation.anchorEnd = int(annotation.anchorEnd) - tag_index return copy.copy(norm_annotation)
603
375
69c96c57615af08ddd67af6cbc645b131f85d6d0
milescsmith/isotools
src/isotools/_transcriptome_io.py
[ "MIT" ]
Python
aligned_part
<not_specific>
def aligned_part(cigartuples, is_reverse): "returns the interval of the transcript that is aligned (e.g. not clipped) according to cigar. Positions are according to transcript strand" start = end = 0 for cigar in reversed(cigartuples) if is_reverse else cigartuples: if cigar[0] in (0, 1, 7, 8): # MI=X -> move forward on read: end += cigar[1] elif cigar[0] in (4, 5): # clipping at end if end > start: return (start, end) end += cigar[1] start = end return (start, end)
returns the interval of the transcript that is aligned (e.g. not clipped) according to cigar. Positions are according to transcript strand
returns the interval of the transcript that is aligned according to cigar. Positions are according to transcript strand
[ "returns", "the", "interval", "of", "the", "transcript", "that", "is", "aligned", "according", "to", "cigar", ".", "Positions", "are", "according", "to", "transcript", "strand" ]
def aligned_part(cigartuples, is_reverse): start = end = 0 for cigar in reversed(cigartuples) if is_reverse else cigartuples: if cigar[0] in (0, 1, 7, 8): end += cigar[1] elif cigar[0] in (4, 5): if end > start: return (start, end) end += cigar[1] start = end return (start, end)
[ "def", "aligned_part", "(", "cigartuples", ",", "is_reverse", ")", ":", "start", "=", "end", "=", "0", "for", "cigar", "in", "reversed", "(", "cigartuples", ")", "if", "is_reverse", "else", "cigartuples", ":", "if", "cigar", "[", "0", "]", "in", "(", "0", ",", "1", ",", "7", ",", "8", ")", ":", "end", "+=", "cigar", "[", "1", "]", "elif", "cigar", "[", "0", "]", "in", "(", "4", ",", "5", ")", ":", "if", "end", ">", "start", ":", "return", "(", "start", ",", "end", ")", "end", "+=", "cigar", "[", "1", "]", "start", "=", "end", "return", "(", "start", ",", "end", ")" ]
returns the interval of the transcript that is aligned (e.g.
[ "returns", "the", "interval", "of", "the", "transcript", "that", "is", "aligned", "(", "e", ".", "g", "." ]
[ "\"returns the interval of the transcript that is aligned (e.g. not clipped) according to cigar. Positions are according to transcript strand\"", "# MI=X -> move forward on read:", "# clipping at end" ]
[ { "param": "cigartuples", "type": null }, { "param": "is_reverse", "type": null } ]
{ "returns": [], "raises": [], "params": [ { "identifier": "cigartuples", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null }, { "identifier": "is_reverse", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
def aligned_part(cigartuples, is_reverse): start = end = 0 for cigar in reversed(cigartuples) if is_reverse else cigartuples: if cigar[0] in (0, 1, 7, 8): end += cigar[1] elif cigar[0] in (4, 5): if end > start: return (start, end) end += cigar[1] start = end return (start, end)
604
952
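Illustrative usage for the aligned_part record above (not part of the dataset). The op codes follow the pysam convention (0=M, 1=I, 4=soft clip, 5=hard clip):

cigartuples = [(4, 10), (0, 50), (1, 2), (0, 30), (4, 5)]
print(aligned_part(cigartuples, is_reverse=False))  # (10, 92): 10 clipped bases, then 82 aligned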
4ee1a8408b8973513d0c17c4c125d71814f6212c
cameronabel/advent2020py
day1/day1.py
[ "MIT" ]
Python
build_array
<not_specific>
def build_array(text): """Returns an array of numbers contained in a text file""" with open(text, 'r') as f: nums = {int(line.strip()) for line in f} return nums
Returns an array of numbers contained in a text file
Returns an array of numbers contained in a text file
[ "Returns", "an", "array", "of", "numbers", "contained", "in", "a", "text", "file" ]
def build_array(text): with open(text, 'r') as f: nums = {int(line.strip()) for line in f} return nums
[ "def", "build_array", "(", "text", ")", ":", "with", "open", "(", "text", ",", "'r'", ")", "as", "f", ":", "nums", "=", "{", "int", "(", "line", ".", "strip", "(", ")", ")", "for", "line", "in", "f", "}", "return", "nums" ]
Returns an array of numbers contained in a text file
[ "Returns", "an", "array", "of", "numbers", "contained", "in", "a", "text", "file" ]
[ "\"\"\"Returns an array of numbers contained in a text file\"\"\"" ]
[ { "param": "text", "type": null } ]
{ "returns": [], "raises": [], "params": [ { "identifier": "text", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
def build_array(text): with open(text, 'r') as f: nums = {int(line.strip()) for line in f} return nums
605
951
0bba16ade8f9537412bd1b2cc599b285977c408e
GryffindorLi/CS4240_Deep_Learning_Reproduce
petal.py
[ "Apache-2.0" ]
Python
filter_words
<not_specific>
def filter_words(tokens: List[str], word_counts=None, max_words: int = -1): """ Given a list of tokens, return a reduced list that contains only tokens from the list that correspond to actual words and occur a given number of times. :param tokens: the list of tokens to filter :param word_counts: a dictionary mapping words to their number of occurrences :param max_words: if set to a value >0, only the `max_words` most frequent words according to `word_counts` are kept :return: the filtered list of tokens """ tokens = (word for word in tokens if word[0] == 'Ġ' and len([char for char in word[1:] if char.isalpha()]) >= 2) if word_counts and max_words > 0: tokens = sorted(tokens, key=lambda word: word_counts[word[1:]], reverse=True)[:max_words] return tokens
Given a list of tokens, return a reduced list that contains only tokens from the list that correspond to actual words and occur a given number of times. :param tokens: the list of tokens to filter :param word_counts: a dictionary mapping words to their number of occurrences :param max_words: if set to a value >0, only the `max_words` most frequent words according to `word_counts` are kept :return: the filtered list of tokens
Given a list of tokens, return a reduced list that contains only tokens from the list that correspond to actual words and occur a given number of times.
[ "Given", "a", "list", "of", "tokens", "return", "a", "reduced", "list", "that", "contains", "only", "tokens", "from", "the", "list", "that", "correspond", "to", "actual", "words", "and", "occur", "a", "given", "number", "of", "times", "." ]
def filter_words(tokens: List[str], word_counts=None, max_words: int = -1): tokens = (word for word in tokens if word[0] == 'Ġ' and len([char for char in word[1:] if char.isalpha()]) >= 2) if word_counts and max_words > 0: tokens = sorted(tokens, key=lambda word: word_counts[word[1:]], reverse=True)[:max_words] return tokens
[ "def", "filter_words", "(", "tokens", ":", "List", "[", "str", "]", ",", "word_counts", "=", "None", ",", "max_words", ":", "int", "=", "-", "1", ")", ":", "tokens", "=", "(", "word", "for", "word", "in", "tokens", "if", "word", "[", "0", "]", "==", "'Ġ' ", "nd ", "en(", "[", "c", "har ", "or ", "har ", "n ", "ord[", "1", ":", "]", " ", "f ", "har.", "i", "salpha(", ")", "]", ")", " ", "= ", ")", "", "if", "word_counts", "and", "max_words", ">", "0", ":", "tokens", "=", "sorted", "(", "tokens", ",", "key", "=", "lambda", "word", ":", "word_counts", "[", "word", "[", "1", ":", "]", "]", ",", "reverse", "=", "True", ")", "[", ":", "max_words", "]", "return", "tokens" ]
Given a list of tokens, return a reduced list that contains only tokens from the list that correspond to actual words and occur a given number of times.
[ "Given", "a", "list", "of", "tokens", "return", "a", "reduced", "list", "that", "contains", "only", "tokens", "from", "the", "list", "that", "correspond", "to", "actual", "words", "and", "occur", "a", "given", "number", "of", "times", "." ]
[ "\"\"\"\n Given a list of tokens, return a reduced list that contains only tokens from the list that correspond\n to actual words and occur a given number of times.\n :param tokens: the list of tokens to filter\n :param word_counts: a dictionary mapping words to their number of occurrences\n :param max_words: if set to a value >0, only the `max_words` most frequent words according to `word_counts` are kept\n :return: the filtered list of tokens\n \"\"\"" ]
[ { "param": "tokens", "type": "List[str]" }, { "param": "word_counts", "type": null }, { "param": "max_words", "type": "int" } ]
{ "returns": [ { "docstring": "the filtered list of tokens", "docstring_tokens": [ "the", "filtered", "list", "of", "tokens" ], "type": null } ], "raises": [], "params": [ { "identifier": "tokens", "type": "List[str]", "docstring": "the list of tokens to filter", "docstring_tokens": [ "the", "list", "of", "tokens", "to", "filter" ], "default": null, "is_optional": null }, { "identifier": "word_counts", "type": null, "docstring": "a dictionary mapping words to their number of occurrences", "docstring_tokens": [ "a", "dictionary", "mapping", "words", "to", "their", "number", "of", "occurrences" ], "default": null, "is_optional": null }, { "identifier": "max_words", "type": "int", "docstring": "if set to a value >0, only the `max_words` most frequent words according to `word_counts` are kept", "docstring_tokens": [ "if", "set", "to", "a", "value", ">", "0", "only", "the", "`", "max_words", "`", "most", "frequent", "words", "according", "to", "`", "word_counts", "`", "are", "kept" ], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
def filter_words(tokens: List[str], word_counts=None, max_words: int = -1): tokens = (word for word in tokens if word[0] == 'Ġ' and len([char for char in word[1:] if char.isalpha()]) >= 2) if word_counts and max_words > 0: tokens = sorted(tokens, key=lambda word: word_counts[word[1:]], reverse=True)[:max_words] return tokens
606
659
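Illustrative usage for the filter_words record above (not part of the dataset; assumes the function and `from typing import List` are in scope). 'Ġ' is the GPT-2 byte-pair marker for a leading space, i.e. a word start:

tokens = ["Ġhello", "Ġworld", "Ġa", "lo", "Ġ42"]
counts = {"hello": 3, "world": 1}
print(list(filter_words(tokens)))                 # ['Ġhello', 'Ġworld']
print(filter_words(tokens, counts, max_words=1))  # ['Ġhello'], the most frequent word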
b914b42d7b50b0c39478d9a34cb097993b6d2515
szufix/mapel
mapel/voting/elections/group_separable.py
[ "MIT" ]
Python
_add_num_leaf_descendants
<not_specific>
def _add_num_leaf_descendants(node): """ add total number of descendants to each internal node """ if node.leaf: node.num_leaf_descendants = 1 else: node.num_leaf_descendants = 0 for child in node.children: node.num_leaf_descendants += _add_num_leaf_descendants(child) return node.num_leaf_descendants
add total number of descendants to each internal node
add total number of descendants to each internal node
[ "add", "total", "number", "of", "descendants", "to", "each", "internal", "node" ]
def _add_num_leaf_descendants(node): if node.leaf: node.num_leaf_descendants = 1 else: node.num_leaf_descendants = 0 for child in node.children: node.num_leaf_descendants += _add_num_leaf_descendants(child) return node.num_leaf_descendants
[ "def", "_add_num_leaf_descendants", "(", "node", ")", ":", "if", "node", ".", "leaf", ":", "node", ".", "num_leaf_descendants", "=", "1", "else", ":", "node", ".", "num_leaf_descendants", "=", "0", "for", "child", "in", "node", ".", "children", ":", "node", ".", "num_leaf_descendants", "+=", "_add_num_leaf_descendants", "(", "child", ")", "return", "node", ".", "num_leaf_descendants" ]
add total number of descendants to each internal node
[ "add", "total", "number", "of", "descendants", "to", "each", "internal", "node" ]
[ "\"\"\" add total number of descendants to each internal node \"\"\"" ]
[ { "param": "node", "type": null } ]
{ "returns": [], "raises": [], "params": [ { "identifier": "node", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
def _add_num_leaf_descendants(node): if node.leaf: node.num_leaf_descendants = 1 else: node.num_leaf_descendants = 0 for child in node.children: node.num_leaf_descendants += _add_num_leaf_descendants(child) return node.num_leaf_descendants
607
73
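Illustrative usage for the _add_num_leaf_descendants record above (not part of the dataset; SimpleNamespace stands in for the real node class):

from types import SimpleNamespace as Node

leaf = lambda: Node(leaf=True, children=[])
root = Node(leaf=False, children=[leaf(), Node(leaf=False, children=[leaf(), leaf()])])
print(_add_num_leaf_descendants(root))  # 3
print(root.num_leaf_descendants)        # 3, cached on the node as a side effect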
96202ff97579fd4d10f66e9c4e0a34fa1445fd26
jessehamner/svgscantron
svgscantron.py
[ "Apache-2.0" ]
Python
__drawStub
<not_specific>
def __drawStub(x,y,height,width,color,filehandle,onlyprintanswers,verbose): '''Draw a small rectangle/line (strut) aligned with x,y specified''' if(onlyprintanswers): return 1 if (color=="black"): if (width==1.5): classname='class="strut"' if (width==3): classname='class="fatstrut"' else: classname='style="stroke:' + color + ';stroke-width:' + \ str(width) + 'mm"' y2 = y + height mark = str('<line x1="' + str(x) + 'mm" y1="' + str(y) + 'mm" x2="' + \ str(x) + 'mm" y2="' + str(y2) + 'mm" ' + classname + '/>\n') filehandle.write(str(mark)) return 1
Draw a small rectangle/line (strut) aligned with x,y specified
Draw a small rectangle/line (strut) aligned with x,y specified
[ "Draw", "a", "small", "rectangle", "/", "line", "(", "strut", ")", "aligned", "with", "x", "y", "specified" ]
def __drawStub(x,y,height,width,color,filehandle,onlyprintanswers,verbose): if(onlyprintanswers): return 1 if (color=="black"): if (width==1.5): classname='class="strut"' if (width==3): classname='class="fatstrut"' else: classname='style="stroke:' + color + ';stroke-width:' + \ str(width) + 'mm"' y2 = y + height mark = str('<line x1="' + str(x) + 'mm" y1="' + str(y) + 'mm" x2="' + \ str(x) + 'mm" y2="' + str(y2) + 'mm" ' + classname + '/>\n') filehandle.write(str(mark)) return 1
[ "def", "__drawStub", "(", "x", ",", "y", ",", "height", ",", "width", ",", "color", ",", "filehandle", ",", "onlyprintanswers", ",", "verbose", ")", ":", "if", "(", "onlyprintanswers", ")", ":", "return", "1", "if", "(", "color", "==", "\"black\"", ")", ":", "if", "(", "width", "==", "1.5", ")", ":", "classname", "=", "'class=\"strut\"'", "if", "(", "width", "==", "3", ")", ":", "classname", "=", "'class=\"fatstrut\"'", "else", ":", "classname", "=", "'style=\"stroke:'", "+", "color", "+", "';stroke-width:'", "+", "str", "(", "width", ")", "+", "'mm\"'", "y2", "=", "y", "+", "height", "mark", "=", "str", "(", "'<line x1=\"'", "+", "str", "(", "x", ")", "+", "'mm\" y1=\"'", "+", "str", "(", "y", ")", "+", "'mm\" x2=\"'", "+", "str", "(", "x", ")", "+", "'mm\" y2=\"'", "+", "str", "(", "y2", ")", "+", "'mm\" '", "+", "classname", "+", "'/>\\n'", ")", "filehandle", ".", "write", "(", "str", "(", "mark", ")", ")", "return", "1" ]
Draw a small rectangle/line (strut) aligned with x,y specified
[ "Draw", "a", "small", "rectangle", "/", "line", "(", "strut", ")", "aligned", "with", "x", "y", "specified" ]
[ "'''Draw a small rectangle/line (strut) aligned with x,y specified'''" ]
[ { "param": "x", "type": null }, { "param": "y", "type": null }, { "param": "height", "type": null }, { "param": "width", "type": null }, { "param": "color", "type": null }, { "param": "filehandle", "type": null }, { "param": "onlyprintanswers", "type": null }, { "param": "verbose", "type": null } ]
{ "returns": [], "raises": [], "params": [ { "identifier": "x", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null }, { "identifier": "y", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null }, { "identifier": "height", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null }, { "identifier": "width", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null }, { "identifier": "color", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null }, { "identifier": "filehandle", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null }, { "identifier": "onlyprintanswers", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null }, { "identifier": "verbose", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
def __drawStub(x,y,height,width,color,filehandle,onlyprintanswers,verbose):
    if(onlyprintanswers):
        return 1
    if (color=="black"):
        if (width==1.5):
            classname='class="strut"'
        if (width==3):
            classname='class="fatstrut"'
    else:
        classname='style="stroke:' + color + ';stroke-width:' + \
            str(width) + 'mm"'
    y2 = y + height
    mark = str('<line x1="' + str(x) + 'mm" y1="' + str(y) + 'mm" x2="' + \
        str(x) + 'mm" y2="' + str(y2) + 'mm" ' + classname + '/>\n')
    filehandle.write(str(mark))
    return 1
608
268
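A quick smoke test of the record above, writing to an in-memory buffer instead of a real SVG file; it assumes the `else` branch pairs with the color check, as the restored indentation suggests:

import io

buf = io.StringIO()
__drawStub(10, 20, height=5, width=2, color="red",
           filehandle=buf, onlyprintanswers=False, verbose=False)
print(buf.getvalue().strip())
# <line x1="10mm" y1="20mm" x2="10mm" y2="25mm" style="stroke:red;stroke-width:2mm"/>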
9948d835e30e96ea5825f188fa43e01b0ae0aa0b
ssameerr/pyro
examples/air/viz.py
[ "MIT" ]
Python
bounding_box
<not_specific>
def bounding_box(z_where, x_size):
    """This doesn't take into account interpolation, but it's close
    enough to be usable."""
    w = x_size / z_where.s
    h = x_size / z_where.s
    xtrans = -z_where.x / z_where.s * x_size / 2.
    ytrans = -z_where.y / z_where.s * x_size / 2.
    x = (x_size - w) / 2 + xtrans  # origin is top left
    y = (x_size - h) / 2 + ytrans
    return (x, y), w, h
This doesn't take into account interpolation, but it's close enough to be usable.
This doesn't take into account interpolation, but it's close enough to be usable.
[ "This", "doesn", "'", "t", "take", "into", "account", "interpolation", "but", "it", "'", "s", "close", "enough", "to", "be", "usable", "." ]
def bounding_box(z_where, x_size):
    w = x_size / z_where.s
    h = x_size / z_where.s
    xtrans = -z_where.x / z_where.s * x_size / 2.
    ytrans = -z_where.y / z_where.s * x_size / 2.
    x = (x_size - w) / 2 + xtrans
    y = (x_size - h) / 2 + ytrans
    return (x, y), w, h
[ "def", "bounding_box", "(", "z_where", ",", "x_size", ")", ":", "w", "=", "x_size", "/", "z_where", ".", "s", "h", "=", "x_size", "/", "z_where", ".", "s", "xtrans", "=", "-", "z_where", ".", "x", "/", "z_where", ".", "s", "*", "x_size", "/", "2.", "ytrans", "=", "-", "z_where", ".", "y", "/", "z_where", ".", "s", "*", "x_size", "/", "2.", "x", "=", "(", "x_size", "-", "w", ")", "/", "2", "+", "xtrans", "y", "=", "(", "x_size", "-", "h", ")", "/", "2", "+", "ytrans", "return", "(", "x", ",", "y", ")", ",", "w", ",", "h" ]
This doesn't take into account interpolation, but it's close enough to be usable.
[ "This", "doesn", "'", "t", "take", "into", "account", "interpolation", "but", "it", "'", "s", "close", "enough", "to", "be", "usable", "." ]
[ "\"\"\"This doesn't take into account interpolation, but it's close\n enough to be usable.\"\"\"", "# origin is top left" ]
[ { "param": "z_where", "type": null }, { "param": "x_size", "type": null } ]
{ "returns": [], "raises": [], "params": [ { "identifier": "z_where", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null }, { "identifier": "x_size", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
def bounding_box(z_where, x_size):
    w = x_size / z_where.s
    h = x_size / z_where.s
    xtrans = -z_where.x / z_where.s * x_size / 2.
    ytrans = -z_where.y / z_where.s * x_size / 2.
    x = (x_size - w) / 2 + xtrans
    y = (x_size - h) / 2 + ytrans
    return (x, y), w, h
609
482
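A minimal usage sketch; `ZWhere` is a hypothetical stand-in for any object exposing the `s`, `x` and `y` attributes the function expects:

from collections import namedtuple

ZWhere = namedtuple("ZWhere", ["s", "x", "y"])  # hypothetical container
(x, y), w, h = bounding_box(ZWhere(s=0.5, x=0.0, y=0.0), x_size=50)
print((x, y), w, h)  # (-25.0, -25.0) 100.0 100.0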
e0210c8fee973ed7183eb1d51279837d90ab34e1
scgbckbone/btc-hd-wallet
btc_hd_wallet/__main__.py
[ "MIT" ]
Python
file_
str
def file_(value: str) -> str:
    """
    File related checks:
    1. fail if path exists
    2. fail if path is directory
    3. fail if parent directory is not writable

    :param value: file path
    :return: file path
    """
    path = pathlib.Path(value)
    if path.is_dir():
        raise argparse.ArgumentError(
            argument=None,
            message="{} is directory".format(value)
        )
    if path.exists():
        raise argparse.ArgumentError(
            argument=None,
            message="File {} already exists".format(value)
        )
    parent_dir_path = str(path.parent)
    if not os.access(parent_dir_path, os.W_OK):
        raise argparse.ArgumentError(
            argument=None,
            message="Parent directory {} not writable".format(parent_dir_path)
        )
    return value
File related checks:
1. fail if path exists
2. fail if path is directory
3. fail if parent directory is not writable

:param value: file path
:return: file path
File related checks:
1. fail if path exists
2. fail if path is directory
3. fail if parent directory is not writable
[ "File", "related", "checks", ":", "1", ".", "fail", "if", "path", "exists", "2", ".", "fail", "if", "path", "is", "directory", "3", ".", "fail", "if", "parent", "directory", "is", "not", "writable" ]
def file_(value: str) -> str:
    path = pathlib.Path(value)
    if path.is_dir():
        raise argparse.ArgumentError(
            argument=None,
            message="{} is directory".format(value)
        )
    if path.exists():
        raise argparse.ArgumentError(
            argument=None,
            message="File {} already exists".format(value)
        )
    parent_dir_path = str(path.parent)
    if not os.access(parent_dir_path, os.W_OK):
        raise argparse.ArgumentError(
            argument=None,
            message="Parent directory {} not writable".format(parent_dir_path)
        )
    return value
[ "def", "file_", "(", "value", ":", "str", ")", "->", "str", ":", "path", "=", "pathlib", ".", "Path", "(", "value", ")", "if", "path", ".", "is_dir", "(", ")", ":", "raise", "argparse", ".", "ArgumentError", "(", "argument", "=", "None", ",", "message", "=", "\"{} is directory\"", ".", "format", "(", "value", ")", ")", "if", "path", ".", "exists", "(", ")", ":", "raise", "argparse", ".", "ArgumentError", "(", "argument", "=", "None", ",", "message", "=", "\"File {} already exists\"", ".", "format", "(", "value", ")", ")", "parent_dir_path", "=", "str", "(", "path", ".", "parent", ")", "if", "not", "os", ".", "access", "(", "parent_dir_path", ",", "os", ".", "W_OK", ")", ":", "raise", "argparse", ".", "ArgumentError", "(", "argument", "=", "None", ",", "message", "=", "\"Parent directory {} not writable\"", ".", "format", "(", "parent_dir_path", ")", ")", "return", "value" ]
File related checks: 1. fail if path exists 2. fail if path is directory 3. fail if parent directory is not writable
[ "File", "related", "checks", ":", "1", ".", "fail", "if", "path", "exists", "2", ".", "fail", "if", "path", "is", "directory", "3", ".", "fail", "if", "parent", "directory", "is", "not", "writable" ]
[ "\"\"\"\n File related checks:\n 1. fail if path exists\n 2. fail if path is directory\n 3. fail if parent directory is not writable\n\n :param value: file path\n :return: file path\n \"\"\"" ]
[ { "param": "value", "type": "str" } ]
{ "returns": [ { "docstring": null, "docstring_tokens": [ "None" ], "type": null } ], "raises": [], "params": [ { "identifier": "value", "type": "str", "docstring": null, "docstring_tokens": [ "None" ], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
import pathlib
import argparse
import os

def file_(value: str) -> str:
    path = pathlib.Path(value)
    if path.is_dir():
        raise argparse.ArgumentError(
            argument=None,
            message="{} is directory".format(value)
        )
    if path.exists():
        raise argparse.ArgumentError(
            argument=None,
            message="File {} already exists".format(value)
        )
    parent_dir_path = str(path.parent)
    if not os.access(parent_dir_path, os.W_OK):
        raise argparse.ArgumentError(
            argument=None,
            message="Parent directory {} not writable".format(parent_dir_path)
        )
    return value
610
860
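A sketch of the validator in action; the file name is made up and assumed not to exist in a writable working directory:

import argparse

try:
    print(file_("wallet.json"))  # passes: prints 'wallet.json'
    print(file_("."))            # fails: '.' is a directory
except argparse.ArgumentError as exc:
    print("rejected:", exc.message)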
2db27cb09e2620ce45ee8725dc01554bf7cee420
AgPipeline/docker-support
base-image/entrypoint.py
[ "BSD-3-Clause" ]
Python
parse_continue_result
tuple
def parse_continue_result(result) -> tuple:
    """Parses the result of calling transformer.check_continue and returns
    the code and/or message
    Arguments:
        result: the result from calling transformer.check_continue
    Return:
        A tuple containing the result code and result message. One or both of these
        values in the tuple may be None
    Notes:
        A string parameter will always return a result code of None and message of None indicating
        the caller needs to decide what to do.
        An integer parameter will cause the result message value of None, the caller needs to decide
        what an appropriate message is.
        A parameter that's iterable with a length > 0 will have the first value as the result code and the
        second value as the result message. No checks are made for type conformity.
        If the parameter is something other than the above, an exception will most likely be thrown.
    """
    result_code = None
    result_message = None
    if isinstance(result, int):
        result_code = result
    elif not isinstance(result, str):
        result_len = len(result)
        if result_len > 0:
            result_code = result[0]
        if result_len > 1:
            result_message = result[1]
    return (result_code, result_message)
Parses the result of calling transformer.check_continue and returns
the code and/or message
Arguments:
    result: the result from calling transformer.check_continue
Return:
    A tuple containing the result code and result message. One or both of these
    values in the tuple may be None
Notes:
    A string parameter will always return a result code of None and message of None indicating
    the caller needs to decide what to do.
    An integer parameter will cause the result message value of None, the caller needs to decide
    what an appropriate message is.
    A parameter that's iterable with a length > 0 will have the first value as the result code and the
    second value as the result message. No checks are made for type conformity.
    If the parameter is something other than the above, an exception will most likely be thrown.
Parses the result of calling transformer.check_continue and returns the code and/or message
[ "Parses", "the", "result", "of", "calling", "transformer", ".", "check_continue", "and", "returns", "the", "code", "and", "/", "or", "message" ]
def parse_continue_result(result) -> tuple:
    result_code = None
    result_message = None
    if isinstance(result, int):
        result_code = result
    elif not isinstance(result, str):
        result_len = len(result)
        if result_len > 0:
            result_code = result[0]
        if result_len > 1:
            result_message = result[1]
    return (result_code, result_message)
[ "def", "parse_continue_result", "(", "result", ")", "->", "tuple", ":", "result_code", "=", "None", "result_message", "=", "None", "if", "isinstance", "(", "result", ",", "int", ")", ":", "result_code", "=", "result", "elif", "not", "isinstance", "(", "result", ",", "str", ")", ":", "result_len", "=", "len", "(", "result", ")", "if", "result_len", ">", "0", ":", "result_code", "=", "result", "[", "0", "]", "if", "result_len", ">", "1", ":", "result_message", "=", "result", "[", "1", "]", "return", "(", "result_code", ",", "result_message", ")" ]
Parses the result of calling transformer.check_continue and returns the code and/or message
[ "Parses", "the", "result", "of", "calling", "transformer", ".", "check_continue", "and", "returns", "the", "code", "and", "/", "or", "message" ]
[ "\"\"\"Parses the result of calling transformer.check_continue and returns\n the code and/or message\n Arguments:\n result: the result from calling transformer.check_continue\n Return:\n A tuple containing the result code and result message. One or both of these\n values in the tuple may be None\n Notes:\n A string parameter will always return a result code of None and message of None indicating\n the caller needs to decide what to do.\n An integer parameter will cause the result message value of None, the caller needs to decide\n what an appropriate message is.\n A parameter that's iterable with a length > 0 will have the first value as the result code and the\n second value as the result message. No checks are made for type conformity.\n If the parameter is something other than the above, an exception will most likely be thrown.\n \"\"\"" ]
[ { "param": "result", "type": null } ]
{ "returns": [], "raises": [], "params": [ { "identifier": "result", "type": null, "docstring": "the result from calling transformer.check_continue", "docstring_tokens": [ "the", "result", "from", "calling", "transformer", ".", "check_continue" ], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
def parse_continue_result(result) -> tuple:
    result_code = None
    result_message = None
    if isinstance(result, int):
        result_code = result
    elif not isinstance(result, str):
        result_len = len(result)
        if result_len > 0:
            result_code = result[0]
        if result_len > 1:
            result_message = result[1]
    return (result_code, result_message)
611
215
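The three accepted input shapes, as a quick demonstration:

print(parse_continue_result(0))             # (0, None)
print(parse_continue_result("keep going"))  # (None, None): caller decides
print(parse_continue_result((-1, "stop")))  # (-1, 'stop')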
9571ed204c059e19f1275e4dea3bb4ba7b0cd016
darrylmelander/Egret
egret/model_library/transmission/tx_utils.py
[ "BSD-3-Clause" ]
Python
load_shed_limit
<not_specific>
def load_shed_limit(load, gens, gen_mins):
    '''
    Calculates the maximum amount of load shedding
    given a load and set of generators with
    associated minimum outputs
    '''
    max_load_shed = 0.
    if load > 0.:
        max_load_shed += load
    for g in gens:
        g_min = gen_mins[g]
        if g_min < 0:
            max_load_shed += -g_min
    return max_load_shed
Calculates the maximum amount of load shedding given a load and set of generators with associated minimum outputs
Calculates the maximum amount of load shedding given a load and set of generators with associated minimum outputs
[ "Calculates", "the", "maximum", "amount", "of", "load", "shedding", "given", "a", "load", "and", "set", "of", "generators", "with", "associated", "minimum", "outputs" ]
def load_shed_limit(load, gens, gen_mins):
    max_load_shed = 0.
    if load > 0.:
        max_load_shed += load
    for g in gens:
        g_min = gen_mins[g]
        if g_min < 0:
            max_load_shed += -g_min
    return max_load_shed
[ "def", "load_shed_limit", "(", "load", ",", "gens", ",", "gen_mins", ")", ":", "max_load_shed", "=", "0.", "if", "load", ">", "0.", ":", "max_load_shed", "+=", "load", "for", "g", "in", "gens", ":", "g_min", "=", "gen_mins", "[", "g", "]", "if", "g_min", "<", "0", ":", "max_load_shed", "+=", "-", "g_min", "return", "max_load_shed" ]
Calculates the maximum amount of load shedding given a load and set of generators with associated minimum outputs
[ "Calculates", "the", "maximum", "amount", "of", "load", "shedding", "given", "a", "load", "and", "set", "of", "generators", "with", "associated", "minimum", "outputs" ]
[ "'''\n Calculates the maximum amount of load shedding\n given a load and set of generators with\n associated minimum outputs\n '''" ]
[ { "param": "load", "type": null }, { "param": "gens", "type": null }, { "param": "gen_mins", "type": null } ]
{ "returns": [], "raises": [], "params": [ { "identifier": "load", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null }, { "identifier": "gens", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null }, { "identifier": "gen_mins", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
def load_shed_limit(load, gens, gen_mins):
    max_load_shed = 0.
    if load > 0.:
        max_load_shed += load
    for g in gens:
        g_min = gen_mins[g]
        if g_min < 0:
            max_load_shed += -g_min
    return max_load_shed
612
415
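A small worked example; the generator names and limits are made up:

gen_mins = {"g1": -5.0, "g2": 10.0}
print(load_shed_limit(load=100.0, gens=["g1", "g2"], gen_mins=gen_mins))  # 105.0
# 100 units of sheddable load plus 5 from g1, whose minimum output is negative.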
e617f4b85c6c010f456c23518e0655a45797c8ec
haiwangyang/demultiplexer
demultiplexer/download.py
[ "MIT" ]
Python
fetch_binary_file_from_helix_ftp
null
def fetch_binary_file_from_helix_ftp(filename):
    """
    get binary file such as bam
    """
    data = urllib.request.urlopen("ftp://helix.nih.gov/pub/haiwang/" + filename)

    # check if the output dir exist, if not create it
    outputdir = "data"
    try:
        os.stat(outputdir)
    except:
        os.mkdir(outputdir)

    with open(outputdir + "/" + filename, 'wb') as f:
        for line in data:
            f.write(line)
get binary file such as bam
get binary file such as bam
[ "get", "binary", "file", "such", "as", "bam" ]
def fetch_binary_file_from_helix_ftp(filename):
    data = urllib.request.urlopen("ftp://helix.nih.gov/pub/haiwang/" + filename)
    outputdir = "data"
    try:
        os.stat(outputdir)
    except:
        os.mkdir(outputdir)
    with open(outputdir + "/" + filename, 'wb') as f:
        for line in data:
            f.write(line)
[ "def", "fetch_binary_file_from_helix_ftp", "(", "filename", ")", ":", "data", "=", "urllib", ".", "request", ".", "urlopen", "(", "\"ftp://helix.nih.gov/pub/haiwang/\"", "+", "filename", ")", "outputdir", "=", "\"data\"", "try", ":", "os", ".", "stat", "(", "outputdir", ")", "except", ":", "os", ".", "mkdir", "(", "outputdir", ")", "with", "open", "(", "outputdir", "+", "\"/\"", "+", "filename", ",", "'wb'", ")", "as", "f", ":", "for", "line", "in", "data", ":", "f", ".", "write", "(", "line", ")" ]
get binary file such as bam
[ "get", "binary", "file", "such", "as", "bam" ]
[ "\"\"\" \n get binary file such as bam\n \"\"\"", "# check if the output dir exist, if not create it" ]
[ { "param": "filename", "type": null } ]
{ "returns": [], "raises": [], "params": [ { "identifier": "filename", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
import urllib.request
import os

def fetch_binary_file_from_helix_ftp(filename):
    data = urllib.request.urlopen("ftp://helix.nih.gov/pub/haiwang/" + filename)
    outputdir = "data"
    try:
        os.stat(outputdir)
    except:
        os.mkdir(outputdir)
    with open(outputdir + "/" + filename, 'wb') as f:
        for line in data:
            f.write(line)
614
186
25690d888224b56169f736dedf133b813b3587bf
usgs-pygsflow/pygsflow
gsflow/utils/gsflow_io.py
[ "Unlicense" ]
Python
_warning
null
def _warning(msg, frame, wtype=UserWarning):
    """
    Method to standardize the warning output in pyGSFLOW and avoid
    absolute file paths in warning messages

    Parameters
    ----------
    msg : str
        error message
    frame : named tuple
        named tuple from inspect.getframeinfo
    wtype :
        warning type to be displayed defaults to UserWarning

    """
    module = os.path.split(frame.filename)[-1]
    warnings.warn_explicit(msg, wtype, module, frame.lineno)
Method to standardize the warning output in pyGSFLOW and avoid
absolute file paths in warning messages

Parameters
----------
msg : str
    error message
frame : named tuple
    named tuple from inspect.getframeinfo
wtype :
    warning type to be displayed defaults to UserWarning
Method to standardize the warning output in pyGSFLOW and avoid
absolute file paths in warning messages

Parameters
msg : str
    error message
frame : named tuple
    named tuple from inspect.getframeinfo
wtype :
    warning type to be displayed defaults to UserWarning
[ "Method", "to", "standardize", "the", "warning", "output", "in", "pyGSFLOW", "and", "avoid", "absolute", "file", "paths", "in", "warning", "messages", "Parameters", "msg", ":", "str", "error", "message", "frame", ":", "named", "tuple", "named", "tuple", "from", "inspect", ".", "getframeinfo", "wtype", ":", "warning", "type", "to", "be", "displayed", "defaults", "to", "UserWarning" ]
def _warning(msg, frame, wtype=UserWarning):
    module = os.path.split(frame.filename)[-1]
    warnings.warn_explicit(msg, wtype, module, frame.lineno)
[ "def", "_warning", "(", "msg", ",", "frame", ",", "wtype", "=", "UserWarning", ")", ":", "module", "=", "os", ".", "path", ".", "split", "(", "frame", ".", "filename", ")", "[", "-", "1", "]", "warnings", ".", "warn_explicit", "(", "msg", ",", "wtype", ",", "module", ",", "frame", ".", "lineno", ")" ]
Method to standardize the warning output in pyGSFLOW and avoid absolute file paths in warning messages
[ "Method", "to", "standardize", "the", "warning", "output", "in", "pyGSFLOW", "and", "avoid", "absolute", "file", "paths", "in", "warning", "messages" ]
[ "\"\"\"\n Method to standardize the warning output in pyGSFLOW and avoid\n absolute file paths in warning messages\n\n Parameters\n ----------\n msg : str\n error message\n frame : named tuple\n named tuple from inspect.getframeinfo\n wtype :\n warning type to be displayed defaults to UserWarning\n\n \"\"\"" ]
[ { "param": "msg", "type": null }, { "param": "frame", "type": null }, { "param": "wtype", "type": null } ]
{ "returns": [], "raises": [], "params": [ { "identifier": "msg", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null }, { "identifier": "frame", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null }, { "identifier": "wtype", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
import warnings
import os

def _warning(msg, frame, wtype=UserWarning):
    module = os.path.split(frame.filename)[-1]
    warnings.warn_explicit(msg, wtype, module, frame.lineno)
616
884
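A usage sketch: capture the caller's frame with `inspect` and let the helper tag the warning with file and line; the message is made up:

import inspect

frame = inspect.getframeinfo(inspect.currentframe())
_warning("grid units are missing, assuming meters", frame)
# UserWarning is reported against this file name and line number.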
9a5e22fd7321e57946ce8dc3317166ad370e3947
schwark/alfred-smartthings-py
filter.py
[ "MIT" ]
Python
search_key_for_scene
<not_specific>
def search_key_for_scene(scene):
    """Generate a string search key for a scene"""
    elements = []
    elements.append(scene['sceneName'])  # name of scene
    return u' '.join(elements)
Generate a string search key for a scene
Generate a string search key for a scene
[ "Generate", "a", "string", "search", "key", "for", "a", "scene" ]
def search_key_for_scene(scene):
    elements = []
    elements.append(scene['sceneName'])
    return u' '.join(elements)
[ "def", "search_key_for_scene", "(", "scene", ")", ":", "elements", "=", "[", "]", "elements", ".", "append", "(", "scene", "[", "'sceneName'", "]", ")", "return", "u' '", ".", "join", "(", "elements", ")" ]
Generate a string search key for a scene
[ "Generate", "a", "string", "search", "key", "for", "a", "scene" ]
[ "\"\"\"Generate a string search key for a scene\"\"\"", "# name of scene" ]
[ { "param": "scene", "type": null } ]
{ "returns": [], "raises": [], "params": [ { "identifier": "scene", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
def search_key_for_scene(scene):
    elements = []
    elements.append(scene['sceneName'])
    return u' '.join(elements)
617
643
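A one-line demonstration with a made-up scene record:

print(search_key_for_scene({"sceneName": "Good Night"}))  # Good Night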
963a2768e41ba100164f32cd0a428e3549021cdb
kobidor12/apm-integration-testing
scripts/modules/service.py
[ "Apache-2.0" ]
Python
add_arguments
null
def add_arguments(cls, parser):
    """add service-specific command line arguments"""
    # allow port overrides
    if hasattr(cls, 'SERVICE_PORT'):
        parser.add_argument(
            '--' + cls.name() + '-port',
            type=int,
            default=cls.SERVICE_PORT,
            dest=cls.option_name() + '_port',
            help="service port"
        )
    parser.add_argument(
        '--' + cls.name() + '-env-var',
        action="append",
        dest=cls.option_name() + "_env_vars",
        help="arbitrary environment variables to set"
    )
add service-specific command line arguments
add service-specific command line arguments
[ "add", "service", "-", "specific", "command", "line", "arguments" ]
def add_arguments(cls, parser):
    if hasattr(cls, 'SERVICE_PORT'):
        parser.add_argument(
            '--' + cls.name() + '-port',
            type=int,
            default=cls.SERVICE_PORT,
            dest=cls.option_name() + '_port',
            help="service port"
        )
    parser.add_argument(
        '--' + cls.name() + '-env-var',
        action="append",
        dest=cls.option_name() + "_env_vars",
        help="arbitrary environment variables to set"
    )
[ "def", "add_arguments", "(", "cls", ",", "parser", ")", ":", "if", "hasattr", "(", "cls", ",", "'SERVICE_PORT'", ")", ":", "parser", ".", "add_argument", "(", "'--'", "+", "cls", ".", "name", "(", ")", "+", "'-port'", ",", "type", "=", "int", ",", "default", "=", "cls", ".", "SERVICE_PORT", ",", "dest", "=", "cls", ".", "option_name", "(", ")", "+", "'_port'", ",", "help", "=", "\"service port\"", ")", "parser", ".", "add_argument", "(", "'--'", "+", "cls", ".", "name", "(", ")", "+", "'-env-var'", ",", "action", "=", "\"append\"", ",", "dest", "=", "cls", ".", "option_name", "(", ")", "+", "\"_env_vars\"", ",", "help", "=", "\"arbitrary enviornment variables to set\"", ")" ]
add service-specific command line arguments
[ "add", "service", "-", "specific", "command", "line", "arguments" ]
[ "\"\"\"add service-specific command line arguments\"\"\"", "# allow port overrides" ]
[ { "param": "cls", "type": null }, { "param": "parser", "type": null } ]
{ "returns": [], "raises": [], "params": [ { "identifier": "cls", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null }, { "identifier": "parser", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
def add_arguments(cls, parser):
    if hasattr(cls, 'SERVICE_PORT'):
        parser.add_argument(
            '--' + cls.name() + '-port',
            type=int,
            default=cls.SERVICE_PORT,
            dest=cls.option_name() + '_port',
            help="service port"
        )
    parser.add_argument(
        '--' + cls.name() + '-env-var',
        action="append",
        dest=cls.option_name() + "_env_vars",
        help="arbitrary environment variables to set"
    )
618
849
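A sketch with a hypothetical service class supplying the two classmethods the helper calls:

import argparse

class Kibana:  # hypothetical service definition
    SERVICE_PORT = 5601

    @classmethod
    def name(cls):
        return "kibana"

    @classmethod
    def option_name(cls):
        return "kibana"

parser = argparse.ArgumentParser()
add_arguments(Kibana, parser)
opts = parser.parse_args(["--kibana-port", "5602"])
print(opts.kibana_port)  # 5602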
f8a9760039c4bac9e58c95fc2b05cc2c62a94f8b
moritzploss/tribology
tribology/process_slim_mapper.py
[ "MIT" ]
Python
__distance
<not_specific>
def __distance(c_1, c_2):
    """

    Get the absolute distance between two rgb color tuples in the 3D rgb space.

    Parameters
    ----------
    c_1: tuple
        First set of rgb values.
    c_2: tuple
        Second set of rgb values.

    Returns
    -------
    dist: float
        Absolute distance between color tuples.

    """
    (r_1, g_1, b_1) = c_1
    (r_2, g_2, b_2) = c_2
    dist = math.sqrt((r_1 - r_2) ** 2 + (g_1 - g_2) ** 2 + (b_1 - b_2) ** 2)
    return dist
Get the absolute distance between two rgb color tuples in the 3D rgb space.

Parameters
----------
c_1: tuple
    First set of rgb values.
c_2: tuple
    Second set of rgb values.

Returns
-------
dist: float
    Absolute distance between color tuples.
Get the absolute distance between two rgb color tuples in the 3D rgb space.

Parameters
c_1: tuple
    First set of rgb values.
c_2: tuple
    Second set of rgb values.

Returns
dist: float
    Absolute distance between color tuples.
[ "Get", "the", "absolute", "distance", "between", "two", "rgb", "color", "tuples", "in", "the", "3D", "rgb", "space", ".", "Parameters", "tuple", "First", "set", "of", "rgb", "values", ".", "c_2", ":", "tuple", "Second", "set", "of", "rgb", "values", ".", "Returns", "float", "Absolute", "distance", "between", "color", "tuples", "." ]
def __distance(c_1, c_2):
    (r_1, g_1, b_1) = c_1
    (r_2, g_2, b_2) = c_2
    dist = math.sqrt((r_1 - r_2) ** 2 + (g_1 - g_2) ** 2 + (b_1 - b_2) ** 2)
    return dist
[ "def", "__distance", "(", "c_1", ",", "c_2", ")", ":", "(", "r_1", ",", "g_1", ",", "b_1", ")", "=", "c_1", "(", "r_2", ",", "g_2", ",", "b_2", ")", "=", "c_2", "dist", "=", "math", ".", "sqrt", "(", "(", "r_1", "-", "r_2", ")", "**", "2", "+", "(", "g_1", "-", "g_2", ")", "**", "2", "+", "(", "b_1", "-", "b_2", ")", "**", "2", ")", "return", "dist" ]
Get the absolute distance between two rgb color tuples in the 3D rgb space.
[ "Get", "the", "absolute", "distance", "between", "two", "rgb", "color", "tuples", "in", "the", "3D", "rgb", "space", "." ]
[ "\"\"\"\n\n Get the absolute distance between two rgb color tuples in the 3D rgb space.\n\n Parameters\n ----------\n c_1: tuple\n First set of rgb values.\n c_2: tuple\n Second set of rgb values.\n\n Returns\n -------\n dist: float\n Absolute distance between color tuples.\n\n \"\"\"" ]
[ { "param": "c_1", "type": null }, { "param": "c_2", "type": null } ]
{ "returns": [], "raises": [], "params": [ { "identifier": "c_1", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null }, { "identifier": "c_2", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
import math

def __distance(c_1, c_2):
    (r_1, g_1, b_1) = c_1
    (r_2, g_2, b_2) = c_2
    dist = math.sqrt((r_1 - r_2) ** 2 + (g_1 - g_2) ** 2 + (b_1 - b_2) ** 2)
    return dist
619
805
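A quick check with the two extreme colors:

white = (255, 255, 255)
black = (0, 0, 0)
print(__distance(white, black))  # ≈ 441.67, i.e. 255 * sqrt(3)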
9f89f446c3196f5daaaaaa3e95f6c7e5770232d1
HumanCompatibleAI/population-irl
pirl/utils.py
[ "MIT" ]
Python
map_nested_dict
<not_specific>
def map_nested_dict(ob, func, init=[], level=1):
    '''Recurse to level depth in a nested mapping, applying func.'''
    if len(init) < level:
        assert isinstance(ob, collections.Mapping)
        return {k: map_nested_dict(v, func, init + [k], level=level)
                for k, v in ob.items()}
    else:
        return func(ob, init)
Recurse to level depth in a nested mapping, applying func.
Recurse to level depth in a nested mapping, applying func.
[ "Recurse", "to", "level", "depth", "in", "a", "nested", "mapping", "applying", "func", "." ]
def map_nested_dict(ob, func, init=[], level=1):
    if len(init) < level:
        assert isinstance(ob, collections.Mapping)
        return {k: map_nested_dict(v, func, init + [k], level=level)
                for k, v in ob.items()}
    else:
        return func(ob, init)
[ "def", "map_nested_dict", "(", "ob", ",", "func", ",", "init", "=", "[", "]", ",", "level", "=", "1", ")", ":", "if", "len", "(", "init", ")", "<", "level", ":", "assert", "isinstance", "(", "ob", ",", "collections", ".", "Mapping", ")", "return", "{", "k", ":", "map_nested_dict", "(", "v", ",", "func", ",", "init", "+", "[", "k", "]", ",", "level", "=", "level", ")", "for", "k", ",", "v", "in", "ob", ".", "items", "(", ")", "}", "else", ":", "return", "func", "(", "ob", ",", "init", ")" ]
Recurse to level depth in a nested mapping, applying func.
[ "Recurse", "to", "level", "depth", "in", "a", "nested", "mapping", "applying", "func", "." ]
[ "'''Recurse to level depth in a nested mapping, applying func.'''" ]
[ { "param": "ob", "type": null }, { "param": "func", "type": null }, { "param": "init", "type": null }, { "param": "level", "type": null } ]
{ "returns": [], "raises": [], "params": [ { "identifier": "ob", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null }, { "identifier": "func", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null }, { "identifier": "init", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null }, { "identifier": "level", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
import collections

def map_nested_dict(ob, func, init=[], level=1):
    if len(init) < level:
        assert isinstance(ob, collections.Mapping)
        return {k: map_nested_dict(v, func, init + [k], level=level)
                for k, v in ob.items()}
    else:
        return func(ob, init)
620
703
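A usage sketch; note that `collections.Mapping` moved to `collections.abc.Mapping` in Python 3.10, so the record's assertion needs that spelling on newer interpreters:

scores = {"en": {"train": [1, 2], "test": [3]},
          "de": {"train": [4], "test": [5, 6]}}
totals = map_nested_dict(scores, lambda ob, keys: sum(ob), level=2)
print(totals)  # {'en': {'train': 3, 'test': 3}, 'de': {'train': 4, 'test': 11}}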
cd0f8a4c1fccb2c3b1254e9dc423deb66ecc4c41
Packer-Lab/packerlabimaging
src/packerlabimaging/utils/utils.py
[ "MIT" ]
Python
myround
<not_specific>
def myround(x, base=5):
    '''allow rounding to nearest base number for
    use with multiplane stack slicing'''
    return base * round(x / base)
allow rounding to nearest base number for use with multiplane stack slicing
allow rounding to nearest base number for use with multiplane stack slicing
[ "allow", "rounding", "to", "nearest", "base", "number", "for", "use", "with", "multiplane", "stack", "slicing" ]
def myround(x, base=5):
    return base * round(x / base)
[ "def", "myround", "(", "x", ",", "base", "=", "5", ")", ":", "return", "base", "*", "round", "(", "x", "/", "base", ")" ]
allow rounding to nearest base number for use with multiplane stack slicing
[ "allow", "rounding", "to", "nearest", "base", "number", "for", "use", "with", "multiplane", "stack", "slicing" ]
[ "'''allow rounding to nearest base number for\n use with multiplane stack slicing'''" ]
[ { "param": "x", "type": null }, { "param": "base", "type": null } ]
{ "returns": [], "raises": [], "params": [ { "identifier": "x", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null }, { "identifier": "base", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
def myround(x, base=5):
    return base * round(x / base)
622
960
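Behavior at a glance (Python 3 `round` uses banker's rounding at exact midpoints):

print(myround(13))         # 15
print(myround(12))         # 10
print(myround(7, base=3))  # 6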
668c91e6240b1176455a52ba46772df983f8d934
geeksville/project-birdfeeder
bird_classify.py
[ "Apache-2.0" ]
Python
print_results
null
def print_results(start_time, last_time, end_time, results):
    """Print results to terminal for debugging."""
    inference_rate = ((end_time - start_time) * 1000)
    fps = (1.0/(end_time - last_time))
    print('\nInference: %.2f ms, FPS: %.2f fps' % (inference_rate, fps))
    for label, score in results:
        print(' %s, score=%.2f' %(label, score))
Print results to terminal for debugging.
Print results to terminal for debugging.
[ "Print", "results", "to", "terminal", "for", "debugging", "." ]
def print_results(start_time, last_time, end_time, results):
    inference_rate = ((end_time - start_time) * 1000)
    fps = (1.0/(end_time - last_time))
    print('\nInference: %.2f ms, FPS: %.2f fps' % (inference_rate, fps))
    for label, score in results:
        print(' %s, score=%.2f' %(label, score))
[ "def", "print_results", "(", "start_time", ",", "last_time", ",", "end_time", ",", "results", ")", ":", "inference_rate", "=", "(", "(", "end_time", "-", "start_time", ")", "*", "1000", ")", "fps", "=", "(", "1.0", "/", "(", "end_time", "-", "last_time", ")", ")", "print", "(", "'\\nInference: %.2f ms, FPS: %.2f fps'", "%", "(", "inference_rate", ",", "fps", ")", ")", "for", "label", ",", "score", "in", "results", ":", "print", "(", "' %s, score=%.2f'", "%", "(", "label", ",", "score", ")", ")" ]
Print results to terminal for debugging.
[ "Print", "results", "to", "terminal", "for", "debugging", "." ]
[ "\"\"\"Print results to terminal for debugging.\"\"\"" ]
[ { "param": "start_time", "type": null }, { "param": "last_time", "type": null }, { "param": "end_time", "type": null }, { "param": "results", "type": null } ]
{ "returns": [], "raises": [], "params": [ { "identifier": "start_time", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null }, { "identifier": "last_time", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null }, { "identifier": "end_time", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null }, { "identifier": "results", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
def print_results(start_time, last_time, end_time, results):
    inference_rate = ((end_time - start_time) * 1000)
    fps = (1.0/(end_time - last_time))
    print('\nInference: %.2f ms, FPS: %.2f fps' % (inference_rate, fps))
    for label, score in results:
        print(' %s, score=%.2f' %(label, score))
623
406
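A synthetic timing example with made-up labels:

import time

t0 = time.time()
print_results(t0, t0, t0 + 0.05, [("robin", 0.91), ("sparrow", 0.07)])
# Inference: 50.00 ms, FPS: 20.00 fps, followed by one line per label.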
ed3d6536a2de4484bb3b063ad8f86242f903aef6
bmd3k/tensorboard
tensorboard/backend/process_graph.py
[ "Apache-2.0" ]
Python
prepare_graph_for_ui
null
def prepare_graph_for_ui(
    graph, limit_attr_size=1024, large_attrs_key="_too_large_attrs"
):
    """Prepares (modifies in-place) the graph to be served to the front-end.

    For now, it supports filtering out attributes that are
    too large to be shown in the graph UI.

    Args:
      graph: The GraphDef proto message.
      limit_attr_size: Maximum allowed size in bytes, before the attribute
        is considered large. Default is 1024 (1KB). Must be > 0 or None.
        If None, there will be no filtering.
      large_attrs_key: The attribute key that will be used for storing attributes
        that are too large. Default is '_too_large_attrs'. Must be != None if
        `limit_attr_size` is != None.

    Raises:
      ValueError: If `large_attrs_key is None` while `limit_attr_size != None`.
      ValueError: If `limit_attr_size` is defined, but <= 0.
    """
    # TODO(@davidsoergel): detect whether a graph has been filtered already
    # (to a limit_attr_size <= what is requested here). If it is already
    # filtered, return immediately.

    # Check input for validity.
    if limit_attr_size is not None:
        if large_attrs_key is None:
            raise ValueError(
                "large_attrs_key must be != None when limit_attr_size"
                "!= None."
            )

        if limit_attr_size <= 0:
            raise ValueError(
                "limit_attr_size must be > 0, but is %d" % limit_attr_size
            )

    # Filter only if a limit size is defined.
    if limit_attr_size is not None:
        for node in graph.node:
            # Go through all the attributes and filter out ones bigger than the
            # limit.
            keys = list(node.attr.keys())
            for key in keys:
                size = node.attr[key].ByteSize()
                if size > limit_attr_size or size < 0:
                    del node.attr[key]
                    # Add the attribute key to the list of "too large" attributes.
                    # This is used in the info card in the graph UI to show the user
                    # that some attributes are too large to be shown.
                    node.attr[large_attrs_key].list.s.append(
                        key.encode("utf-8")
                    )
Prepares (modifies in-place) the graph to be served to the front-end.

For now, it supports filtering out attributes that are
too large to be shown in the graph UI.

Args:
  graph: The GraphDef proto message.
  limit_attr_size: Maximum allowed size in bytes, before the attribute
    is considered large. Default is 1024 (1KB). Must be > 0 or None.
    If None, there will be no filtering.
  large_attrs_key: The attribute key that will be used for storing attributes
    that are too large. Default is '_too_large_attrs'. Must be != None if
    `limit_attr_size` is != None.

Raises:
  ValueError: If `large_attrs_key is None` while `limit_attr_size != None`.
  ValueError: If `limit_attr_size` is defined, but <= 0.
Prepares (modifies in-place) the graph to be served to the front-end. For now, it supports filtering out attributes that are too large to be shown in the graph UI.
[ "Prepares", "(", "modifies", "in", "-", "place", ")", "the", "graph", "to", "be", "served", "to", "the", "front", "-", "end", ".", "For", "now", "it", "supports", "filtering", "out", "attributes", "that", "are", "too", "large", "to", "be", "shown", "in", "the", "graph", "UI", "." ]
def prepare_graph_for_ui(
    graph, limit_attr_size=1024, large_attrs_key="_too_large_attrs"
):
    if limit_attr_size is not None:
        if large_attrs_key is None:
            raise ValueError(
                "large_attrs_key must be != None when limit_attr_size"
                "!= None."
            )

        if limit_attr_size <= 0:
            raise ValueError(
                "limit_attr_size must be > 0, but is %d" % limit_attr_size
            )

    if limit_attr_size is not None:
        for node in graph.node:
            keys = list(node.attr.keys())
            for key in keys:
                size = node.attr[key].ByteSize()
                if size > limit_attr_size or size < 0:
                    del node.attr[key]
                    node.attr[large_attrs_key].list.s.append(
                        key.encode("utf-8")
                    )
[ "def", "prepare_graph_for_ui", "(", "graph", ",", "limit_attr_size", "=", "1024", ",", "large_attrs_key", "=", "\"_too_large_attrs\"", ")", ":", "if", "limit_attr_size", "is", "not", "None", ":", "if", "large_attrs_key", "is", "None", ":", "raise", "ValueError", "(", "\"large_attrs_key must be != None when limit_attr_size\"", "\"!= None.\"", ")", "if", "limit_attr_size", "<=", "0", ":", "raise", "ValueError", "(", "\"limit_attr_size must be > 0, but is %d\"", "%", "limit_attr_size", ")", "if", "limit_attr_size", "is", "not", "None", ":", "for", "node", "in", "graph", ".", "node", ":", "keys", "=", "list", "(", "node", ".", "attr", ".", "keys", "(", ")", ")", "for", "key", "in", "keys", ":", "size", "=", "node", ".", "attr", "[", "key", "]", ".", "ByteSize", "(", ")", "if", "size", ">", "limit_attr_size", "or", "size", "<", "0", ":", "del", "node", ".", "attr", "[", "key", "]", "node", ".", "attr", "[", "large_attrs_key", "]", ".", "list", ".", "s", ".", "append", "(", "key", ".", "encode", "(", "\"utf-8\"", ")", ")" ]
Prepares (modifies in-place) the graph to be served to the front-end.
[ "Prepares", "(", "modifies", "in", "-", "place", ")", "the", "graph", "to", "be", "served", "to", "the", "front", "-", "end", "." ]
[ "\"\"\"Prepares (modifies in-place) the graph to be served to the front-end.\n\n For now, it supports filtering out attributes that are\n too large to be shown in the graph UI.\n\n Args:\n graph: The GraphDef proto message.\n limit_attr_size: Maximum allowed size in bytes, before the attribute\n is considered large. Default is 1024 (1KB). Must be > 0 or None.\n If None, there will be no filtering.\n large_attrs_key: The attribute key that will be used for storing attributes\n that are too large. Default is '_too_large_attrs'. Must be != None if\n `limit_attr_size` is != None.\n\n Raises:\n ValueError: If `large_attrs_key is None` while `limit_attr_size != None`.\n ValueError: If `limit_attr_size` is defined, but <= 0.\n \"\"\"", "# TODO(@davidsoergel): detect whether a graph has been filtered already", "# (to a limit_attr_size <= what is requested here). If it is already", "# filtered, return immediately.", "# Check input for validity.", "# Filter only if a limit size is defined.", "# Go through all the attributes and filter out ones bigger than the", "# limit.", "# Add the attribute key to the list of \"too large\" attributes.", "# This is used in the info card in the graph UI to show the user", "# that some attributes are too large to be shown." ]
[ { "param": "graph", "type": null }, { "param": "limit_attr_size", "type": null }, { "param": "large_attrs_key", "type": null } ]
{ "returns": [], "raises": [ { "docstring": "If `large_attrs_key is None` while `limit_attr_size != None`.", "docstring_tokens": [ "If", "`", "large_attrs_key", "is", "None", "`", "while", "`", "limit_attr_size", "!", "=", "None", "`", "." ], "type": "ValueError" }, { "docstring": "If `limit_attr_size` is defined, but <= 0.", "docstring_tokens": [ "If", "`", "limit_attr_size", "`", "is", "defined", "but", "<", "=", "0", "." ], "type": "ValueError" } ], "params": [ { "identifier": "graph", "type": null, "docstring": "The GraphDef proto message.", "docstring_tokens": [ "The", "GraphDef", "proto", "message", "." ], "default": null, "is_optional": null }, { "identifier": "limit_attr_size", "type": null, "docstring": "Maximum allowed size in bytes, before the attribute\nis considered large.", "docstring_tokens": [ "Maximum", "allowed", "size", "in", "bytes", "before", "the", "attribute", "is", "considered", "large", "." ], "default": null, "is_optional": null }, { "identifier": "large_attrs_key", "type": null, "docstring": "The attribute key that will be used for storing attributes\nthat are too large. Default is '_too_large_attrs'. Must be != None if\n`limit_attr_size` is != None.", "docstring_tokens": [ "The", "attribute", "key", "that", "will", "be", "used", "for", "storing", "attributes", "that", "are", "too", "large", ".", "Default", "is", "'", "_too_large_attrs", "'", ".", "Must", "be", "!", "=", "None", "if", "`", "limit_attr_size", "`", "is", "!", "=", "None", "." ], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
def prepare_graph_for_ui(
    graph, limit_attr_size=1024, large_attrs_key="_too_large_attrs"
):
    if limit_attr_size is not None:
        if large_attrs_key is None:
            raise ValueError(
                "large_attrs_key must be != None when limit_attr_size"
                "!= None."
            )

        if limit_attr_size <= 0:
            raise ValueError(
                "limit_attr_size must be > 0, but is %d" % limit_attr_size
            )

    if limit_attr_size is not None:
        for node in graph.node:
            keys = list(node.attr.keys())
            for key in keys:
                size = node.attr[key].ByteSize()
                if size > limit_attr_size or size < 0:
                    del node.attr[key]
                    node.attr[large_attrs_key].list.s.append(
                        key.encode("utf-8")
                    )
624
1,005
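Building a real `GraphDef` needs the TensorFlow protos, so this sketch only exercises the argument validation paths:

try:
    prepare_graph_for_ui(graph=None, limit_attr_size=0)
except ValueError as exc:
    print(exc)  # limit_attr_size must be > 0, but is 0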
553908da16016ef8e1782cf4dcdab3c6f9900c28
matheharry/MicroPython-For-micro-bit
Part_7_Unittest/0014_escape_room/FileManager.py
[ "Apache-2.0" ]
Python
clear_inventory_file
null
def clear_inventory_file():
    """
    Method to clear inventory file after winning a game
    """
    try:
        with open('inventory', 'w') as file:
            file.write('')
    except OSError:
        pass
Method to clear inventory file after winning a game
Method to clear inventory file after winning a game
[ "Method", "to", "clear", "inventory", "file", "after", "winning", "a", "game" ]
def clear_inventory_file():
    try:
        with open('inventory', 'w') as file:
            file.write('')
    except OSError:
        pass
[ "def", "clear_inventory_file", "(", ")", ":", "try", ":", "with", "open", "(", "'inventory'", ",", "'w'", ")", "as", "file", ":", "file", ".", "write", "(", "''", ")", "except", "OSError", ":", "pass" ]
Method to clear inventory file after winning a game
[ "Method", "to", "clear", "inventory", "file", "after", "winning", "a", "game" ]
[ "\"\"\"\n Method to clear inventory file after winning a game\n \"\"\"" ]
[]
{ "returns": [], "raises": [], "params": [], "outlier_params": [], "others": [] }
def clear_inventory_file():
    try:
        with open('inventory', 'w') as file:
            file.write('')
    except OSError:
        pass
625
883
d676cc0df4a8fc2232e81a14ab16cba758b87cde
mboudet/python-chado
chakin/commands/feature/load_go.py
[ "MIT" ]
Python
cli
<not_specific>
def cli(ctx, input, organism_id, analysis_id, query_type="polypeptide", match_on_name=False, name_column=2, go_column=5, re_name="", skip_missing=False):
    """Load GO annotation from a tabular file

Output:

    Number of inserted GO terms
    """
    return ctx.gi.feature.load_go(input, organism_id, analysis_id, query_type=query_type, match_on_name=match_on_name, name_column=name_column, go_column=go_column, re_name=re_name, skip_missing=skip_missing)
Load GO annotation from a tabular file

Output:

    Number of inserted GO terms
Load GO annotation from a tabular file

Output:
    Number of inserted GO terms
[ "Load", "GO", "annotation", "from", "a", "tabular", "file", "Output", ".", "Number", "of", "inserted", "GO", "terms" ]
def cli(ctx, input, organism_id, analysis_id, query_type="polypeptide", match_on_name=False, name_column=2, go_column=5, re_name="", skip_missing=False):
    return ctx.gi.feature.load_go(input, organism_id, analysis_id, query_type=query_type, match_on_name=match_on_name, name_column=name_column, go_column=go_column, re_name=re_name, skip_missing=skip_missing)
[ "def", "cli", "(", "ctx", ",", "input", ",", "organism_id", ",", "analysis_id", ",", "query_type", "=", "\"polypeptide\"", ",", "match_on_name", "=", "False", ",", "name_column", "=", "2", ",", "go_column", "=", "5", ",", "re_name", "=", "\"\"", ",", "skip_missing", "=", "False", ")", ":", "return", "ctx", ".", "gi", ".", "feature", ".", "load_go", "(", "input", ",", "organism_id", ",", "analysis_id", ",", "query_type", "=", "query_type", ",", "match_on_name", "=", "match_on_name", ",", "name_column", "=", "name_column", ",", "go_column", "=", "go_column", ",", "re_name", "=", "re_name", ",", "skip_missing", "=", "skip_missing", ")" ]
Load GO annotation from a tabular file Output:
[ "Load", "GO", "annotation", "from", "a", "tabular", "file", "Output", ":" ]
[ "\"\"\"Load GO annotation from a tabular file\n\nOutput:\n\n Number of inserted GO terms\n \"\"\"" ]
[ { "param": "ctx", "type": null }, { "param": "input", "type": null }, { "param": "organism_id", "type": null }, { "param": "analysis_id", "type": null }, { "param": "query_type", "type": null }, { "param": "match_on_name", "type": null }, { "param": "name_column", "type": null }, { "param": "go_column", "type": null }, { "param": "re_name", "type": null }, { "param": "skip_missing", "type": null } ]
{ "returns": [], "raises": [], "params": [ { "identifier": "ctx", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null }, { "identifier": "input", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null }, { "identifier": "organism_id", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null }, { "identifier": "analysis_id", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null }, { "identifier": "query_type", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null }, { "identifier": "match_on_name", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null }, { "identifier": "name_column", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null }, { "identifier": "go_column", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null }, { "identifier": "re_name", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null }, { "identifier": "skip_missing", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
def cli(ctx, input, organism_id, analysis_id, query_type="polypeptide", match_on_name=False, name_column=2, go_column=5, re_name="", skip_missing=False):
    return ctx.gi.feature.load_go(input, organism_id, analysis_id, query_type=query_type, match_on_name=match_on_name, name_column=name_column, go_column=go_column, re_name=re_name, skip_missing=skip_missing)
627
947
bb0f89b12c174fe0dd6d5d2e22aee3c3365cc7bf
Pyabecedarian/Algorithms-and-Data-Structures-using-Python
Stage_3/Task12_Recursion_Backtracking_Divide&Conquer/eight_queen_puzzle.py
[ "MIT" ]
Python
conflict
bool
def conflict(next_x: int, s: tuple) -> bool:
    """Return a boolean that defines the conflict condition of the next queen's position"""
    next_i = len(s)
    for i in range(next_i):
        if abs(s[i] - next_x) in (0, next_i - i):
            return True
    return False
Return a boolean that defines the conflict condition of the next queen's position
Return a boolean that defines the conflict condition of the next queen's position
[ "Return", "a", "boolean", "that", "defines", "the", "conflict", "condition", "of", "the", "next", "queen", "'", "s", "position" ]
def conflict(next_x: int, s: tuple) -> bool:
    next_i = len(s)
    for i in range(next_i):
        if abs(s[i] - next_x) in (0, next_i - i):
            return True
    return False
[ "def", "conflict", "(", "next_x", ":", "int", ",", "s", ":", "tuple", ")", "->", "bool", ":", "next_i", "=", "len", "(", "s", ")", "for", "i", "in", "range", "(", "next_i", ")", ":", "if", "abs", "(", "s", "[", "i", "]", "-", "next_x", ")", "in", "(", "0", ",", "next_i", "-", "i", ")", ":", "return", "True", "else", ":", "return", "False" ]
Return a boolean that defines the conflict condition of the next queen's position
[ "Return", "a", "boolean", "that", "defines", "the", "conflict", "condition", "of", "the", "next", "queen", "'", "s", "position" ]
[ "\"\"\"Return a boolean that defines the conflict condition of the next queen's position\"\"\"" ]
[ { "param": "next_x", "type": "int" }, { "param": "s", "type": "tuple" } ]
{ "returns": [], "raises": [], "params": [ { "identifier": "next_x", "type": "int", "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null }, { "identifier": "s", "type": "tuple", "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
def conflict(next_x: int, s: tuple) -> bool:
    next_i = len(s)
    for i in range(next_i):
        if abs(s[i] - next_x) in (0, next_i - i):
            return True
    return False
628
165
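With the early-return `else` removed as above (the original only ever tested the first placed queen), a quick check with queens in columns 0 and 2 of rows 0 and 1:

state = (0, 2)
print(conflict(1, state))  # True: diagonal attack from the queen at (1, 2)
print(conflict(4, state))  # False: column 4 is safe in row 2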
59529111b585bf724573b30b03bbf9f30dd0b766
cffbots/ESMValTool
esmvaltool/diag_scripts/climate_metrics/feedback_parameters.py
[ "Apache-2.0" ]
Python
_check_array_shapes
null
def _check_array_shapes(list_of_arrays, var):
    """Check if list of arrays has identical shapes."""
    shapes = {a.shape for a in list_of_arrays}
    if len(shapes) > 1:
        raise ValueError(
            f"Expected cubes with identical shapes for multi-model mean "
            f"calculation of '{var}', got {shapes}")
Check if list of arrays has identical shapes.
Check if list of arrays has identical shapes.
[ "Check", "if", "list", "of", "arrays", "has", "identical", "shapes", "." ]
def _check_array_shapes(list_of_arrays, var):
    shapes = {a.shape for a in list_of_arrays}
    if len(shapes) > 1:
        raise ValueError(
            f"Expected cubes with identical shapes for multi-model mean "
            f"calculation of '{var}', got {shapes}")
[ "def", "_check_array_shapes", "(", "list_of_arrays", ",", "var", ")", ":", "shapes", "=", "{", "a", ".", "shape", "for", "a", "in", "list_of_arrays", "}", "if", "len", "(", "shapes", ")", ">", "1", ":", "raise", "ValueError", "(", "f\"Expected cubes with identical shapes for multi-model mean \"", "f\"calculation of '{var}', got {shapes}\"", ")" ]
Check if list of arrays has identical shapes.
[ "Check", "if", "list", "of", "arrays", "has", "identical", "shapes", "." ]
[ "\"\"\"Check if list of arrays has identical shapes.\"\"\"" ]
[ { "param": "list_of_arrays", "type": null }, { "param": "var", "type": null } ]
{ "returns": [], "raises": [], "params": [ { "identifier": "list_of_arrays", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null }, { "identifier": "var", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
def _check_array_shapes(list_of_arrays, var):
    shapes = {a.shape for a in list_of_arrays}
    if len(shapes) > 1:
        raise ValueError(
            f"Expected cubes with identical shapes for multi-model mean "
            f"calculation of '{var}', got {shapes}")
630
228
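Works with anything exposing a `.shape` attribute; here with NumPy arrays and a made-up variable name:

import numpy as np

_check_array_shapes([np.zeros((2, 3)), np.ones((2, 3))], "tas")  # passes silently
_check_array_shapes([np.zeros(2), np.zeros(3)], "tas")           # raises ValueError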
01f6fe315def9cd9c6603fb00279f443486e9167
jrmwng/dldt
model-optimizer/extensions/front/kaldi/replace_lstm_node_pattern.py
[ "Apache-2.0" ]
Python
unique_id
str
def unique_id(prefix: str = 'id') -> str:
    """
    Generates a unique id
    The optional string prefix can be specified.
    """
    index = len(unique_id.names)
    name = prefix
    while name in unique_id.names:
        name = '{}_{}'.format(prefix, index)
        index += 1
    unique_id.names.append(name)
    return name
Generates a unique id The optional string prefix can be specified.
Generates a unique id The optional string prefix can be specified.
[ "Generates", "a", "unique", "id", "The", "optional", "string", "prefix", "can", "be", "specified", "." ]
def unique_id(prefix: str = 'id') -> str:
    index = len(unique_id.names)
    name = prefix
    while name in unique_id.names:
        name = '{}_{}'.format(prefix, index)
        index += 1
    unique_id.names.append(name)
    return name
[ "def", "unique_id", "(", "prefix", ":", "str", "=", "'id'", ")", "->", "str", ":", "index", "=", "len", "(", "unique_id", ".", "names", ")", "name", "=", "prefix", "while", "name", "in", "unique_id", ".", "names", ":", "name", "=", "'{}_{}'", ".", "format", "(", "prefix", ",", "index", ")", "index", "+=", "1", "unique_id", ".", "names", ".", "append", "(", "name", ")", "return", "name" ]
Generates a unique id The optional string prefix can be specified.
[ "Generates", "a", "unique", "id", "The", "optional", "string", "prefix", "can", "be", "specified", "." ]
[ "\"\"\"\n Generates a unique id\n The optional string prefix can be specified.\n \"\"\"" ]
[ { "param": "prefix", "type": "str" } ]
{ "returns": [], "raises": [], "params": [ { "identifier": "prefix", "type": "str", "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
def unique_id(prefix: str = 'id') -> str:
    index = len(unique_id.names)
    name = prefix
    while name in unique_id.names:
        name = '{}_{}'.format(prefix, index)
        index += 1
    unique_id.names.append(name)
    return name
632
733
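The function keeps its state on a `names` attribute that the surrounding module initializes in the original source, so a standalone sketch has to set it up first:

unique_id.names = []      # initialized elsewhere in the original module
print(unique_id("lstm"))  # lstm
print(unique_id("lstm"))  # lstm_1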
bdb7d4e04ef2d9ef6d04fd494798d70b3ec85785
aarsanjani/wo
train_deploy.py
[ "Apache-2.0" ]
Python
parse_args
<not_specific>
def parse_args():
    """
    Parse arguments passed from the SageMaker API
    to the container
    """
    parser = argparse.ArgumentParser()

    # Hyperparameters sent by the client are passed as command-line arguments to the script
    parser.add_argument('--num_round', type=int, default=5)
    parser.add_argument('--max_depth', type=int, default=5)
    parser.add_argument('--eta', type=float, default=0.2)
    parser.add_argument('--objective', type=str, default='reg:squarederror')

    # Data directories
    parser.add_argument('--train', type=str, default=os.environ.get('SM_CHANNEL_TRAIN'))
    parser.add_argument('--test', type=str, default=os.environ.get('SM_CHANNEL_TEST'))

    # Model directory: we will use the default set by SageMaker, /opt/ml/model
    parser.add_argument('--model_dir', type=str, default=os.environ.get('SM_MODEL_DIR'))

    return parser.parse_known_args()
Parse arguments passed from the SageMaker API to the container
Parse arguments passed from the SageMaker API to the container
[ "Parse", "arguments", "passed", "from", "the", "SageMaker", "API", "to", "the", "container" ]
def parse_args():
    parser = argparse.ArgumentParser()
    parser.add_argument('--num_round', type=int, default=5)
    parser.add_argument('--max_depth', type=int, default=5)
    parser.add_argument('--eta', type=float, default=0.2)
    parser.add_argument('--objective', type=str, default='reg:squarederror')
    parser.add_argument('--train', type=str, default=os.environ.get('SM_CHANNEL_TRAIN'))
    parser.add_argument('--test', type=str, default=os.environ.get('SM_CHANNEL_TEST'))
    parser.add_argument('--model_dir', type=str, default=os.environ.get('SM_MODEL_DIR'))
    return parser.parse_known_args()
[ "def", "parse_args", "(", ")", ":", "parser", "=", "argparse", ".", "ArgumentParser", "(", ")", "parser", ".", "add_argument", "(", "'--num_round'", ",", "type", "=", "int", ",", "default", "=", "5", ")", "parser", ".", "add_argument", "(", "'--max_depth'", ",", "type", "=", "int", ",", "default", "=", "5", ")", "parser", ".", "add_argument", "(", "'--eta'", ",", "type", "=", "float", ",", "default", "=", "0.2", ")", "parser", ".", "add_argument", "(", "'--objective'", ",", "type", "=", "str", ",", "default", "=", "'reg:squarederror'", ")", "parser", ".", "add_argument", "(", "'--train'", ",", "type", "=", "str", ",", "default", "=", "os", ".", "environ", ".", "get", "(", "'SM_CHANNEL_TRAIN'", ")", ")", "parser", ".", "add_argument", "(", "'--test'", ",", "type", "=", "str", ",", "default", "=", "os", ".", "environ", ".", "get", "(", "'SM_CHANNEL_TEST'", ")", ")", "parser", ".", "add_argument", "(", "'--model_dir'", ",", "type", "=", "str", ",", "default", "=", "os", ".", "environ", ".", "get", "(", "'SM_MODEL_DIR'", ")", ")", "return", "parser", ".", "parse_known_args", "(", ")" ]
Parse arguments passed from the SageMaker API to the container
[ "Parse", "arguments", "passed", "from", "the", "SageMaker", "API", "to", "the", "container" ]
[ "\"\"\"\n Parse arguments passed from the SageMaker API\n to the container\n \"\"\"", "# Hyperparameters sent by the client are passed as command-line arguments to the script", "# Data directories", "# Model directory: we will use the default set by SageMaker, /opt/ml/model" ]
[]
{ "returns": [], "raises": [], "params": [], "outlier_params": [], "others": [] }
import argparse import os def parse_args(): parser = argparse.ArgumentParser() parser.add_argument('--num_round', type=int, default=5) parser.add_argument('--max_depth', type=int, default=5) parser.add_argument('--eta', type=float, default=0.2) parser.add_argument('--objective', type=str, default='reg:squarederror') parser.add_argument('--train', type=str, default=os.environ.get('SM_CHANNEL_TRAIN')) parser.add_argument('--test', type=str, default=os.environ.get('SM_CHANNEL_TEST')) parser.add_argument('--model_dir', type=str, default=os.environ.get('SM_MODEL_DIR')) return parser.parse_known_args()
633
810
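As a usage sketch for the parse_args record above: parser.parse_known_args() returns a (namespace, unknown_args) pair, and the data/model defaults are read from SageMaker environment variables. The variable values below are assumptions for local illustration only; a real training job has them set by the platform:

    import os

    os.environ['SM_CHANNEL_TRAIN'] = '/opt/ml/input/data/train'  # assumed value
    os.environ['SM_CHANNEL_TEST'] = '/opt/ml/input/data/test'    # assumed value
    os.environ['SM_MODEL_DIR'] = '/opt/ml/model'                 # assumed value

    args, unknown = parse_args()
    print(args.num_round, args.eta, args.train)  # 5 0.2 /opt/ml/input/data/train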
2334c02ffe6f98125824947fe3815d103e62fb57
wjn1996/Mathematical-Knowledge-Entity-Recognition
data.py
[ "Apache-2.0" ]
Python
read_corpus
<not_specific>
def read_corpus(corpus_path): """ read corpus and return the list of samples :param corpus_path: :return: data """ data = [] with open(corpus_path, encoding='utf-8') as fr: lines = fr.readlines() sent_, tag_ = [], [] for line in lines: if line != '\n': # [char, label] = line.split(' ') [char, label] = line.replace('\n','').split(' ') sent_.append(char) tag_.append(label) else: data.append((sent_, tag_)) sent_, tag_ = [], [] return data
read corpus and return the list of samples :param corpus_path: :return: data
read corpus and return the list of samples
[ "read", "corpus", "and", "return", "the", "list", "of", "samples" ]
def read_corpus(corpus_path): data = [] with open(corpus_path, encoding='utf-8') as fr: lines = fr.readlines() sent_, tag_ = [], [] for line in lines: if line != '\n': [char, label] = line.replace('\n','').split(' ') sent_.append(char) tag_.append(label) else: data.append((sent_, tag_)) sent_, tag_ = [], [] return data
[ "def", "read_corpus", "(", "corpus_path", ")", ":", "data", "=", "[", "]", "with", "open", "(", "corpus_path", ",", "encoding", "=", "'utf-8'", ")", "as", "fr", ":", "lines", "=", "fr", ".", "readlines", "(", ")", "sent_", ",", "tag_", "=", "[", "]", ",", "[", "]", "for", "line", "in", "lines", ":", "if", "line", "!=", "'\\n'", ":", "[", "char", ",", "label", "]", "=", "line", ".", "replace", "(", "'\\n'", ",", "''", ")", ".", "split", "(", "' '", ")", "sent_", ".", "append", "(", "char", ")", "tag_", ".", "append", "(", "label", ")", "else", ":", "data", ".", "append", "(", "(", "sent_", ",", "tag_", ")", ")", "sent_", ",", "tag_", "=", "[", "]", ",", "[", "]", "return", "data" ]
read corpus and return the list of samples
[ "read", "corpus", "and", "return", "the", "list", "of", "samples" ]
[ "\"\"\"\n read corpus and return the list of samples\n :param corpus_path:\n :return: data\n \"\"\"", "# [char, label] = line.split(' ')" ]
[ { "param": "corpus_path", "type": null } ]
{ "returns": [ { "docstring": null, "docstring_tokens": [ "None" ], "type": null } ], "raises": [], "params": [ { "identifier": "corpus_path", "type": null, "docstring": null, "docstring_tokens": [ "None" ], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
def read_corpus(corpus_path): data = [] with open(corpus_path, encoding='utf-8') as fr: lines = fr.readlines() sent_, tag_ = [], [] for line in lines: if line != '\n': [char, label] = line.replace('\n','').split(' ') sent_.append(char) tag_.append(label) else: data.append((sent_, tag_)) sent_, tag_ = [], [] return data
634
837
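read_corpus above expects one 'char label' pair per line, with a blank line closing each sentence. A small self-contained sketch using a temporary file as a stand-in for the real corpus (the B-MATH/I-MATH tags are illustrative assumptions):

    import tempfile

    sample = '数 B-MATH\n学 I-MATH\n\n好 O\n\n'
    with tempfile.NamedTemporaryFile('w', suffix='.txt', delete=False, encoding='utf-8') as f:
        f.write(sample)
        path = f.name

    print(read_corpus(path))
    # [(['数', '学'], ['B-MATH', 'I-MATH']), (['好'], ['O'])]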
ba7ecaf4a72c09b195ee345b22ae4f2d3cd96da3
Davinci137/doubleTrouble
helpers.py
[ "MIT" ]
Python
hash_file
<not_specific>
def hash_file(filename, hashtype): """ This function returns a defined hash of a file :param filename: full path to file :type filename: string :param hashtype: The hashing function which should be used. It has to be defined in hashlib :type hashtype: String :return type: String representation of a hex number """ # make a hash object. h = eval('hashlib.{}()'.format(hashtype)) # open file for reading in binary mode with open(filename,'rb') as file: # loop till the end of the file chunk = 0 while chunk != b'': # read only 1024 bytes at a time chunk = file.read(1024) h.update(chunk) # return the hex representation of digest return h.hexdigest()
This function returns a defined hash of a file :param filename: full path to file :type filename: string :param hashtype: The hashing function which should be used. It has to be defined in hashlib :type hashtype: String :return type: String representation of a hex number
This function returns a defined hash of a file
[ "This", "function", "returns", "a", "defined", "hash", "of", "a", "file" ]
def hash_file(filename, hashtype): h = eval('hashlib.{}()'.format(hashtype)) with open(filename,'rb') as file: chunk = 0 while chunk != b'': chunk = file.read(1024) h.update(chunk) return h.hexdigest()
[ "def", "hash_file", "(", "filename", ",", "hashtype", ")", ":", "h", "=", "eval", "(", "'hashlib.{}()'", ".", "format", "(", "hashtype", ")", ")", "with", "open", "(", "filename", ",", "'rb'", ")", "as", "file", ":", "chunk", "=", "0", "while", "chunk", "!=", "b''", ":", "chunk", "=", "file", ".", "read", "(", "1024", ")", "h", ".", "update", "(", "chunk", ")", "return", "h", ".", "hexdigest", "(", ")" ]
This function returns a defined hash of a file
[ "This", "function", "returns", "a", "defined", "hash", "of", "a", "file" ]
[ "\"\"\"\n This function returns a defined hash of a file\n \n :param filename: full path to file\n :type filename: string\n :param hashtype: The hashing function which should be used. It has to be defined in hashlib\n :type hashtype: String\n :return type: String representation of a hex number\n\n \"\"\"", "# make a hash object.", "# open file for reading in binary mode", "# loop till the end of the file", "# read only 1024 bytes at a time", "# return the hex representation of digest" ]
[ { "param": "filename", "type": null }, { "param": "hashtype", "type": null } ]
{ "returns": [ { "docstring": "String representation of a hex number", "docstring_tokens": [ "String", "representation", "of", "a", "hex", "number" ], "type": "type" } ], "raises": [], "params": [ { "identifier": "filename", "type": null, "docstring": "full path to file", "docstring_tokens": [ "full", "path", "to", "file" ], "default": null, "is_optional": null }, { "identifier": "hashtype", "type": null, "docstring": "The hashing function which should be used. It has to be defined in hashlib", "docstring_tokens": [ "The", "hashing", "function", "which", "should", "be", "used", ".", "It", "has", "to", "be", "defined", "in", "hashlib" ], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
def hash_file(filename, hashtype): h = eval('hashlib.{}()'.format(hashtype)) with open(filename,'rb') as file: chunk = 0 while chunk != b'': chunk = file.read(1024) h.update(chunk) return h.hexdigest()
635
367
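Note that hash_file above builds the hash object via eval, so hashtype must be exactly the name of a hashlib constructor. A short sketch; the filename is a placeholder, and hashlib.new is shown as the usual eval-free equivalent:

    import hashlib

    digest = hash_file('archive.tar.gz', 'sha256')  # placeholder path
    print(digest)

    h = hashlib.new('sha256')  # same effect as eval("hashlib.sha256()"), without eval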
6249fe999be6a10a303e99d753be43890fc4ae05
PaperDevil/pyconfigger
build/lib/file_worker/works.py
[ "MIT" ]
Python
worker
<not_specific>
def worker(worker_function: callable): """Wrapper function for handlers of different file types""" def wrapped(config: object, filename: str): """Populates config with values from files; the worker function is expected to return a dict.""" with open(filename) as file: fields = worker_function(file) config.config_data = fields return wrapped
Wrapper function for handlers of different file types
Wrapper function for handlers of different file types
[ "Wrapper", "function", "for", "handlers", "of", "different", "file", "types" ]
def worker(worker_function: callable): def wrapped(config: object, filename: str): with open(filename) as file: fields = worker_function(file) config.config_data = fields return wrapped
[ "def", "worker", "(", "worker_function", ":", "callable", ")", ":", "def", "wrapped", "(", "config", ":", "object", ",", "filename", ":", "str", ")", ":", "\"\"\"Populates config with values ​​from files, waiting returns dict.\"\"\"", "with", "open", "(", "filename", ")", "as", "file", ":", "fields", "=", "worker_function", "(", "file", ")", "config", ".", "config_data", "=", "fields", "return", "wrapped" ]
Wrapper function for handlers of different file types
[ "Wrapper", "function", "for", "handlers", "of", "different", "file", "types" ]
[ "\"\"\"Wrapper function for handlers of different file types\"\"\"", "\"\"\"Populates config with values ​​from files, waiting returns dict.\"\"\"" ]
[ { "param": "worker_function", "type": "callable" } ]
{ "returns": [], "raises": [], "params": [ { "identifier": "worker_function", "type": "callable", "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
def worker(worker_function: callable): def wrapped(config: object, filename: str): with open(filename) as file: fields = worker_function(file) config.config_data = fields return wrapped
636
905
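worker above is a decorator for type-specific file handlers: the decorated function receives an open file and must return a dict, which the wrapper stores on the config object. A minimal sketch; the Config class and the JSON handler are illustrative, not part of the record:

    import json

    class Config:  # illustrative stand-in for the library's config object
        config_data = None

    @worker
    def json_worker(file):
        return json.load(file)

    cfg = Config()
    json_worker(cfg, 'settings.json')  # placeholder filename
    print(cfg.config_data)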
cb9270e23a846ecef7af1315c6bd0c2604525867
symmetricapi/django-symmetric
symmetric/functions.py
[ "MIT" ]
Python
iso_8601_to_time
<not_specific>
def iso_8601_to_time(iso): """Parse an ISO 8601 time string into a datetime.time.""" if not iso: return None return datetime.datetime.strptime(iso, '%H:%M:%S').time()
Parse an ISO 8601 time string into a datetime.time.
Parse an ISO 8601 time string into a datetime.time.
[ "Parse", "an", "ISO", "8601", "time", "string", "into", "a", "datetime", ".", "time", "." ]
def iso_8601_to_time(iso): if not iso: return None return datetime.datetime.strptime(iso, '%H:%M:%S').time()
[ "def", "iso_8601_to_time", "(", "iso", ")", ":", "if", "not", "iso", ":", "return", "None", "return", "datetime", ".", "datetime", ".", "strptime", "(", "iso", ",", "'%H:%M:%S'", ")", ".", "time", "(", ")" ]
Parse an ISO 8601 time string into a datetime.time.
[ "Parse", "an", "ISO", "8601", "time", "string", "into", "a", "datetime", ".", "time", "." ]
[ "\"\"\"Parse an ISO 8601 time string into a datetime.time.\"\"\"" ]
[ { "param": "iso", "type": null } ]
{ "returns": [], "raises": [], "params": [ { "identifier": "iso", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
import datetime def iso_8601_to_time(iso): if not iso: return None return datetime.datetime.strptime(iso, '%H:%M:%S').time()
637
488
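For reference, the only accepted input format in the record above is '%H:%M:%S', and any falsy input short-circuits to None:

    print(iso_8601_to_time('13:45:07'))  # datetime.time(13, 45, 7)
    print(iso_8601_to_time(''))          # None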
4bc8fb8221fd475d960959dc0624e6631c80b8f4
marco-willi/camera-trap-classifier
camera_trap_classifier/training/utils.py
[ "MIT" ]
Python
find_the_best_id_in_log
<not_specific>
def find_the_best_id_in_log(log_file_path, metric, id='epoch', offset=-1): """ Returns the id of the best model """ if not os.path.exists(log_file_path): raise FileExistsError("File %s does not exist" % log_file_path) epoch_results = dict() with open(log_file_path, newline='') as logfile: reader = csv.reader(logfile, delimiter=',') for i, row in enumerate(reader): if i == 0: metric_col = row.index(metric) id_col = row.index(id) else: epoch_results[row[id_col]] = float(row[metric_col]) best_model_id = min(epoch_results, key=epoch_results.get) return best_model_id
Returns the id of the best model
Returns the id of the best model
[ "Returns", "the", "id", "of", "the", "best", "model" ]
def find_the_best_id_in_log(log_file_path, metric, id='epoch', offset=-1): if not os.path.exists(log_file_path): raise FileExistsError("File %s does not exist" % log_file_path) epoch_results = dict() with open(log_file_path, newline='') as logfile: reader = csv.reader(logfile, delimiter=',') for i, row in enumerate(reader): if i == 0: metric_col = row.index(metric) id_col = row.index(id) else: epoch_results[row[id_col]] = float(row[metric_col]) best_model_id = min(epoch_results, key=epoch_results.get) return best_model_id
[ "def", "find_the_best_id_in_log", "(", "log_file_path", ",", "metric", ",", "id", "=", "'epoch'", ",", "offset", "=", "-", "1", ")", ":", "if", "not", "os", ".", "path", ".", "exists", "(", "log_file_path", ")", ":", "raise", "FileExistsError", "(", "\"File %s does not exist\"", "%", "log_file_path", ")", "epoch_results", "=", "dict", "(", ")", "with", "open", "(", "log_file_path", ",", "newline", "=", "''", ")", "as", "logfile", ":", "reader", "=", "csv", ".", "reader", "(", "logfile", ",", "delimiter", "=", "','", ")", "for", "i", ",", "row", "in", "enumerate", "(", "reader", ")", ":", "if", "i", "==", "0", ":", "metric_col", "=", "row", ".", "index", "(", "metric", ")", "id_col", "=", "row", ".", "index", "(", "id", ")", "else", ":", "epoch_results", "[", "row", "[", "id_col", "]", "]", "=", "float", "(", "row", "[", "metric_col", "]", ")", "best_model_id", "=", "min", "(", "epoch_results", ",", "key", "=", "epoch_results", ".", "get", ")", "return", "best_model_id" ]
Returns the id of the best model
[ "Returns", "the", "id", "of", "the", "best", "model" ]
[ "\"\"\" Returns the id of the best model \"\"\"" ]
[ { "param": "log_file_path", "type": null }, { "param": "metric", "type": null }, { "param": "id", "type": null }, { "param": "offset", "type": null } ]
{ "returns": [], "raises": [], "params": [ { "identifier": "log_file_path", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null }, { "identifier": "metric", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null }, { "identifier": "id", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null }, { "identifier": "offset", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
import csv import os def find_the_best_id_in_log(log_file_path, metric, id='epoch', offset=-1): if not os.path.exists(log_file_path): raise FileExistsError("File %s does not exist" % log_file_path) epoch_results = dict() with open(log_file_path, newline='') as logfile: reader = csv.reader(logfile, delimiter=',') for i, row in enumerate(reader): if i == 0: metric_col = row.index(metric) id_col = row.index(id) else: epoch_results[row[id_col]] = float(row[metric_col]) best_model_id = min(epoch_results, key=epoch_results.get) return best_model_id
639
490
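Two behaviors of find_the_best_id_in_log worth noting: min(...) treats the metric as lower-is-better (loss-style), and the offset parameter is accepted but unused in this version. A sketch against an assumed Keras-CSVLogger-style file:

    # training.log (assumed layout):
    # epoch,loss,val_loss
    # 0,0.90,0.85
    # 1,0.60,0.70
    best = find_the_best_id_in_log('training.log', metric='val_loss')  # placeholder path
    print(best)  # '1' -- the epoch whose val_loss is smallest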
3eee8352a43db6720a85dc103b4efe43a6f286a9
PortSwigger/lightbulb-framework
libs/symautomata/pythondfa.py
[ "MIT" ]
Python
findpart
<not_specific>
def findpart(stateid, partitions): """Searches for the group that the state identifier belongs to. Args: stateid (int): The state identifier partitions (list): The list of the groups Returns: set: The group that the stateid belongs to. """ for group in partitions: if stateid in group: return frozenset(group) return frozenset(set( ))
Searches for the group that the state identifier belongs to. Args: stateid (int): The state identifier partitions (list): The list of the groups Returns: set: The group that the stateid belongs to.
Searches for the group that the state identifier belongs to.
[ "Searches", "for", "the", "group", "that", "the", "state", "identifier", "belongs", "to", "." ]
def findpart(stateid, partitions): for group in partitions: if stateid in group: return frozenset(group) return frozenset(set( ))
[ "def", "findpart", "(", "stateid", ",", "partitions", ")", ":", "for", "group", "in", "partitions", ":", "if", "stateid", "in", "group", ":", "return", "frozenset", "(", "group", ")", "return", "frozenset", "(", "set", "(", ")", ")" ]
Searches for the group that the state identifier belongs to.
[ "Searches", "for", "the", "group", "that", "the", "state", "identifier", "belongs", "to", "." ]
[ "\"\"\"Searches for the group that the state identifier\n belongs to.\n Args:\n stateid (int): The state identifier\n partitions (list): The list of the groups\n Returns:\n set: The group that the stateid belongs to.\n \"\"\"" ]
[ { "param": "stateid", "type": null }, { "param": "partitions", "type": null } ]
{ "returns": [ { "docstring": "The group that the stateid belongs to.", "docstring_tokens": [ "The", "group", "that", "the", "stateid", "belongs", "to", "." ], "type": "set" } ], "raises": [], "params": [ { "identifier": "stateid", "type": null, "docstring": "The state identifier", "docstring_tokens": [ "The", "state", "identifier" ], "default": null, "is_optional": false }, { "identifier": "partitions", "type": null, "docstring": "The list of the groups", "docstring_tokens": [ "The", "list", "of", "the", "groups" ], "default": null, "is_optional": false } ], "outlier_params": [], "others": [] }
def findpart(stateid, partitions): for group in partitions: if stateid in group: return frozenset(group) return frozenset(set( ))
640
103
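A quick sketch of findpart's two return paths, matching the code above:

    partitions = [{0, 1}, {2}, {3, 4}]
    print(findpart(2, partitions))  # frozenset({2})
    print(findpart(9, partitions))  # frozenset() -- empty when no group contains the state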
9c5eabdba61a67b959c3b2f1f349feab47eadcfd
CMCD1996/finance-honours
src/cmcd398-finance-honours-backup.py
[ "MIT" ]
Python
replace_nan
<not_specific>
def replace_nan(df, replacement_method): """ Replace/Remove nan values in a dataframe Args: df (dataframe): Pandas Dataframe replacement_method (int): Specify replacement methods : 0 - remove rows with nan values : 1 - remove columns with nan values : 2 - fill nan with column mean : 3 - fill nan with column median Returns: dataframe: Updated pandas dataframe """ nan_total = df.isnull().sum().sum() print('Number of nan values before processing: ', nan_total) if nan_total > 0: # Replace dataframe level nan (rows or columns) # Replacement methods (0: remove rows with nan values, medium, remove, none) if replacement_method == 0: df.dropna(axis=0, how='any', inplace=True) # Caution: Change to dataframe-columns.txt and features list required (Do not use) if replacement_method == 1: df.dropna(axis=1, how='any', inplace=True) # Replace column level nan for column in df.columns: if df[column].isnull().sum() > 0: if replacement_method == 2: df[column].fillna(df[column].mean(), inplace=True) elif replacement_method == 3: df[column].fillna(df[column].median(), inplace=True) nan_total = df.isnull().sum().sum() print('Number of nan values after processing: ', nan_total) return df
Replace/Remove nan values in a dataframe Args: df (dataframe): Pandas Dataframe replacement_method (int): Specify replacement methods : 0 - remove rows with nan values : 1 - remove columns with nan values : 2 - fill nan with column mean : 3 - fill nan with column median Returns: dataframe: Updated pandas dataframe
Replace/Remove nan values in a dataframe
[ "Replace", "/", "Remove", "nan", "values", "in", "a", "dataframe" ]
def replace_nan(df, replacement_method): nan_total = df.isnull().sum().sum() print('Number of nan values before processing: ', nan_total) if nan_total > 0: if replacement_method == 0: df.dropna(axis=0, how='any', inplace=True) if replacement_method == 1: df.dropna(axis=1, how='any', inplace=True) for column in df.columns: if df[column].isnull().sum() > 0: if replacement_method == 2: df[column].fillna(df[column].mean(), inplace=True) elif replacement_method == 3: df[column].fillna(df[column].median(), inplace=True) nan_total = df.isnull().sum().sum() print('Number of nan values after processing: ', nan_total) return df
[ "def", "replace_nan", "(", "df", ",", "replacement_method", ")", ":", "nan_total", "=", "df", ".", "isnull", "(", ")", ".", "sum", "(", ")", ".", "sum", "(", ")", "print", "(", "'Number of nan values before processing: '", ",", "nan_total", ")", "if", "nan_total", ">", "0", ":", "if", "replacement_method", "==", "0", ":", "df", ".", "dropna", "(", "axis", "=", "0", ",", "how", "=", "'any'", ",", "inplace", "=", "True", ")", "if", "replacement_method", "==", "1", ":", "df", ".", "dropna", "(", "axis", "=", "1", ",", "how", "=", "'any'", ",", "inplace", "=", "True", ")", "for", "column", "in", "df", ".", "columns", ":", "if", "df", "[", "column", "]", ".", "isnull", "(", ")", ".", "sum", "(", ")", ">", "0", ":", "if", "replacement_method", "==", "2", ":", "df", "[", "column", "]", ".", "fillna", "(", "df", "[", "column", "]", ".", "mean", "(", ")", ",", "inplace", "=", "True", ")", "elif", "replacement_method", "==", "3", ":", "df", "[", "column", "]", ".", "fillna", "(", "df", "[", "column", "]", ".", "median", "(", ")", ",", "inplace", "=", "True", ")", "nan_total", "=", "df", ".", "isnull", "(", ")", ".", "sum", "(", ")", ".", "sum", "(", ")", "print", "(", "'Number of nan values after processing: '", ",", "nan_total", ")", "return", "df" ]
Replace/Remove nan values in a dataframe
[ "Replace", "/", "Remove", "nan", "values", "in", "a", "dataframe" ]
[ "\"\"\" Replace/Remove nan values in a dataframe\n\n Args:\n df (dataframe): Pandas Dataframe\n replacement_method (int): Specify replacement methods\n : 0 - remove rows with nan values\n : 1 - remove columns with nan values\n : 2 - fill nan with column mean\n : 3 - fill nan with column median\n Returns:\n dataframe: Updated pandas dataframe\n \"\"\"", "# Replace dataframe level nan (rows or columns)", "# Replacement methods (0: remove rows with nan values, medium, remove, none)", "# Caution: Change to dataframe-columns.txt and features list required (Do not use)", "# Replace column level nan" ]
[ { "param": "df", "type": null }, { "param": "replacement_method", "type": null } ]
{ "returns": [ { "docstring": "Updated pandas dataframe", "docstring_tokens": [ "Updated", "pandas", "dataframe" ], "type": "dataframe" } ], "raises": [], "params": [ { "identifier": "df", "type": null, "docstring": null, "docstring_tokens": [ "None" ], "default": null, "is_optional": false }, { "identifier": "replacement_method", "type": null, "docstring": "Specify replacement methods\n: 0 - remove rows with nan values\n: 1 - remove columns with nan values\n: 2 - fill nan with column mean\n: 3 - fill nan with column median", "docstring_tokens": [ "Specify", "replacement", "methods", ":", "0", "-", "remove", "rows", "with", "nan", "values", ":", "1", "-", "remove", "columns", "with", "nan", "values", ":", "2", "-", "fill", "nan", "with", "column", "mean", ":", "3", "-", "fill", "nan", "with", "column", "median" ], "default": null, "is_optional": false } ], "outlier_params": [], "others": [] }
def replace_nan(df, replacement_method): nan_total = df.isnull().sum().sum() print('Number of nan values before processing: ', nan_total) if nan_total > 0: if replacement_method == 0: df.dropna(axis=0, how='any', inplace=True) if replacement_method == 1: df.dropna(axis=1, how='any', inplace=True) for column in df.columns: if df[column].isnull().sum() > 0: if replacement_method == 2: df[column].fillna(df[column].mean(), inplace=True) elif replacement_method == 3: df[column].fillna(df[column].median(), inplace=True) nan_total = df.isnull().sum().sum() print('Number of nan values after processing: ', nan_total) return df
641
256
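A minimal sketch of replace_nan with method 2 (column-mean fill); note that methods 0 and 1 mutate df via dropna(inplace=True), and the source comments flag method 1 as not to be used:

    import numpy as np
    import pandas as pd

    df = pd.DataFrame({'a': [1.0, np.nan, 3.0], 'b': [np.nan, 5.0, 6.0]})
    cleaned = replace_nan(df, replacement_method=2)
    print(cleaned)  # NaNs replaced by column means (a: 2.0, b: 5.5)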
21110e05a32f093c511685fd19fc5bef7e663a21
manuelcorrales/pysoa
pysoa/test/plugins/pytest/fixtures.py
[ "Apache-2.0" ]
Python
service_client_settings
<not_specific>
def service_client_settings(server_class, server_settings): """Config passed to the service client on instantiation""" return { server_class.service_name: { 'transport': { 'path': 'pysoa.common.transport.local:LocalClientTransport', 'kwargs': { 'server_class': server_class, 'server_settings': server_settings, }, }, }, }
Config passed to the service client on instantiation
Config passed to the service client on instantiation
[ "Config", "passed", "to", "the", "service", "client", "on", "instantiation" ]
def service_client_settings(server_class, server_settings): return { server_class.service_name: { 'transport': { 'path': 'pysoa.common.transport.local:LocalClientTransport', 'kwargs': { 'server_class': server_class, 'server_settings': server_settings, }, }, }, }
[ "def", "service_client_settings", "(", "server_class", ",", "server_settings", ")", ":", "return", "{", "server_class", ".", "service_name", ":", "{", "'transport'", ":", "{", "'path'", ":", "'pysoa.common.transport.local:LocalClientTransport'", ",", "'kwargs'", ":", "{", "'server_class'", ":", "server_class", ",", "'server_settings'", ":", "server_settings", ",", "}", ",", "}", ",", "}", ",", "}" ]
Config passed to the service client on instantiation
[ "Config", "passed", "to", "the", "service", "client", "on", "instantiation" ]
[ "\"\"\"Config passed to the service client on instantiation\"\"\"" ]
[ { "param": "server_class", "type": null }, { "param": "server_settings", "type": null } ]
{ "returns": [], "raises": [], "params": [ { "identifier": "server_class", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null }, { "identifier": "server_settings", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
def service_client_settings(server_class, server_settings): return { server_class.service_name: { 'transport': { 'path': 'pysoa.common.transport.local:LocalClientTransport', 'kwargs': { 'server_class': server_class, 'server_settings': server_settings, }, }, }, }
644
294
2dc3fdf856812369091819e37e5fff4790bc6aae
Xliff/p6-Amazon-AWS-EC2
historical/test.py
[ "Apache-2.0" ]
Python
pretty_print_GET
null
def pretty_print_GET(req): """ At this point it is completely built and ready to be fired; it is "prepared". However pay attention at the formatting used in this function because it is programmed to be pretty printed and may differ from the actual request. """ print('{}\n{}\n{}\n\n'.format( '-----------START-----------', req.method + ' ' + req.url, '\n'.join('{}: {}'.format(k, v) for k, v in req.headers.items()) ))
At this point it is completely built and ready to be fired; it is "prepared". However pay attention at the formatting used in this function because it is programmed to be pretty printed and may differ from the actual request.
At this point it is completely built and ready to be fired; it is "prepared". However pay attention at the formatting used in this function because it is programmed to be pretty printed and may differ from the actual request.
[ "At", "this", "point", "it", "is", "completely", "built", "and", "ready", "to", "be", "fired", ";", "it", "is", "\"", "prepared", "\"", ".", "However", "pay", "attention", "at", "the", "formatting", "used", "in", "this", "function", "because", "it", "is", "programmed", "to", "be", "pretty", "printed", "and", "may", "differ", "from", "the", "actual", "request", "." ]
def pretty_print_GET(req): print('{}\n{}\n{}\n\n'.format( '-----------START-----------', req.method + ' ' + req.url, '\n'.join('{}: {}'.format(k, v) for k, v in req.headers.items()) ))
[ "def", "pretty_print_GET", "(", "req", ")", ":", "print", "(", "'{}\\n{}\\n{}\\n\\n'", ".", "format", "(", "'-----------START-----------'", ",", "req", ".", "method", "+", "' '", "+", "req", ".", "url", ",", "'\\n'", ".", "join", "(", "'{}: {}'", ".", "format", "(", "k", ",", "v", ")", "for", "k", ",", "v", "in", "req", ".", "headers", ".", "items", "(", ")", ")", ")", ")" ]
At this point it is completely built and ready to be fired; it is "prepared".
[ "At", "this", "point", "it", "is", "completely", "built", "and", "ready", "to", "be", "fired", ";", "it", "is", "\"", "prepared", "\"", "." ]
[ "\"\"\"\n At this point it is completely built and ready\n to be fired; it is \"prepared\".\n\n However pay attention at the formatting used in\n this function because it is programmed to be pretty\n printed and may differ from the actual request.\n \"\"\"" ]
[ { "param": "req", "type": null } ]
{ "returns": [], "raises": [], "params": [ { "identifier": "req", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
def pretty_print_GET(req): print('{}\n{}\n{}\n\n'.format( '-----------START-----------', req.method + ' ' + req.url, '\n'.join('{}: {}'.format(k, v) for k, v in req.headers.items()) ))
645
80
c62facf25bcd80f04c6d06ef4de9b933cc22aeaf
louisVottero/vtool
python/vtool/util.py
[ "MIT" ]
Python
is_windows
<not_specific>
def is_windows(): """ Check to see if running in windows Returns: bool: """ if platform.system() == 'Windows': return True return False
Check to see if running in windows Returns: bool:
Check to see if running in windows
[ "Check", "to", "see", "if", "running", "in", "windows" ]
def is_windows(): if platform.system() == 'Windows': return True return False
[ "def", "is_windows", "(", ")", ":", "if", "platform", ".", "system", "(", ")", "==", "'Windows'", ":", "return", "True", "return", "False" ]
Check to see if running in windows
[ "Check", "to", "see", "if", "running", "in", "windows" ]
[ "\"\"\"\r\n Check to see if running in windows\r\n \r\n Returns:\r\n bool:\r\n \"\"\"" ]
[]
{ "returns": [ { "docstring": null, "docstring_tokens": [ "None" ], "type": "bool" } ], "raises": [], "params": [], "outlier_params": [], "others": [] }
import platform def is_windows(): if platform.system() == 'Windows': return True return False
646
276
2cb937ab88b7d876a7f5e50fa14a725421927c38
uw-loci/mp-python-modules
multiscale/ultrasound/reconstruction.py
[ "BSD-2-Clause" ]
Python
extract_iteration_from_path
<not_specific>
def extract_iteration_from_path(file_path): """Get the image index from filename formatted It-index.mat""" match = re.search(r'It-\d*', file_path.stem) index = int(match.group()[3:]) - 1 return index
Get the image index from filename formatted It-index.mat
Get the image index from filename formatted It-index.mat
[ "Get", "the", "image", "index", "from", "filename", "formatted", "It", "-", "index", ".", "mat" ]
def extract_iteration_from_path(file_path): match = re.search(r'It-\d*', file_path.stem) index = int(match.group()[3:]) - 1 return index
[ "def", "extract_iteration_from_path", "(", "file_path", ")", ":", "match", "=", "re", ".", "search", "(", "r'It-\\d*'", ",", "file_path", ".", "stem", ")", "index", "=", "int", "(", "match", ".", "group", "(", ")", "[", "3", ":", "]", ")", "-", "1", "return", "index" ]
Get the image index from filename formatted It-index.mat
[ "Get", "the", "image", "index", "from", "filename", "formatted", "It", "-", "index", ".", "mat" ]
[ "\"\"\"Get the image index from filename formatted It-index.mat\"\"\"" ]
[ { "param": "file_path", "type": null } ]
{ "returns": [], "raises": [], "params": [ { "identifier": "file_path", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
import re def extract_iteration_from_path(file_path): match = re.search(r'It-\d*', file_path.stem) index = int(match.group()[3:]) - 1 return index
647
286
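extract_iteration_from_path reads file_path.stem, so the argument is expected to be a pathlib.Path rather than a plain string; the filename index is 1-based and the return value 0-based:

    from pathlib import Path

    print(extract_iteration_from_path(Path('scan/It-12.mat')))  # 11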
d0185ad76db6e535d22eefea3e395b92eef566ef
GodwinEke/c-lineassistantrepo
src/functions.py
[ "Linux-OpenIB" ]
Python
languages
list
def languages()-> list: """" Returns a list of languages Erikwonda knows """ return ['english', 'russian', 'german', 'chinese', 'french', 'japanese', 'spanish']
Returns a list of languages Erikwonda knows
Returns a list of languages Erikwonda knows
[ "Returns", "a", "list", "of", "languages", "Erikwonda", "knows" ]
def languages()-> list: return ['english', 'russian', 'german', 'chinese', 'french', 'japanese', 'spanish']
[ "def", "languages", "(", ")", "->", "list", ":", "return", "[", "'english'", ",", "'russian'", ",", "'german'", ",", "'chinese'", ",", "'french'", ",", "'japanese'", ",", "'spanish'", "]" ]
Returns a list of languages Erikwonda knows
[ "Returns", "a", "list", "of", "languages", "Erikwonda", "knows" ]
[ "\"\"\"\"\n Returns a list of languages Erikwonda knows\n \"\"\"" ]
[]
{ "returns": [], "raises": [], "params": [], "outlier_params": [], "others": [] }
def languages()-> list: return ['english', 'russian', 'german', 'chinese', 'french', 'japanese', 'spanish']
648
330
6f8ceb2cbe5e03e63e58b2a458fd772c62052000
ORNL/soar_survey_scoring
phase1/code/utils.py
[ "MIT" ]
Python
unjsonify
<not_specific>
def unjsonify(in_file): """ Input: -in_file: the file path where the object you want to read in is stored Output: -obj: the object you want to read in """ obj_text = codecs.open(in_file, 'r', encoding='utf-8').read() obj = json.loads(obj_text) return obj
Input: -in_file: the file path where the object you want to read in is stored Output: -obj: the object you want to read in
the file path where the object you want to read in is stored Output: obj: the object you want to read in
[ "the", "file", "path", "where", "the", "object", "you", "want", "to", "read", "in", "is", "stored", "Output", ":", "obj", ":", "the", "object", "you", "want", "to", "read", "in" ]
def unjsonify(in_file): obj_text = codecs.open(in_file, 'r', encoding='utf-8').read() obj = json.loads(obj_text) return obj
[ "def", "unjsonify", "(", "in_file", ")", ":", "obj_text", "=", "codecs", ".", "open", "(", "in_file", ",", "'r'", ",", "encoding", "=", "'utf-8'", ")", ".", "read", "(", ")", "obj", "=", "json", ".", "loads", "(", "obj_text", ")", "return", "obj" ]
Input: in_file: the file path where the object you want to read in is stored Output: obj: the object you want to read in
[ "Input", ":", "in_file", ":", "the", "file", "path", "where", "the", "object", "you", "want", "to", "read", "in", "is", "stored", "Output", ":", "obj", ":", "the", "object", "you", "want", "to", "read", "in" ]
[ "\"\"\"\n Input:\n -in_file: the file path where the object you want to read in is stored\n Output:\n -obj: the object you want to read in\n \"\"\"" ]
[ { "param": "in_file", "type": null } ]
{ "returns": [], "raises": [], "params": [ { "identifier": "in_file", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
import codecs import json def unjsonify(in_file): obj_text = codecs.open(in_file, 'r', encoding='utf-8').read() obj = json.loads(obj_text) return obj
649
157
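A round-trip sketch for unjsonify; the path is a placeholder. The codecs.open(...).read() call leaves closing the handle to the garbage collector, which a with-block would make explicit:

    import codecs, json

    with codecs.open('obj.json', 'w', encoding='utf-8') as f:  # placeholder path
        json.dump({'scores': [1, 2, 3]}, f)

    print(unjsonify('obj.json')['scores'])  # [1, 2, 3]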
cb3f2f7a2b9dc1b4008dc732699a34f7a25bfa7d
dem4ply/chibi
chibi/file/snippets.py
[ "WTFPL" ]
Python
inflate_dir
<not_specific>
def inflate_dir( src ): """ inflates the path to obtain the absolute path Parameters ========== src: string path to be inflated Returns ======= string """ if '~' in src: return os.path.expanduser( src ) else: return os.path.abspath( src )
inflates the path to obtain the absolute path Parameters ========== src: string path to be inflated Returns ======= string
inflates the path to obtain the absolute path Parameters string path to be inflated Returns string
[ "inflates", "the", "path", "to", "obtain", "the", "absolute", "path", "Parameters", "string", "path", "to", "be", "inflated", "Returns", "string" ]
def inflate_dir( src ): if '~' in src: return os.path.expanduser( src ) else: return os.path.abspath( src )
[ "def", "inflate_dir", "(", "src", ")", ":", "if", "'~'", "in", "src", ":", "return", "os", ".", "path", ".", "expanduser", "(", "src", ")", "else", ":", "return", "os", ".", "path", ".", "abspath", "(", "src", ")" ]
inflates the path to obtain the absolute path Parameters
[ "inflates", "the", "path", "to", "obtain", "the", "absolute", "path", "Parameters" ]
[ "\"\"\"\n inflates the path to obtain the absolute path\n\n Parameters\n ==========\n src: string\n path to be inflated\n\n Returns\n =======\n string\n \"\"\"" ]
[ { "param": "src", "type": null } ]
{ "returns": [], "raises": [], "params": [ { "identifier": "src", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
import os def inflate_dir( src ): if '~' in src: return os.path.expanduser( src ) else: return os.path.abspath( src )
650
484
5a6b021801c0b5da6f958ba138f136d30ccf276b
mawanda-jun/NoLabels
legacy/Dataset/clean_images_from_file.py
[ "MIT" ]
Python
rename_images
null
def rename_images(folder_path): """ Rename all images in the folder that do not contain one of the enabled extensions :param folder_path: :return: """ folder_path = os.path.join(os.getcwd(), folder_path) enabled_extensions = ['.bmp', '.png', '.jpg', '.JPG', '.jpeg', '.PNG', '.JPEG', '.BMP'] for root, dirs, files in os.walk(folder_path): for file in files: flag = False for extension in enabled_extensions: if extension in os.path.splitext(file)[1]: flag = True if not flag: os.rename(os.path.join(os.getcwd(), root, file), os.path.join(os.getcwd(), root, file + '.jpeg'))
Rename all images in the folder that do not contain one of the enabled extensions :param folder_path: :return:
Rename all images in the folder that do not contain one of the enabled extensions
[ "Rename", "all", "images", "in", "the", "folder", "that", "do", "not", "contain", "one", "of", "the", "enabled", "extensions" ]
def rename_images(folder_path): folder_path = os.path.join(os.getcwd(), folder_path) enabled_extensions = ['.bmp', '.png', '.jpg', '.JPG', '.jpeg', '.PNG', '.JPEG', '.BMP'] for root, dirs, files in os.walk(folder_path): for file in files: flag = False for extension in enabled_extensions: if extension in os.path.splitext(file)[1]: flag = True if not flag: os.rename(os.path.join(os.getcwd(), root, file), os.path.join(os.getcwd(), root, file + '.jpeg'))
[ "def", "rename_images", "(", "folder_path", ")", ":", "folder_path", "=", "os", ".", "path", ".", "join", "(", "os", ".", "getcwd", "(", ")", ",", "folder_path", ")", "enabled_extensions", "=", "[", "'.bmp'", ",", "'.png'", ",", "'.jpg'", ",", "'.JPG'", ",", "'.jpeg'", ",", "'.PNG'", ",", "'.JPEG'", ",", "'.BMP'", "]", "for", "root", ",", "dirs", ",", "files", "in", "os", ".", "walk", "(", "folder_path", ")", ":", "for", "file", "in", "files", ":", "flag", "=", "False", "for", "extension", "in", "enabled_extensions", ":", "if", "extension", "in", "os", ".", "path", ".", "splitext", "(", "file", ")", "[", "1", "]", ":", "flag", "=", "True", "if", "not", "flag", ":", "os", ".", "rename", "(", "os", ".", "path", ".", "join", "(", "os", ".", "getcwd", "(", ")", ",", "root", ",", "file", ")", ",", "os", ".", "path", ".", "join", "(", "os", ".", "getcwd", "(", ")", ",", "root", ",", "file", "+", "'.jpeg'", ")", ")" ]
Rename all images in the folder that do not contain one of the enabled extensions
[ "Rename", "all", "images", "in", "the", "folder", "that", "do", "not", "contain", "one", "of", "the", "enabled", "extensions" ]
[ "\"\"\"\n Rename all images in the folder that do not contain one of the enabled extensions\n :param folder_path:\n :return:\n \"\"\"" ]
[ { "param": "folder_path", "type": null } ]
{ "returns": [ { "docstring": null, "docstring_tokens": [ "None" ], "type": null } ], "raises": [], "params": [ { "identifier": "folder_path", "type": null, "docstring": null, "docstring_tokens": [ "None" ], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
import os def rename_images(folder_path): folder_path = os.path.join(os.getcwd(), folder_path) enabled_extensions = ['.bmp', '.png', '.jpg', '.JPG', '.jpeg', '.PNG', '.JPEG', '.BMP'] for root, dirs, files in os.walk(folder_path): for file in files: flag = False for extension in enabled_extensions: if extension in os.path.splitext(file)[1]: flag = True if not flag: os.rename(os.path.join(os.getcwd(), root, file), os.path.join(os.getcwd(), root, file + '.jpeg'))
651
557
4d0a0896e49f50edc544c875db0b9b13f8f0ded3
khromiumos/chromiumos-chromite
api/controller/artifacts.py
[ "BSD-3-Clause" ]
Python
_FetchPinnedGuestImageUrisResponse
null
def _FetchPinnedGuestImageUrisResponse(_input_proto, output_proto, _config): """Add test fetched pinned guest image files to a successful response.""" pinned_image = output_proto.pinned_images.add() pinned_image.filename = 'pinned_file.tar.gz' pinned_image.uri = 'https://testuri.com'
Add test fetched pinned guest image files to a successful response.
Add test fetched pinned guest image files to a successful response.
[ "Add", "test", "fetched", "pinned", "guest", "image", "files", "to", "a", "successful", "response", "." ]
def _FetchPinnedGuestImageUrisResponse(_input_proto, output_proto, _config): pinned_image = output_proto.pinned_images.add() pinned_image.filename = 'pinned_file.tar.gz' pinned_image.uri = 'https://testuri.com'
[ "def", "_FetchPinnedGuestImageUrisResponse", "(", "_input_proto", ",", "output_proto", ",", "_config", ")", ":", "pinned_image", "=", "output_proto", ".", "pinned_images", ".", "add", "(", ")", "pinned_image", ".", "filename", "=", "'pinned_file.tar.gz'", "pinned_image", ".", "uri", "=", "'https://testuri.com'" ]
Add test fetched pinned guest image files to a successful response.
[ "Add", "test", "fetched", "pinned", "guest", "image", "files", "to", "a", "successful", "response", "." ]
[ "\"\"\"Add test fetched pinned guest image files to a successful response.\"\"\"" ]
[ { "param": "_input_proto", "type": null }, { "param": "output_proto", "type": null }, { "param": "_config", "type": null } ]
{ "returns": [], "raises": [], "params": [ { "identifier": "_input_proto", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null }, { "identifier": "output_proto", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null }, { "identifier": "_config", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
def _FetchPinnedGuestImageUrisResponse(_input_proto, output_proto, _config): pinned_image = output_proto.pinned_images.add() pinned_image.filename = 'pinned_file.tar.gz' pinned_image.uri = 'https://testuri.com'
652
635
e216d7fc8d8c5bdeea4a856a1ad15a1e54cc568b
UlrichBerntien/Uebungen-Python
Sekunden_Umwandlung.py
[ "Apache-2.0" ]
Python
hms_to_seconds
int
def hms_to_seconds(hms: str) -> int: """ Converts an "h:mm:ss" string into the number of seconds. :param hms: time in the format "hours:minutes:seconds" :return: time in seconds """ match = re.match(r"(\d+):(\d+):(\d+)$", hms) if not match: raise RuntimeError("invalid hms format: " + hms) hours_part = int(match.group(1)) minutes_part = int(match.group(2)) seconds_part = int(match.group(3)) if minutes_part > 59 or seconds_part > 59: raise RuntimeError("invalid hms format: " + hms) return seconds_part + ((hours_part * 60) + minutes_part) * 60
Converts an "h:mm:ss" string into the number of seconds. :param hms: time in the format "hours:minutes:seconds" :return: time in seconds
Converts an "h:mm:ss" string into the number of seconds.
[ "Converts", "an", "\"", "h", ":", "mm", ":", "ss", "\"", "string", "into", "the", "number", "of", "seconds", "." ]
def hms_to_seconds(hms: str) -> int: match = re.match(r"(\d+):(\d+):(\d+)$", hms) if not match: raise RuntimeError("invalid hms format: " + hms) hours_part = int(match.group(1)) minutes_part = int(match.group(2)) seconds_part = int(match.group(3)) if minutes_part > 59 or seconds_part > 59: raise RuntimeError("invalid hms format: " + hms) return seconds_part + ((hours_part * 60) + minutes_part) * 60
[ "def", "hms_to_seconds", "(", "hms", ":", "str", ")", "->", "int", ":", "match", "=", "re", ".", "match", "(", "r\"(\\d+):(\\d+):(\\d+)$\"", ",", "hms", ")", "if", "not", "match", ":", "raise", "RuntimeError", "(", "\"invalid hms format: \"", "+", "hms", ")", "hours_part", "=", "int", "(", "match", ".", "group", "(", "1", ")", ")", "minutes_part", "=", "int", "(", "match", ".", "group", "(", "2", ")", ")", "seconds_part", "=", "int", "(", "match", ".", "group", "(", "3", ")", ")", "if", "minutes_part", ">", "59", "or", "seconds_part", ">", "59", ":", "raise", "RuntimeError", "(", "\"invalid hms format: \"", "+", "hms", ")", "return", "seconds_part", "+", "(", "(", "hours_part", "*", "60", ")", "+", "minutes_part", ")", "*", "60" ]
Converts an "h:mm:ss" string into the number of seconds.
[ "Converts", "an", "\"", "h", ":", "mm", ":", "ss", "\"", "string", "into", "the", "number", "of", "seconds", "." ]
[ "\"\"\"\n Converts an \"h:mm:ss\" string into the number of seconds.\n :param hms: time in the format \"hours:minutes:seconds\"\n :return: time in seconds\n \"\"\"" ]
[ { "param": "hms", "type": "str" } ]
{ "returns": [ { "docstring": "Zeit in Sekunden", "docstring_tokens": [ "Zeit", "in", "Sekunden" ], "type": null } ], "raises": [], "params": [ { "identifier": "hms", "type": "str", "docstring": "Zeit im Format \"stunde:minuten:sekunden\"", "docstring_tokens": [ "Zeit", "im", "Format", "\"", "stunde", ":", "minuten", ":", "sekunden", "\"" ], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
import re def hms_to_seconds(hms: str) -> int: match = re.match(r"(\d+):(\d+):(\d+)$", hms) if not match: raise RuntimeError("invalid hms format: " + hms) hours_part = int(match.group(1)) minutes_part = int(match.group(2)) seconds_part = int(match.group(3)) if minutes_part > 59 or seconds_part > 59: raise RuntimeError("invalid hms format: " + hms) return seconds_part + ((hours_part * 60) + minutes_part) * 60
653
644
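A quick check of hms_to_seconds, including the 59-minute/second guard:

    print(hms_to_seconds('1:02:03'))  # 3723 == 1*3600 + 2*60 + 3
    try:
        hms_to_seconds('1:99:00')
    except RuntimeError as e:
        print(e)  # invalid hms format: 1:99:00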
56aeff2b0c70db307fea8e9b3acc8affcf4048e0
stillwwater/super_ini
super_ini.py
[ "MIT" ]
Python
internal
null
def internal(global_lut: dict, caller: Scope): """internal closure Classifies an internal scope in the global lut, scope only for internal use, and will not be compiled """ caller.internal = True
internal closure Classifies an internal scope in the global lut, scope only for internal use, and will not be compiled
internal closure Classifies an internal scope in the global lut, scope only for internal use, and will not be compiled
[ "internal", "closure", "Classifies", "an", "internal", "scope", "in", "the", "global", "lut", "scope", "only", "for", "internal", "use", "and", "will", "not", "be", "compiled" ]
def internal(global_lut: dict, caller: Scope): caller.internal = True
[ "def", "internal", "(", "global_lut", ":", "dict", ",", "caller", ":", "Scope", ")", ":", "caller", ".", "internal", "=", "True" ]
internal closure Classifies an internal scope in the global lut, scope only for internal use, and will not be compiled
[ "internal", "closure", "Classifies", "an", "internal", "scope", "in", "the", "global", "lut", "scope", "only", "for", "internal", "use", "and", "will", "not", "be", "compiled" ]
[ "\"\"\"internal closure\n\n Classifies an internal scope in the global lut,\n scope only for internal use, and will not be compiled\n \"\"\"" ]
[ { "param": "global_lut", "type": "dict" }, { "param": "caller", "type": "Scope" } ]
{ "returns": [], "raises": [], "params": [ { "identifier": "global_lut", "type": "dict", "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null }, { "identifier": "caller", "type": "Scope", "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
def internal(global_lut: dict, caller: Scope): caller.internal = True
654
244
d67f6e57cfedbab24aba9306405f037428182c94
AZMAG/azsmart
azsmart/defaults__old.py
[ "MIT" ]
Python
bldg_sqft
<not_specific>
def bldg_sqft(parcels, buildings): """ Total built square feet per parcel. """ b = buildings.to_frame(['parcel_id', 'total_sqft']) return b.groupby('parcel_id')['total_sqft'].sum().reindex(parcels.index).fillna(0)
Total built square feet per parcel.
Total built square feet per parcel.
[ "Total", "built", "square", "feet", "per", "parcel", "." ]
def bldg_sqft(parcels, buildings): b = buildings.to_frame(['parcel_id', 'total_sqft']) return b.groupby('parcel_id')['total_sqft'].sum().reindex(parcels.index).fillna(0)
[ "def", "bldg_sqft", "(", "parcels", ",", "buildings", ")", ":", "b", "=", "buildings", ".", "to_frame", "(", "[", "'parcel_id'", ",", "'total_sqft'", "]", ")", "return", "b", ".", "groupby", "(", "'parcel_id'", ")", "[", "'total_sqft'", "]", ".", "sum", "(", ")", ".", "reindex", "(", "parcels", ".", "index", ")", ".", "fillna", "(", "0", ")" ]
Total built square feet per parcel.
[ "Total", "built", "square", "feet", "per", "parcel", "." ]
[ "\"\"\"\n Total built square feet per parcel.\n\n \"\"\"" ]
[ { "param": "parcels", "type": null }, { "param": "buildings", "type": null } ]
{ "returns": [], "raises": [], "params": [ { "identifier": "parcels", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null }, { "identifier": "buildings", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
def bldg_sqft(parcels, buildings): b = buildings.to_frame(['parcel_id', 'total_sqft']) return b.groupby('parcel_id')['total_sqft'].sum().reindex(parcels.index).fillna(0)
656
635
3fce27e0f89786ad718d956ccd3b21263957d8de
alimahmoudi29/graph-theory
graph/topology.py
[ "MIT" ]
Python
same
<not_specific>
def same(path1, path2): """ Compares two paths to verify whether they're the same. :param path1: list of nodes. :param path2: list of nodes. :return: boolean. """ start1 = path2.index(path1[0]) checks = [ path1[:len(path1) - start1] == path2[start1:], path1[len(path1) - start1:] == path2[:start1] ] if all(checks): return True return False
Compares two paths to verify whether they're the same. :param path1: list of nodes. :param path2: list of nodes. :return: boolean.
Compares two paths to verify whether they're the same.
[ "Compares", "two", "paths", "to", "verify", "whether", "they", "'", "re", "the", "same", "." ]
def same(path1, path2): start1 = path2.index(path1[0]) checks = [ path1[:len(path1) - start1] == path2[start1:], path1[len(path1) - start1:] == path2[:start1] ] if all(checks): return True return False
[ "def", "same", "(", "path1", ",", "path2", ")", ":", "start1", "=", "path2", ".", "index", "(", "path1", "[", "0", "]", ")", "checks", "=", "[", "path1", "[", ":", "len", "(", "path1", ")", "-", "start1", "]", "==", "path2", "[", "start1", ":", "]", ",", "path1", "[", "len", "(", "path1", ")", "-", "start1", ":", "]", "==", "path2", "[", ":", "start1", "]", "]", "if", "all", "(", "checks", ")", ":", "return", "True", "return", "False" ]
Compares two paths to verify whether they're the same.
[ "Compares", "two", "paths", "to", "verify", "whether", "they", "'", "re", "the", "same", "." ]
[ "\"\"\" Compares two paths to verify whether they're the same.\n :param path1: list of nodes.\n :param path2: list of nodes.\n :return: boolean.\n \"\"\"" ]
[ { "param": "path1", "type": null }, { "param": "path2", "type": null } ]
{ "returns": [ { "docstring": null, "docstring_tokens": [ "None" ], "type": null } ], "raises": [], "params": [ { "identifier": "path1", "type": null, "docstring": "list of nodes.", "docstring_tokens": [ "list", "of", "nodes", "." ], "default": null, "is_optional": null }, { "identifier": "path2", "type": null, "docstring": "list of nodes.", "docstring_tokens": [ "list", "of", "nodes", "." ], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
def same(path1, path2): start1 = path2.index(path1[0]) checks = [ path1[:len(path1) - start1] == path2[start1:], path1[len(path1) - start1:] == path2[:start1] ] if all(checks): return True return False
657
1,000
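same above treats the two paths as rotations of one cycle; note it assumes path1[0] occurs somewhere in path2, otherwise .index raises ValueError. A sketch:

    print(same([1, 2, 3, 4], [3, 4, 1, 2]))  # True -- same cycle, rotated
    print(same([1, 2, 3, 4], [1, 3, 2, 4]))  # False -- same nodes, different order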
2debc9f77824d780a775d11c45883a7b2a6bb80f
Erotemic/utool
utool/util_str.py
[ "Apache-2.0" ]
Python
replace_between_tags
<not_specific>
def replace_between_tags(text, repl_, start_tag, end_tag=None): r""" Replaces text between sentinel lines in a block of text. Args: text (str): repl_ (str): start_tag (str): end_tag (str): (default=None) Returns: str: new_text CommandLine: python -m utool.util_str --exec-replace_between_tags Example: >>> # DISABLE_DOCTEST >>> from utool.util_str import * # NOQA >>> text = ut.codeblock( ''' class: # <FOO> bar # </FOO> baz ''') >>> repl_ = 'spam' >>> start_tag = '# <FOO>' >>> end_tag = '# </FOO>' >>> new_text = replace_between_tags(text, repl_, start_tag, end_tag) >>> result = ('new_text =\n%s' % (str(new_text),)) >>> print(result) new_text = class: # <FOO> spam # </FOO> baz """ new_lines = [] editing = False lines = text.split('\n') for line in lines: if not editing: new_lines.append(line) if line.strip().startswith(start_tag): new_lines.append(repl_) editing = True if end_tag is not None and line.strip().startswith(end_tag): editing = False new_lines.append(line) new_text = '\n'.join(new_lines) return new_text
r""" Replaces text between sentinel lines in a block of text. Args: text (str): repl_ (str): start_tag (str): end_tag (str): (default=None) Returns: str: new_text CommandLine: python -m utool.util_str --exec-replace_between_tags Example: >>> # DISABLE_DOCTEST >>> from utool.util_str import * # NOQA >>> text = ut.codeblock( ''' class: # <FOO> bar # </FOO> baz ''') >>> repl_ = 'spam' >>> start_tag = '# <FOO>' >>> end_tag = '# </FOO>' >>> new_text = replace_between_tags(text, repl_, start_tag, end_tag) >>> result = ('new_text =\n%s' % (str(new_text),)) >>> print(result) new_text = class: # <FOO> spam # </FOO> baz
r""" Replaces text between sentinel lines in a block of text.
[ "r", "\"", "\"", "\"", "Replaces", "text", "between", "sentinel", "lines", "in", "a", "block", "of", "text", "." ]
def replace_between_tags(text, repl_, start_tag, end_tag=None): new_lines = [] editing = False lines = text.split('\n') for line in lines: if not editing: new_lines.append(line) if line.strip().startswith(start_tag): new_lines.append(repl_) editing = True if end_tag is not None and line.strip().startswith(end_tag): editing = False new_lines.append(line) new_text = '\n'.join(new_lines) return new_text
[ "def", "replace_between_tags", "(", "text", ",", "repl_", ",", "start_tag", ",", "end_tag", "=", "None", ")", ":", "new_lines", "=", "[", "]", "editing", "=", "False", "lines", "=", "text", ".", "split", "(", "'\\n'", ")", "for", "line", "in", "lines", ":", "if", "not", "editing", ":", "new_lines", ".", "append", "(", "line", ")", "if", "line", ".", "strip", "(", ")", ".", "startswith", "(", "start_tag", ")", ":", "new_lines", ".", "append", "(", "repl_", ")", "editing", "=", "True", "if", "end_tag", "is", "not", "None", "and", "line", ".", "strip", "(", ")", ".", "startswith", "(", "end_tag", ")", ":", "editing", "=", "False", "new_lines", ".", "append", "(", "line", ")", "new_text", "=", "'\\n'", ".", "join", "(", "new_lines", ")", "return", "new_text" ]
r""" Replaces text between sentinal lines in a block of text.
[ "r", "\"", "\"", "\"", "Replaces", "text", "between", "sentinal", "lines", "in", "a", "block", "of", "text", "." ]
[ "r\"\"\"\n Replaces text between sentinal lines in a block of text.\n\n Args:\n text (str):\n repl_ (str):\n start_tag (str):\n end_tag (str): (default=None)\n\n Returns:\n str: new_text\n\n CommandLine:\n python -m utool.util_str --exec-replace_between_tags\n\n Example:\n >>> # DISABLE_DOCTEST\n >>> from utool.util_str import * # NOQA\n >>> text = ut.codeblock(\n '''\n class:\n # <FOO>\n bar\n # </FOO>\n baz\n ''')\n >>> repl_ = 'spam'\n >>> start_tag = '# <FOO>'\n >>> end_tag = '# </FOO>'\n >>> new_text = replace_between_tags(text, repl_, start_tag, end_tag)\n >>> result = ('new_text =\\n%s' % (str(new_text),))\n >>> print(result)\n new_text =\n class:\n # <FOO>\n spam\n # </FOO>\n baz\n \"\"\"" ]
[ { "param": "text", "type": null }, { "param": "repl_", "type": null }, { "param": "start_tag", "type": null }, { "param": "end_tag", "type": null } ]
{ "returns": [ { "docstring": null, "docstring_tokens": [ "None" ], "type": "str" } ], "raises": [], "params": [ { "identifier": "text", "type": null, "docstring": null, "docstring_tokens": [ "None" ], "default": null, "is_optional": false }, { "identifier": "repl_", "type": null, "docstring": null, "docstring_tokens": [ "None" ], "default": null, "is_optional": false }, { "identifier": "start_tag", "type": null, "docstring": null, "docstring_tokens": [ "None" ], "default": null, "is_optional": false }, { "identifier": "end_tag", "type": null, "docstring": null, "docstring_tokens": [ "None" ], "default": null, "is_optional": false } ], "outlier_params": [], "others": [ { "identifier": "examples", "docstring": ">>> # DISABLE_DOCTEST\n>>> from utool.util_str import * # NOQA\n>>> text = ut.codeblock(\n'''\nclass:\n\nbar\n\nbaz\n''')\n>>> repl_ = 'spam'\n>>> start_tag = '# '\n>>> end_tag = '# '\n>>> new_text = replace_between_tags(text, repl_, start_tag, end_tag)\n>>> result = ('new_text =\\n%s' % (str(new_text),))\n>>> print(result)\nnew_text\nclass:\n\nspam\n\nbaz", "docstring_tokens": [ ">>>", "#", "DISABLE_DOCTEST", ">>>", "from", "utool", ".", "util_str", "import", "*", "#", "NOQA", ">>>", "text", "=", "ut", ".", "codeblock", "(", "'", "'", "'", "class", ":", "bar", "baz", "'", "'", "'", ")", ">>>", "repl_", "=", "'", "spam", "'", ">>>", "start_tag", "=", "'", "#", "'", ">>>", "end_tag", "=", "'", "#", "'", ">>>", "new_text", "=", "replace_between_tags", "(", "text", "repl_", "start_tag", "end_tag", ")", ">>>", "result", "=", "(", "'", "new_text", "=", "\\", "n%s", "'", "%", "(", "str", "(", "new_text", ")", "))", ">>>", "print", "(", "result", ")", "new_text", "class", ":", "spam", "baz" ] } ] }
def replace_between_tags(text, repl_, start_tag, end_tag=None): new_lines = [] editing = False lines = text.split('\n') for line in lines: if not editing: new_lines.append(line) if line.strip().startswith(start_tag): new_lines.append(repl_) editing = True if end_tag is not None and line.strip().startswith(end_tag): editing = False new_lines.append(line) new_text = '\n'.join(new_lines) return new_text
658
843
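A compact run of replace_between_tags mirroring the record's doctest; the replacement line's indentation comes entirely from repl_ itself:

    text = 'class:\n    # <FOO>\n    bar\n    # </FOO>\n    baz'
    print(replace_between_tags(text, '    spam', '# <FOO>', '# </FOO>'))
    # class:
    #     # <FOO>
    #     spam
    #     # </FOO>
    #     baz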
ec6e7250740603065df3fef71e75f84c05ed0c1c
eaibmz/python-zhmcclient
tests/unit/test_example.py
[ "Apache-2.0" ]
Python
check
null
def check(session, client): """ Check the faked session and its faked HMC. """ assert session.host == 'fake-host' assert client.version_info() == (1, 8) cpcs = client.cpcs.list() assert len(cpcs) == 2 cpc1 = cpcs[0] # a CPC in classic mode assert cpc1.get_property('name') == 'cpc_1' assert not cpc1.dpm_enabled lpars = cpc1.lpars.list() assert len(lpars) == 1 lpar1 = lpars[0] assert lpar1.get_property('name') == 'lpar_1' cpc2 = cpcs[1] # a CPC in DPM mode assert cpc2.get_property('name') == 'cpc_2' assert cpc2.dpm_enabled partitions = cpc2.partitions.list() assert len(partitions) == 1 partition1 = partitions[0] assert partition1.get_property('name') == 'partition_1' adapters = cpc2.adapters.list() assert len(adapters) == 1 adapter1 = adapters[0] assert adapter1.get_property('name') == 'osa_1' ports = adapter1.ports.list() assert len(ports) == 1 port1 = ports[0] assert port1.get_property('name') == 'osa_1_port_1'
Check the faked session and its faked HMC.
Check the faked session and its faked HMC.
[ "Check", "the", "faked", "session", "and", "its", "faked", "HMC", "." ]
def check(session, client): assert session.host == 'fake-host' assert client.version_info() == (1, 8) cpcs = client.cpcs.list() assert len(cpcs) == 2 cpc1 = cpcs[0] assert cpc1.get_property('name') == 'cpc_1' assert not cpc1.dpm_enabled lpars = cpc1.lpars.list() assert len(lpars) == 1 lpar1 = lpars[0] assert lpar1.get_property('name') == 'lpar_1' cpc2 = cpcs[1] assert cpc2.get_property('name') == 'cpc_2' assert cpc2.dpm_enabled partitions = cpc2.partitions.list() assert len(partitions) == 1 partition1 = partitions[0] assert partition1.get_property('name') == 'partition_1' adapters = cpc2.adapters.list() assert len(adapters) == 1 adapter1 = adapters[0] assert adapter1.get_property('name') == 'osa_1' ports = adapter1.ports.list() assert len(ports) == 1 port1 = ports[0] assert port1.get_property('name') == 'osa_1_port_1'
[ "def", "check", "(", "session", ",", "client", ")", ":", "assert", "session", ".", "host", "==", "'fake-host'", "assert", "client", ".", "version_info", "(", ")", "==", "(", "1", ",", "8", ")", "cpcs", "=", "client", ".", "cpcs", ".", "list", "(", ")", "assert", "len", "(", "cpcs", ")", "==", "2", "cpc1", "=", "cpcs", "[", "0", "]", "assert", "cpc1", ".", "get_property", "(", "'name'", ")", "==", "'cpc_1'", "assert", "not", "cpc1", ".", "dpm_enabled", "lpars", "=", "cpc1", ".", "lpars", ".", "list", "(", ")", "assert", "len", "(", "lpars", ")", "==", "1", "lpar1", "=", "lpars", "[", "0", "]", "assert", "lpar1", ".", "get_property", "(", "'name'", ")", "==", "'lpar_1'", "cpc2", "=", "cpcs", "[", "1", "]", "assert", "cpc2", ".", "get_property", "(", "'name'", ")", "==", "'cpc_2'", "assert", "cpc2", ".", "dpm_enabled", "partitions", "=", "cpc2", ".", "partitions", ".", "list", "(", ")", "assert", "len", "(", "partitions", ")", "==", "1", "partition1", "=", "partitions", "[", "0", "]", "assert", "partition1", ".", "get_property", "(", "'name'", ")", "==", "'partition_1'", "adapters", "=", "cpc2", ".", "adapters", ".", "list", "(", ")", "assert", "len", "(", "adapters", ")", "==", "1", "adapter1", "=", "adapters", "[", "0", "]", "assert", "adapter1", ".", "get_property", "(", "'name'", ")", "==", "'osa_1'", "ports", "=", "adapter1", ".", "ports", ".", "list", "(", ")", "assert", "len", "(", "ports", ")", "==", "1", "port1", "=", "ports", "[", "0", "]", "assert", "port1", ".", "get_property", "(", "'name'", ")", "==", "'osa_1_port_1'" ]
Check the faked session and its faked HMC.
[ "Check", "the", "faked", "session", "and", "its", "faked", "HMC", "." ]
[ "\"\"\"\n Check the faked session and its faked HMC.\n \"\"\"", "# a CPC in classic mode", "# a CPC in DPM mode" ]
[ { "param": "session", "type": null }, { "param": "client", "type": null } ]
{ "returns": [], "raises": [], "params": [ { "identifier": "session", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null }, { "identifier": "client", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
def check(session, client): assert session.host == 'fake-host' assert client.version_info() == (1, 8) cpcs = client.cpcs.list() assert len(cpcs) == 2 cpc1 = cpcs[0] assert cpc1.get_property('name') == 'cpc_1' assert not cpc1.dpm_enabled lpars = cpc1.lpars.list() assert len(lpars) == 1 lpar1 = lpars[0] assert lpar1.get_property('name') == 'lpar_1' cpc2 = cpcs[1] assert cpc2.get_property('name') == 'cpc_2' assert cpc2.dpm_enabled partitions = cpc2.partitions.list() assert len(partitions) == 1 partition1 = partitions[0] assert partition1.get_property('name') == 'partition_1' adapters = cpc2.adapters.list() assert len(adapters) == 1 adapter1 = adapters[0] assert adapter1.get_property('name') == 'osa_1' ports = adapter1.ports.list() assert len(ports) == 1 port1 = ports[0] assert port1.get_property('name') == 'osa_1_port_1'
660
691
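
The checker above runs against a faked zhmcclient session; below is a self-contained sketch of the same assertion pattern using plain stand-in objects rather than the real zhmcclient mock API (FakeResource is an illustrative assumption, not part of the record):

class FakeResource:
    def __init__(self, name):
        self._name = name
    def get_property(self, key):
        # Only the 'name' property is modelled in this stub.
        return self._name if key == 'name' else None

cpcs = [FakeResource('cpc_1'), FakeResource('cpc_2')]
assert len(cpcs) == 2
assert cpcs[0].get_property('name') == 'cpc_1'
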
3c65c2c9602005283c7df0a5fe9840c66052afa4
zhuxinqimac/disentanglement_lib
disentanglement_lib/methods/semi_supervised/semi_supervised_utils.py
[ "Apache-2.0" ]
Python
permute
<not_specific>
def permute(factor, num_values, random_state): """Permutes the ordinal information of a given factor. Args: factor: Numpy array with the observations of a factor of variation with shape (num_labelled_samples,) and type Int64. num_values: Int with number of distinct values the factor of variation can take. random_state: Random state used to sample the permutation. Returns: factor: Numpy array of Int64 with the observations of a factor of variation with permuted values and shape (num_labelled_samples,). """ unordered_dict = random_state.permutation(range(num_values)) factor[:] = unordered_dict[factor] return factor
Permutes the ordinal information of a given factor. Args: factor: Numpy array with the observations of a factor of variation with shape (num_labelled_samples,) and type Int64. num_values: Int with number of distinct values the factor of variation can take. random_state: Random state used to sample the permutation. Returns: factor: Numpy array of Int64 with the observations of a factor of variation with permuted values and shape (num_labelled_samples,).
Permutes the ordinal information of a given factor.
[ "Permutes", "the", "ordinal", "information", "of", "a", "given", "factor", "." ]
def permute(factor, num_values, random_state): unordered_dict = random_state.permutation(range(num_values)) factor[:] = unordered_dict[factor] return factor
[ "def", "permute", "(", "factor", ",", "num_values", ",", "random_state", ")", ":", "unordered_dict", "=", "random_state", ".", "permutation", "(", "range", "(", "num_values", ")", ")", "factor", "[", ":", "]", "=", "unordered_dict", "[", "factor", "]", "return", "factor" ]
Permutes the ordinal information of a given factor.
[ "Permutes", "the", "ordinal", "information", "of", "a", "given", "factor", "." ]
[ "\"\"\"Permutes the ordinal information of a given factor.\n\n Args:\n factor: Numpy array with the observations of a factor of varation with shape\n (num_labelled_samples,) and type Int64.\n num_values: Int with number of distinct values the factor of variation can\n take.\n random_state: Random state used to sample the permutation.\n\n Returns:\n factor: Numpy array of Int64 with the observations of a factor of varation\n with permuted values and shape (num_labelled_samples,).\n \"\"\"" ]
[ { "param": "factor", "type": null }, { "param": "num_values", "type": null }, { "param": "random_state", "type": null } ]
{ "returns": [ { "docstring": "Numpy array of Int64 with the observations of a factor of varation\nwith permuted values and shape (num_labelled_samples,).", "docstring_tokens": [ "Numpy", "array", "of", "Int64", "with", "the", "observations", "of", "a", "factor", "of", "varation", "with", "permuted", "values", "and", "shape", "(", "num_labelled_samples", ")", "." ], "type": "factor" } ], "raises": [], "params": [ { "identifier": "factor", "type": null, "docstring": "Numpy array with the observations of a factor of varation with shape\n(num_labelled_samples,) and type Int64.", "docstring_tokens": [ "Numpy", "array", "with", "the", "observations", "of", "a", "factor", "of", "varation", "with", "shape", "(", "num_labelled_samples", ")", "and", "type", "Int64", "." ], "default": null, "is_optional": null }, { "identifier": "num_values", "type": null, "docstring": "Int with number of distinct values the factor of variation can\ntake.", "docstring_tokens": [ "Int", "with", "number", "of", "distinct", "values", "the", "factor", "of", "variation", "can", "take", "." ], "default": null, "is_optional": null }, { "identifier": "random_state", "type": null, "docstring": "Random state used to sample the permutation.", "docstring_tokens": [ "Random", "state", "used", "to", "sample", "the", "permutation", "." ], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
def permute(factor, num_values, random_state): unordered_dict = random_state.permutation(range(num_values)) factor[:] = unordered_dict[factor] return factor
661
348
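
A usage sketch, assuming permute from the record above is in scope; note it mutates factor in place via factor[:] = ...:

import numpy as np

rng = np.random.RandomState(0)
factor = np.array([0, 1, 2, 1, 0], dtype=np.int64)
permuted = permute(factor, num_values=3, random_state=rng)
# The same random relabelling is applied to every occurrence of a value,
# so positions that shared a value before still share one afterwards.
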
9522427195a3addc611ab149a0449ce38a73752a
DavidJohnGee/awe
wfeng/utils.py
[ "Apache-2.0" ]
Python
extractIniParam
<not_specific>
def extractIniParam(iniparams, inStr): """ if inStr starts with $ then we return matching param from the ini params, otherwise return taskval Params: iniparams - the map of parameters derived from the ini file inStr - input string """ taskval = inStr if inStr.startswith('$'): #find the param # note that this has been pre-validated, so we should not ever # actually fail to find the key param = inStr[1:] if param in iniparams: taskval = iniparams[param] if taskval == "": taskval = None return taskval
if inStr starts with $ then we return matching param from the ini params, otherwise return taskval Params: iniparams - the map of parameters derived from the ini file inStr - input string
if inStr starts with $ then we return matching param from the ini params, otherwise return taskval Params: iniparams - the map of parameters derived from the ini file inStr - input string
[ "if", "inStr", "starts", "with", "$", "then", "we", "return", "matching", "param", "from", "the", "ini", "params", "otherwise", "return", "taskval", "Params", ":", "iniparams", "-", "the", "map", "of", "parameters", "derived", "from", "the", "ini", "file", "inStr", "-", "input", "string" ]
def extractIniParam(iniparams, inStr): taskval = inStr if inStr.startswith('$'): param = inStr[1:] if param in iniparams: taskval = iniparams[param] if taskval == "": taskval = None return taskval
[ "def", "extractIniParam", "(", "iniparams", ",", "inStr", ")", ":", "taskval", "=", "inStr", "if", "inStr", ".", "startswith", "(", "'$'", ")", ":", "param", "=", "inStr", "[", "1", ":", "]", "if", "param", "in", "iniparams", ":", "taskval", "=", "iniparams", "[", "param", "]", "if", "taskval", "==", "\"\"", ":", "taskval", "=", "None", "return", "taskval" ]
if inStr starts with $ then we return matching param from the ini params, otherwise return taskval Params: iniparams - the map of parameters derived from the ini file inStr - input string
[ "if", "inStr", "starts", "with", "$", "then", "we", "return", "matching", "param", "from", "the", "ini", "params", "otherwise", "return", "taskval", "Params", ":", "iniparams", "-", "the", "map", "of", "parameters", "derived", "from", "the", "ini", "file", "inStr", "-", "input", "string" ]
[ "\"\"\" if inStr starts with $ then we return matching param from the ini\n params, otherwise return taskval\n Params:\n iniparams - the map of parameters derived from the ini file\n inStr - input string\n \"\"\"", "#find the param", "# note that this has been pre-validated, so we should not ever", "# actually fail to find the key" ]
[ { "param": "iniparams", "type": null }, { "param": "inStr", "type": null } ]
{ "returns": [], "raises": [], "params": [ { "identifier": "iniparams", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null }, { "identifier": "inStr", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
def extractIniParam(iniparams, inStr): taskval = inStr if inStr.startswith('$'): param = inStr[1:] if param in iniparams: taskval = iniparams[param] if taskval == "": taskval = None return taskval
662
609
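
A usage sketch, assuming extractIniParam from the record above is in scope (the parameter names are made up):

iniparams = {"BUILD_DIR": "/opt/build", "EMPTY": ""}
assert extractIniParam(iniparams, "$BUILD_DIR") == "/opt/build"
assert extractIniParam(iniparams, "$EMPTY") is None        # empty ini values become None
assert extractIniParam(iniparams, "literal") == "literal"  # non-$ strings pass through
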
ac8a4041bea38bed4c4544c3111a0a14677451a5
mfkiwl/autobern
bin/rundd.py
[ "MIT" ]
Python
rmbpetmp
null
def rmbpetmp(campaign_dir, dt, bpe_start, bpe_stop): """ This function will perform two operations: 1. remove all files in the campaign's RAW directory, that match the patterns '[A-Z0-9]{4}DDD0.SMT' and '[A-Z0-9]{4}DDD0.[dDoO]' 2. remove all files in the campaign's directories, that have last modification time tag between bpe_start and bpe_stop (aka remove all files in the campaign's directories that have been created/modified by the BPE run) """ doy_str = dt.strftime('%j') yy_str = dt.strftime('%y') raw_dir = os.path.join(campaign_dir, 'RAW') for fn in os.listdir(raw_dir): if re.match(r"[A-Z0-9]{4}"+doy_str+r"0\.SMT", fn): os.remove(os.path.join(raw_dir, fn)) elif re.match(r"[A-Z0-9]{4}"+doy_str+r"0\." + yy_str + r"[oOdD]", fn): os.remove(os.path.join(raw_dir, fn)) for (dirpath, dirnames, filenames) in os.walk(campaign_dir): for filename in filenames: f = os.path.join(dirpath, filename) try: mtime = datetime.datetime.fromtimestamp(os.stat(f).st_mtime, tz=datetime.timezone.utc) if mtime>=bpe_start and mtime <=bpe_stop: #verboseprint('[DEBUG] Removing temporary file {:} rmbpetmp ...'.format(f)) os.remove(f) except: pass
This function will perform two operations: 1. remove all files in the campaign's RAW directory, that match the patterns '[A-Z0-9]{4}DDD0.SMT' and '[A-Z0-9]{4}DDD0.[dDoO]' 2. remove all files in the campaign's directories, that have last modification time tag between bpe_start and bpe_stop (aka remove all files in the campaign's directories that have been created/modified by the BPE run)
This function will perform two operations: 1.
[ "This", "function", "will", "perform", "two", "operations", ":", "1", "." ]
def rmbpetmp(campaign_dir, dt, bpe_start, bpe_stop): doy_str = dt.strftime('%j') yy_str = dt.strftime('%y') raw_dir = os.path.join(campaign_dir, 'RAW') for fn in os.listdir(raw_dir): if re.match(r"[A-Z0-9]{4}"+doy_str+r"0\.SMT", fn): os.remove(os.path.join(raw_dir, fn)) elif re.match(r"[A-Z0-9]{4}"+doy_str+r"0\." + yy_str + r"[oOdD]", fn): os.remove(os.path.join(raw_dir, fn)) for (dirpath, dirnames, filenames) in os.walk(campaign_dir): for filename in filenames: f = os.path.join(dirpath, filename) try: mtime = datetime.datetime.fromtimestamp(os.stat(f).st_mtime, tz=datetime.timezone.utc) if mtime>=bpe_start and mtime <=bpe_stop: os.remove(f) except: pass
[ "def", "rmbpetmp", "(", "campaign_dir", ",", "dt", ",", "bpe_start", ",", "bpe_stop", ")", ":", "doy_str", "=", "dt", ".", "strftime", "(", "'%j'", ")", "yy_str", "=", "dt", ".", "strftime", "(", "'%y'", ")", "raw_dir", "=", "os", ".", "path", ".", "join", "(", "campaign_dir", ",", "'RAW'", ")", "for", "fn", "in", "os", ".", "listdir", "(", "raw_dir", ")", ":", "if", "re", ".", "match", "(", "r\"[A-Z0-9]{4}\"", "+", "doy_str", "+", "r\"0\\.SMT\"", ",", "fn", ")", ":", "os", ".", "remove", "(", "os", ".", "path", ".", "join", "(", "raw_dir", ",", "fn", ")", ")", "elif", "re", ".", "match", "(", "r\"[A-Z0-9]{4}\"", "+", "doy_str", "+", "r\"0\\.\"", "+", "yy_str", "+", "r\"[oOdD]\"", ",", "fn", ")", ":", "os", ".", "remove", "(", "os", ".", "path", ".", "join", "(", "raw_dir", ",", "fn", ")", ")", "for", "(", "dirpath", ",", "dirnames", ",", "filenames", ")", "in", "os", ".", "walk", "(", "campaign_dir", ")", ":", "for", "filename", "in", "filenames", ":", "f", "=", "os", ".", "path", ".", "join", "(", "dirpath", ",", "filename", ")", "try", ":", "mtime", "=", "datetime", ".", "datetime", ".", "fromtimestamp", "(", "os", ".", "stat", "(", "f", ")", ".", "st_mtime", ",", "tz", "=", "datetime", ".", "timezone", ".", "utc", ")", "if", "mtime", ">=", "bpe_start", "and", "mtime", "<=", "bpe_stop", ":", "os", ".", "remove", "(", "f", ")", "except", ":", "pass" ]
This function will perform two operations: 1. remove all files in the campaign's RAW directory, that match the patterns '[A-Z0-9]{4}DDD0.SMT' and '[A-Z0-9]{4}DDD0.
[ "This", "function", "will", "perform", "two", "operations", ":", "1", ".", "remove", "all", "files", "in", "the", "campaign", "'", "s", "RAW", "directory", "that", "match", "the", "patterns", "'", "[", "A", "-", "Z0", "-", "9", "]", "{", "4", "}", "DDD0", ".", "SMT", "'", "and", "'", "[", "A", "-", "Z0", "-", "9", "]", "{", "4", "}", "DDD0", "." ]
[ "\"\"\" This function will perform two operations:\n 1. remove all files in the campaign's RAW directory, that match the\n patterns '[A-Z0-9]{4}DDD0.SMT' and '[A-Z0-9]{4}DDD0.[dDoO]'\n 2. remove all files in the campaign's directories, that have last\n modification time tag between bpe_start and bpe_stop (aka remove\n all files in the campaign's directories that have been \n created/modified by the BPE run)\n \"\"\"", "#verboseprint('[DEBUG] Removing temporary file {:} rmbpetmp ...'.format(f))" ]
[ { "param": "campaign_dir", "type": null }, { "param": "dt", "type": null }, { "param": "bpe_start", "type": null }, { "param": "bpe_stop", "type": null } ]
{ "returns": [], "raises": [], "params": [ { "identifier": "campaign_dir", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null }, { "identifier": "dt", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null }, { "identifier": "bpe_start", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null }, { "identifier": "bpe_stop", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
import re import os import datetime def rmbpetmp(campaign_dir, dt, bpe_start, bpe_stop): doy_str = dt.strftime('%j') yy_str = dt.strftime('%y') raw_dir = os.path.join(campaign_dir, 'RAW') for fn in os.listdir(raw_dir): if re.match(r"[A-Z0-9]{4}"+doy_str+r"0\.SMT", fn): os.remove(os.path.join(raw_dir, fn)) elif re.match(r"[A-Z0-9]{4}"+doy_str+r"0\." + yy_str + r"[oOdD]", fn): os.remove(os.path.join(raw_dir, fn)) for (dirpath, dirnames, filenames) in os.walk(campaign_dir): for filename in filenames: f = os.path.join(dirpath, filename) try: mtime = datetime.datetime.fromtimestamp(os.stat(f).st_mtime, tz=datetime.timezone.utc) if mtime>=bpe_start and mtime <=bpe_stop: os.remove(f) except: pass
663
64
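
A call sketch, assuming rmbpetmp from the record above is in scope; the campaign path is a placeholder. The function really deletes files, so only point it at a disposable directory, and the directory must already contain a RAW/ subdirectory:

import datetime

day = datetime.datetime(2021, 5, 1)
bpe_start = datetime.datetime(2021, 5, 1, 10, 0, tzinfo=datetime.timezone.utc)
bpe_stop = bpe_start + datetime.timedelta(hours=2)
rmbpetmp('/tmp/campaigns/DEMO', day, bpe_start, bpe_stop)
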
684e4fb2656bdf4c65745ba6ac5d054d8c5b6401
acorbat/anisotropy_errors
anisotropy_functions.py
[ "MIT" ]
Python
total_fluorescence_from_intensity
<not_specific>
def total_fluorescence_from_intensity(I_par, I_per): """ Calculate total fluorescence from crossed intensities. """ return I_par + 2 * I_per
Calculate total fluorescence from crossed intensities.
Calculate total fluorescence from crossed intensities.
[ "Calculate", "total", "fluorescence", "from", "crossed", "intensities", "." ]
def total_fluorescence_from_intensity(I_par, I_per): return I_par + 2 * I_per
[ "def", "total_fluorescence_from_intensity", "(", "I_par", ",", "I_per", ")", ":", "return", "I_par", "+", "2", "*", "I_per" ]
Calculate total fluorescence from crossed intensities.
[ "Calculate", "total", "fluorescence", "from", "crossed", "intensities", "." ]
[ "\"\"\"\n Calculate total fluorescence from crossed intensities.\n \"\"\"" ]
[ { "param": "I_par", "type": null }, { "param": "I_per", "type": null } ]
{ "returns": [], "raises": [], "params": [ { "identifier": "I_par", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null }, { "identifier": "I_per", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
def total_fluorescence_from_intensity(I_par, I_per): return I_par + 2 * I_per
664
885
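
A quick check of the formula I_total = I_par + 2 * I_per, assuming the function above is in scope; since it only uses + and *, it also works element-wise on numpy arrays:

assert total_fluorescence_from_intensity(10.0, 5.0) == 20.0
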
02cc531ce5bfdb39d8af5fa2d8e35679e06606c2
La-Lojban/valsr
prover/wordle.py
[ "MIT" ]
Python
wordle
<not_specific>
def wordle(guess, word): """Given a 5-letter guess for a 5-letter word, returns the corresponding hint""" # g - green - letter found in the same position # y - yellow - letter found in a different position # b - black - letter not found result = "" for i in range(5): if guess[i] == word[i]: result += "g" elif guess[i] in word: result += "y" else: result += "b" return result
Given a 5-letter guess for a 5-letter word, returns the corresponding hint
Given a 5-letter guess for a 5-letter word, returns the corresponding hint
[ "Given", "a", "5", "-", "letter", "guess", "for", "a", "5", "-", "letter", "word", "returns", "the", "corresponding", "hint" ]
def wordle(guess, word): result = "" for i in range(5): if guess[i] == word[i]: result += "g" elif guess[i] in word: result += "y" else: result += "b" return result
[ "def", "wordle", "(", "guess", ",", "word", ")", ":", "result", "=", "\"\"", "for", "i", "in", "range", "(", "5", ")", ":", "if", "guess", "[", "i", "]", "==", "word", "[", "i", "]", ":", "result", "+=", "\"g\"", "elif", "guess", "[", "i", "]", "in", "word", ":", "result", "+=", "\"y\"", "else", ":", "result", "+=", "\"b\"", "return", "result" ]
Given a 5-letter guess for a 5-letter word, returns the corresponding hint
[ "Given", "a", "5", "-", "letter", "guess", "for", "a", "5", "-", "letter", "word", "returns", "the", "corresponding", "hint" ]
[ "\"\"\"Given a 5-letter guess for a 5-letter word, returns the corresponding hint\"\"\"", "# g - green - letter found in the same position", "# y - yellow - letter found in a different position", "# b - black - letter not found" ]
[ { "param": "guess", "type": null }, { "param": "word", "type": null } ]
{ "returns": [], "raises": [], "params": [ { "identifier": "guess", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null }, { "identifier": "word", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
def wordle(guess, word): result = "" for i in range(5): if guess[i] == word[i]: result += "g" elif guess[i] in word: result += "y" else: result += "b" return result
665
105
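
A usage sketch, assuming wordle from the record above is in scope:

assert wordle("crane", "cigar") == "gyybb"

Note this simple scheme marks a guessed letter yellow whenever it occurs anywhere in the word, so it does not reproduce the official game's handling of duplicate letters.
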
af5d32ca73fdaa7d5fda0533a68e9cfa1664bbd5
lcchiang/Artificial-Intelligence-Course
Lab 2/lab2.py
[ "MIT" ]
Python
endgame_score_connectfour
<not_specific>
def endgame_score_connectfour(board, is_current_player_maximizer): """Given an endgame board, returns 1000 if the maximizer has won, -1000 if the minimizer has won, or 0 in case of a tie.""" for chain in board.get_all_chains(): if len(chain) >= 4: if is_current_player_maximizer: return -1000 else: return 1000 return 0
Given an endgame board, returns 1000 if the maximizer has won, -1000 if the minimizer has won, or 0 in case of a tie.
Given an endgame board, returns 1000 if the maximizer has won, -1000 if the minimizer has won, or 0 in case of a tie.
[ "Given", "an", "endgame", "board", "returns", "1000", "if", "the", "maximizer", "has", "won", "-", "1000", "if", "the", "minimizer", "has", "won", "or", "0", "in", "case", "of", "a", "tie", "." ]
def endgame_score_connectfour(board, is_current_player_maximizer): for chain in board.get_all_chains(): if len(chain) >= 4: if is_current_player_maximizer: return -1000 else: return 1000 return 0
[ "def", "endgame_score_connectfour", "(", "board", ",", "is_current_player_maximizer", ")", ":", "for", "chain", "in", "board", ".", "get_all_chains", "(", ")", ":", "if", "len", "(", "chain", ")", ">=", "4", ":", "if", "is_current_player_maximizer", ":", "return", "-", "1000", "else", ":", "return", "1000", "return", "0" ]
Given an endgame board, returns 1000 if the maximizer has won, -1000 if the minimizer has won, or 0 in case of a tie.
[ "Given", "an", "endgame", "board", "returns", "1000", "if", "the", "maximizer", "has", "won", "-", "1000", "if", "the", "minimizer", "has", "won", "or", "0", "in", "case", "of", "a", "tie", "." ]
[ "\"\"\"Given an endgame board, returns 1000 if the maximizer has won,\n -1000 if the minimizer has won, or 0 in case of a tie.\"\"\"" ]
[ { "param": "board", "type": null }, { "param": "is_current_player_maximizer", "type": null } ]
{ "returns": [], "raises": [], "params": [ { "identifier": "board", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null }, { "identifier": "is_current_player_maximizer", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
def endgame_score_connectfour(board, is_current_player_maximizer): for chain in board.get_all_chains(): if len(chain) >= 4: if is_current_player_maximizer: return -1000 else: return 1000 return 0
666
103
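
A stub demo, assuming endgame_score_connectfour is in scope; FakeBoard is an illustrative stand-in for the lab's board class, modelling only get_all_chains:

class FakeBoard:
    def __init__(self, chains):
        self._chains = chains
    def get_all_chains(self):
        return self._chains

won = FakeBoard([[1, 1, 1, 1], [2]])  # one chain of length 4 ends the game
# The player who just moved is the winner, so the score flips with the mover:
assert endgame_score_connectfour(won, is_current_player_maximizer=False) == 1000
assert endgame_score_connectfour(won, is_current_player_maximizer=True) == -1000
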
a8c50adec9f68e52a23cc188881ecd921d6b10a6
timsavage/django-warthog
warthog/managers.py
[ "BSD-3-Clause" ]
Python
generate_cache_key
<not_specific>
def generate_cache_key(instance_or_type, **vary_by): """Generate a cache key for a model object.""" opts = instance_or_type._meta return 'model:{}[{}]'.format( opts.db_table, ','.join(['%s=%s' % v for v in vary_by.iteritems()]) )
Generate a cache key for a model object.
Generate a cache key for a model object.
[ "Generate", "a", "cache", "key", "for", "a", "model", "object", "." ]
def generate_cache_key(instance_or_type, **vary_by): opts = instance_or_type._meta return 'model:{}[{}]'.format( opts.db_table, ','.join(['%s=%s' % v for v in vary_by.iteritems()]) )
[ "def", "generate_cache_key", "(", "instance_or_type", ",", "**", "vary_by", ")", ":", "opts", "=", "instance_or_type", ".", "_meta", "return", "'model:{}[{}]'", ".", "format", "(", "opts", ".", "db_table", ",", "','", ".", "join", "(", "[", "'%s=%s'", "%", "v", "for", "v", "in", "vary_by", ".", "iteritems", "(", ")", "]", ")", ")" ]
Generate a cache key for a model object.
[ "Generate", "a", "cache", "key", "for", "a", "model", "object", "." ]
[ "\"\"\"Generate a cache key for a model object.\"\"\"" ]
[ { "param": "instance_or_type", "type": null } ]
{ "returns": [], "raises": [], "params": [ { "identifier": "instance_or_type", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
def generate_cache_key(instance_or_type, **vary_by): opts = instance_or_type._meta return 'model:{}[{}]'.format( opts.db_table, ','.join(['%s=%s' % v for v in vary_by.iteritems()]) )
668
369
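
Note that vary_by.iteritems() is Python 2 syntax; below is a Python 3 adaptation with a stub call, shown as a sketch (the model stand-in is an assumption, not Django API):

def generate_cache_key_py3(instance_or_type, **vary_by):
    # Same logic as the record, with .items() instead of .iteritems().
    opts = instance_or_type._meta
    return 'model:{}[{}]'.format(
        opts.db_table,
        ','.join('%s=%s' % kv for kv in vary_by.items()))

class _Meta:
    db_table = 'warthog_resource'

class FakeModel:
    _meta = _Meta()

assert generate_cache_key_py3(FakeModel, pk=42) == 'model:warthog_resource[pk=42]'
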
08332f19fa2c47aecfe49883a6ce4dc91ea0b7a6
vergilus/NJUNMT-pytorch
reinforces/reinforce_utils.py
[ "MIT" ]
Python
corpus_bleu_char
<not_specific>
def corpus_bleu_char(hyp_in, ref_in, need_tokenized=True): """ preprocess corpus into char level and test BLEU, proposed to check modification rate :param hyp_in: files to be tested :param ref_in: reference file :param need_tokenized: for languages that need tokenization :return: """ with open(hyp_in, "r") as hyp, open(ref_in, "r") as ref, \ open("hyp_char", "w") as hyp_char, open("ref_char", "w") as ref_char: for line_hyp_in, line_ref_in in zip(hyp, ref): if not need_tokenized: line_hyp_in = line_hyp_in.replace(" ", "") line_ref_in = line_ref_in.replace(" ", "") hyp_char.write(" ".join(list(line_hyp_in))) ref_char.write(" ".join(list(line_ref_in))) # cat hyp_char | sacrebleu -lc --score-only ref_char # sacrebleu_cmd = ["sacrebleu", "-l"] + ["--score-only",]+["ref_char"] cat = subprocess.Popen(("cat", "hyp_char"), stdout=subprocess.PIPE) cmd_bleu = subprocess.Popen(("/home/zouw/anaconda3/bin/sacrebleu", "-lc", "--score-only", "--force", "ref_char"), stdin=cat.stdout, stdout=subprocess.PIPE) bleu = cmd_bleu.communicate()[0].decode("utf-8").strip() print(bleu) bleu = float(bleu) subprocess.Popen("rm ref_char hyp_char", shell=True) return bleu
preprocess corpus into char level and test BLEU, proposed to check modification rate :param hyp_in: files to be tested :param ref_in: reference file :param need_tokenized: for languages that need tokenization :return:
preprocess corpus into char level and test BLEU, proposed to check modification rate
[ "preprocess", "corpus", "into", "char", "level", "and", "test", "BLEU", "proposed", "to", "check", "modification", "rate" ]
def corpus_bleu_char(hyp_in, ref_in, need_tokenized=True): with open(hyp_in, "r") as hyp, open(ref_in, "r") as ref, \ open("hyp_char", "w") as hyp_char, open("ref_char", "w") as ref_char: for line_hyp_in, line_ref_in in zip(hyp, ref): if not need_tokenized: line_hyp_in = line_hyp_in.replace(" ", "") line_ref_in = line_ref_in.replace(" ", "") hyp_char.write(" ".join(list(line_hyp_in))) ref_char.write(" ".join(list(line_ref_in))) cat = subprocess.Popen(("cat", "hyp_char"), stdout=subprocess.PIPE) cmd_bleu = subprocess.Popen(("/home/zouw/anaconda3/bin/sacrebleu", "-lc", "--score-only", "--force", "ref_char"), stdin=cat.stdout, stdout=subprocess.PIPE) bleu = cmd_bleu.communicate()[0].decode("utf-8").strip() print(bleu) bleu = float(bleu) subprocess.Popen("rm ref_char hyp_char", shell=True) return bleu
[ "def", "corpus_bleu_char", "(", "hyp_in", ",", "ref_in", ",", "need_tokenized", "=", "True", ")", ":", "with", "open", "(", "hyp_in", ",", "\"r\"", ")", "as", "hyp", ",", "open", "(", "ref_in", ",", "\"r\"", ")", "as", "ref", ",", "open", "(", "\"hyp_char\"", ",", "\"w\"", ")", "as", "hyp_char", ",", "open", "(", "\"ref_char\"", ",", "\"w\"", ")", "as", "ref_char", ":", "for", "line_hyp_in", ",", "line_ref_in", "in", "zip", "(", "hyp", ",", "ref", ")", ":", "if", "not", "need_tokenized", ":", "line_hyp_in", "=", "line_hyp_in", ".", "replace", "(", "\" \"", ",", "\"\"", ")", "line_ref_in", "=", "line_ref_in", ".", "replace", "(", "\" \"", ",", "\"\"", ")", "hyp_char", ".", "write", "(", "\" \"", ".", "join", "(", "list", "(", "line_hyp_in", ")", ")", ")", "ref_char", ".", "write", "(", "\" \"", ".", "join", "(", "list", "(", "line_ref_in", ")", ")", ")", "cat", "=", "subprocess", ".", "Popen", "(", "(", "\"cat\"", ",", "\"hyp_char\"", ")", ",", "stdout", "=", "subprocess", ".", "PIPE", ")", "cmd_bleu", "=", "subprocess", ".", "Popen", "(", "(", "\"/home/zouw/anaconda3/bin/sacrebleu\"", ",", "\"-lc\"", ",", "\"--score-only\"", ",", "\"--force\"", ",", "\"ref_char\"", ")", ",", "stdin", "=", "cat", ".", "stdout", ",", "stdout", "=", "subprocess", ".", "PIPE", ")", "bleu", "=", "cmd_bleu", ".", "communicate", "(", ")", "[", "0", "]", ".", "decode", "(", "\"utf-8\"", ")", ".", "strip", "(", ")", "print", "(", "bleu", ")", "bleu", "=", "float", "(", "bleu", ")", "subprocess", ".", "Popen", "(", "\"rm ref_char hyp_char\"", ",", "shell", "=", "True", ")", "return", "bleu" ]
preprocess corpus into char level and test BLEU, proposed to check modification rate
[ "preprocess", "corpus", "into", "char", "level", "and", "test", "BLEU", "proposed", "to", "check", "modification", "rate" ]
[ "\"\"\"\n preprocess corpus into char level and test BLEU,\n proposed to check modification rate\n :param hyp_in: files to be tested\n :param ref_in: reference file\n :param need_tokenized: for languages needs tokenization\n :return:\n \"\"\"", "# cat hyp_char | sacrebleu -lc --score-only ref_char", "# sacrebleu_cmd = [\"sacrebleu\", \"-l\"] + [\"--score-only\",]+[\"ref_char\"]" ]
[ { "param": "hyp_in", "type": null }, { "param": "ref_in", "type": null }, { "param": "need_tokenized", "type": null } ]
{ "returns": [ { "docstring": null, "docstring_tokens": [ "None" ], "type": null } ], "raises": [], "params": [ { "identifier": "hyp_in", "type": null, "docstring": "files to be tested", "docstring_tokens": [ "files", "to", "be", "tested" ], "default": null, "is_optional": null }, { "identifier": "ref_in", "type": null, "docstring": null, "docstring_tokens": [ "None" ], "default": null, "is_optional": null }, { "identifier": "need_tokenized", "type": null, "docstring": "for languages needs tokenization", "docstring_tokens": [ "for", "languages", "needs", "tokenization" ], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
import subprocess def corpus_bleu_char(hyp_in, ref_in, need_tokenized=True): with open(hyp_in, "r") as hyp, open(ref_in, "r") as ref, \ open("hyp_char", "w") as hyp_char, open("ref_char", "w") as ref_char: for line_hyp_in, line_ref_in in zip(hyp, ref): if not need_tokenized: line_hyp_in = line_hyp_in.replace(" ", "") line_ref_in = line_ref_in.replace(" ", "") hyp_char.write(" ".join(list(line_hyp_in))) ref_char.write(" ".join(list(line_ref_in))) cat = subprocess.Popen(("cat", "hyp_char"), stdout=subprocess.PIPE) cmd_bleu = subprocess.Popen(("/home/zouw/anaconda3/bin/sacrebleu", "-lc", "--score-only", "--force", "ref_char"), stdin=cat.stdout, stdout=subprocess.PIPE) bleu = cmd_bleu.communicate()[0].decode("utf-8").strip() print(bleu) bleu = float(bleu) subprocess.Popen("rm ref_char hyp_char", shell=True) return bleu
669
17
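
A call sketch, assuming corpus_bleu_char is in scope; the file names are placeholders. The function shells out to a hard-coded sacrebleu path and writes/removes hyp_char and ref_char in the working directory, so it only runs on a machine matching that layout:

score = corpus_bleu_char('hyp.txt', 'ref.txt', need_tokenized=False)
print('char-level BLEU:', score)
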
16ad88f4b5ea25174fc31c8aa157f82ee1279291
andrewmummery/PyTextEncrypt
textencryptor_beta.py
[ "MIT" ]
Python
message_to_file
null
def message_to_file(message,save_name='encrypted_file.txt'): """ This function saves a string to a text file (with name save_name). """ sf = open(save_name,'w+') sf.write(message) sf.close()
This function saves a string to a text file (with name save_name).
This function saves a string to a text file (with name save_name).
[ "This", "function", "saves", "a", "string", "to", "a", "text", "file", "(", "with", "name", "save_name", ")", "." ]
def message_to_file(message,save_name='encrypted_file.txt'): sf = open(save_name,'w+') sf.write(message) sf.close()
[ "def", "message_to_file", "(", "message", ",", "save_name", "=", "'encrypted_file.txt'", ")", ":", "sf", "=", "open", "(", "save_name", ",", "'w+'", ")", "sf", ".", "write", "(", "message", ")", "sf", ".", "close", "(", ")" ]
This function saves a string to a text file (with name save_name).
[ "This", "function", "saves", "a", "string", "to", "a", "text", "file", "(", "with", "name", "save_name", ")", "." ]
[ "\"\"\"\n This function saves a string to a text file (with name save_name). \n \"\"\"" ]
[ { "param": "message", "type": null }, { "param": "save_name", "type": null } ]
{ "returns": [], "raises": [], "params": [ { "identifier": "message", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null }, { "identifier": "save_name", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
def message_to_file(message,save_name='encrypted_file.txt'): sf = open(save_name,'w+') sf.write(message) sf.close()
670
157
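
A usage sketch, assuming message_to_file from the record above is in scope:

message_to_file("attack at dawn", save_name="note.txt")
with open("note.txt") as f:
    assert f.read() == "attack at dawn"
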
d0223aa4d3aa595bdf6ee51e1a258e800e9586a6
ycs334/platform-services-python-sdk
ibm_platform_services/global_search_v2.py
[ "Apache-2.0" ]
Python
from_dict
'ResultItem'
def from_dict(cls, _dict: Dict) -> 'ResultItem': """Initialize a ResultItem object from a json dictionary.""" args = {} if 'crn' in _dict: args['crn'] = _dict.get('crn') args.update({k:v for (k,v) in _dict.items() if k not in cls._properties}) return cls(**args)
Initialize a ResultItem object from a json dictionary.
Initialize a ResultItem object from a json dictionary.
[ "Initialize", "a", "ResultItem", "object", "from", "a", "json", "dictionary", "." ]
def from_dict(cls, _dict: Dict) -> 'ResultItem': args = {} if 'crn' in _dict: args['crn'] = _dict.get('crn') args.update({k:v for (k,v) in _dict.items() if k not in cls._properties}) return cls(**args)
[ "def", "from_dict", "(", "cls", ",", "_dict", ":", "Dict", ")", "->", "'ResultItem'", ":", "args", "=", "{", "}", "if", "'crn'", "in", "_dict", ":", "args", "[", "'crn'", "]", "=", "_dict", ".", "get", "(", "'crn'", ")", "args", ".", "update", "(", "{", "k", ":", "v", "for", "(", "k", ",", "v", ")", "in", "_dict", ".", "items", "(", ")", "if", "k", "not", "in", "cls", ".", "_properties", "}", ")", "return", "cls", "(", "**", "args", ")" ]
Initialize a ResultItem object from a json dictionary.
[ "Initialize", "a", "ResultItem", "object", "from", "a", "json", "dictionary", "." ]
[ "\"\"\"Initialize a ResultItem object from a json dictionary.\"\"\"" ]
[ { "param": "cls", "type": null }, { "param": "_dict", "type": "Dict" } ]
{ "returns": [], "raises": [], "params": [ { "identifier": "cls", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null }, { "identifier": "_dict", "type": "Dict", "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
def from_dict(cls, _dict: Dict) -> 'ResultItem': args = {} if 'crn' in _dict: args['crn'] = _dict.get('crn') args.update({k:v for (k,v) in _dict.items() if k not in cls._properties}) return cls(**args)
671
978
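
A stub demo of the classmethod pattern, assuming the record's from_dict (and typing.Dict, needed by its annotation) are in scope; ResultItem's real constructor is not shown in the record, so the stand-in below assumes it accepts crn plus arbitrary keyword properties:

class FakeResultItem:
    _properties = ['crn']
    def __init__(self, crn=None, **kwargs):
        self.crn, self.extra = crn, kwargs

FakeResultItem.from_dict = classmethod(from_dict)  # reuse the record's function
item = FakeResultItem.from_dict({'crn': 'crn:v1:demo', 'name': 'svc'})
assert item.crn == 'crn:v1:demo' and item.extra == {'name': 'svc'}
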
a6fafed4d1412b7d7e6d9ca28f4805ce9e0e92ea
ldmud/python-asyncio
ldmud_asyncio/__init__.py
[ "0BSD" ]
Python
_get_fd_from_fileobj
<not_specific>
def _get_fd_from_fileobj(fileobj): """Returns the file descriptor for a file object. The file object needs either to be an integer (the descriptor itself) or have a fileno() function that returns the descriptor. """ if isinstance(fileobj, int): return fileobj else: return int(fileobj.fileno())
Returns the file descriptor for a file object. The file object needs either to be an integer (the descriptor itself) or have a fileno() function that returns the descriptor.
Returns the file descriptor for a file object. The file object needs either to be an integer (the descriptor itself) or have a fileno() function that returns the descriptor.
[ "Returns", "the", "file", "descriptor", "for", "a", "file", "object", ".", "The", "file", "object", "needs", "either", "to", "be", "an", "integer", "(", "the", "descriptor", "itself", ")", "or", "have", "a", "fileno", "()", "function", "that", "returns", "the", "descriptor", "." ]
def _get_fd_from_fileobj(fileobj): if isinstance(fileobj, int): return fileobj else: return int(fileobj.fileno())
[ "def", "_get_fd_from_fileobj", "(", "fileobj", ")", ":", "if", "isinstance", "(", "fileobj", ",", "int", ")", ":", "return", "fileobj", "else", ":", "return", "int", "(", "fileobj", ".", "fileno", "(", ")", ")" ]
Returns the file descriptor for a file object.
[ "Returns", "the", "file", "descriptor", "for", "a", "file", "object", "." ]
[ "\"\"\"Returns the file descriptor for a file object.\n\n The file object needs either to be an integer (the descriptor itself)\n or have a fileno() function that returns the descriptor.\n \"\"\"" ]
[ { "param": "fileobj", "type": null } ]
{ "returns": [], "raises": [], "params": [ { "identifier": "fileobj", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
def _get_fd_from_fileobj(fileobj): if isinstance(fileobj, int): return fileobj else: return int(fileobj.fileno())
672
127
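
A usage sketch, assuming _get_fd_from_fileobj from the record above is in scope:

import tempfile

with tempfile.TemporaryFile() as f:
    assert _get_fd_from_fileobj(f) == f.fileno()
assert _get_fd_from_fileobj(5) == 5  # plain ints pass through unchanged
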
769eba837e3cd6fe3f0e0706d2a0f680ef948b7d
nkonai/Curso-em-video-Python
pacote-download/Ex102.py
[ "MIT" ]
Python
fatorial
<not_specific>
def fatorial(num, show=False): """ Calculates the factorial of a number :param num: The number to be calculated :param show: (optional) Whether or not to show the calculation :return: The factorial value of the number n """ f = 1 for c in range(num, 0, -1): if show: print(c, end='') if c > 1: print(' x ', end ='') else: print(' = ', end ='') f *= c return f
Calculates the factorial of a number :param num: The number to be calculated :param show: (optional) Whether or not to show the calculation :return: The factorial value of the number n
Calculates the factorial of a number
[ "Calculates", "the", "factorial", "of", "a", "number" ]
def fatorial(num, show=False): f = 1 for c in range(num, 0, -1): if show: print(c, end='') if c > 1: print(' x ', end ='') else: print(' = ', end ='') f *= c return f
[ "def", "fatorial", "(", "num", ",", "show", "=", "False", ")", ":", "f", "=", "1", "for", "c", "in", "range", "(", "num", ",", "0", ",", "-", "1", ")", ":", "if", "show", ":", "print", "(", "c", ",", "end", "=", "''", ")", "if", "c", ">", "1", ":", "print", "(", "' x '", ",", "end", "=", "''", ")", "else", ":", "print", "(", "' = '", ",", "end", "=", "''", ")", "f", "*=", "c", "return", "f" ]
Calculates the factorial of a number
[ "Calculates", "the", "factorial", "of", "a", "number" ]
[ "\"\"\"\n Calculates the factorial of a number\n :param num: The number to be calculated\n :param show: (optional) Whether or not to show the calculation\n :return: The factorial value of the number n\n \"\"\"" ]
[ { "param": "num", "type": null }, { "param": "show", "type": null } ]
{ "returns": [ { "docstring": "O valor fatorial do numero n", "docstring_tokens": [ "O", "valor", "fatorial", "do", "numero", "n" ], "type": null } ], "raises": [], "params": [ { "identifier": "num", "type": null, "docstring": "O numero a ser calculado", "docstring_tokens": [ "O", "numero", "a", "ser", "calculado" ], "default": null, "is_optional": null }, { "identifier": "show", "type": null, "docstring": "(opcional) Mostrar ou nao a conta", "docstring_tokens": [ "(", "opcional", ")", "Mostrar", "ou", "nao", "a", "conta" ], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
def fatorial(num, show=False): f = 1 for c in range(num, 0, -1): if show: print(c, end='') if c > 1: print(' x ', end ='') else: print(' = ', end ='') f *= c return f
674
29
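
A usage sketch, assuming fatorial from the record above is in scope:

assert fatorial(5) == 120
fatorial(5, show=True)  # prints '5 x 4 x 3 x 2 x 1 = ' with no newline;
                        # the result itself is only returned, never printed
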
0c60a25e5498a3b05c737325b417d3e70366cd78
dengpan1/hue
desktop/core/ext-py/SQLAlchemy-1.3.17/test/sql/test_functions.py
[ "Apache-2.0" ]
Python
exec_sorted
<not_specific>
def exec_sorted(statement, *args, **kw): """Executes a statement and returns a sorted list of plain tuple rows.""" return sorted( [tuple(row) for row in statement.execute(*args, **kw).fetchall()] )
Executes a statement and returns a sorted list of plain tuple rows.
Executes a statement and returns a sorted list of plain tuple rows.
[ "Executes", "a", "statement", "and", "returns", "a", "sorted", "list", "of", "plain", "tuple", "rows", "." ]
def exec_sorted(statement, *args, **kw): return sorted( [tuple(row) for row in statement.execute(*args, **kw).fetchall()] )
[ "def", "exec_sorted", "(", "statement", ",", "*", "args", ",", "**", "kw", ")", ":", "return", "sorted", "(", "[", "tuple", "(", "row", ")", "for", "row", "in", "statement", ".", "execute", "(", "*", "args", ",", "**", "kw", ")", ".", "fetchall", "(", ")", "]", ")" ]
Executes a statement and returns a sorted list of plain tuple rows.
[ "Executes", "a", "statement", "and", "returns", "a", "sorted", "list", "of", "plain", "tuple", "rows", "." ]
[ "\"\"\"Executes a statement and returns a sorted list of plain tuple rows.\"\"\"" ]
[ { "param": "statement", "type": null } ]
{ "returns": [], "raises": [], "params": [ { "identifier": "statement", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
def exec_sorted(statement, *args, **kw): return sorted( [tuple(row) for row in statement.execute(*args, **kw).fetchall()] )
675
498
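
A stub demo, assuming exec_sorted is in scope; FakeStatement stands in for a SQLAlchemy executable statement and only models the execute().fetchall() chain the function relies on:

class FakeStatement:
    def execute(self, *args, **kw):
        class Result:
            def fetchall(self):
                return [(2, 'b'), (1, 'a')]
        return Result()

assert exec_sorted(FakeStatement()) == [(1, 'a'), (2, 'b')]
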
7eef626e30339dee952e7d4f4fe79176ed45f74f
rene-d/dl-pypi
pypim.py
[ "Unlicense" ]
Python
fetch_value
<not_specific>
def fetch_value(db, sql, params=(), default_value=0): """ fetch the first value of the first row of a select statement """ # logger.debug(f"{sql}") # logger.debug(f"{params!r}") if not isinstance(params, tuple) and not isinstance(params, list): params = (params,) row = db.execute(sql, params).fetchone() # logger.debug(f"{row!r}") if row and isinstance(row[0], int): return int(row[0]) else: return default_value
fetch the first value of the first row of a select statement
fetch the first value of the first row of a select statement
[ "fetch", "the", "first", "value", "of", "the", "first", "row", "of", "a", "select", "statement" ]
def fetch_value(db, sql, params=(), default_value=0): if not isinstance(params, tuple) and not isinstance(params, list): params = (params,) row = db.execute(sql, params).fetchone() if row and isinstance(row[0], int): return int(row[0]) else: return default_value
[ "def", "fetch_value", "(", "db", ",", "sql", ",", "params", "=", "(", ")", ",", "default_value", "=", "0", ")", ":", "if", "not", "isinstance", "(", "params", ",", "tuple", ")", "and", "not", "isinstance", "(", "params", ",", "list", ")", ":", "params", "=", "(", "params", ",", ")", "row", "=", "db", ".", "execute", "(", "sql", ",", "params", ")", ".", "fetchone", "(", ")", "if", "row", "and", "isinstance", "(", "row", "[", "0", "]", ",", "int", ")", ":", "return", "int", "(", "row", "[", "0", "]", ")", "else", ":", "return", "default_value" ]
fetch the first value of the first row of a select statement
[ "fetch", "the", "first", "value", "of", "the", "first", "row", "of", "a", "select", "statement" ]
[ "\"\"\"\n fetch the first value of the first row of a select statement\n \"\"\"", "# logger.debug(f\"{sql}\")", "# logger.debug(f\"{params!r}\")", "# logger.debug(f\"{row!r}\")" ]
[ { "param": "db", "type": null }, { "param": "sql", "type": null }, { "param": "params", "type": null }, { "param": "default_value", "type": null } ]
{ "returns": [], "raises": [], "params": [ { "identifier": "db", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null }, { "identifier": "sql", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null }, { "identifier": "params", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null }, { "identifier": "default_value", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
def fetch_value(db, sql, params=(), default_value=0): if not isinstance(params, tuple) and not isinstance(params, list): params = (params,) row = db.execute(sql, params).fetchone() if row and isinstance(row[0], int): return int(row[0]) else: return default_value
676
958
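
A usage sketch against an in-memory SQLite database, assuming fetch_value from the record above is in scope:

import sqlite3

db = sqlite3.connect(':memory:')
db.execute('CREATE TABLE pkg (name TEXT, size INT)')
db.execute("INSERT INTO pkg VALUES ('a', 7)")
assert fetch_value(db, 'SELECT size FROM pkg WHERE name=?', 'a') == 7
assert fetch_value(db, 'SELECT size FROM pkg WHERE name=?', 'missing') == 0  # default
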
2d4b7f003fa4a2f3d2448e2e3b1e21367b38f808
bastibe/PySoundCard
pysoundcard.py
[ "BSD-3-Clause" ]
Python
_split
<not_specific>
def _split(value): """Split input/output value into two values.""" if isinstance(value, str): # iterable, but not meant for splitting return value, value try: invalue, outvalue = value except TypeError: invalue = outvalue = value except ValueError: raise ValueError("Only single values and pairs are allowed") return invalue, outvalue
Split input/output value into two values.
Split input/output value into two values.
[ "Split", "input", "/", "output", "value", "into", "two", "values", "." ]
def _split(value): if isinstance(value, str): return value, value try: invalue, outvalue = value except TypeError: invalue = outvalue = value except ValueError: raise ValueError("Only single values and pairs are allowed") return invalue, outvalue
[ "def", "_split", "(", "value", ")", ":", "if", "isinstance", "(", "value", ",", "str", ")", ":", "return", "value", ",", "value", "try", ":", "invalue", ",", "outvalue", "=", "value", "except", "TypeError", ":", "invalue", "=", "outvalue", "=", "value", "except", "ValueError", ":", "raise", "ValueError", "(", "\"Only single values and pairs are allowed\"", ")", "return", "invalue", ",", "outvalue" ]
Split input/output value into two values.
[ "Split", "input", "/", "output", "value", "into", "two", "values", "." ]
[ "\"\"\"Split input/output value into two values.\"\"\"", "# iterable, but not meant for splitting" ]
[ { "param": "value", "type": null } ]
{ "returns": [], "raises": [], "params": [ { "identifier": "value", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
def _split(value): if isinstance(value, str): return value, value try: invalue, outvalue = value except TypeError: invalue = outvalue = value except ValueError: raise ValueError("Only single values and pairs are allowed") return invalue, outvalue
677
147
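
A usage sketch, assuming _split from the record above is in scope:

assert _split(44100) == (44100, 44100)     # a scalar is duplicated
assert _split((2, 6)) == (2, 6)            # a pair is split as-is
assert _split('hw:0') == ('hw:0', 'hw:0')  # strings stay whole, never unpacked
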
2c45a7aedbfcd4bbdee8e2067d960693f81528f7
ctogle/editops
editops/alignment.py
[ "MIT" ]
Python
_compute_f
<not_specific>
def _compute_f(D, I, S): """Computes a factor `f` to reconcile that replacement errors are less detrimental than insertion/deletion errors. - f is on [0.5, 1] - f == 0.5 -> all replacements - f == 1 -> all non-replacements - f * (D_S + I_S) = (S + D + I), where D_S and I_S include contributions from replacements. Args: D (int): Number of deletion errors (false negatives). I (int): Number of insertion errors (false positives). S (int): Number of replacement errors (false positive/negatives pairs). Returns: float: A factor `f` which is 0.5 when all errors are replacements and 1.0 when all errors are not replacements. """ return 1 - ((S / (I + D)) if (I or D) else 0)
Computes a factor `f` to reconcile that replacement errors are less detrimental than insertion/deletion errors. - f is on [0.5, 1] - f == 0.5 -> all replacements - f == 1 -> all non-replacements - f * (D_S + I_S) = (S + D + I), where D_S and I_S include contributions from replacements. Args: D (int): Number of deletion errors (false negatives). I (int): Number of insertion errors (false positives). S (int): Number of replacement errors (false positive/negatives pairs). Returns: float: A factor `f` which is 0.5 when all errors are replacements and 1.0 when all errors are not replacements.
Computes a factor `f` to reconcile that replacement errors are less detrimental than insertion/deletion errors.
[ "Computes", "a", "factor", "`", "f", "`", "to", "reconcile", "that", "replacement", "errors", "are", "less", "detrimental", "than", "insertion", "/", "deletion", "errors", "." ]
def _compute_f(D, I, S): return 1 - ((S / (I + D)) if (I or D) else 0)
[ "def", "_compute_f", "(", "D", ",", "I", ",", "S", ")", ":", "return", "1", "-", "(", "(", "S", "/", "(", "I", "+", "D", ")", ")", "if", "(", "I", "or", "D", ")", "else", "0", ")" ]
Computes a factor `f` to reconcile that replacement errors are less detrimental than insertion/deletion errors.
[ "Computes", "a", "factor", "`", "f", "`", "to", "reconcile", "that", "replacement", "errors", "are", "less", "detrimental", "than", "insertion", "/", "deletion", "errors", "." ]
[ "\"\"\"Computes a factor `f` to reconcile that replacement errors are less detrimental \n than insertion/deletion errors.\n\n - f is on [0.5, 1]\n - f == 0.5 -> all replacements\n - f == 1 -> all non-replacements\n - f * (D_S + I_S) = (S + D + I), where D_S and I_S include contributions from \n replacements.\n\n Args:\n D (int): Number of deletion errors (false negatives).\n I (int): Number of insertion errors (false positives).\n S (int): Number of replacement errors (false positive/negatives pairs).\n\n Returns:\n float: A factor `f` which is 0.5 when all errors are replacements and 1.0\n when all errors are not replacements.\n\n \"\"\"" ]
[ { "param": "D", "type": null }, { "param": "I", "type": null }, { "param": "S", "type": null } ]
{ "returns": [ { "docstring": "A factor `f` which is 0.5 when all errors are replacements and 1.0\nwhen all errors are not replacements.", "docstring_tokens": [ "A", "factor", "`", "f", "`", "which", "is", "0", ".", "5", "when", "all", "errors", "are", "replacements", "and", "1", ".", "0", "when", "all", "errors", "are", "not", "replacements", "." ], "type": "float" } ], "raises": [], "params": [ { "identifier": "D", "type": null, "docstring": "Number of deletion errors (false negatives).", "docstring_tokens": [ "Number", "of", "deletion", "errors", "(", "false", "negatives", ")", "." ], "default": null, "is_optional": false }, { "identifier": "I", "type": null, "docstring": "Number of insertion errors (false positives).", "docstring_tokens": [ "Number", "of", "insertion", "errors", "(", "false", "positives", ")", "." ], "default": null, "is_optional": false }, { "identifier": "S", "type": null, "docstring": "Number of replacement errors (false positive/negatives pairs).", "docstring_tokens": [ "Number", "of", "replacement", "errors", "(", "false", "positive", "/", "negatives", "pairs", ")", "." ], "default": null, "is_optional": false } ], "outlier_params": [], "others": [] }
def _compute_f(D, I, S): return 1 - ((S / (I + D)) if (I or D) else 0)
678
297
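
Worked checks of the formula f = 1 - S / (I + D), assuming _compute_f is in scope; per the docstring, D and I here already include the per-replacement contributions D_S and I_S:

assert _compute_f(D=4, I=4, S=4) == 0.5   # all errors are replacements
assert _compute_f(D=3, I=1, S=0) == 1.0   # no replacements at all
assert _compute_f(D=2, I=2, S=1) == 0.75  # one replacement plus one pure D and one pure I
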
c505ac19be890edad7ef664b4e5f94f914d57f38
cameliot/llama
llama/mqtt.py
[ "BSD-3-Clause" ]
Python
_decode_action_type
<not_specific>
def _decode_action_type(routes, topic): """ Decode an inbound MQTT topic into an action type. :param routes: The routing dict :type routes: dict :param topic: The MQTT topic :type topic: str :returns: The action type """ for handle, route in routes.items(): if topic.startswith(route): return "@" + topic.replace(route, handle) return topic
Decode an inbound MQTT topic into an action type. :param routes: The routing dict :type routes: dict :param topic: The MQTT topic :type topic: str :returns: The action type
Decode an inbound MQTT topic into an action type.
[ "Decode", "an", "inbound", "MQTT", "topic", "into", "an", "action", "type", "." ]
def _decode_action_type(routes, topic): for handle, route in routes.items(): if topic.startswith(route): return "@" + topic.replace(route, handle) return topic
[ "def", "_decode_action_type", "(", "routes", ",", "topic", ")", ":", "for", "handle", ",", "route", "in", "routes", ".", "items", "(", ")", ":", "if", "topic", ".", "startswith", "(", "route", ")", ":", "return", "\"@\"", "+", "topic", ".", "replace", "(", "route", ",", "handle", ")", "return", "topic" ]
Decode an inbound MQTT topic into an action type.
[ "Decode", "an", "inbound", "MQTT", "topic", "into", "an", "action", "type", "." ]
[ "\"\"\"\n Decode an inbound MQTT topic into an action type.\n\n :param routes: The routing dict\n :type routes: dict\n\n :param topic: The MQTT topic\n :type topic: str\n\n :returns: The action type\n \"\"\"" ]
[ { "param": "routes", "type": null }, { "param": "topic", "type": null } ]
{ "returns": [ { "docstring": "The action type", "docstring_tokens": [ "The", "action", "type" ], "type": null } ], "raises": [], "params": [ { "identifier": "routes", "type": null, "docstring": "The routing dict", "docstring_tokens": [ "The", "routing", "dict" ], "default": null, "is_optional": null }, { "identifier": "topic", "type": null, "docstring": "The MQTT topic", "docstring_tokens": [ "The", "MQTT", "topic" ], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
def _decode_action_type(routes, topic): for handle, route in routes.items(): if topic.startswith(route): return "@" + topic.replace(route, handle) return topic
679
122
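
A usage sketch, assuming _decode_action_type from the record above is in scope (the route names are made up):

routes = {'sensors/': 'llama/inbound/sensors/'}
assert _decode_action_type(routes, 'llama/inbound/sensors/temp') == '@sensors/temp'
assert _decode_action_type(routes, 'other/topic') == 'other/topic'  # unmatched topics pass through
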
73c158e24eee925bbf96107e6d2c63b2d2e5f135
spiricn/DevUtils
du/Utils.py
[ "MIT" ]
Python
generateFileMd5sum
<not_specific>
def generateFileMd5sum(fielPath): """ Generate MD5 hex digest from given file @param fielPath File path @return md5 checksum """ md5 = hashlib.md5() with open(fielPath, "rb") as f: for chunk in iter(lambda: f.read(4096), b""): md5.update(chunk) return md5.hexdigest()
Generate MD5 hex digest from given file @param fielPath File path @return md5 checksum
Generate MD5 hex digest from given file @param fielPath File path @return md5 checksum
[ "Generate", "MD5", "hex", "digest", "from", "given", "file", "@param", "fielPath", "File", "path", "@return", "md5", "checksum" ]
def generateFileMd5sum(fielPath): md5 = hashlib.md5() with open(fielPath, "rb") as f: for chunk in iter(lambda: f.read(4096), b""): md5.update(chunk) return md5.hexdigest()
[ "def", "generateFileMd5sum", "(", "fielPath", ")", ":", "md5", "=", "hashlib", ".", "md5", "(", ")", "with", "open", "(", "fielPath", ",", "\"rb\"", ")", "as", "f", ":", "for", "chunk", "in", "iter", "(", "lambda", ":", "f", ".", "read", "(", "4096", ")", ",", "b\"\"", ")", ":", "md5", ".", "update", "(", "chunk", ")", "return", "md5", ".", "hexdigest", "(", ")" ]
Generate MD5 hex digest from given file @param fielPath File path @return md5 checksum
[ "Generate", "MD5", "hex", "digest", "from", "given", "file", "@param", "fielPath", "File", "path", "@return", "md5", "checksum" ]
[ "\"\"\"\r\n Generate MD5 hex digest from given file\r\n\r\n @param fielPath File path\r\n @return md5 checksum\r\n \"\"\"" ]
[ { "param": "fielPath", "type": null } ]
{ "returns": [], "raises": [], "params": [ { "identifier": "fielPath", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
import hashlib def generateFileMd5sum(fielPath): md5 = hashlib.md5() with open(fielPath, "rb") as f: for chunk in iter(lambda: f.read(4096), b""): md5.update(chunk) return md5.hexdigest()
680
516
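
A usage sketch, assuming generateFileMd5sum from the record above is in scope (the fielPath spelling is the source's own identifier):

import hashlib
import tempfile

with tempfile.NamedTemporaryFile(delete=False) as f:
    f.write(b'hello')
    path = f.name
assert generateFileMd5sum(path) == hashlib.md5(b'hello').hexdigest()
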
eb3ca34d7ab6e39279954d1520936518a56981f0
violet-zct/fairseq-dro-mnmt
fairseq/criterions/token_weighted_ls_cross_entropy.py
[ "MIT" ]
Python
add_args
null
def add_args(parser): """Add criterion-specific arguments to the parser.""" # fmt: off parser.add_argument('--label-smoothing', default=0., type=float, metavar='D', help='epsilon for label smoothing, 0 means no label smoothing') parser.add_argument('--start-ft-steps', default=0, type=int) parser.add_argument('--obj', default="exp", choices=['exp', 'chi_square']) parser.add_argument('--adaptive-T', type=float) # fmt: on
Add criterion-specific arguments to the parser.
Add criterion-specific arguments to the parser.
[ "Add", "criterion", "-", "specific", "arguments", "to", "the", "parser", "." ]
def add_args(parser): parser.add_argument('--label-smoothing', default=0., type=float, metavar='D', help='epsilon for label smoothing, 0 means no label smoothing') parser.add_argument('--start-ft-steps', default=0, type=int) parser.add_argument('--obj', default="exp", choices=['exp', 'chi_square']) parser.add_argument('--adaptive-T', type=float)
[ "def", "add_args", "(", "parser", ")", ":", "parser", ".", "add_argument", "(", "'--label-smoothing'", ",", "default", "=", "0.", ",", "type", "=", "float", ",", "metavar", "=", "'D'", ",", "help", "=", "'epsilon for label smoothing, 0 means no label smoothing'", ")", "parser", ".", "add_argument", "(", "'--start-ft-steps'", ",", "default", "=", "0", ",", "type", "=", "int", ")", "parser", ".", "add_argument", "(", "'--obj'", ",", "default", "=", "\"exp\"", ",", "choices", "=", "[", "'exp'", ",", "'chi_square'", "]", ")", "parser", ".", "add_argument", "(", "'--adaptive-T'", ",", "type", "=", "float", ")" ]
Add criterion-specific arguments to the parser.
[ "Add", "criterion", "-", "specific", "arguments", "to", "the", "parser", "." ]
[ "\"\"\"Add criterion-specific arguments to the parser.\"\"\"", "# fmt: off", "# fmt: on" ]
[ { "param": "parser", "type": null } ]
{ "returns": [], "raises": [], "params": [ { "identifier": "parser", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
def add_args(parser): parser.add_argument('--label-smoothing', default=0., type=float, metavar='D', help='epsilon for label smoothing, 0 means no label smoothing') parser.add_argument('--start-ft-steps', default=0, type=int) parser.add_argument('--obj', default="exp", choices=['exp', 'chi_square']) parser.add_argument('--adaptive-T', type=float)
681
192
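To inspect the flags that `add_args` registers, it can be attached to a bare `argparse.ArgumentParser`; in fairseq the parser is normally constructed by the framework, so this standalone call is only a sketch:

import argparse

parser = argparse.ArgumentParser()
add_args(parser)

# argparse maps dashes to underscores on the resulting namespace.
args = parser.parse_args(["--label-smoothing", "0.1", "--obj", "chi_square"])
args.label_smoothing  # 0.1
args.start_ft_steps   # 0 (default)
args.obj              # "chi_square"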
f6d1716bb55b6586b087733e227db1743b74f6a6
mrosanes/taurus_deb
lib/taurus/test/fuzzytest.py
[ "CC-BY-3.0" ]
Python
loopTest
<not_specific>
def loopTest(testname, maxtries=100, maxfails=10): '''Run a test `maxtries` times or until it fails `maxfails` times and report the number of tries and failures. :param testname: (str) test name. see: :meth:`unittest.TestLoader.loadTestsFromName` :param maxtries: (int) maximum number of runs :param maxfails: (int) maximum number of failed runs :return: (tuple) a tuple of ints: tries, failures ''' from taurus.external import unittest suite = unittest.defaultTestLoader.loadTestsFromName(testname) runner = unittest.TextTestRunner(verbosity=0) i, f = 0, 0 while f < maxfails and i < maxtries: i += 1 result = runner.run(suite) if not result.wasSuccessful(): f += 1 return i, f
Run a test `maxtries` times or until it fails `maxfails` times and report the number of tries and failures. :param testname: (str) test name. see: :meth:`unittest.TestLoader.loadTestsFromName` :param maxtries: (int) maximum number of runs :param maxfails: (int) maximum number of failed runs :return: (tuple) a tuple of ints: tries, failures
Run a test `maxtries` times or until it fails `maxfails` times and report the number of tries and failures.
[ "Run", "a", "test", "`", "maxtries", "`", "times", "or", "until", "it", "fails", "`", "maxfails", "`", "times", "and", "report", "the", "number", "of", "tries", "and", "failures", "." ]
def loopTest(testname, maxtries=100, maxfails=10): from taurus.external import unittest suite = unittest.defaultTestLoader.loadTestsFromName(testname) runner = unittest.TextTestRunner(verbosity=0) i, f = 0, 0 while f < maxfails and i < maxtries: i += 1 result = runner.run(suite) if not result.wasSuccessful(): f += 1 return i, f
[ "def", "loopTest", "(", "testname", ",", "maxtries", "=", "100", ",", "maxfails", "=", "10", ")", ":", "from", "taurus", ".", "external", "import", "unittest", "suite", "=", "unittest", ".", "defaultTestLoader", ".", "loadTestsFromName", "(", "testname", ")", "runner", "=", "unittest", ".", "TextTestRunner", "(", "verbosity", "=", "0", ")", "i", ",", "f", "=", "0", ",", "0", "while", "f", "<", "maxfails", "and", "i", "<", "maxtries", ":", "i", "+=", "1", "result", "=", "runner", ".", "run", "(", "suite", ")", "if", "not", "result", ".", "wasSuccessful", "(", ")", ":", "f", "+=", "1", "return", "i", ",", "f" ]
Run a test `maxtries` times or until it fails `maxfails` times and report the number of tries and failures.
[ "Run", "a", "test", "`", "maxtries", "`", "times", "or", "until", "it", "fails", "`", "maxfails", "`", "times", "and", "report", "the", "number", "of", "tries", "and", "failures", "." ]
[ "'''Run a test `maxtries` times or until it fails `maxfails` times and\n report the number of tries and failures.\n\n :param testname: (str) test name. see:\n :meth:`unittest.TestLoader.loadTestsFromName`\n :param maxtries: (int) maximum number of runs\n :param maxfails: (int) maximum number of failed runs\n\n :return: (tuple) a tuple of ints: tries, failures\n '''" ]
[ { "param": "testname", "type": null }, { "param": "maxtries", "type": null }, { "param": "maxfails", "type": null } ]
{ "returns": [ { "docstring": "(tuple) a tuple of ints: tries, failures", "docstring_tokens": [ "(", "tuple", ")", "a", "tuple", "of", "ints", ":", "tries", "failures" ], "type": null } ], "raises": [], "params": [ { "identifier": "testname", "type": null, "docstring": "(str) test name. see:\n:meth:`unittest.TestLoader.loadTestsFromName`", "docstring_tokens": [ "(", "str", ")", "test", "name", ".", "see", ":", ":", "meth", ":", "`", "unittest", ".", "TestLoader", ".", "loadTestsFromName", "`" ], "default": null, "is_optional": null }, { "identifier": "maxtries", "type": null, "docstring": "(int) maximum number of runs", "docstring_tokens": [ "(", "int", ")", "maximum", "number", "of", "runs" ], "default": null, "is_optional": null }, { "identifier": "maxfails", "type": null, "docstring": "(int) maximum number of failed runs", "docstring_tokens": [ "(", "int", ")", "maximum", "number", "of", "failed", "runs" ], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
import unittest def loopTest(testname, maxtries=100, maxfails=10): from taurus.external import unittest suite = unittest.defaultTestLoader.loadTestsFromName(testname) runner = unittest.TextTestRunner(verbosity=0) i, f = 0, 0 while f < maxfails and i < maxtries: i += 1 result = runner.run(suite) if not result.wasSuccessful(): f += 1 return i, f
682
186
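A brief example of driving `loopTest`; the test name "mypkg.tests.FlakyCase" is a placeholder, and reporting a failure rate is just one way to use the returned counts:

# Run a (hypothetical) flaky test up to 50 times, stopping early after 5 failures.
tries, failures = loopTest("mypkg.tests.FlakyCase", maxtries=50, maxfails=5)
print("failed %d of %d runs (%.1f%%)" % (failures, tries, 100.0 * failures / tries))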
d490abed7d237a2b151669e563ced0d61b3303c4
renovate-tests/bazel-integration-testing-1
tools/common.bzl
[ "Apache-2.0" ]
Python
_zfill
<not_specific>
def _zfill(v, l = 5): """zfill a string by padding 0s to the left of the string till it is the length specified by l. """ return "0" * (l - len(v)) + v
zfill a string by padding 0s to the left of the string till it is the length specified by l.
zfill a string by padding 0s to the left of the string till it is the length specified by l.
[ "zfill", "a", "string", "by", "padding", "0s", "to", "the", "left", "of", "the", "string", "till", "it", "is", "the", "length", "specified", "by", "l", "." ]
def _zfill(v, l = 5): return "0" * (l - len(v)) + v
[ "def", "_zfill", "(", "v", ",", "l", "=", "5", ")", ":", "return", "\"0\"", "*", "(", "l", "-", "len", "(", "v", ")", ")", "+", "v" ]
zfill a string by padding 0s to the left of the string till it is the length specified by l.
[ "zfill", "a", "string", "by", "padding", "0s", "to", "the", "left", "of", "the", "string", "till", "it", "is", "the", "length", "specified", "by", "l", "." ]
[ "\"\"\"zfill a string by padding 0s to the left of the string till it is the length\n    specified by l.\n    \"\"\"" ]
[ { "param": "v", "type": null }, { "param": "l", "type": null } ]
{ "returns": [], "raises": [], "params": [ { "identifier": "v", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null }, { "identifier": "l", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
def _zfill(v, l = 5): return "0" * (l - len(v)) + v
683
58
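`_zfill` lives in a `.bzl` file because Starlark lacks `str.zfill`, but the helper is also valid Python, so its behavior can be checked against the built-in (for digit-only inputs; `str.zfill` additionally handles sign prefixes, which `_zfill` does not):

_zfill("42")       # "00042" -- padded to the default length of 5
_zfill("7", 3)     # "007"
_zfill("123456")   # "123456" -- already longer than 5, returned unchanged

# For plain digit strings this matches Python's built-in:
assert _zfill("42") == "42".zfill(5)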