Dataset schema (column: type, with observed value ranges from the source viewer):

repo: string (7 to 54 chars)
path: string (4 to 192 chars)
url: string (87 to 284 chars)
code: string (78 to 104k chars)
code_tokens: sequence
docstring: string (1 to 46.9k chars)
docstring_tokens: sequence
language: string (1 distinct value)
partition: string (3 distinct values)
python-cmd2/cmd2
examples/scripts/save_help_text.py
https://github.com/python-cmd2/cmd2/blob/b22c0bd891ed08c8b09df56df9d91f48166a5e2a/examples/scripts/save_help_text.py#L39-L55
def add_help_to_file(item: str, outfile: TextIO, is_command: bool) -> None:
    """
    Write help text for commands and topics to the output file
    :param item: what is having its help text saved
    :param outfile: file being written to
    :param is_command: tells if the item is a command and not just a help topic
    """
    if is_command:
        label = "COMMAND"
    else:
        label = "TOPIC"

    header = '{}\n{}: {}\n{}\n'.format(ASTERISKS, label, item, ASTERISKS)
    outfile.write(header)

    result = app('help {}'.format(item))
    outfile.write(result.stdout)
[ "def", "add_help_to_file", "(", "item", ":", "str", ",", "outfile", ":", "TextIO", ",", "is_command", ":", "bool", ")", "->", "None", ":", "if", "is_command", ":", "label", "=", "\"COMMAND\"", "else", ":", "label", "=", "\"TOPIC\"", "header", "=", "'{}\\n{}: {}\\n{}\\n'", ".", "format", "(", "ASTERISKS", ",", "label", ",", "item", ",", "ASTERISKS", ")", "outfile", ".", "write", "(", "header", ")", "result", "=", "app", "(", "'help {}'", ".", "format", "(", "item", ")", ")", "outfile", ".", "write", "(", "result", ".", "stdout", ")" ]
Write help text for commands and topics to the output file :param item: what is having its help text saved :param outfile: file being written to :param is_command: tells if the item is a command and not just a help topic
[ "Write", "help", "text", "for", "commands", "and", "topics", "to", "the", "output", "file", ":", "param", "item", ":", "what", "is", "having", "its", "help", "text", "saved", ":", "param", "outfile", ":", "file", "being", "written", "to", ":", "param", "is_command", ":", "tells", "if", "the", "item", "is", "a", "command", "and", "not", "just", "a", "help", "topic" ]
python
train
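A minimal usage sketch for the record above. ASTERISKS and app are module-level names defined elsewhere in the surrounding cmd2 example script, so hypothetical stand-ins are used here; `from typing import TextIO` is also assumed for the annotation.

from io import StringIO
from typing import TextIO

# Hypothetical stand-ins (assumptions): in the real script, `app` runs a cmd2
# command and returns a result whose .stdout holds the captured help text.
ASTERISKS = '*' * 60

class _FakeResult:
    def __init__(self, stdout):
        self.stdout = stdout

def app(command):
    return _FakeResult('help text for: {}\n'.format(command))

buf = StringIO()
add_help_to_file('alias', buf, is_command=True)
print(buf.getvalue())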
Parquery/sphinx-icontract
sphinx_icontract/__init__.py
https://github.com/Parquery/sphinx-icontract/blob/92918f23a8ea1873112e9b7446c64cd6f12ee04b/sphinx_icontract/__init__.py#L303-L343
def _capture_as_text(capture: Callable[..., Any]) -> str:
    """Convert the capture function into its text representation by parsing the source code of the decorator."""
    if not icontract._represent._is_lambda(a_function=capture):
        signature = inspect.signature(capture)
        param_names = list(signature.parameters.keys())

        return "{}({})".format(capture.__qualname__, ", ".join(param_names))

    lines, lineno = inspect.findsource(capture)
    filename = inspect.getsourcefile(capture)

    decorator_inspection = icontract._represent.inspect_decorator(lines=lines, lineno=lineno, filename=filename)
    call_node = decorator_inspection.node

    capture_node = None  # type: Optional[ast.Lambda]

    if len(call_node.args) > 0:
        assert isinstance(call_node.args[0], ast.Lambda), \
            ("Expected the first argument to the snapshot decorator to be a condition as lambda AST node, "
             "but got: {}").format(type(call_node.args[0]))

        capture_node = call_node.args[0]

    elif len(call_node.keywords) > 0:
        for keyword in call_node.keywords:
            if keyword.arg == "capture":
                assert isinstance(keyword.value, ast.Lambda), \
                    "Expected lambda node as value of the 'capture' argument to the decorator."

                capture_node = keyword.value
                break

        assert capture_node is not None, "Expected to find a keyword AST node with 'capture' arg, but found none"
    else:
        raise AssertionError(
            "Expected a call AST node of a snapshot decorator to have either args or keywords, but got: {}".format(
                ast.dump(call_node)))

    capture_text = decorator_inspection.atok.get_text(capture_node.body)

    return capture_text
[ "def", "_capture_as_text", "(", "capture", ":", "Callable", "[", "...", ",", "Any", "]", ")", "->", "str", ":", "if", "not", "icontract", ".", "_represent", ".", "_is_lambda", "(", "a_function", "=", "capture", ")", ":", "signature", "=", "inspect", ".", "signature", "(", "capture", ")", "param_names", "=", "list", "(", "signature", ".", "parameters", ".", "keys", "(", ")", ")", "return", "\"{}({})\"", ".", "format", "(", "capture", ".", "__qualname__", ",", "\", \"", ".", "join", "(", "param_names", ")", ")", "lines", ",", "lineno", "=", "inspect", ".", "findsource", "(", "capture", ")", "filename", "=", "inspect", ".", "getsourcefile", "(", "capture", ")", "decorator_inspection", "=", "icontract", ".", "_represent", ".", "inspect_decorator", "(", "lines", "=", "lines", ",", "lineno", "=", "lineno", ",", "filename", "=", "filename", ")", "call_node", "=", "decorator_inspection", ".", "node", "capture_node", "=", "None", "# type: Optional[ast.Lambda]", "if", "len", "(", "call_node", ".", "args", ")", ">", "0", ":", "assert", "isinstance", "(", "call_node", ".", "args", "[", "0", "]", ",", "ast", ".", "Lambda", ")", ",", "(", "\"Expected the first argument to the snapshot decorator to be a condition as lambda AST node, \"", "\"but got: {}\"", ")", ".", "format", "(", "type", "(", "call_node", ".", "args", "[", "0", "]", ")", ")", "capture_node", "=", "call_node", ".", "args", "[", "0", "]", "elif", "len", "(", "call_node", ".", "keywords", ")", ">", "0", ":", "for", "keyword", "in", "call_node", ".", "keywords", ":", "if", "keyword", ".", "arg", "==", "\"capture\"", ":", "assert", "isinstance", "(", "keyword", ".", "value", ",", "ast", ".", "Lambda", ")", ",", "\"Expected lambda node as value of the 'capture' argument to the decorator.\"", "capture_node", "=", "keyword", ".", "value", "break", "assert", "capture_node", "is", "not", "None", ",", "\"Expected to find a keyword AST node with 'capture' arg, but found none\"", "else", ":", "raise", "AssertionError", "(", "\"Expected a call AST node of a snapshot decorator to have either args or keywords, but got: {}\"", ".", "format", "(", "ast", ".", "dump", "(", "call_node", ")", ")", ")", "capture_text", "=", "decorator_inspection", ".", "atok", ".", "get_text", "(", "capture_node", ".", "body", ")", "return", "capture_text" ]
Convert the capture function into its text representation by parsing the source code of the decorator.
[ "Convert", "the", "capture", "function", "into", "its", "text", "representation", "by", "parsing", "the", "source", "code", "of", "the", "decorator", "." ]
python
train
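For context, `_capture_as_text` parses the source of an icontract snapshot decorator; a decorator of the kind it expects looks like the following illustrative usage of icontract's public API:

import icontract

@icontract.snapshot(lambda lst: len(lst), name="len_lst")
@icontract.ensure(lambda OLD, lst: len(lst) == OLD.len_lst + 1)
def append_one(lst):
    # The snapshot captures len(lst) before the call; the postcondition
    # compares it against the new length afterwards.
    lst.append(1)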
google/tangent
tangent/grad_util.py
https://github.com/google/tangent/blob/6533e83af09de7345d1b438512679992f080dcc9/tangent/grad_util.py#L116-L172
def autodiff_tree(func, wrt, motion, mode, preserve_result, check_dims, verbose):
    """Perform AD on all functions in a call tree.

    This function walks the call tree and differentiates each function in it.
    It also ensures that the global namespaces that each function in the call
    tree was in are merged.

    The `tangent` and `numpy` packages are added to the namespace here, so
    that the gradient templates can assume that they are present.

    Args:
      See `grad`.

    Returns:
      final: A single module which contains the primals and adjoints of all
          the functions in the call tree.
      namespace: A merged dictionary with all the variables in the global
          namespaces of each function. The primals and adjoints need access
          to these in order to execute.
    """
    # Imported here to avoid circular imports
    import tangent
    namespace = {'tangent': tangent, 'numpy': numpy}

    done = set()
    final = gast.Module(body=[])
    namespace.update(six.get_function_globals(func))

    node, required = autodiff_ast(func, wrt, motion, mode, preserve_result,
                                  check_dims, verbose)
    final.body.extend(node.body)
    to_do = set(required)
    if motion == 'split' and mode == 'reverse':
        done.add((func, wrt))
        to_do -= done

    while to_do:
        func, wrt = to_do.pop()
        namespace.update(six.get_function_globals(func))

        node, required = autodiff_ast(
            func=func,
            wrt=wrt,
            motion='split',
            mode=mode,
            preserve_result=True,
            check_dims=False,
            verbose=verbose)

        final.body.extend(node.body)
        done.add((func, wrt))
        to_do.update(required)
        to_do -= done

    return final, namespace
[ "def", "autodiff_tree", "(", "func", ",", "wrt", ",", "motion", ",", "mode", ",", "preserve_result", ",", "check_dims", ",", "verbose", ")", ":", "# Imported here to avoid circular imports", "import", "tangent", "namespace", "=", "{", "'tangent'", ":", "tangent", ",", "'numpy'", ":", "numpy", "}", "done", "=", "set", "(", ")", "final", "=", "gast", ".", "Module", "(", "body", "=", "[", "]", ")", "namespace", ".", "update", "(", "six", ".", "get_function_globals", "(", "func", ")", ")", "node", ",", "required", "=", "autodiff_ast", "(", "func", ",", "wrt", ",", "motion", ",", "mode", ",", "preserve_result", ",", "check_dims", ",", "verbose", ")", "final", ".", "body", ".", "extend", "(", "node", ".", "body", ")", "to_do", "=", "set", "(", "required", ")", "if", "motion", "==", "'split'", "and", "mode", "==", "'reverse'", ":", "done", ".", "add", "(", "(", "func", ",", "wrt", ")", ")", "to_do", "-=", "done", "while", "to_do", ":", "func", ",", "wrt", "=", "to_do", ".", "pop", "(", ")", "namespace", ".", "update", "(", "six", ".", "get_function_globals", "(", "func", ")", ")", "node", ",", "required", "=", "autodiff_ast", "(", "func", "=", "func", ",", "wrt", "=", "wrt", ",", "motion", "=", "'split'", ",", "mode", "=", "mode", ",", "preserve_result", "=", "True", ",", "check_dims", "=", "False", ",", "verbose", "=", "verbose", ")", "final", ".", "body", ".", "extend", "(", "node", ".", "body", ")", "done", ".", "add", "(", "(", "func", ",", "wrt", ")", ")", "to_do", ".", "update", "(", "required", ")", "to_do", "-=", "done", "return", "final", ",", "namespace" ]
Perform AD on all functions in a call tree. This function walks the call tree and differentiates each function in it. It also ensures that the global namespaces that each function in the call tree was in are merged. The `tangent` and `numpy` packages are added to the namespace here, so that the gradient templates can assume that they are present. Args: See `grad`. Returns: final: A single module which contains the primals and adjoints of all the functions in the call tree. namespace: A merged dictionary with all the variables in the global namespaces of each function. The primals and adjoints need access to these in order to execute.
[ "Perform", "AD", "on", "all", "functions", "in", "a", "call", "tree", "." ]
python
train
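`autodiff_tree` is internal machinery; the public entry point that drives it is `tangent.grad`. A small sketch of that entry point (the function `f` is illustrative):

import tangent

def f(x):
    return x * x

df = tangent.grad(f)  # builds the derivative of f with respect to x
print(df(3.0))        # 6.0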
saltstack/salt
salt/states/pkg.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/states/pkg.py#L840-L884
def _verify_install(desired, new_pkgs, ignore_epoch=False, new_caps=None):
    '''
    Determine whether or not the installed packages match what was requested in
    the SLS file.
    '''
    ok = []
    failed = []
    if not new_caps:
        new_caps = dict()
    for pkgname, pkgver in desired.items():
        # FreeBSD pkg supports `openjdk` and `java/openjdk7` package names.
        # Homebrew for Mac OSX does something similar with tap names
        # prefixing package names, separated with a slash.
        has_origin = '/' in pkgname

        if __grains__['os'] == 'FreeBSD' and has_origin:
            cver = [k for k, v in six.iteritems(new_pkgs) if v['origin'] == pkgname]
        elif __grains__['os'] == 'MacOS' and has_origin:
            cver = new_pkgs.get(pkgname, new_pkgs.get(pkgname.split('/')[-1]))
        elif __grains__['os'] == 'OpenBSD':
            cver = new_pkgs.get(pkgname.split('%')[0])
        elif __grains__['os_family'] == 'Debian':
            cver = new_pkgs.get(pkgname.split('=')[0])
        else:
            cver = new_pkgs.get(pkgname)

        if not cver and pkgname in new_caps:
            cver = new_pkgs.get(new_caps.get(pkgname)[0])

        if not cver:
            failed.append(pkgname)
            continue
        elif pkgver == 'latest':
            ok.append(pkgname)
            continue
        elif not __salt__['pkg_resource.version_clean'](pkgver):
            ok.append(pkgname)
            continue
        elif pkgver.endswith("*") and cver[0].startswith(pkgver[:-1]):
            ok.append(pkgname)
            continue
        if _fulfills_version_string(cver, pkgver, ignore_epoch=ignore_epoch):
            ok.append(pkgname)
        else:
            failed.append(pkgname)
    return ok, failed
[ "def", "_verify_install", "(", "desired", ",", "new_pkgs", ",", "ignore_epoch", "=", "False", ",", "new_caps", "=", "None", ")", ":", "ok", "=", "[", "]", "failed", "=", "[", "]", "if", "not", "new_caps", ":", "new_caps", "=", "dict", "(", ")", "for", "pkgname", ",", "pkgver", "in", "desired", ".", "items", "(", ")", ":", "# FreeBSD pkg supports `openjdk` and `java/openjdk7` package names.", "# Homebrew for Mac OSX does something similar with tap names", "# prefixing package names, separated with a slash.", "has_origin", "=", "'/'", "in", "pkgname", "if", "__grains__", "[", "'os'", "]", "==", "'FreeBSD'", "and", "has_origin", ":", "cver", "=", "[", "k", "for", "k", ",", "v", "in", "six", ".", "iteritems", "(", "new_pkgs", ")", "if", "v", "[", "'origin'", "]", "==", "pkgname", "]", "elif", "__grains__", "[", "'os'", "]", "==", "'MacOS'", "and", "has_origin", ":", "cver", "=", "new_pkgs", ".", "get", "(", "pkgname", ",", "new_pkgs", ".", "get", "(", "pkgname", ".", "split", "(", "'/'", ")", "[", "-", "1", "]", ")", ")", "elif", "__grains__", "[", "'os'", "]", "==", "'OpenBSD'", ":", "cver", "=", "new_pkgs", ".", "get", "(", "pkgname", ".", "split", "(", "'%'", ")", "[", "0", "]", ")", "elif", "__grains__", "[", "'os_family'", "]", "==", "'Debian'", ":", "cver", "=", "new_pkgs", ".", "get", "(", "pkgname", ".", "split", "(", "'='", ")", "[", "0", "]", ")", "else", ":", "cver", "=", "new_pkgs", ".", "get", "(", "pkgname", ")", "if", "not", "cver", "and", "pkgname", "in", "new_caps", ":", "cver", "=", "new_pkgs", ".", "get", "(", "new_caps", ".", "get", "(", "pkgname", ")", "[", "0", "]", ")", "if", "not", "cver", ":", "failed", ".", "append", "(", "pkgname", ")", "continue", "elif", "pkgver", "==", "'latest'", ":", "ok", ".", "append", "(", "pkgname", ")", "continue", "elif", "not", "__salt__", "[", "'pkg_resource.version_clean'", "]", "(", "pkgver", ")", ":", "ok", ".", "append", "(", "pkgname", ")", "continue", "elif", "pkgver", ".", "endswith", "(", "\"*\"", ")", "and", "cver", "[", "0", "]", ".", "startswith", "(", "pkgver", "[", ":", "-", "1", "]", ")", ":", "ok", ".", "append", "(", "pkgname", ")", "continue", "if", "_fulfills_version_string", "(", "cver", ",", "pkgver", ",", "ignore_epoch", "=", "ignore_epoch", ")", ":", "ok", ".", "append", "(", "pkgname", ")", "else", ":", "failed", ".", "append", "(", "pkgname", ")", "return", "ok", ",", "failed" ]
Determine whether or not the installed packages match what was requested in the SLS file.
[ "Determine", "whether", "or", "not", "the", "installed", "packages", "match", "what", "was", "requested", "in", "the", "SLS", "file", "." ]
python
train
eqcorrscan/EQcorrscan
eqcorrscan/utils/archive_read.py
https://github.com/eqcorrscan/EQcorrscan/blob/3121b4aca801ee5d38f56ca297ce1c0f9515d9ff/eqcorrscan/utils/archive_read.py#L31-L140
def read_data(archive, arc_type, day, stachans, length=86400):
    """
    Function to read the appropriate data from an archive for a day.

    :type archive: str
    :param archive:
        The archive source - if arc_type is seishub, this should be a url,
        if the arc_type is FDSN then this can be either a url or a known
        obspy client. If arc_type is day_vols, then this is the path to the
        top directory.
    :type arc_type: str
    :param arc_type: The type of archive, can be: seishub, FDSN, day_volumes
    :type day: datetime.date
    :param day: Date to retrieve data for
    :type stachans: list
    :param stachans: List of tuples of Stations and channels to try and get,
        will not fail if stations are not available, but will warn.
    :type length: float
    :param length: Data length to extract in seconds, defaults to 1 day.

    :returns: Stream of data
    :rtype: obspy.core.stream.Stream

    .. note:: A note on arc_types, if arc_type is day_vols, then this will \
        look for directories labelled in the IRIS DMC conventions of \
        Yyyyy/Rjjj.01/... where yyyy is the year and jjj is the julian day. \
        Data within these directories should be stored as day-long, \
        single-channel files. This is not implemented in the fastest way \
        possible to allow for a more general situation. If you require more \
        speed you will need to re-write this.

    .. rubric:: Example

    >>> from obspy import UTCDateTime
    >>> t1 = UTCDateTime(2012, 3, 26)
    >>> stachans = [('JCNB', 'SP1')]
    >>> st = read_data('NCEDC', 'FDSN', t1, stachans)
    >>> print(st)
    1 Trace(s) in Stream:
    BP.JCNB.40.SP1 | 2012-03-26T00:00:00.000000Z - 2012-03-26T23:59:59.\
950000Z | 20.0 Hz, 1728000 samples

    .. rubric:: Example, missing data

    >>> t1 = UTCDateTime(2012, 3, 26)
    >>> stachans = [('JCNB', 'SP1'), ('GCSZ', 'HHZ')]
    >>> st = read_data('NCEDC', 'FDSN', t1, stachans)
    >>> print(st)
    1 Trace(s) in Stream:
    BP.JCNB.40.SP1 | 2012-03-26T00:00:00.000000Z - 2012-03-26T23:59:59.\
950000Z | 20.0 Hz, 1728000 samples

    .. rubric:: Example, local day-volumes

    >>> # Get the path to the test data
    >>> import eqcorrscan
    >>> TEST_PATH = os.path.dirname(eqcorrscan.__file__) + '/tests/test_data'
    >>> t1 = UTCDateTime(2012, 3, 26)
    >>> stachans = [('WHYM', 'SHZ'), ('EORO', 'SHZ')]
    >>> st = read_data(TEST_PATH + '/day_vols', 'day_vols',
    ...                t1, stachans)
    >>> print(st)
    2 Trace(s) in Stream:
    AF.WHYM..SHZ | 2012-03-26T00:00:00.000000Z - 2012-03-26T23:59:59.000000Z \
| 1.0 Hz, 86400 samples
    AF.EORO..SHZ | 2012-03-26T00:00:00.000000Z - 2012-03-26T23:59:59.000000Z \
| 1.0 Hz, 86400 samples
    """
    st = []
    available_stations = _check_available_data(archive, arc_type, day)
    for station in stachans:
        if len(station[1]) == 2:
            # Cope with two char channel naming in seisan
            station_map = (station[0], station[1][0] + '*' + station[1][1])
            available_stations_map = [(sta[0], sta[1][0] + '*' + sta[1][-1])
                                      for sta in available_stations]
        else:
            station_map = station
            available_stations_map = available_stations
        if station_map not in available_stations_map:
            msg = ' '.join([station[0], station_map[1], 'is not available for',
                            day.strftime('%Y/%m/%d')])
            warnings.warn(msg)
            continue
        if arc_type.lower() == 'seishub':
            client = SeishubClient(archive)
            st += client.get_waveforms(
                network='*', station=station_map[0], location='*',
                channel=station_map[1], starttime=UTCDateTime(day),
                endtime=UTCDateTime(day) + length)
        elif arc_type.upper() == "FDSN":
            client = FDSNClient(archive)
            try:
                st += client.get_waveforms(
                    network='*', station=station_map[0], location='*',
                    channel=station_map[1], starttime=UTCDateTime(day),
                    endtime=UTCDateTime(day) + length)
            except FDSNException:
                warnings.warn('No data on server despite station being ' +
                              'available...')
                continue
        elif arc_type.lower() == 'day_vols':
            wavfiles = _get_station_file(os.path.join(
                archive, day.strftime('Y%Y' + os.sep + 'R%j.01')),
                station_map[0], station_map[1])
            for wavfile in wavfiles:
                st += read(wavfile, starttime=day, endtime=day + length)
    st = Stream(st)
    return st
[ "def", "read_data", "(", "archive", ",", "arc_type", ",", "day", ",", "stachans", ",", "length", "=", "86400", ")", ":", "st", "=", "[", "]", "available_stations", "=", "_check_available_data", "(", "archive", ",", "arc_type", ",", "day", ")", "for", "station", "in", "stachans", ":", "if", "len", "(", "station", "[", "1", "]", ")", "==", "2", ":", "# Cope with two char channel naming in seisan", "station_map", "=", "(", "station", "[", "0", "]", ",", "station", "[", "1", "]", "[", "0", "]", "+", "'*'", "+", "station", "[", "1", "]", "[", "1", "]", ")", "available_stations_map", "=", "[", "(", "sta", "[", "0", "]", ",", "sta", "[", "1", "]", "[", "0", "]", "+", "'*'", "+", "sta", "[", "1", "]", "[", "-", "1", "]", ")", "for", "sta", "in", "available_stations", "]", "else", ":", "station_map", "=", "station", "available_stations_map", "=", "available_stations", "if", "station_map", "not", "in", "available_stations_map", ":", "msg", "=", "' '", ".", "join", "(", "[", "station", "[", "0", "]", ",", "station_map", "[", "1", "]", ",", "'is not available for'", ",", "day", ".", "strftime", "(", "'%Y/%m/%d'", ")", "]", ")", "warnings", ".", "warn", "(", "msg", ")", "continue", "if", "arc_type", ".", "lower", "(", ")", "==", "'seishub'", ":", "client", "=", "SeishubClient", "(", "archive", ")", "st", "+=", "client", ".", "get_waveforms", "(", "network", "=", "'*'", ",", "station", "=", "station_map", "[", "0", "]", ",", "location", "=", "'*'", ",", "channel", "=", "station_map", "[", "1", "]", ",", "starttime", "=", "UTCDateTime", "(", "day", ")", ",", "endtime", "=", "UTCDateTime", "(", "day", ")", "+", "length", ")", "elif", "arc_type", ".", "upper", "(", ")", "==", "\"FDSN\"", ":", "client", "=", "FDSNClient", "(", "archive", ")", "try", ":", "st", "+=", "client", ".", "get_waveforms", "(", "network", "=", "'*'", ",", "station", "=", "station_map", "[", "0", "]", ",", "location", "=", "'*'", ",", "channel", "=", "station_map", "[", "1", "]", ",", "starttime", "=", "UTCDateTime", "(", "day", ")", ",", "endtime", "=", "UTCDateTime", "(", "day", ")", "+", "length", ")", "except", "FDSNException", ":", "warnings", ".", "warn", "(", "'No data on server despite station being '", "+", "'available...'", ")", "continue", "elif", "arc_type", ".", "lower", "(", ")", "==", "'day_vols'", ":", "wavfiles", "=", "_get_station_file", "(", "os", ".", "path", ".", "join", "(", "archive", ",", "day", ".", "strftime", "(", "'Y%Y'", "+", "os", ".", "sep", "+", "'R%j.01'", ")", ")", ",", "station_map", "[", "0", "]", ",", "station_map", "[", "1", "]", ")", "for", "wavfile", "in", "wavfiles", ":", "st", "+=", "read", "(", "wavfile", ",", "starttime", "=", "day", ",", "endtime", "=", "day", "+", "length", ")", "st", "=", "Stream", "(", "st", ")", "return", "st" ]
Function to read the appropriate data from an archive for a day. :type archive: str :param archive: The archive source - if arc_type is seishub, this should be a url, if the arc_type is FDSN then this can be either a url or a known obspy client. If arc_type is day_vols, then this is the path to the top directory. :type arc_type: str :param arc_type: The type of archive, can be: seishub, FDSN, day_volumes :type day: datetime.date :param day: Date to retrieve data for :type stachans: list :param stachans: List of tuples of Stations and channels to try and get, will not fail if stations are not available, but will warn. :type length: float :param length: Data length to extract in seconds, defaults to 1 day. :returns: Stream of data :rtype: obspy.core.stream.Stream .. note:: A note on arc_types, if arc_type is day_vols, then this will \ look for directories labelled in the IRIS DMC conventions of \ Yyyyy/Rjjj.01/... where yyyy is the year and jjj is the julian day. \ Data within these files directories should be stored as day-long, \ single-channel files. This is not implemented in the fasted way \ possible to allow for a more general situation. If you require more \ speed you will need to re-write this. .. rubric:: Example >>> from obspy import UTCDateTime >>> t1 = UTCDateTime(2012, 3, 26) >>> stachans = [('JCNB', 'SP1')] >>> st = read_data('NCEDC', 'FDSN', t1, stachans) >>> print(st) 1 Trace(s) in Stream: BP.JCNB.40.SP1 | 2012-03-26T00:00:00.000000Z - 2012-03-26T23:59:59.\ 950000Z | 20.0 Hz, 1728000 samples .. rubric:: Example, missing data >>> t1 = UTCDateTime(2012, 3, 26) >>> stachans = [('JCNB', 'SP1'), ('GCSZ', 'HHZ')] >>> st = read_data('NCEDC', 'FDSN', t1, stachans) >>> print(st) 1 Trace(s) in Stream: BP.JCNB.40.SP1 | 2012-03-26T00:00:00.000000Z - 2012-03-26T23:59:59.\ 950000Z | 20.0 Hz, 1728000 samples .. rubric:: Example, local day-volumes >>> # Get the path to the test data >>> import eqcorrscan >>> TEST_PATH = os.path.dirname(eqcorrscan.__file__) + '/tests/test_data' >>> t1 = UTCDateTime(2012, 3, 26) >>> stachans = [('WHYM', 'SHZ'), ('EORO', 'SHZ')] >>> st = read_data(TEST_PATH + '/day_vols', 'day_vols', ... t1, stachans) >>> print(st) 2 Trace(s) in Stream: AF.WHYM..SHZ | 2012-03-26T00:00:00.000000Z - 2012-03-26T23:59:59.000000Z \ | 1.0 Hz, 86400 samples AF.EORO..SHZ | 2012-03-26T00:00:00.000000Z - 2012-03-26T23:59:59.000000Z \ | 1.0 Hz, 86400 samples
[ "Function", "to", "read", "the", "appropriate", "data", "from", "an", "archive", "for", "a", "day", "." ]
python
train
CulturePlex/django-zotero
django_zotero/templatetags/zotero_inline_extras.py
https://github.com/CulturePlex/django-zotero/blob/de31583a80a2bd2459c118fb5aa767a2842e0b00/django_zotero/templatetags/zotero_inline_extras.py#L10-L32
def zotero_inline_tags(parser, token):
    """
    Render an inline formset of tags.

    Usage:
        {% zotero_inline_tags formset[ option] %}
        option = "all" | "media" | "formset"
    """
    args = token.split_contents()
    length = len(args)

    if length == 2:
        rendered_node = RenderedAllNode(args[1])
    elif length == 3 and args[2].lower() == u'all':
        rendered_node = RenderedAllNode(args[1])
    elif length == 3 and args[2].lower() == u'media':
        rendered_node = RenderedMediaNode(args[1])
    elif length == 3 and args[2].lower() == u'formset':
        rendered_node = RenderedFormsetNode(args[1])
    else:
        raise t.TemplateSyntaxError('Incorrect arguments in %s.' % args[0])

    return rendered_node
[ "def", "zotero_inline_tags", "(", "parser", ",", "token", ")", ":", "args", "=", "token", ".", "split_contents", "(", ")", "length", "=", "len", "(", "args", ")", "if", "length", "==", "2", ":", "rendered_node", "=", "RenderedAllNode", "(", "args", "[", "1", "]", ")", "elif", "length", "==", "3", "and", "args", "[", "2", "]", ".", "lower", "(", ")", "==", "u'all'", ":", "rendered_node", "=", "RenderedAllNode", "(", "args", "[", "1", "]", ")", "elif", "length", "==", "3", "and", "args", "[", "2", "]", ".", "lower", "(", ")", "==", "u'media'", ":", "rendered_node", "=", "RenderedMediaNode", "(", "args", "[", "1", "]", ")", "elif", "length", "==", "3", "and", "args", "[", "2", "]", ".", "lower", "(", ")", "==", "u'formset'", ":", "rendered_node", "=", "RenderedFormsetNode", "(", "args", "[", "1", "]", ")", "else", ":", "raise", "t", ".", "TemplateSyntaxError", "(", "'Incorrect arguments in %s.'", "%", "args", "[", "0", "]", ")", "return", "rendered_node" ]
Render an inline formset of tags. Usage: {% zotero_inline_tags formset[ option] %} option = "all" | "media" | "formset"
[ "Render", "an", "inline", "formset", "of", "tags", ".", "Usage", ":", "{", "%", "zotero_inline_tags", "formset", "[", "option", "]", "%", "}", "option", "=", "all", "|", "media", "|", "formset" ]
python
train
michael-lazar/rtv
rtv/packages/praw/__init__.py
https://github.com/michael-lazar/rtv/blob/ccef2af042566ad384977028cf0bde01bc524dda/rtv/packages/praw/__init__.py#L1146-L1162
def get_subreddit_recommendations(self, subreddits, omit=None):
    """Return a list of recommended subreddits as Subreddit objects.

    Subreddits with activity less than a certain threshold will not have
    any recommendations due to lack of data.

    :param subreddits: A list of subreddits (either names or Subreddit
        objects) to base the recommendations on.
    :param omit: A list of subreddits (either names or Subreddit objects)
        that will be filtered out of the result.

    """
    params = {'omit': _to_reddit_list(omit or [])}
    url = self.config['sub_recommendations'].format(
        subreddits=_to_reddit_list(subreddits))
    result = self.request_json(url, params=params)
    return [objects.Subreddit(self, sub['sr_name']) for sub in result]
[ "def", "get_subreddit_recommendations", "(", "self", ",", "subreddits", ",", "omit", "=", "None", ")", ":", "params", "=", "{", "'omit'", ":", "_to_reddit_list", "(", "omit", "or", "[", "]", ")", "}", "url", "=", "self", ".", "config", "[", "'sub_recommendations'", "]", ".", "format", "(", "subreddits", "=", "_to_reddit_list", "(", "subreddits", ")", ")", "result", "=", "self", ".", "request_json", "(", "url", ",", "params", "=", "params", ")", "return", "[", "objects", ".", "Subreddit", "(", "self", ",", "sub", "[", "'sr_name'", "]", ")", "for", "sub", "in", "result", "]" ]
Return a list of recommended subreddits as Subreddit objects. Subreddits with activity less than a certain threshold, will not have any recommendations due to lack of data. :param subreddits: A list of subreddits (either names or Subreddit objects) to base the recommendations on. :param omit: A list of subreddits (either names or Subreddit objects) that will be filtered out of the result.
[ "Return", "a", "list", "of", "recommended", "subreddits", "as", "Subreddit", "objects", "." ]
python
train
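A hedged usage sketch for the method above; it assumes a praw 3.x style Reddit session (the API that rtv vendors), and the user agent and subreddit names are illustrative:

import praw

r = praw.Reddit(user_agent='recommendation-example')
recs = r.get_subreddit_recommendations(['python', 'learnpython'],
                                       omit=['programming'])
for subreddit in recs:
    print(subreddit.display_name)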
DataONEorg/d1_python
lib_common/src/d1_common/wrap/access_policy.py
https://github.com/DataONEorg/d1_python/blob/3ac4d4f3ca052d3e8641a6a329cab526c8ddcb0d/lib_common/src/d1_common/wrap/access_policy.py#L596-L603
def _norm_perm_list_from_perm_dict(self, perm_dict):
    """Return a minimal, ordered, hashable list of subjects and permissions."""
    high_perm_dict = self._highest_perm_dict_from_perm_dict(perm_dict)
    return [
        [k, list(sorted(high_perm_dict[k]))]
        for k in ORDERED_PERM_LIST
        if high_perm_dict.get(k, False)
    ]
[ "def", "_norm_perm_list_from_perm_dict", "(", "self", ",", "perm_dict", ")", ":", "high_perm_dict", "=", "self", ".", "_highest_perm_dict_from_perm_dict", "(", "perm_dict", ")", "return", "[", "[", "k", ",", "list", "(", "sorted", "(", "high_perm_dict", "[", "k", "]", ")", ")", "]", "for", "k", "in", "ORDERED_PERM_LIST", "if", "high_perm_dict", ".", "get", "(", "k", ",", "False", ")", "]" ]
Return a minimal, ordered, hashable list of subjects and permissions.
[ "Return", "a", "minimal", "ordered", "hashable", "list", "of", "subjects", "and", "permissions", "." ]
python
train
cisco-sas/kitty
kitty/data/data_manager.py
https://github.com/cisco-sas/kitty/blob/cb0760989dcdfe079e43ac574d872d0b18953a32/kitty/data/data_manager.py#L75-L86
def get_results(self):
    '''
    :return: result from running the task
    '''
    self._event.wait()
    if self._exception is not None:
        #
        # Well... rethrowing the exception caught in execute,
        # but on the caller thread
        #
        raise self._exception  # pylint: disable=E0702
    return self._result
[ "def", "get_results", "(", "self", ")", ":", "self", ".", "_event", ".", "wait", "(", ")", "if", "self", ".", "_exception", "is", "not", "None", ":", "#", "# Well... rethrownig the exception caught in execute", "# but on the caller thread", "#", "raise", "self", ".", "_exception", "# pylint: disable=E0702", "return", "self", ".", "_result" ]
:return: result from running the task
[ ":", "return", ":", "result", "from", "running", "the", "task" ]
python
train
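The method above is a classic wait-and-rethrow pattern, self-contained enough to sketch in full; the Task class below is an assumption built to match the attribute names in the snippet, not kitty's real class:

import threading

class Task(object):
    def __init__(self, func):
        self._func = func
        self._event = threading.Event()
        self._result = None
        self._exception = None

    def execute(self):
        # Runs on a worker thread; capture the outcome either way.
        try:
            self._result = self._func()
        except Exception as ex:
            self._exception = ex
        finally:
            self._event.set()

    def get_results(self):
        # Block until execute() finishes; rethrow its exception on this thread.
        self._event.wait()
        if self._exception is not None:
            raise self._exception
        return self._result

task = Task(lambda: 42)
threading.Thread(target=task.execute).start()
print(task.get_results())  # 42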
census-instrumentation/opencensus-python
contrib/opencensus-ext-prometheus/opencensus/ext/prometheus/stats_exporter/__init__.py
https://github.com/census-instrumentation/opencensus-python/blob/992b223f7e34c5dcb65922b7d5c827e7a1351e7d/contrib/opencensus-ext-prometheus/opencensus/ext/prometheus/stats_exporter/__init__.py#L121-L132
def register_view(self, view):
    """register_view will create the needed structure
    in order to be able to send all data to Prometheus
    """
    v_name = get_view_name(self.options.namespace, view)
    if v_name not in self.registered_views:
        desc = {'name': v_name,
                'documentation': view.description,
                'labels': list(map(sanitize, view.columns))}
        self.registered_views[v_name] = desc
        self.registry.register(self)
[ "def", "register_view", "(", "self", ",", "view", ")", ":", "v_name", "=", "get_view_name", "(", "self", ".", "options", ".", "namespace", ",", "view", ")", "if", "v_name", "not", "in", "self", ".", "registered_views", ":", "desc", "=", "{", "'name'", ":", "v_name", ",", "'documentation'", ":", "view", ".", "description", ",", "'labels'", ":", "list", "(", "map", "(", "sanitize", ",", "view", ".", "columns", ")", ")", "}", "self", ".", "registered_views", "[", "v_name", "]", "=", "desc", "self", ".", "registry", ".", "register", "(", "self", ")" ]
register_view will create the needed structure in order to be able to sent all data to Prometheus
[ "register_view", "will", "create", "the", "needed", "structure", "in", "order", "to", "be", "able", "to", "sent", "all", "data", "to", "Prometheus" ]
python
train
Clinical-Genomics/scout
scout/server/blueprints/variants/controllers.py
https://github.com/Clinical-Genomics/scout/blob/90a551e2e1653a319e654c2405c2866f93d0ebb9/scout/server/blueprints/variants/controllers.py#L677-L697
def transcript_str(transcript_obj, gene_name=None):
    """Generate amino acid change as a string."""
    if transcript_obj.get('exon'):
        gene_part, part_count_raw = 'exon', transcript_obj['exon']
    elif transcript_obj.get('intron'):
        gene_part, part_count_raw = 'intron', transcript_obj['intron']
    else:
        # variant between genes
        gene_part, part_count_raw = 'intergenic', '0'

    part_count = part_count_raw.rpartition('/')[0]
    change_str = "{}:{}{}:{}:{}".format(
        transcript_obj.get('refseq_id', ''),
        gene_part,
        part_count,
        transcript_obj.get('coding_sequence_name', 'NA'),
        transcript_obj.get('protein_sequence_name', 'NA'),
    )
    if gene_name:
        change_str = "{}:".format(gene_name) + change_str
    return change_str
[ "def", "transcript_str", "(", "transcript_obj", ",", "gene_name", "=", "None", ")", ":", "if", "transcript_obj", ".", "get", "(", "'exon'", ")", ":", "gene_part", ",", "part_count_raw", "=", "'exon'", ",", "transcript_obj", "[", "'exon'", "]", "elif", "transcript_obj", ".", "get", "(", "'intron'", ")", ":", "gene_part", ",", "part_count_raw", "=", "'intron'", ",", "transcript_obj", "[", "'intron'", "]", "else", ":", "# variant between genes", "gene_part", ",", "part_count_raw", "=", "'intergenic'", ",", "'0'", "part_count", "=", "part_count_raw", ".", "rpartition", "(", "'/'", ")", "[", "0", "]", "change_str", "=", "\"{}:{}{}:{}:{}\"", ".", "format", "(", "transcript_obj", ".", "get", "(", "'refseq_id'", ",", "''", ")", ",", "gene_part", ",", "part_count", ",", "transcript_obj", ".", "get", "(", "'coding_sequence_name'", ",", "'NA'", ")", ",", "transcript_obj", ".", "get", "(", "'protein_sequence_name'", ",", "'NA'", ")", ",", ")", "if", "gene_name", ":", "change_str", "=", "\"{}:\"", ".", "format", "(", "gene_name", ")", "+", "change_str", "return", "change_str" ]
Generate amino acid change as a string.
[ "Generate", "amino", "acid", "change", "as", "a", "string", "." ]
python
test
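Because `transcript_str` only reads plain dict keys, its behaviour is easy to show; every field value below is made up for the example:

tx = {
    'refseq_id': 'NM_000059',
    'exon': '2/27',
    'coding_sequence_name': 'c.123A>T',
    'protein_sequence_name': 'p.Lys41Ter',
}
print(transcript_str(tx, gene_name='BRCA2'))
# BRCA2:NM_000059:exon2:c.123A>T:p.Lys41Ter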
quodlibet/mutagen
mutagen/mp3/__init__.py
https://github.com/quodlibet/mutagen/blob/e393df5971ba41ba5a50de9c2c9e7e5484d82c4e/mutagen/mp3/__init__.py#L229-L244
def skip_id3(fileobj):
    """Might raise IOError"""
    # WMP writes multiple id3s, so skip as many as we find
    while True:
        idata = fileobj.read(10)
        try:
            id3, insize = struct.unpack('>3sxxx4s', idata)
        except struct.error:
            id3, insize = b'', 0
        insize = BitPaddedInt(insize)
        if id3 == b'ID3' and insize > 0:
            fileobj.seek(insize, 1)
        else:
            fileobj.seek(-len(idata), 1)
            break
[ "def", "skip_id3", "(", "fileobj", ")", ":", "# WMP writes multiple id3s, so skip as many as we find", "while", "True", ":", "idata", "=", "fileobj", ".", "read", "(", "10", ")", "try", ":", "id3", ",", "insize", "=", "struct", ".", "unpack", "(", "'>3sxxx4s'", ",", "idata", ")", "except", "struct", ".", "error", ":", "id3", ",", "insize", "=", "b''", ",", "0", "insize", "=", "BitPaddedInt", "(", "insize", ")", "if", "id3", "==", "b'ID3'", "and", "insize", ">", "0", ":", "fileobj", ".", "seek", "(", "insize", ",", "1", ")", "else", ":", "fileobj", ".", "seek", "(", "-", "len", "(", "idata", ")", ",", "1", ")", "break" ]
Might raise IOError
[ "Might", "raise", "IOError" ]
python
train
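A usage sketch: position a file object past any leading ID3v2 tags before reading MPEG frame data. The file path is illustrative, and `skip_id3` plus mutagen's BitPaddedInt are assumed to be in scope as in the source above:

with open('song.mp3', 'rb') as fileobj:
    skip_id3(fileobj)              # fileobj now points at the first MPEG frame
    frame_header = fileobj.read(4)
    print(frame_header.hex())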
rytilahti/python-songpal
songpal/service.py
https://github.com/rytilahti/python-songpal/blob/0443de6b3d960b9067a851d82261ca00e46b4618/songpal/service.py#L283-L292
def asdict(self):
    """Return dict presentation of this service.

    Useful for dumping the device information into JSON.
    """
    return {
        "methods": {m.name: m.asdict() for m in self.methods},
        "protocols": self.protocols,
        "notifications": {n.name: n.asdict() for n in self.notifications},
    }
[ "def", "asdict", "(", "self", ")", ":", "return", "{", "\"methods\"", ":", "{", "m", ".", "name", ":", "m", ".", "asdict", "(", ")", "for", "m", "in", "self", ".", "methods", "}", ",", "\"protocols\"", ":", "self", ".", "protocols", ",", "\"notifications\"", ":", "{", "n", ".", "name", ":", "n", ".", "asdict", "(", ")", "for", "n", "in", "self", ".", "notifications", "}", ",", "}" ]
Return dict presentation of this service. Useful for dumping the device information into JSON.
[ "Return", "dict", "presentation", "of", "this", "service", "." ]
python
train
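The docstring's stated purpose, dumping device information into JSON, suggests the obvious usage; `service` is assumed to be an already-constructed songpal Service instance:

import json

# `service` is an assumption standing in for a real songpal Service object.
print(json.dumps(service.asdict(), indent=2))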
UCL-INGI/INGInious
inginious/frontend/pages/queue.py
https://github.com/UCL-INGI/INGInious/blob/cbda9a9c7f2b8e8eb1e6d7d51f0d18092086300c/inginious/frontend/pages/queue.py#L16-L18
def GET_AUTH(self):
    """ GET request """
    return self.template_helper.get_renderer().queue(
        *self.submission_manager.get_job_queue_snapshot(), datetime.fromtimestamp)
[ "def", "GET_AUTH", "(", "self", ")", ":", "return", "self", ".", "template_helper", ".", "get_renderer", "(", ")", ".", "queue", "(", "*", "self", ".", "submission_manager", ".", "get_job_queue_snapshot", "(", ")", ",", "datetime", ".", "fromtimestamp", ")" ]
GET request
[ "GET", "request" ]
python
train
edx/XBlock
xblock/runtime.py
https://github.com/edx/XBlock/blob/368bf46e2c0ee69bbb21817f428c4684936e18ee/xblock/runtime.py#L638-L651
def get_block(self, usage_id, for_parent=None):
    """
    Create an XBlock instance in this runtime.

    The `usage_id` is used to find the XBlock class and data.
    """
    def_id = self.id_reader.get_definition_id(usage_id)
    try:
        block_type = self.id_reader.get_block_type(def_id)
    except NoSuchDefinition:
        raise NoSuchUsage(repr(usage_id))
    keys = ScopeIds(self.user_id, block_type, def_id, usage_id)
    block = self.construct_xblock(block_type, keys, for_parent=for_parent)
    return block
[ "def", "get_block", "(", "self", ",", "usage_id", ",", "for_parent", "=", "None", ")", ":", "def_id", "=", "self", ".", "id_reader", ".", "get_definition_id", "(", "usage_id", ")", "try", ":", "block_type", "=", "self", ".", "id_reader", ".", "get_block_type", "(", "def_id", ")", "except", "NoSuchDefinition", ":", "raise", "NoSuchUsage", "(", "repr", "(", "usage_id", ")", ")", "keys", "=", "ScopeIds", "(", "self", ".", "user_id", ",", "block_type", ",", "def_id", ",", "usage_id", ")", "block", "=", "self", ".", "construct_xblock", "(", "block_type", ",", "keys", ",", "for_parent", "=", "for_parent", ")", "return", "block" ]
Create an XBlock instance in this runtime. The `usage_id` is used to find the XBlock class and data.
[ "Create", "an", "XBlock", "instance", "in", "this", "runtime", "." ]
python
train
denisenkom/pytds
src/pytds/tds.py
https://github.com/denisenkom/pytds/blob/7d875cab29134afdef719406831c1c6a0d7af48a/src/pytds/tds.py#L555-L575
def process_param(self):
    """ Reads and processes RETURNVALUE stream.

    This stream is used to send OUTPUT parameters from RPC to client.
    Stream format url: http://msdn.microsoft.com/en-us/library/dd303881.aspx
    """
    self.log_response_message('got RETURNVALUE message')
    r = self._reader
    if tds_base.IS_TDS72_PLUS(self):
        ordinal = r.get_usmallint()
    else:
        r.get_usmallint()  # ignore size
        ordinal = self._out_params_indexes[self.return_value_index]
    name = r.read_ucs2(r.get_byte())
    r.get_byte()  # 1 - OUTPUT of sp, 2 - result of udf
    param = tds_base.Column()
    param.column_name = name
    self.get_type_info(param)
    param.value = param.serializer.read(r)
    self.output_params[ordinal] = param
    self.return_value_index += 1
[ "def", "process_param", "(", "self", ")", ":", "self", ".", "log_response_message", "(", "'got RETURNVALUE message'", ")", "r", "=", "self", ".", "_reader", "if", "tds_base", ".", "IS_TDS72_PLUS", "(", "self", ")", ":", "ordinal", "=", "r", ".", "get_usmallint", "(", ")", "else", ":", "r", ".", "get_usmallint", "(", ")", "# ignore size", "ordinal", "=", "self", ".", "_out_params_indexes", "[", "self", ".", "return_value_index", "]", "name", "=", "r", ".", "read_ucs2", "(", "r", ".", "get_byte", "(", ")", ")", "r", ".", "get_byte", "(", ")", "# 1 - OUTPUT of sp, 2 - result of udf", "param", "=", "tds_base", ".", "Column", "(", ")", "param", ".", "column_name", "=", "name", "self", ".", "get_type_info", "(", "param", ")", "param", ".", "value", "=", "param", ".", "serializer", ".", "read", "(", "r", ")", "self", ".", "output_params", "[", "ordinal", "]", "=", "param", "self", ".", "return_value_index", "+=", "1" ]
Reads and processes RETURNVALUE stream. This stream is used to send OUTPUT parameters from RPC to client. Stream format url: http://msdn.microsoft.com/en-us/library/dd303881.aspx
[ "Reads", "and", "processes", "RETURNVALUE", "stream", "." ]
python
train
djgagne/hagelslag
hagelslag/data/HREFv2ModelGrid.py
https://github.com/djgagne/hagelslag/blob/6fb6c3df90bf4867e13a97d3460b14471d107df1/hagelslag/data/HREFv2ModelGrid.py#L103-L187
def load_data(self):
    """
    Loads data from grib2 file objects or list of grib2 file objects.
    Handles specific grib2 variable names and grib2 message numbers.

    Returns:
        Array of data loaded from files in (time, y, x) dimensions, Units
    """
    file_objects = self.file_objects
    var = self.variable
    valid_date = self.valid_dates
    data = self.data
    unknown_names = self.unknown_names
    unknown_units = self.unknown_units
    member = self.member
    lat = self.lat
    lon = self.lon
    if self.sector_ind_path:
        inds_file = pd.read_csv(self.sector_ind_path + 'sector_data_indices.csv')
        inds = inds_file.loc[:, 'indices']
    out_x = self.mapping_data["x"]
    if not file_objects:
        print()
        print("No {0} model runs on {1}".format(member, self.run_date))
        print()
        units = None
        return self.data, units
    for f, file in enumerate(file_objects):
        grib = pygrib.open(file)
        if type(var) is int:
            data_values = grib[var].values
            # lat, lon = grib[var].latlons()
            # proj = Proj(grib[var].projparams)
            if grib[var].units == 'unknown':
                Id = grib[var].parameterNumber
                units = self.unknown_units[Id]
            else:
                units = grib[var].units
        elif type(var) is str:
            if '_' in var:
                variable = var.split('_')[0]
                level = int(var.split('_')[1])
                if variable in unknown_names.values():
                    Id, units = self.format_grib_name(variable)
                    data_values = grib.select(parameterNumber=Id, level=level)[0].values
                    # lat, lon = grib.select(parameterNumber=Id, level=level)[0].latlons()
                    # proj = Proj(grib.select(parameterNumber=Id, level=level)[0].projparams)
                else:
                    data_values = grib.select(name=variable, level=level)[0].values
                    units = grib.select(name=variable, level=level)[0].units
                    # lat, lon = grib.select(name=variable, level=level)[0].latlons()
                    # proj = Proj(grib.select(name=variable, level=level)[0].projparams)
            else:
                if var in unknown_names.values():
                    Id, units = self.format_grib_name(var)
                    data_values = grib.select(parameterNumber=Id)[0].values
                    # lat, lon = grib.select(parameterNumber=Id)[0].latlons()
                    # proj = Proj(grib.select(parameterNumber=Id)[0].projparams)
                elif len(grib.select(name=var)) > 1:
                    raise NameError("Multiple '{0}' records found. Rename with level:'{0}_level'".format(var))
                else:
                    data_values = grib.select(name=var)[0].values
                    units = grib.select(name=var)[0].units
                    # lat, lon = grib.select(name=var)[0].latlons()
                    # proj = Proj(grib.select(name=var)[0].projparams)
        if data is None:
            data = np.empty((len(valid_date), out_x.shape[0], out_x.shape[1]), dtype=float)
            if self.sector_ind_path:
                data[f] = data_values[:].flatten()[inds].reshape(out_x.shape)
            else:
                data[f] = data_values[:]
        else:
            if self.sector_ind_path:
                data[f] = data_values[:].flatten()[inds].reshape(out_x.shape)
            else:
                data[f] = data_values[:]
    return data, units
[ "def", "load_data", "(", "self", ")", ":", "file_objects", "=", "self", ".", "file_objects", "var", "=", "self", ".", "variable", "valid_date", "=", "self", ".", "valid_dates", "data", "=", "self", ".", "data", "unknown_names", "=", "self", ".", "unknown_names", "unknown_units", "=", "self", ".", "unknown_units", "member", "=", "self", ".", "member", "lat", "=", "self", ".", "lat", "lon", "=", "self", ".", "lon", "if", "self", ".", "sector_ind_path", ":", "inds_file", "=", "pd", ".", "read_csv", "(", "self", ".", "sector_ind_path", "+", "'sector_data_indices.csv'", ")", "inds", "=", "inds_file", ".", "loc", "[", ":", ",", "'indices'", "]", "out_x", "=", "self", ".", "mapping_data", "[", "\"x\"", "]", "if", "not", "file_objects", ":", "print", "(", ")", "print", "(", "\"No {0} model runs on {1}\"", ".", "format", "(", "member", ",", "self", ".", "run_date", ")", ")", "print", "(", ")", "units", "=", "None", "return", "self", ".", "data", ",", "units", "for", "f", ",", "file", "in", "enumerate", "(", "file_objects", ")", ":", "grib", "=", "pygrib", ".", "open", "(", "file", ")", "if", "type", "(", "var", ")", "is", "int", ":", "data_values", "=", "grib", "[", "var", "]", ".", "values", "#lat, lon = grib[var].latlons()", "#proj = Proj(grib[var].projparams)", "if", "grib", "[", "var", "]", ".", "units", "==", "'unknown'", ":", "Id", "=", "grib", "[", "var", "]", ".", "parameterNumber", "units", "=", "self", ".", "unknown_units", "[", "Id", "]", "else", ":", "units", "=", "grib", "[", "var", "]", ".", "units", "elif", "type", "(", "var", ")", "is", "str", ":", "if", "'_'", "in", "var", ":", "variable", "=", "var", ".", "split", "(", "'_'", ")", "[", "0", "]", "level", "=", "int", "(", "var", ".", "split", "(", "'_'", ")", "[", "1", "]", ")", "if", "variable", "in", "unknown_names", ".", "values", "(", ")", ":", "Id", ",", "units", "=", "self", ".", "format_grib_name", "(", "variable", ")", "data_values", "=", "grib", ".", "select", "(", "parameterNumber", "=", "Id", ",", "level", "=", "level", ")", "[", "0", "]", ".", "values", "#lat, lon = grib.select(parameterNumber=Id, level=level)[0].latlons()", "#proj = Proj(grib.select(parameterNumber=Id, level=level)[0].projparams)", "else", ":", "data_values", "=", "grib", ".", "select", "(", "name", "=", "variable", ",", "level", "=", "level", ")", "[", "0", "]", ".", "values", "units", "=", "grib", ".", "select", "(", "name", "=", "variable", ",", "level", "=", "level", ")", "[", "0", "]", ".", "units", "#lat, lon = grib.select(name=variable, level=level)[0].latlons()", "#proj = Proj(grib.select(name=variable, level=level)[0].projparams)", "else", ":", "if", "var", "in", "unknown_names", ".", "values", "(", ")", ":", "Id", ",", "units", "=", "self", ".", "format_grib_name", "(", "var", ")", "data_values", "=", "grib", ".", "select", "(", "parameterNumber", "=", "Id", ")", "[", "0", "]", ".", "values", "#lat, lon = grib.select(parameterNumber=Id)[0].latlons() ", "#proj = Proj(grib.select(parameterNumber=Id)[0].projparams)", "elif", "len", "(", "grib", ".", "select", "(", "name", "=", "var", ")", ")", ">", "1", ":", "raise", "NameError", "(", "\"Multiple '{0}' records found. 
Rename with level:'{0}_level'\"", ".", "format", "(", "var", ")", ")", "else", ":", "data_values", "=", "grib", ".", "select", "(", "name", "=", "var", ")", "[", "0", "]", ".", "values", "units", "=", "grib", ".", "select", "(", "name", "=", "var", ")", "[", "0", "]", ".", "units", "#lat, lon = grib.select(name=var)[0].latlons()", "#proj = Proj(grib.select(name=var)[0].projparams)", "if", "data", "is", "None", ":", "data", "=", "np", ".", "empty", "(", "(", "len", "(", "valid_date", ")", ",", "out_x", ".", "shape", "[", "0", "]", ",", "out_x", ".", "shape", "[", "1", "]", ")", ",", "dtype", "=", "float", ")", "if", "self", ".", "sector_ind_path", ":", "data", "[", "f", "]", "=", "data_values", "[", ":", "]", ".", "flatten", "(", ")", "[", "inds", "]", ".", "reshape", "(", "out_x", ".", "shape", ")", "else", ":", "data", "[", "f", "]", "=", "data_values", "[", ":", "]", "else", ":", "if", "self", ".", "sector_ind_path", ":", "data", "[", "f", "]", "=", "data_values", "[", ":", "]", ".", "flatten", "(", ")", "[", "inds", "]", ".", "reshape", "(", "out_x", ".", "shape", ")", "else", ":", "data", "[", "f", "]", "=", "data_values", "[", ":", "]", "return", "data", ",", "units" ]
Loads data from grib2 file objects or list of grib2 file objects. Handles specific grib2 variable names and grib2 message numbers. Returns: Array of data loaded from files in (time, y, x) dimensions, Units
[ "Loads", "data", "from", "grib2", "file", "objects", "or", "list", "of", "grib2", "file", "objects", ".", "Handles", "specific", "grib2", "variable", "names", "and", "grib2", "message", "numbers", ".", "Returns", ":", "Array", "of", "data", "loaded", "from", "files", "in", "(", "time", "y", "x", ")", "dimensions", "Units" ]
python
train
ThreatConnect-Inc/tcex
tcex/tcex_ti/mappings/tcex_ti_mappings.py
https://github.com/ThreatConnect-Inc/tcex/blob/dd4d7a1ef723af1561687120191886b9a2fd4b47/tcex/tcex_ti/mappings/tcex_ti_mappings.py#L726-L764
def attribute_label(self, attribute_id, label, action='GET', params=None):
    """
    Gets a security label from an attribute

    Args:
        attribute_id:
        label:
        action:
        params:

    Returns: Security label json
    """
    if params is None:
        params = {}
    if not self.can_update():
        self._tcex.handle_error(910, [self.type])

    if action == 'GET':
        return self.tc_requests.get_attribute_label(
            self.api_type,
            self.api_sub_type,
            self.unique_id,
            attribute_id,
            label,
            owner=self.owner,
            params=params,
        )
    if action == 'DELETE':
        return self.tc_requests.delete_attribute_label(
            self.api_type,
            self.api_sub_type,
            self.unique_id,
            attribute_id,
            label,
            owner=self.owner,
        )
    self._tcex.handle_error(925, ['action', 'attribute_label', 'action', 'action', action])
    return None
[ "def", "attribute_label", "(", "self", ",", "attribute_id", ",", "label", ",", "action", "=", "'GET'", ",", "params", "=", "None", ")", ":", "if", "params", "is", "None", ":", "params", "=", "{", "}", "if", "not", "self", ".", "can_update", "(", ")", ":", "self", ".", "_tcex", ".", "handle_error", "(", "910", ",", "[", "self", ".", "type", "]", ")", "if", "action", "==", "'GET'", ":", "return", "self", ".", "tc_requests", ".", "get_attribute_label", "(", "self", ".", "api_type", ",", "self", ".", "api_sub_type", ",", "self", ".", "unique_id", ",", "attribute_id", ",", "label", ",", "owner", "=", "self", ".", "owner", ",", "params", "=", "params", ",", ")", "if", "action", "==", "'DELETE'", ":", "return", "self", ".", "tc_requests", ".", "delete_attribute_label", "(", "self", ".", "api_type", ",", "self", ".", "api_sub_type", ",", "self", ".", "unique_id", ",", "attribute_id", ",", "label", ",", "owner", "=", "self", ".", "owner", ",", ")", "self", ".", "_tcex", ".", "handle_error", "(", "925", ",", "[", "'action'", ",", "'attribute_label'", ",", "'action'", ",", "'action'", ",", "action", "]", ")", "return", "None" ]
Gets a security labels from a attribute Args: attribute_id: label: action: params: Returns: Security label json
[ "Gets", "a", "security", "labels", "from", "a", "attribute" ]
python
train
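A hedged call-shape sketch for the method above; `ti` stands for an already-retrieved threat-intelligence object, and the attribute id and label values are placeholders:

# `ti`, 12345, and 'TLP:RED' are all illustrative assumptions.
label_json = ti.attribute_label(12345, 'TLP:RED', action='GET')
ti.attribute_label(12345, 'TLP:RED', action='DELETE')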
edx/edx-val
edxval/api.py
https://github.com/edx/edx-val/blob/30df48061e77641edb5272895b7c7f7f25eb7aa7/edxval/api.py#L79-L115
def create_video(video_data):
    """
    Called on to create Video objects in the database

    create_video is used to create Video objects whose children are
    EncodedVideo objects which are linked to Profile objects. This is an
    alternative to the HTTP requests so it can be used internally. The
    VideoSerializer is used to deserialize this object. If there are
    duplicate profile_names, the entire creation will be rejected. If the
    profile is not found in the database, the video will not be created.

    Args:
        video_data (dict):
         {
                url: api url to the video
                edx_video_id: ID of the video
                duration: Length of video in seconds
                client_video_id: client ID of video
                encoded_video: a list of EncodedVideo dicts
                    url: url of the video
                    file_size: size of the video in bytes
                    profile: ID of the profile
                courses: Courses associated with this video
                image: poster image file name for a particular course
         }

    Raises:
        Raises ValCannotCreateError if the video cannot be created.

    Returns the successfully created Video object
    """
    serializer = VideoSerializer(data=video_data)
    if serializer.is_valid():
        serializer.save()
        return video_data.get("edx_video_id")
    else:
        raise ValCannotCreateError(serializer.errors)
[ "def", "create_video", "(", "video_data", ")", ":", "serializer", "=", "VideoSerializer", "(", "data", "=", "video_data", ")", "if", "serializer", ".", "is_valid", "(", ")", ":", "serializer", ".", "save", "(", ")", "return", "video_data", ".", "get", "(", "\"edx_video_id\"", ")", "else", ":", "raise", "ValCannotCreateError", "(", "serializer", ".", "errors", ")" ]
Called on to create Video objects in the database create_video is used to create Video objects whose children are EncodedVideo objects which are linked to Profile objects. This is an alternative to the HTTP requests so it can be used internally. The VideoSerializer is used to deserialize this object. If there are duplicate profile_names, the entire creation will be rejected. If the profile is not found in the database, the video will not be created. Args: video_data (dict): { url: api url to the video edx_video_id: ID of the video duration: Length of video in seconds client_video_id: client ID of video encoded_video: a list of EncodedVideo dicts url: url of the video file_size: size of the video in bytes profile: ID of the profile courses: Courses associated with this video image: poster image file name for a particular course } Raises: Raises ValCannotCreateError if the video cannot be created. Returns the successfully created Video object
[ "Called", "on", "to", "create", "Video", "objects", "in", "the", "database" ]
python
train
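A call sketch following the dict shape the docstring documents; all values are illustrative, and the exact set of keys the serializer requires is an assumption:

video_data = {
    'edx_video_id': 'example-video-id',
    'client_video_id': 'Intro Lecture',
    'duration': 111.0,
    'encoded_videos': [],  # list of EncodedVideo dicts per the docstring
    'courses': [],
}
edx_video_id = create_video(video_data)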
PyPSA/PyPSA
pypsa/opf.py
https://github.com/PyPSA/PyPSA/blob/46954b1b3c21460550f7104681517065279a53b7/pypsa/opf.py#L1400-L1425
def network_lopf_prepare_solver(network, solver_name="glpk", solver_io=None):
    """
    Prepare solver for linear optimal power flow.

    Parameters
    ----------
    solver_name : string
        Must be a solver name that pyomo recognises and that is
        installed, e.g. "glpk", "gurobi"
    solver_io : string, default None
        Solver Input-Output option, e.g. "python" to use "gurobipy" for
        solver_name="gurobi"

    Returns
    -------
    None
    """
    network.opt = SolverFactory(solver_name, solver_io=solver_io)

    patch_optsolver_record_memusage_before_solving(network.opt, network)

    if isinstance(network.opt, PersistentSolver):
        network.opt.set_instance(network.model)

    return network.opt
[ "def", "network_lopf_prepare_solver", "(", "network", ",", "solver_name", "=", "\"glpk\"", ",", "solver_io", "=", "None", ")", ":", "network", ".", "opt", "=", "SolverFactory", "(", "solver_name", ",", "solver_io", "=", "solver_io", ")", "patch_optsolver_record_memusage_before_solving", "(", "network", ".", "opt", ",", "network", ")", "if", "isinstance", "(", "network", ".", "opt", ",", "PersistentSolver", ")", ":", "network", ".", "opt", ".", "set_instance", "(", "network", ".", "model", ")", "return", "network", ".", "opt" ]
Prepare solver for linear optimal power flow. Parameters ---------- solver_name : string Must be a solver name that pyomo recognises and that is installed, e.g. "glpk", "gurobi" solver_io : string, default None Solver Input-Output option, e.g. "python" to use "gurobipy" for solver_name="gurobi" Returns ------- None
[ "Prepare", "solver", "for", "linear", "optimal", "power", "flow", "." ]
python
train
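A usage sketch matching the docstring's own example values; `network` is assumed to be a PyPSA Network whose pyomo model (`network.model`) has already been built:

# `network` is an assumed, already-prepared PyPSA Network object.
opt = network_lopf_prepare_solver(network, solver_name='gurobi',
                                  solver_io='python')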
joeyespo/path-and-address
path_and_address/validation.py
https://github.com/joeyespo/path-and-address/blob/f8193a09f4b785574d920e8a2aeeb55ea6ff4e20/path_and_address/validation.py#L7-L21
def valid_address(address):
    """
    Determines whether the specified address string is valid.
    """
    if not address:
        return False

    components = str(address).split(':')
    if len(components) > 2 or not valid_hostname(components[0]):
        return False

    if len(components) == 2 and not valid_port(components[1]):
        return False

    return True
[ "def", "valid_address", "(", "address", ")", ":", "if", "not", "address", ":", "return", "False", "components", "=", "str", "(", "address", ")", ".", "split", "(", "':'", ")", "if", "len", "(", "components", ")", ">", "2", "or", "not", "valid_hostname", "(", "components", "[", "0", "]", ")", ":", "return", "False", "if", "len", "(", "components", ")", "==", "2", "and", "not", "valid_port", "(", "components", "[", "1", "]", ")", ":", "return", "False", "return", "True" ]
Determines whether the specified address string is valid.
[ "Determines", "whether", "the", "specified", "address", "string", "is", "valid", "." ]
python
train
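A few hedged examples of expected behaviour; they assume conventional semantics for the `valid_hostname` and `valid_port` helpers this function delegates to:

print(valid_address('localhost'))        # True
print(valid_address('localhost:8080'))   # True
print(valid_address('127.0.0.1:99999'))  # False - port out of range
print(valid_address(''))                 # False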
reingart/pyafipws
wsltv.py
https://github.com/reingart/pyafipws/blob/ee87cfe4ac12285ab431df5fec257f103042d1ab/wsltv.py#L565-L578
def ConsultarCondicionesVenta(self, sep="||"):
    "Return a list of codes and descriptions of the sales conditions"
    ret = self.client.consultarCondicionesVenta(
        auth={
            'token': self.Token,
            'sign': self.Sign,
            'cuit': self.Cuit,
        },
    )['respuesta']
    self.__analizar_errores(ret)
    array = ret.get('condicionVenta', [])
    if sep is None:
        return dict([(it['codigo'], it['descripcion']) for it in array])
    else:
        return [("%s %%s %s %%s %s" % (sep, sep, sep)) %
                (it['codigo'], it['descripcion']) for it in array]
[ "def", "ConsultarCondicionesVenta", "(", "self", ",", "sep", "=", "\"||\"", ")", ":", "ret", "=", "self", ".", "client", ".", "consultarCondicionesVenta", "(", "auth", "=", "{", "'token'", ":", "self", ".", "Token", ",", "'sign'", ":", "self", ".", "Sign", ",", "'cuit'", ":", "self", ".", "Cuit", ",", "}", ",", ")", "[", "'respuesta'", "]", "self", ".", "__analizar_errores", "(", "ret", ")", "array", "=", "ret", ".", "get", "(", "'condicionVenta'", ",", "[", "]", ")", "if", "sep", "is", "None", ":", "return", "dict", "(", "[", "(", "it", "[", "'codigo'", "]", ",", "it", "[", "'descripcion'", "]", ")", "for", "it", "in", "array", "]", ")", "else", ":", "return", "[", "(", "\"%s %%s %s %%s %s\"", "%", "(", "sep", ",", "sep", ",", "sep", ")", ")", "%", "(", "it", "[", "'codigo'", "]", ",", "it", "[", "'descripcion'", "]", ")", "for", "it", "in", "array", "]" ]
Returns a list of codes and descriptions of the sales conditions
[ "Returns", "a", "list", "of", "codes", "and", "descriptions", "of", "the", "sales", "conditions" ]
python
train
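The sep branch of this record builds one pipe-delimited row per record; the same logic isolated on canned data (no web-service call, hypothetical codes):

array = [{"codigo": 1, "descripcion": "Cash"},
         {"codigo": 2, "descripcion": "30 days"}]
sep = "||"
# sep is None -> mapping; otherwise formatted rows, exactly as in the method above
as_dict = dict([(it["codigo"], it["descripcion"]) for it in array])
as_rows = [("%s %%s %s %%s %s" % (sep, sep, sep)) % (it["codigo"], it["descripcion"])
           for it in array]
print(as_dict)  # {1: 'Cash', 2: '30 days'}
print(as_rows)  # ['|| 1 || Cash ||', '|| 2 || 30 days ||']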
cobrateam/flask-mongoalchemy
examples/library/library.py
https://github.com/cobrateam/flask-mongoalchemy/blob/66ab6f857cae69e35d37035880c1dfaf1dc9bd15/examples/library/library.py#L37-L45
def list_authors(): """List all authors. e.g.: GET /authors""" authors = Author.query.all() content = '<p>Authors:</p>' for author in authors: content += '<p>%s</p>' % author.name return content
[ "def", "list_authors", "(", ")", ":", "authors", "=", "Author", ".", "query", ".", "all", "(", ")", "content", "=", "'<p>Authors:</p>'", "for", "author", "in", "authors", ":", "content", "+=", "'<p>%s</p>'", "%", "author", ".", "name", "return", "content" ]
List all authors. e.g.: GET /authors
[ "List", "all", "authors", "." ]
python
train
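The same route shape works without MongoDB; a self-contained Flask sketch where an in-memory list stands in for Author.query.all():

from flask import Flask

app = Flask(__name__)
AUTHORS = ["Ada Lovelace", "Alan Turing"]  # stand-in for Author.query.all()

@app.route("/authors")
def list_authors():
    """List all authors. e.g.: GET /authors"""
    content = "<p>Authors:</p>"
    for name in AUTHORS:
        content += "<p>%s</p>" % name
    return content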
alfred82santa/dirty-loader
dirty_loader/__init__.py
https://github.com/alfred82santa/dirty-loader/blob/0d7895e3c84a0c197d804ce31305c5cba4c512e4/dirty_loader/__init__.py#L289-L301
def unregister_module(self, module): """ Unregister a module. :param module: must be a string or a module object to unregistered :type module: str """ if module not in self._namespaces.values(): raise NoRegisteredError("Module '{0}' is not registered on loader.".format(module)) for ns, mod in list(self._namespaces.items()): if mod == module: self.unregister_namespace(ns)
[ "def", "unregister_module", "(", "self", ",", "module", ")", ":", "if", "module", "not", "in", "self", ".", "_namespaces", ".", "values", "(", ")", ":", "raise", "NoRegisteredError", "(", "\"Module '{0}' is not registered on loader.\"", ".", "format", "(", "module", ")", ")", "for", "ns", ",", "mod", "in", "list", "(", "self", ".", "_namespaces", ".", "items", "(", ")", ")", ":", "if", "mod", "==", "module", ":", "self", ".", "unregister_namespace", "(", "ns", ")" ]
Unregister a module.

:param module: must be a string or a module object to be unregistered
:type module: str
[ "Unregister", "a", "module", "." ]
python
train
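The record above is essentially a reverse-lookup-then-delete over a namespace dict; a compact standalone sketch with illustrative names:

class Loader(object):
    def __init__(self):
        self._namespaces = {}  # namespace string -> module (any value here)

    def register_namespace(self, namespace, module):
        self._namespaces[namespace] = module

    def unregister_module(self, module):
        if module not in self._namespaces.values():
            raise KeyError("Module %r is not registered on loader." % (module,))
        for ns, mod in list(self._namespaces.items()):  # list(): safe to delete inside
            if mod == module:
                del self._namespaces[ns]

loader = Loader()
loader.register_namespace("app.plugins", "mod1")
loader.register_namespace("app.extras", "mod1")
loader.unregister_module("mod1")
print(loader._namespaces)  # {}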
manns/pyspread
pyspread/src/gui/_chart_dialog.py
https://github.com/manns/pyspread/blob/0e2fd44c2e0f06605efc3058c20a43a8c1f9e7e0/pyspread/src/gui/_chart_dialog.py#L654-L659
def OnLabelSizeIntCtrl(self, event): """Label size IntCtrl event handler""" self.attrs["labelsize"] = event.GetValue() post_command_event(self, self.DrawChartMsg)
[ "def", "OnLabelSizeIntCtrl", "(", "self", ",", "event", ")", ":", "self", ".", "attrs", "[", "\"labelsize\"", "]", "=", "event", ".", "GetValue", "(", ")", "post_command_event", "(", "self", ",", "self", ".", "DrawChartMsg", ")" ]
Label size IntCtrl event handler
[ "Label", "size", "IntCtrl", "event", "handler" ]
python
train
DistrictDataLabs/yellowbrick
yellowbrick/features/importances.py
https://github.com/DistrictDataLabs/yellowbrick/blob/59b67236a3862c73363e8edad7cd86da5b69e3b2/yellowbrick/features/importances.py#L239-L256
def finalize(self, **kwargs): """ Finalize the drawing setting labels and title. """ # Set the title self.set_title('Feature Importances of {} Features using {}'.format( len(self.features_), self.name)) # Set the xlabel self.ax.set_xlabel(self._get_xlabel()) # Remove the ygrid self.ax.grid(False, axis='y') if self.stack: plt.legend(bbox_to_anchor=(1.04, 0.5), loc="center left") # Ensure we have a tight fit plt.tight_layout()
[ "def", "finalize", "(", "self", ",", "*", "*", "kwargs", ")", ":", "# Set the title", "self", ".", "set_title", "(", "'Feature Importances of {} Features using {}'", ".", "format", "(", "len", "(", "self", ".", "features_", ")", ",", "self", ".", "name", ")", ")", "# Set the xlabel", "self", ".", "ax", ".", "set_xlabel", "(", "self", ".", "_get_xlabel", "(", ")", ")", "# Remove the ygrid", "self", ".", "ax", ".", "grid", "(", "False", ",", "axis", "=", "'y'", ")", "if", "self", ".", "stack", ":", "plt", ".", "legend", "(", "bbox_to_anchor", "=", "(", "1.04", ",", "0.5", ")", ",", "loc", "=", "\"center left\"", ")", "# Ensure we have a tight fit", "plt", ".", "tight_layout", "(", ")" ]
Finalize the drawing setting labels and title.
[ "Finalize", "the", "drawing", "setting", "labels", "and", "title", "." ]
python
train
pantsbuild/pants
src/python/pants/backend/jvm/tasks/coursier_resolve.py
https://github.com/pantsbuild/pants/blob/b72e650da0df685824ffdcc71988b8c282d0962d/src/python/pants/backend/jvm/tasks/coursier_resolve.py#L209-L284
def _get_result_from_coursier(self, jars_to_resolve, global_excludes, pinned_coords, coursier_cache_path, sources, javadoc, executor): """ Calling coursier and return the result per invocation. If coursier was called once for classifier '' and once for classifier 'tests', then the return value would be: {'default': [<first coursier output>, <second coursier output>]} :param jars_to_resolve: List of `JarDependency`s to resolve :param global_excludes: List of `M2Coordinate`s to exclude globally :param pinned_coords: List of `M2Coordinate`s that need to be pinned. :param coursier_cache_path: path to where coursier cache is stored. :param executor: An instance of `pants.java.executor.Executor` :return: The aggregation of results by conf from coursier. Each coursier call could return the following: { "conflict_resolution": { "org:name:version" (requested): "org:name:version" (reconciled) }, "dependencies": [ { "coord": "orgA:nameA:versionA", "file": <path>, "dependencies": [ // coodinates for its transitive dependencies <orgX:nameX:versionX>, <orgY:nameY:versionY>, ] }, { "coord": "orgB:nameB:jar:classifier:versionB", "file": <path>, "dependencies": [ // coodinates for its transitive dependencies <orgX:nameX:versionX>, <orgZ:nameZ:versionZ>, ] }, ... // more about orgX:nameX:versionX, orgY:nameY:versionY, orgZ:nameZ:versionZ ] } Hence the aggregation of the results will be in the following format, for example when default classifier and sources are fetched: { 'default': [<result from coursier call with default conf with classifier X>, <result from coursier call with default conf with classifier Y>], 'src_doc': [<result from coursier call with --sources and/or --javadoc>], } """ # Prepare coursier args coursier_subsystem_instance = CoursierSubsystem.global_instance() coursier_jar = coursier_subsystem_instance.bootstrap_coursier(self.context.new_workunit) repos = coursier_subsystem_instance.get_options().repos # make [repoX, repoY] -> ['-r', repoX, '-r', repoY] repo_args = list(itertools.chain(*list(zip(['-r'] * len(repos), repos)))) artifact_types_arg = ['-A', ','.join(coursier_subsystem_instance.get_options().artifact_types)] advanced_options = coursier_subsystem_instance.get_options().fetch_options common_args = ['fetch', # Print the resolution tree '-t', '--cache', coursier_cache_path ] + repo_args + artifact_types_arg + advanced_options coursier_work_temp_dir = os.path.join(self.versioned_workdir, 'tmp') safe_mkdir(coursier_work_temp_dir) results_by_conf = self._get_default_conf_results(common_args, coursier_jar, global_excludes, jars_to_resolve, coursier_work_temp_dir, pinned_coords, executor) if sources or javadoc: non_default_conf_results = self._get_non_default_conf_results(common_args, coursier_jar, global_excludes, jars_to_resolve, coursier_work_temp_dir, pinned_coords, sources, javadoc, executor) results_by_conf.update(non_default_conf_results) return results_by_conf
[ "def", "_get_result_from_coursier", "(", "self", ",", "jars_to_resolve", ",", "global_excludes", ",", "pinned_coords", ",", "coursier_cache_path", ",", "sources", ",", "javadoc", ",", "executor", ")", ":", "# Prepare coursier args", "coursier_subsystem_instance", "=", "CoursierSubsystem", ".", "global_instance", "(", ")", "coursier_jar", "=", "coursier_subsystem_instance", ".", "bootstrap_coursier", "(", "self", ".", "context", ".", "new_workunit", ")", "repos", "=", "coursier_subsystem_instance", ".", "get_options", "(", ")", ".", "repos", "# make [repoX, repoY] -> ['-r', repoX, '-r', repoY]", "repo_args", "=", "list", "(", "itertools", ".", "chain", "(", "*", "list", "(", "zip", "(", "[", "'-r'", "]", "*", "len", "(", "repos", ")", ",", "repos", ")", ")", ")", ")", "artifact_types_arg", "=", "[", "'-A'", ",", "','", ".", "join", "(", "coursier_subsystem_instance", ".", "get_options", "(", ")", ".", "artifact_types", ")", "]", "advanced_options", "=", "coursier_subsystem_instance", ".", "get_options", "(", ")", ".", "fetch_options", "common_args", "=", "[", "'fetch'", ",", "# Print the resolution tree", "'-t'", ",", "'--cache'", ",", "coursier_cache_path", "]", "+", "repo_args", "+", "artifact_types_arg", "+", "advanced_options", "coursier_work_temp_dir", "=", "os", ".", "path", ".", "join", "(", "self", ".", "versioned_workdir", ",", "'tmp'", ")", "safe_mkdir", "(", "coursier_work_temp_dir", ")", "results_by_conf", "=", "self", ".", "_get_default_conf_results", "(", "common_args", ",", "coursier_jar", ",", "global_excludes", ",", "jars_to_resolve", ",", "coursier_work_temp_dir", ",", "pinned_coords", ",", "executor", ")", "if", "sources", "or", "javadoc", ":", "non_default_conf_results", "=", "self", ".", "_get_non_default_conf_results", "(", "common_args", ",", "coursier_jar", ",", "global_excludes", ",", "jars_to_resolve", ",", "coursier_work_temp_dir", ",", "pinned_coords", ",", "sources", ",", "javadoc", ",", "executor", ")", "results_by_conf", ".", "update", "(", "non_default_conf_results", ")", "return", "results_by_conf" ]
Call coursier and return the result per invocation.

If coursier was called once for classifier '' and once for classifier 'tests',
then the return value would be: {'default': [<first coursier output>, <second coursier output>]}

:param jars_to_resolve: List of `JarDependency`s to resolve
:param global_excludes: List of `M2Coordinate`s to exclude globally
:param pinned_coords: List of `M2Coordinate`s that need to be pinned.
:param coursier_cache_path: path to where coursier cache is stored.
:param executor: An instance of `pants.java.executor.Executor`

:return: The aggregation of results by conf from coursier. Each coursier call could return
the following:
{
  "conflict_resolution": {
    "org:name:version" (requested): "org:name:version" (reconciled)
  },
  "dependencies": [
    {
      "coord": "orgA:nameA:versionA",
      "file": <path>,
      "dependencies": [ // coordinates for its transitive dependencies
        <orgX:nameX:versionX>,
        <orgY:nameY:versionY>,
      ]
    },
    {
      "coord": "orgB:nameB:jar:classifier:versionB",
      "file": <path>,
      "dependencies": [ // coordinates for its transitive dependencies
        <orgX:nameX:versionX>,
        <orgZ:nameZ:versionZ>,
      ]
    },
    ... // more about orgX:nameX:versionX, orgY:nameY:versionY, orgZ:nameZ:versionZ
  ]
}
Hence the aggregation of the results will be in the following format, for example when default
classifier and sources are fetched:
{
  'default': [<result from coursier call with default conf with classifier X>,
              <result from coursier call with default conf with classifier Y>],
  'src_doc': [<result from coursier call with --sources and/or --javadoc>],
}
[ "Calling", "coursier", "and", "return", "the", "result", "per", "invocation", "." ]
python
train
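One reusable detail in this record is how the repository list becomes interleaved CLI flags; isolated and runnable (sample URLs are illustrative):

import itertools

repos = ["https://repo1.maven.org/maven2", "https://example.com/maven"]
# make [repoX, repoY] -> ['-r', repoX, '-r', repoY]
repo_args = list(itertools.chain(*list(zip(["-r"] * len(repos), repos))))
print(repo_args)
# ['-r', 'https://repo1.maven.org/maven2', '-r', 'https://example.com/maven']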
UCL-INGI/INGInious
inginious/frontend/user_manager.py
https://github.com/UCL-INGI/INGInious/blob/cbda9a9c7f2b8e8eb1e6d7d51f0d18092086300c/inginious/frontend/user_manager.py#L723-L752
def course_is_open_to_user(self, course, username=None, lti=None): """ Checks if a user is can access a course :param course: a Course object :param username: The username of the user that we want to check. If None, uses self.session_username() :param lti: indicates if the user is currently in a LTI session or not. - None to ignore the check - True to indicate the user is in a LTI session - False to indicate the user is not in a LTI session - "auto" to enable the check and take the information from the current session :return: True if the user can access the course, False else """ if username is None: username = self.session_username() if lti == "auto": lti = self.session_lti_info() is not None if self.has_staff_rights_on_course(course, username): return True if not course.get_accessibility().is_open() or (not self.course_is_user_registered(course, username) and not course.allow_preview()): return False if lti and course.is_lti() != lti: return False if lti is False and course.is_lti(): return not course.lti_send_back_grade() return True
[ "def", "course_is_open_to_user", "(", "self", ",", "course", ",", "username", "=", "None", ",", "lti", "=", "None", ")", ":", "if", "username", "is", "None", ":", "username", "=", "self", ".", "session_username", "(", ")", "if", "lti", "==", "\"auto\"", ":", "lti", "=", "self", ".", "session_lti_info", "(", ")", "is", "not", "None", "if", "self", ".", "has_staff_rights_on_course", "(", "course", ",", "username", ")", ":", "return", "True", "if", "not", "course", ".", "get_accessibility", "(", ")", ".", "is_open", "(", ")", "or", "(", "not", "self", ".", "course_is_user_registered", "(", "course", ",", "username", ")", "and", "not", "course", ".", "allow_preview", "(", ")", ")", ":", "return", "False", "if", "lti", "and", "course", ".", "is_lti", "(", ")", "!=", "lti", ":", "return", "False", "if", "lti", "is", "False", "and", "course", ".", "is_lti", "(", ")", ":", "return", "not", "course", ".", "lti_send_back_grade", "(", ")", "return", "True" ]
Checks if a user can access a course
:param course: a Course object
:param username: The username of the user that we want to check. If None, uses self.session_username()
:param lti: indicates if the user is currently in a LTI session or not.
    - None to ignore the check
    - True to indicate the user is in a LTI session
    - False to indicate the user is not in a LTI session
    - "auto" to enable the check and take the information from the current session
:return: True if the user can access the course, False otherwise
[ "Checks", "if", "a", "user", "can", "access", "a", "course", ":", "param", "course", ":", "a", "Course", "object", ":", "param", "username", ":", "The", "username", "of", "the", "user", "that", "we", "want", "to", "check", ".", "If", "None", "uses", "self", ".", "session_username", "()", ":", "param", "lti", ":", "indicates", "if", "the", "user", "is", "currently", "in", "a", "LTI", "session", "or", "not", ".", "-", "None", "to", "ignore", "the", "check", "-", "True", "to", "indicate", "the", "user", "is", "in", "a", "LTI", "session", "-", "False", "to", "indicate", "the", "user", "is", "not", "in", "a", "LTI", "session", "-", "auto", "to", "enable", "the", "check", "and", "take", "the", "information", "from", "the", "current", "session", ":", "return", ":", "True", "if", "the", "user", "can", "access", "the", "course", "False", "otherwise" ]
python
train
duniter/duniter-python-api
duniterpy/api/client.py
https://github.com/duniter/duniter-python-api/blob/3a1e5d61a2f72f5afaf29d010c6cf4dff3648165/duniterpy/api/client.py#L37-L51
def parse_text(text: str, schema: dict) -> Any: """ Validate and parse the BMA answer from websocket :param text: the bma answer :param schema: dict for jsonschema :return: the json data """ try: data = json.loads(text) jsonschema.validate(data, schema) except (TypeError, json.decoder.JSONDecodeError): raise jsonschema.ValidationError("Could not parse json") return data
[ "def", "parse_text", "(", "text", ":", "str", ",", "schema", ":", "dict", ")", "->", "Any", ":", "try", ":", "data", "=", "json", ".", "loads", "(", "text", ")", "jsonschema", ".", "validate", "(", "data", ",", "schema", ")", "except", "(", "TypeError", ",", "json", ".", "decoder", ".", "JSONDecodeError", ")", ":", "raise", "jsonschema", ".", "ValidationError", "(", "\"Could not parse json\"", ")", "return", "data" ]
Validate and parse the BMA answer from the websocket

:param text: the BMA answer
:param schema: dict for jsonschema
:return: the json data
[ "Validate", "and", "parse", "the", "BMA", "answer", "from", "websocket" ]
python
train
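A runnable demonstration of the validate-then-return pattern, with a toy schema made up for the example:

import json
import jsonschema

SCHEMA = {"type": "object",
          "properties": {"block": {"type": "integer"}},
          "required": ["block"]}

def parse_text(text, schema):
    try:
        data = json.loads(text)
        jsonschema.validate(data, schema)
    except (TypeError, json.decoder.JSONDecodeError):
        raise jsonschema.ValidationError("Could not parse json")
    return data

print(parse_text('{"block": 42}', SCHEMA))  # {'block': 42}
# parse_text("not json", SCHEMA)  # would raise jsonschema.ValidationError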
qwilka/vn-tree
vntree/node.py
https://github.com/qwilka/vn-tree/blob/f08106e9c7232d8748d78d1d39b019699a7407dd/vntree/node.py#L435-L507
def to_texttree(self, indent=3, func=True, symbol='ascii'): """Method returning a text representation of the (sub-)tree rooted at the current node instance (`self`). :param indent: the indentation width for each tree level. :type indent: int :param func: function returning a string representation for each node. e.g. `func=lambda n: str(n._coord)` would show the node coordinates. `func=True` node.name displayed for each node. `func=False` no node representation, just the tree structure is displayed. :type func: function or bool :param symbol: tuple of tree symbol characters. `None` or 'ascii' gives a preformed ascii tree, equivalent to tuple :code:`(|, +, |, |, |, -, .)`. 'box' preformed with box-drawing characters, equivalent to tuple :code:`(┬, └, ┬, ├, └, ─, ⋅)`. 'unicode' preformed with unicode characters. :type symbol: tuple or str or None :returns: a string representation of the tree. :rtype: str """ if indent<2: indent=2 if func is True: # default func prints node.name func = lambda n: "{}".format(n.name) if isinstance(symbol, (list, tuple)): s_root, s_branch, s_spar, s_fnode = symbol elif symbol=="unicode": # ┬ └ ┬ ├ └ ─ ⋅ s_root, s_branch, s_fnode, s_mnode, s_lnode, s_spar, s_level = ( "\u252c", "\u2514", "\u252c", "\u251c", "\u2514", "\u2500", "\u22c5") elif symbol=="box": # https://en.wikipedia.org/wiki/Box-drawing_character # ┬ └ ┬ ├ └ ─ ⋅ s_root, s_branch, s_fnode, s_mnode, s_lnode, s_spar, s_level = ( "\u252c", "\u2514", "\u252c", "\u251c", "\u2514", "\u2500", "\u22c5") else: s_root, s_branch, s_fnode, s_mnode, s_lnode, s_spar, s_level = ( "|", "+", "|", "|", "|", "-", ".") _text = "" #local_root_level = len(self.ancestors) local_root_level = self._level for _n in self: #level = len(node.ancestors) - local_root_level level = _n._level - local_root_level if level==0: _text += s_root elif _n.parent.childs[0] == _n and len(_n.parent.childs)>1: # first child #s_spar="f" _text += ( (s_level + " "*(indent-1))*(level-1) + s_branch + s_spar*(indent-1) + s_fnode) elif _n.parent.childs[-1] == _n and len(_n.childs)==0: # last child, no children #s_spar="l" _text += ( (s_level + " "*(indent-1))*(level) + s_lnode ) elif _n.parent.childs[-1] == _n: # last child, has children #s_spar="l" _text += ( (s_level + " "*(indent-1))*(level) + s_mnode ) #+ s_spar*(indent-1) ) # elif level>0: # _text += (s_level + " "*(indent-1))*(level-1) + s_branch + s_spar*(indent-1) else: #_text += s_fnode #s_spar="m" _text += ( (s_level + " "*(indent-1))*(level) + s_mnode ) #+ s_spar*(indent-1) ) if func and callable(func): _text += func(_n) _text += "\n" return _text
[ "def", "to_texttree", "(", "self", ",", "indent", "=", "3", ",", "func", "=", "True", ",", "symbol", "=", "'ascii'", ")", ":", "if", "indent", "<", "2", ":", "indent", "=", "2", "if", "func", "is", "True", ":", "# default func prints node.name", "func", "=", "lambda", "n", ":", "\"{}\"", ".", "format", "(", "n", ".", "name", ")", "if", "isinstance", "(", "symbol", ",", "(", "list", ",", "tuple", ")", ")", ":", "s_root", ",", "s_branch", ",", "s_spar", ",", "s_fnode", "=", "symbol", "elif", "symbol", "==", "\"unicode\"", ":", "# ┬ └ ┬ ├ └ ─ ⋅", "s_root", ",", "s_branch", ",", "s_fnode", ",", "s_mnode", ",", "s_lnode", ",", "s_spar", ",", "s_level", "=", "(", "\"\\u252c\"", ",", "\"\\u2514\"", ",", "\"\\u252c\"", ",", "\"\\u251c\"", ",", "\"\\u2514\"", ",", "\"\\u2500\"", ",", "\"\\u22c5\"", ")", "elif", "symbol", "==", "\"box\"", ":", "# https://en.wikipedia.org/wiki/Box-drawing_character", "# ┬ └ ┬ ├ └ ─ ⋅", "s_root", ",", "s_branch", ",", "s_fnode", ",", "s_mnode", ",", "s_lnode", ",", "s_spar", ",", "s_level", "=", "(", "\"\\u252c\"", ",", "\"\\u2514\"", ",", "\"\\u252c\"", ",", "\"\\u251c\"", ",", "\"\\u2514\"", ",", "\"\\u2500\"", ",", "\"\\u22c5\"", ")", "else", ":", "s_root", ",", "s_branch", ",", "s_fnode", ",", "s_mnode", ",", "s_lnode", ",", "s_spar", ",", "s_level", "=", "(", "\"|\"", ",", "\"+\"", ",", "\"|\"", ",", "\"|\"", ",", "\"|\"", ",", "\"-\"", ",", "\".\"", ")", "_text", "=", "\"\"", "#local_root_level = len(self.ancestors)", "local_root_level", "=", "self", ".", "_level", "for", "_n", "in", "self", ":", "#level = len(node.ancestors) - local_root_level", "level", "=", "_n", ".", "_level", "-", "local_root_level", "if", "level", "==", "0", ":", "_text", "+=", "s_root", "elif", "_n", ".", "parent", ".", "childs", "[", "0", "]", "==", "_n", "and", "len", "(", "_n", ".", "parent", ".", "childs", ")", ">", "1", ":", "# first child", "#s_spar=\"f\"", "_text", "+=", "(", "(", "s_level", "+", "\" \"", "*", "(", "indent", "-", "1", ")", ")", "*", "(", "level", "-", "1", ")", "+", "s_branch", "+", "s_spar", "*", "(", "indent", "-", "1", ")", "+", "s_fnode", ")", "elif", "_n", ".", "parent", ".", "childs", "[", "-", "1", "]", "==", "_n", "and", "len", "(", "_n", ".", "childs", ")", "==", "0", ":", "# last child, no children", "#s_spar=\"l\"", "_text", "+=", "(", "(", "s_level", "+", "\" \"", "*", "(", "indent", "-", "1", ")", ")", "*", "(", "level", ")", "+", "s_lnode", ")", "elif", "_n", ".", "parent", ".", "childs", "[", "-", "1", "]", "==", "_n", ":", "# last child, has children", "#s_spar=\"l\"", "_text", "+=", "(", "(", "s_level", "+", "\" \"", "*", "(", "indent", "-", "1", ")", ")", "*", "(", "level", ")", "+", "s_mnode", ")", "#+ s_spar*(indent-1) )", "# elif level>0:", "# _text += (s_level + \" \"*(indent-1))*(level-1) + s_branch + s_spar*(indent-1)", "else", ":", "#_text += s_fnode", "#s_spar=\"m\"", "_text", "+=", "(", "(", "s_level", "+", "\" \"", "*", "(", "indent", "-", "1", ")", ")", "*", "(", "level", ")", "+", "s_mnode", ")", "#+ s_spar*(indent-1) )", "if", "func", "and", "callable", "(", "func", ")", ":", "_text", "+=", "func", "(", "_n", ")", "_text", "+=", "\"\\n\"", "return", "_text" ]
Method returning a text representation of the (sub-)tree rooted at the current node instance (`self`). :param indent: the indentation width for each tree level. :type indent: int :param func: function returning a string representation for each node. e.g. `func=lambda n: str(n._coord)` would show the node coordinates. `func=True` node.name displayed for each node. `func=False` no node representation, just the tree structure is displayed. :type func: function or bool :param symbol: tuple of tree symbol characters. `None` or 'ascii' gives a preformed ascii tree, equivalent to tuple :code:`(|, +, |, |, |, -, .)`. 'box' preformed with box-drawing characters, equivalent to tuple :code:`(┬, └, ┬, ├, └, ─, ⋅)`. 'unicode' preformed with unicode characters. :type symbol: tuple or str or None :returns: a string representation of the tree. :rtype: str
[ "Method", "returning", "a", "text", "representation", "of", "the", "(", "sub", "-", ")", "tree", "rooted", "at", "the", "current", "node", "instance", "(", "self", ")", "." ]
python
train
PyCQA/pylint
pylint/reporters/reports_handler_mix_in.py
https://github.com/PyCQA/pylint/blob/2bf5c61a3ff6ae90613b81679de42c0f19aea600/pylint/reporters/reports_handler_mix_in.py#L70-L79
def add_stats(self, **kwargs): """add some stats entries to the statistic dictionary raise an AssertionError if there is a key conflict """ for key, value in kwargs.items(): if key[-1] == "_": key = key[:-1] assert key not in self.stats self.stats[key] = value return self.stats
[ "def", "add_stats", "(", "self", ",", "*", "*", "kwargs", ")", ":", "for", "key", ",", "value", "in", "kwargs", ".", "items", "(", ")", ":", "if", "key", "[", "-", "1", "]", "==", "\"_\"", ":", "key", "=", "key", "[", ":", "-", "1", "]", "assert", "key", "not", "in", "self", ".", "stats", "self", ".", "stats", "[", "key", "]", "=", "value", "return", "self", ".", "stats" ]
add some stats entries to the statistics dictionary
raise an AssertionError if there is a key conflict
[ "add", "some", "stats", "entries", "to", "the", "statistics", "dictionary", "raise", "an", "AssertionError", "if", "there", "is", "a", "key", "conflict" ]
python
test
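The trailing-underscore trick in this record (so callers can pass reserved words like lambda_) is easy to test in isolation:

def add_stats(stats, **kwargs):
    for key, value in kwargs.items():
        if key[-1] == "_":
            key = key[:-1]  # 'lambda_' -> 'lambda': lets callers dodge keywords
        assert key not in stats, "key conflict on %r" % key
        stats[key] = value
    return stats

print(add_stats({}, lambda_=3, errors=0))  # {'lambda': 3, 'errors': 0}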
codeghar/brokerlso
brokerlso/qmfv2.py
https://github.com/codeghar/brokerlso/blob/e110e12502b090e12b06c7615dd0a96a14a92585/brokerlso/qmfv2.py#L109-L124
def delete_queue(self, name): """Create message content and properties to delete queue with QMFv2 :param name: Name of queue to delete :type name: str :returns: Tuple containing content and method properties """ content = {"_object_id": {"_object_name": self.object_name}, "_method_name": "delete", "_arguments": {"type": "queue", "name": name, "options": dict()}} # "A nested map with the key options. This is presently unused." logger.debug("Message content -> {0}".format(content)) return content, self.method_properties
[ "def", "delete_queue", "(", "self", ",", "name", ")", ":", "content", "=", "{", "\"_object_id\"", ":", "{", "\"_object_name\"", ":", "self", ".", "object_name", "}", ",", "\"_method_name\"", ":", "\"delete\"", ",", "\"_arguments\"", ":", "{", "\"type\"", ":", "\"queue\"", ",", "\"name\"", ":", "name", ",", "\"options\"", ":", "dict", "(", ")", "}", "}", "# \"A nested map with the key options. This is presently unused.\"", "logger", ".", "debug", "(", "\"Message content -> {0}\"", ".", "format", "(", "content", ")", ")", "return", "content", ",", "self", ".", "method_properties" ]
Create message content and properties to delete queue with QMFv2 :param name: Name of queue to delete :type name: str :returns: Tuple containing content and method properties
[ "Create", "message", "content", "and", "properties", "to", "delete", "queue", "with", "QMFv2" ]
python
test
twilio/twilio-python
twilio/rest/ip_messaging/v2/service/channel/invite.py
https://github.com/twilio/twilio-python/blob/c867895f55dcc29f522e6e8b8868d0d18483132f/twilio/rest/ip_messaging/v2/service/channel/invite.py#L214-L228
def get_instance(self, payload): """ Build an instance of InviteInstance :param dict payload: Payload response from the API :returns: twilio.rest.chat.v2.service.channel.invite.InviteInstance :rtype: twilio.rest.chat.v2.service.channel.invite.InviteInstance """ return InviteInstance( self._version, payload, service_sid=self._solution['service_sid'], channel_sid=self._solution['channel_sid'], )
[ "def", "get_instance", "(", "self", ",", "payload", ")", ":", "return", "InviteInstance", "(", "self", ".", "_version", ",", "payload", ",", "service_sid", "=", "self", ".", "_solution", "[", "'service_sid'", "]", ",", "channel_sid", "=", "self", ".", "_solution", "[", "'channel_sid'", "]", ",", ")" ]
Build an instance of InviteInstance :param dict payload: Payload response from the API :returns: twilio.rest.chat.v2.service.channel.invite.InviteInstance :rtype: twilio.rest.chat.v2.service.channel.invite.InviteInstance
[ "Build", "an", "instance", "of", "InviteInstance" ]
python
train
ionelmc/python-redis-lock
src/redis_lock/__init__.py
https://github.com/ionelmc/python-redis-lock/blob/5481cd88b64d86d318e389c79b0575a73464b1f5/src/redis_lock/__init__.py#L222-L261
def acquire(self, blocking=True, timeout=None): """ :param blocking: Boolean value specifying whether lock should be blocking or not. :param timeout: An integer value specifying the maximum number of seconds to block. """ logger.debug("Getting %r ...", self._name) if self._held: raise AlreadyAcquired("Already acquired from this Lock instance.") if not blocking and timeout is not None: raise TimeoutNotUsable("Timeout cannot be used if blocking=False") timeout = timeout if timeout is None else int(timeout) if timeout is not None and timeout <= 0: raise InvalidTimeout("Timeout (%d) cannot be less than or equal to 0" % timeout) if timeout and self._expire and timeout > self._expire: raise TimeoutTooLarge("Timeout (%d) cannot be greater than expire (%d)" % (timeout, self._expire)) busy = True blpop_timeout = timeout or self._expire or 0 timed_out = False while busy: busy = not self._client.set(self._name, self._id, nx=True, ex=self._expire) if busy: if timed_out: return False elif blocking: timed_out = not self._client.blpop(self._signal, blpop_timeout) and timeout else: logger.debug("Failed to get %r.", self._name) return False logger.debug("Got lock for %r.", self._name) if self._lock_renewal_interval is not None: self._start_lock_renewer() return True
[ "def", "acquire", "(", "self", ",", "blocking", "=", "True", ",", "timeout", "=", "None", ")", ":", "logger", ".", "debug", "(", "\"Getting %r ...\"", ",", "self", ".", "_name", ")", "if", "self", ".", "_held", ":", "raise", "AlreadyAcquired", "(", "\"Already acquired from this Lock instance.\"", ")", "if", "not", "blocking", "and", "timeout", "is", "not", "None", ":", "raise", "TimeoutNotUsable", "(", "\"Timeout cannot be used if blocking=False\"", ")", "timeout", "=", "timeout", "if", "timeout", "is", "None", "else", "int", "(", "timeout", ")", "if", "timeout", "is", "not", "None", "and", "timeout", "<=", "0", ":", "raise", "InvalidTimeout", "(", "\"Timeout (%d) cannot be less than or equal to 0\"", "%", "timeout", ")", "if", "timeout", "and", "self", ".", "_expire", "and", "timeout", ">", "self", ".", "_expire", ":", "raise", "TimeoutTooLarge", "(", "\"Timeout (%d) cannot be greater than expire (%d)\"", "%", "(", "timeout", ",", "self", ".", "_expire", ")", ")", "busy", "=", "True", "blpop_timeout", "=", "timeout", "or", "self", ".", "_expire", "or", "0", "timed_out", "=", "False", "while", "busy", ":", "busy", "=", "not", "self", ".", "_client", ".", "set", "(", "self", ".", "_name", ",", "self", ".", "_id", ",", "nx", "=", "True", ",", "ex", "=", "self", ".", "_expire", ")", "if", "busy", ":", "if", "timed_out", ":", "return", "False", "elif", "blocking", ":", "timed_out", "=", "not", "self", ".", "_client", ".", "blpop", "(", "self", ".", "_signal", ",", "blpop_timeout", ")", "and", "timeout", "else", ":", "logger", ".", "debug", "(", "\"Failed to get %r.\"", ",", "self", ".", "_name", ")", "return", "False", "logger", ".", "debug", "(", "\"Got lock for %r.\"", ",", "self", ".", "_name", ")", "if", "self", ".", "_lock_renewal_interval", "is", "not", "None", ":", "self", ".", "_start_lock_renewer", "(", ")", "return", "True" ]
:param blocking: Boolean value specifying whether lock should be blocking or not. :param timeout: An integer value specifying the maximum number of seconds to block.
[ ":", "param", "blocking", ":", "Boolean", "value", "specifying", "whether", "lock", "should", "be", "blocking", "or", "not", ".", ":", "param", "timeout", ":", "An", "integer", "value", "specifying", "the", "maximum", "number", "of", "seconds", "to", "block", "." ]
python
train
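Typical call pattern for this acquire API, sketched under the assumption of a Redis server on localhost; the lock name and expiry are illustrative. Note the record raises TimeoutTooLarge when timeout exceeds expire, so 10 < 60 here is deliberate.

from redis import StrictRedis
from redis_lock import Lock

conn = StrictRedis()                     # assumes redis://localhost:6379
lock = Lock(conn, "reports", expire=60)  # expire must be >= any acquire timeout
if lock.acquire(blocking=True, timeout=10):
    try:
        pass  # critical section
    finally:
        lock.release()
else:
    print("could not acquire 'reports' within 10 seconds")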
yyuu/botornado
boto/dynamodb/table.py
https://github.com/yyuu/botornado/blob/fffb056f5ff2324d1d5c1304014cfb1d899f602e/boto/dynamodb/table.py#L104-L129
def refresh(self, wait_for_active=False, retry_seconds=5): """ Refresh all of the fields of the Table object by calling the underlying DescribeTable request. :type wait_for_active: bool :param wait_for_active: If True, this command will not return until the table status, as returned from Amazon DynamoDB, is 'ACTIVE'. :type retry_seconds: int :param retry_seconds: If wait_for_active is True, this parameter controls the number of seconds of delay between calls to update_table in Amazon DynamoDB. Default is 5 seconds. """ done = False while not done: response = self.layer2.describe_table(self.name) self.update_from_response(response) if wait_for_active: if self.status == 'ACTIVE': done = True else: time.sleep(retry_seconds) else: done = True
[ "def", "refresh", "(", "self", ",", "wait_for_active", "=", "False", ",", "retry_seconds", "=", "5", ")", ":", "done", "=", "False", "while", "not", "done", ":", "response", "=", "self", ".", "layer2", ".", "describe_table", "(", "self", ".", "name", ")", "self", ".", "update_from_response", "(", "response", ")", "if", "wait_for_active", ":", "if", "self", ".", "status", "==", "'ACTIVE'", ":", "done", "=", "True", "else", ":", "time", ".", "sleep", "(", "retry_seconds", ")", "else", ":", "done", "=", "True" ]
Refresh all of the fields of the Table object by calling the underlying DescribeTable request. :type wait_for_active: bool :param wait_for_active: If True, this command will not return until the table status, as returned from Amazon DynamoDB, is 'ACTIVE'. :type retry_seconds: int :param retry_seconds: If wait_for_active is True, this parameter controls the number of seconds of delay between calls to update_table in Amazon DynamoDB. Default is 5 seconds.
[ "Refresh", "all", "of", "the", "fields", "of", "the", "Table", "object", "by", "calling", "the", "underlying", "DescribeTable", "request", "." ]
python
train
yyuu/botornado
botornado/s3/bucket.py
https://github.com/yyuu/botornado/blob/fffb056f5ff2324d1d5c1304014cfb1d899f602e/botornado/s3/bucket.py#L53-L63
def lookup(self, key_name, headers=None, callback=None): """ Deprecated: Please use get_key method. :type key_name: string :param key_name: The name of the key to retrieve :rtype: :class:`boto.s3.key.Key` :returns: A Key object from this bucket. """ return self.get_key(key_name, headers=headers, callback=callback)
[ "def", "lookup", "(", "self", ",", "key_name", ",", "headers", "=", "None", ",", "callback", "=", "None", ")", ":", "return", "self", ".", "get_key", "(", "key_name", ",", "headers", "=", "headers", ",", "callback", "=", "callback", ")" ]
Deprecated: Please use get_key method. :type key_name: string :param key_name: The name of the key to retrieve :rtype: :class:`boto.s3.key.Key` :returns: A Key object from this bucket.
[ "Deprecated", ":", "Please", "use", "get_key", "method", ".", ":", "type", "key_name", ":", "string", ":", "param", "key_name", ":", "The", "name", "of", "the", "key", "to", "retrieve", ":", "rtype", ":", ":", "class", ":", "boto", ".", "s3", ".", "key", ".", "Key", ":", "returns", ":", "A", "Key", "object", "from", "this", "bucket", "." ]
python
train
GaretJax/django-click
djclick/adapter.py
https://github.com/GaretJax/django-click/blob/3584bff81cb7891a1aa2d7fe49c1db501f5b0e84/djclick/adapter.py#L90-L110
def execute(self, *args, **kwargs):
        """
        Called when run through `call_command`. `args` are passed through,
        while `kwargs` is the __dict__ of the return value of
        `self.create_parser('', name)` updated with the kwargs passed to
        `call_command`.
        """
        # Remove internal Django command handling machinery
        kwargs.pop('skip_checks', None)
        parent_ctx = click.get_current_context(silent=True)
        with self.make_context('', list(args), parent=parent_ctx) as ctx:
            # Rename kwargs to the appropriate destination argument name
            opt_mapping = dict(self.map_names())
            arg_options = {opt_mapping.get(key, key): value
                           for key, value in six.iteritems(kwargs)}

            # Update the context with the passed (renamed) kwargs
            ctx.params.update(arg_options)

            # Invoke the command
            self.invoke(ctx)
[ "def", "execute", "(", "self", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "# Remove internal Django command handling machinery", "kwargs", ".", "pop", "(", "'skip_checks'", ",", "None", ")", "parent_ctx", "=", "click", ".", "get_current_context", "(", "silent", "=", "True", ")", "with", "self", ".", "make_context", "(", "''", ",", "list", "(", "args", ")", ",", "parent", "=", "parent_ctx", ")", "as", "ctx", ":", "# Rename kwargs to to the appropriate destination argument name", "opt_mapping", "=", "dict", "(", "self", ".", "map_names", "(", ")", ")", "arg_options", "=", "{", "opt_mapping", ".", "get", "(", "key", ",", "key", ")", ":", "value", "for", "key", ",", "value", "in", "six", ".", "iteritems", "(", "kwargs", ")", "}", "# Update the context with the passed (renamed) kwargs", "ctx", ".", "params", ".", "update", "(", "arg_options", ")", "# Invoke the command", "self", ".", "invoke", "(", "ctx", ")" ]
Called when run through `call_command`. `args` are passed through, while `kwargs` is the __dict__ of the return value of `self.create_parser('', name)` updated with the kwargs passed to `call_command`.
[ "Called", "when", "run", "through", "call_command", ".", "args", "are", "passed", "through", "while", "kwargs", "is", "the", "__dict__", "of", "the", "return", "value", "of", "self", ".", "create_parser", "(", "name", ")", "updated", "with", "the", "kwargs", "passed", "to", "call_command", "." ]
python
train
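The kwargs-renaming step stands on its own; a tiny demo with a made-up option mapping (names here are hypothetical):

import six

opt_mapping = {"verbosity": "v"}  # hypothetical: parser name -> click parameter name
kwargs = {"verbosity": 2, "color": True}
arg_options = {opt_mapping.get(key, key): value
               for key, value in six.iteritems(kwargs)}
print(arg_options)  # {'v': 2, 'color': True}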
mitsei/dlkit
dlkit/services/hierarchy.py
https://github.com/mitsei/dlkit/blob/445f968a175d61c8d92c0f617a3c17dc1dc7c584/dlkit/services/hierarchy.py#L458-L466
def get_hierarchy_form(self, *args, **kwargs): """Pass through to provider HierarchyAdminSession.get_hierarchy_form_for_update""" # Implemented from kitosid template for - # osid.resource.BinAdminSession.get_bin_form_for_update_template # This method might be a bit sketchy. Time will tell. if isinstance(args[-1], list) or 'hierarchy_record_types' in kwargs: return self.get_hierarchy_form_for_create(*args, **kwargs) else: return self.get_hierarchy_form_for_update(*args, **kwargs)
[ "def", "get_hierarchy_form", "(", "self", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "# Implemented from kitosid template for -", "# osid.resource.BinAdminSession.get_bin_form_for_update_template", "# This method might be a bit sketchy. Time will tell.", "if", "isinstance", "(", "args", "[", "-", "1", "]", ",", "list", ")", "or", "'hierarchy_record_types'", "in", "kwargs", ":", "return", "self", ".", "get_hierarchy_form_for_create", "(", "*", "args", ",", "*", "*", "kwargs", ")", "else", ":", "return", "self", ".", "get_hierarchy_form_for_update", "(", "*", "args", ",", "*", "*", "kwargs", ")" ]
Pass through to provider HierarchyAdminSession.get_hierarchy_form_for_update
[ "Pass", "through", "to", "provider", "HierarchyAdminSession", ".", "get_hierarchy_form_for_update" ]
python
train
senaite/senaite.jsonapi
src/senaite/jsonapi/catalog.py
https://github.com/senaite/senaite.jsonapi/blob/871959f4b1c9edbb477e9456325527ca78e13ec6/src/senaite/jsonapi/catalog.py#L113-L131
def make_query(self, **kw): """create a query suitable for the catalog """ query = kw.pop("query", {}) query.update(self.get_request_query()) query.update(self.get_custom_query()) query.update(self.get_keyword_query(**kw)) sort_on, sort_order = self.get_sort_spec() if sort_on and "sort_on" not in query: query.update({"sort_on": sort_on}) if sort_order and "sort_order" not in query: query.update({"sort_order": sort_order}) logger.info("make_query:: query={} | catalog={}".format( query, self.catalog)) return query
[ "def", "make_query", "(", "self", ",", "*", "*", "kw", ")", ":", "query", "=", "kw", ".", "pop", "(", "\"query\"", ",", "{", "}", ")", "query", ".", "update", "(", "self", ".", "get_request_query", "(", ")", ")", "query", ".", "update", "(", "self", ".", "get_custom_query", "(", ")", ")", "query", ".", "update", "(", "self", ".", "get_keyword_query", "(", "*", "*", "kw", ")", ")", "sort_on", ",", "sort_order", "=", "self", ".", "get_sort_spec", "(", ")", "if", "sort_on", "and", "\"sort_on\"", "not", "in", "query", ":", "query", ".", "update", "(", "{", "\"sort_on\"", ":", "sort_on", "}", ")", "if", "sort_order", "and", "\"sort_order\"", "not", "in", "query", ":", "query", ".", "update", "(", "{", "\"sort_order\"", ":", "sort_order", "}", ")", "logger", ".", "info", "(", "\"make_query:: query={} | catalog={}\"", ".", "format", "(", "query", ",", "self", ".", "catalog", ")", ")", "return", "query" ]
create a query suitable for the catalog
[ "create", "a", "query", "suitable", "for", "the", "catalog" ]
python
train
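The layering order in make_query matters (later updates win, and sort keys are only defaulted in if absent); a standalone sketch with stand-in source dicts:

def make_query(request_query, custom_query, keyword_query,
               sort_on=None, sort_order=None, **kw):
    query = kw.pop("query", {})
    query.update(request_query)   # later updates override earlier ones
    query.update(custom_query)
    query.update(keyword_query)
    if sort_on and "sort_on" not in query:
        query["sort_on"] = sort_on
    if sort_order and "sort_order" not in query:
        query["sort_order"] = sort_order
    return query

print(make_query({"portal_type": "Client"}, {}, {"SearchableText": "acme"},
                 sort_on="created", sort_order="descending"))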
ska-sa/montblanc
install/tensorflow_ops_ext.py
https://github.com/ska-sa/montblanc/blob/8a2e742e7500bcc6196489b735f87b233075dd2d/install/tensorflow_ops_ext.py#L82-L152
def create_tensorflow_extension(nvcc_settings, device_info): """ Create an extension that builds the custom tensorflow ops """ import tensorflow as tf import glob use_cuda = (bool(nvcc_settings['cuda_available']) and tf.test.is_built_with_cuda()) # Source and includes source_path = os.path.join('montblanc', 'impl', 'rime', 'tensorflow', 'rime_ops') sources = glob.glob(os.path.join(source_path, '*.cpp')) # Header dependencies depends = glob.glob(os.path.join(source_path, '*.h')) # Include directories tf_inc = tf.sysconfig.get_include() include_dirs = [os.path.join('montblanc', 'include'), source_path] include_dirs += [tf_inc, os.path.join(tf_inc, "external", "nsync", "public")] # Libraries library_dirs = [tf.sysconfig.get_lib()] libraries = ['tensorflow_framework'] extra_link_args = ['-fPIC', '-fopenmp', '-g0'] # Macros define_macros = [ ('_MWAITXINTRIN_H_INCLUDED', None), ('_FORCE_INLINES', None), ('_GLIBCXX_USE_CXX11_ABI', 0)] # Common flags flags = ['-std=c++11'] gcc_flags = flags + ['-g0', '-fPIC', '-fopenmp', '-O2'] gcc_flags += ['-march=native', '-mtune=native'] nvcc_flags = flags + [] # Add cuda specific build information, if it is available if use_cuda: # CUDA source files sources += glob.glob(os.path.join(source_path, '*.cu')) # CUDA include directories include_dirs += nvcc_settings['include_dirs'] # CUDA header dependencies depends += glob.glob(os.path.join(source_path, '*.cuh')) # CUDA libraries library_dirs += nvcc_settings['library_dirs'] libraries += nvcc_settings['libraries'] # Flags nvcc_flags += ['-x', 'cu'] nvcc_flags += ['--compiler-options', '"-fPIC"'] # --gpu-architecture=sm_xy flags nvcc_flags += cuda_architecture_flags(device_info) # Ideally this would be set in define_macros, but # this must be set differently for gcc and nvcc nvcc_flags += ['-DGOOGLE_CUDA=%d' % int(use_cuda)] return Extension(tensorflow_extension_name, sources=sources, include_dirs=include_dirs, depends=depends, library_dirs=library_dirs, libraries=libraries, define_macros=define_macros, # this syntax is specific to this build system # we're only going to use certain compiler args with nvcc and not with gcc # the implementation of this trick is in customize_compiler_for_nvcc() above extra_compile_args={ 'gcc': gcc_flags, 'nvcc': nvcc_flags }, extra_link_args=extra_link_args, )
[ "def", "create_tensorflow_extension", "(", "nvcc_settings", ",", "device_info", ")", ":", "import", "tensorflow", "as", "tf", "import", "glob", "use_cuda", "=", "(", "bool", "(", "nvcc_settings", "[", "'cuda_available'", "]", ")", "and", "tf", ".", "test", ".", "is_built_with_cuda", "(", ")", ")", "# Source and includes", "source_path", "=", "os", ".", "path", ".", "join", "(", "'montblanc'", ",", "'impl'", ",", "'rime'", ",", "'tensorflow'", ",", "'rime_ops'", ")", "sources", "=", "glob", ".", "glob", "(", "os", ".", "path", ".", "join", "(", "source_path", ",", "'*.cpp'", ")", ")", "# Header dependencies", "depends", "=", "glob", ".", "glob", "(", "os", ".", "path", ".", "join", "(", "source_path", ",", "'*.h'", ")", ")", "# Include directories", "tf_inc", "=", "tf", ".", "sysconfig", ".", "get_include", "(", ")", "include_dirs", "=", "[", "os", ".", "path", ".", "join", "(", "'montblanc'", ",", "'include'", ")", ",", "source_path", "]", "include_dirs", "+=", "[", "tf_inc", ",", "os", ".", "path", ".", "join", "(", "tf_inc", ",", "\"external\"", ",", "\"nsync\"", ",", "\"public\"", ")", "]", "# Libraries", "library_dirs", "=", "[", "tf", ".", "sysconfig", ".", "get_lib", "(", ")", "]", "libraries", "=", "[", "'tensorflow_framework'", "]", "extra_link_args", "=", "[", "'-fPIC'", ",", "'-fopenmp'", ",", "'-g0'", "]", "# Macros", "define_macros", "=", "[", "(", "'_MWAITXINTRIN_H_INCLUDED'", ",", "None", ")", ",", "(", "'_FORCE_INLINES'", ",", "None", ")", ",", "(", "'_GLIBCXX_USE_CXX11_ABI'", ",", "0", ")", "]", "# Common flags", "flags", "=", "[", "'-std=c++11'", "]", "gcc_flags", "=", "flags", "+", "[", "'-g0'", ",", "'-fPIC'", ",", "'-fopenmp'", ",", "'-O2'", "]", "gcc_flags", "+=", "[", "'-march=native'", ",", "'-mtune=native'", "]", "nvcc_flags", "=", "flags", "+", "[", "]", "# Add cuda specific build information, if it is available", "if", "use_cuda", ":", "# CUDA source files", "sources", "+=", "glob", ".", "glob", "(", "os", ".", "path", ".", "join", "(", "source_path", ",", "'*.cu'", ")", ")", "# CUDA include directories", "include_dirs", "+=", "nvcc_settings", "[", "'include_dirs'", "]", "# CUDA header dependencies", "depends", "+=", "glob", ".", "glob", "(", "os", ".", "path", ".", "join", "(", "source_path", ",", "'*.cuh'", ")", ")", "# CUDA libraries", "library_dirs", "+=", "nvcc_settings", "[", "'library_dirs'", "]", "libraries", "+=", "nvcc_settings", "[", "'libraries'", "]", "# Flags", "nvcc_flags", "+=", "[", "'-x'", ",", "'cu'", "]", "nvcc_flags", "+=", "[", "'--compiler-options'", ",", "'\"-fPIC\"'", "]", "# --gpu-architecture=sm_xy flags", "nvcc_flags", "+=", "cuda_architecture_flags", "(", "device_info", ")", "# Ideally this would be set in define_macros, but", "# this must be set differently for gcc and nvcc", "nvcc_flags", "+=", "[", "'-DGOOGLE_CUDA=%d'", "%", "int", "(", "use_cuda", ")", "]", "return", "Extension", "(", "tensorflow_extension_name", ",", "sources", "=", "sources", ",", "include_dirs", "=", "include_dirs", ",", "depends", "=", "depends", ",", "library_dirs", "=", "library_dirs", ",", "libraries", "=", "libraries", ",", "define_macros", "=", "define_macros", ",", "# this syntax is specific to this build system", "# we're only going to use certain compiler args with nvcc and not with gcc", "# the implementation of this trick is in customize_compiler_for_nvcc() above", "extra_compile_args", "=", "{", "'gcc'", ":", "gcc_flags", ",", "'nvcc'", ":", "nvcc_flags", "}", ",", "extra_link_args", "=", "extra_link_args", ",", ")" ]
Create an extension that builds the custom tensorflow ops
[ "Create", "an", "extension", "that", "builds", "the", "custom", "tensorflow", "ops" ]
python
train
mongodb/motor
motor/core.py
https://github.com/mongodb/motor/blob/6af22720723bde7c78eb8cb126962cfbfc034b2c/motor/core.py#L1440-L1451
def close(self): """Close this change stream. Stops any "async for" loops using this change stream. """ if self.delegate: return self._close() # Never started. future = self._framework.get_future(self.get_io_loop()) future.set_result(None) return future
[ "def", "close", "(", "self", ")", ":", "if", "self", ".", "delegate", ":", "return", "self", ".", "_close", "(", ")", "# Never started.", "future", "=", "self", ".", "_framework", ".", "get_future", "(", "self", ".", "get_io_loop", "(", ")", ")", "future", ".", "set_result", "(", "None", ")", "return", "future" ]
Close this change stream. Stops any "async for" loops using this change stream.
[ "Close", "this", "change", "stream", "." ]
python
train
Alignak-monitoring/alignak
alignak/daemon.py
https://github.com/Alignak-monitoring/alignak/blob/f3c145207e83159b799d3714e4241399c7740a64/alignak/daemon.py#L830-L872
def do_stop(self): """Execute the stop of this daemon: - request the daemon to stop - request the http thread to stop, else force stop the thread - Close the http socket - Shutdown the manager - Stop and join all started "modules" :return: None """ logger.info("Stopping %s...", self.name) if self.sync_manager: logger.info("Shutting down synchronization manager...") self.sync_manager.shutdown() self.sync_manager = None # Maybe the modules manager is not even created! if self.modules_manager: logger.info("Shutting down modules manager...") self.modules_manager.stop_all() # todo: daemonize the process thanks to CherryPy plugin if self.http_daemon: logger.info("Shutting down HTTP daemon...") if self.http_daemon.cherrypy_thread: self.http_daemon.stop() self.http_daemon = None # todo: daemonize the process thanks to CherryPy plugin if self.http_thread: logger.info("Checking HTTP thread...") # Let a few seconds to exit self.http_thread.join(timeout=3) if self.http_thread.is_alive(): # pragma: no cover, should never happen... logger.warning("HTTP thread did not terminated. Force stopping the thread..") # try: # self.http_thread._Thread__stop() # pylint: disable=E1101 # except Exception as exp: # pylint: disable=broad-except # print("Exception: %s" % exp) else: logger.debug("HTTP thread exited") self.http_thread = None
[ "def", "do_stop", "(", "self", ")", ":", "logger", ".", "info", "(", "\"Stopping %s...\"", ",", "self", ".", "name", ")", "if", "self", ".", "sync_manager", ":", "logger", ".", "info", "(", "\"Shutting down synchronization manager...\"", ")", "self", ".", "sync_manager", ".", "shutdown", "(", ")", "self", ".", "sync_manager", "=", "None", "# Maybe the modules manager is not even created!", "if", "self", ".", "modules_manager", ":", "logger", ".", "info", "(", "\"Shutting down modules manager...\"", ")", "self", ".", "modules_manager", ".", "stop_all", "(", ")", "# todo: daemonize the process thanks to CherryPy plugin", "if", "self", ".", "http_daemon", ":", "logger", ".", "info", "(", "\"Shutting down HTTP daemon...\"", ")", "if", "self", ".", "http_daemon", ".", "cherrypy_thread", ":", "self", ".", "http_daemon", ".", "stop", "(", ")", "self", ".", "http_daemon", "=", "None", "# todo: daemonize the process thanks to CherryPy plugin", "if", "self", ".", "http_thread", ":", "logger", ".", "info", "(", "\"Checking HTTP thread...\"", ")", "# Let a few seconds to exit", "self", ".", "http_thread", ".", "join", "(", "timeout", "=", "3", ")", "if", "self", ".", "http_thread", ".", "is_alive", "(", ")", ":", "# pragma: no cover, should never happen...", "logger", ".", "warning", "(", "\"HTTP thread did not terminated. Force stopping the thread..\"", ")", "# try:", "# self.http_thread._Thread__stop() # pylint: disable=E1101", "# except Exception as exp: # pylint: disable=broad-except", "# print(\"Exception: %s\" % exp)", "else", ":", "logger", ".", "debug", "(", "\"HTTP thread exited\"", ")", "self", ".", "http_thread", "=", "None" ]
Execute the stop of this daemon: - request the daemon to stop - request the http thread to stop, else force stop the thread - Close the http socket - Shutdown the manager - Stop and join all started "modules" :return: None
[ "Execute", "the", "stop", "of", "this", "daemon", ":", "-", "request", "the", "daemon", "to", "stop", "-", "request", "the", "http", "thread", "to", "stop", "else", "force", "stop", "the", "thread", "-", "Close", "the", "http", "socket", "-", "Shutdown", "the", "manager", "-", "Stop", "and", "join", "all", "started", "modules" ]
python
train
gem/oq-engine
openquake/hazardlib/geo/geodetic.py
https://github.com/gem/oq-engine/blob/8294553a0b8aba33fd96437a35065d03547d0040/openquake/hazardlib/geo/geodetic.py#L489-L525
def distance_to_arc(alon, alat, aazimuth, plons, plats): """ Calculate a closest distance between a great circle arc and a point (or a collection of points). :param float alon, alat: Arc reference point longitude and latitude, in decimal degrees. :param azimuth: Arc azimuth (an angle between direction to a north and arc in clockwise direction), measured in a reference point, in decimal degrees. :param float plons, plats: Longitudes and latitudes of points to measure distance. Either scalar values or numpy arrays of decimal degrees. :returns: Distance in km, a scalar value or numpy array depending on ``plons`` and ``plats``. A distance is negative if the target point lies on the right hand side of the arc. Solves a spherical triangle formed by reference point, target point and a projection of target point to a reference great circle arc. """ azimuth_to_target = azimuth(alon, alat, plons, plats) distance_to_target = geodetic_distance(alon, alat, plons, plats) # find an angle between an arc and a great circle arc connecting # arc's reference point and a target point t_angle = (azimuth_to_target - aazimuth + 360) % 360 # in a spherical right triangle cosine of the angle of a cathetus # augmented to pi/2 is equal to sine of an opposite angle times # sine of hypotenuse, see # http://en.wikipedia.org/wiki/Spherical_trigonometry#Napier.27s_Pentagon angle = numpy.arccos( (numpy.sin(numpy.radians(t_angle)) * numpy.sin(distance_to_target / EARTH_RADIUS)) ) return (numpy.pi / 2 - angle) * EARTH_RADIUS
[ "def", "distance_to_arc", "(", "alon", ",", "alat", ",", "aazimuth", ",", "plons", ",", "plats", ")", ":", "azimuth_to_target", "=", "azimuth", "(", "alon", ",", "alat", ",", "plons", ",", "plats", ")", "distance_to_target", "=", "geodetic_distance", "(", "alon", ",", "alat", ",", "plons", ",", "plats", ")", "# find an angle between an arc and a great circle arc connecting", "# arc's reference point and a target point", "t_angle", "=", "(", "azimuth_to_target", "-", "aazimuth", "+", "360", ")", "%", "360", "# in a spherical right triangle cosine of the angle of a cathetus", "# augmented to pi/2 is equal to sine of an opposite angle times", "# sine of hypotenuse, see", "# http://en.wikipedia.org/wiki/Spherical_trigonometry#Napier.27s_Pentagon", "angle", "=", "numpy", ".", "arccos", "(", "(", "numpy", ".", "sin", "(", "numpy", ".", "radians", "(", "t_angle", ")", ")", "*", "numpy", ".", "sin", "(", "distance_to_target", "/", "EARTH_RADIUS", ")", ")", ")", "return", "(", "numpy", ".", "pi", "/", "2", "-", "angle", ")", "*", "EARTH_RADIUS" ]
Calculate the closest distance between a great circle arc and a point
(or a collection of points).

:param float alon, alat:
    Arc reference point longitude and latitude, in decimal degrees.
:param azimuth:
    Arc azimuth (an angle between direction to the north and arc in
    clockwise direction), measured in a reference point, in decimal degrees.
:param float plons, plats:
    Longitudes and latitudes of points to measure distance. Either scalar
    values or numpy arrays of decimal degrees.
:returns:
    Distance in km, a scalar value or numpy array depending on ``plons``
    and ``plats``. A distance is negative if the target point lies on the
    right hand side of the arc.

Solves a spherical triangle formed by reference point, target point and
a projection of target point to a reference great circle arc.
[ "Calculate", "the", "closest", "distance", "between", "a", "great", "circle", "arc", "and", "a", "point", "(", "or", "a", "collection", "of", "points", ")", "." ]
python
train
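A self-contained numpy sketch of the same spherical-triangle computation. The geodetic_distance and azimuth helpers below are plain haversine and initial-bearing formulas standing in for openquake's own geodesic routines, so values only approximate the original.

import numpy as np

R = 6371.0  # mean Earth radius in km, standing in for EARTH_RADIUS

def geodetic_distance(lon1, lat1, lon2, lat2):
    lon1, lat1, lon2, lat2 = map(np.radians, (lon1, lat1, lon2, lat2))
    h = (np.sin((lat2 - lat1) / 2) ** 2 +
         np.cos(lat1) * np.cos(lat2) * np.sin((lon2 - lon1) / 2) ** 2)
    return 2 * R * np.arcsin(np.sqrt(h))

def azimuth(lon1, lat1, lon2, lat2):
    lon1, lat1, lon2, lat2 = map(np.radians, (lon1, lat1, lon2, lat2))
    y = np.sin(lon2 - lon1) * np.cos(lat2)
    x = (np.cos(lat1) * np.sin(lat2) -
         np.sin(lat1) * np.cos(lat2) * np.cos(lon2 - lon1))
    return np.degrees(np.arctan2(y, x)) % 360

def distance_to_arc(alon, alat, aazimuth, plons, plats):
    # angle between the arc and the great circle to the target point
    t_angle = (azimuth(alon, alat, plons, plats) - aazimuth + 360) % 360
    d = geodetic_distance(alon, alat, plons, plats)
    angle = np.arccos(np.sin(np.radians(t_angle)) * np.sin(d / R))
    return (np.pi / 2 - angle) * R

# point one degree of longitude away from a north-pointing arc through (0, 0)
print(distance_to_arc(0.0, 0.0, 0.0, 1.0, 0.0))  # ~111 km off the great circle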
albertyw/pyziptax
pyziptax/ziptax.py
https://github.com/albertyw/pyziptax/blob/c56dd440e4cadff7f2dd4b72e5dcced06a44969d/pyziptax/ziptax.py#L36-L44
def make_request_data(self, zipcode, city, state): """ Make the request params given location data """ data = {'key': self.api_key, 'postalcode': str(zipcode), 'city': city, 'state': state } data = ZipTaxClient._clean_request_data(data) return data
[ "def", "make_request_data", "(", "self", ",", "zipcode", ",", "city", ",", "state", ")", ":", "data", "=", "{", "'key'", ":", "self", ".", "api_key", ",", "'postalcode'", ":", "str", "(", "zipcode", ")", ",", "'city'", ":", "city", ",", "'state'", ":", "state", "}", "data", "=", "ZipTaxClient", ".", "_clean_request_data", "(", "data", ")", "return", "data" ]
Make the request params given location data
[ "Make", "the", "request", "params", "given", "location", "data" ]
python
valid
ThreatConnect-Inc/tcex
tcex/tcex_bin_run.py
https://github.com/ThreatConnect-Inc/tcex/blob/dd4d7a1ef723af1561687120191886b9a2fd4b47/tcex/tcex_bin_run.py#L661-L682
def profile(self, profile): """Set the current profile. Args: profile (dict): The profile data. """ # clear staging data self._staging_data = None # retrieve language from install.json or assume Python lang = profile.get('install_json', {}).get('programLanguage', 'PYTHON') # load instance of ArgBuilder profile_args = ArgBuilder(lang, self.profile_args(profile.get('args'))) # set current profile self._profile = profile # attach instance to current profile self._profile['profile_args'] = profile_args # load tcex module after current profile is set self.load_tcex() # select report for current profile self.reports.profile(profile.get('profile_name')) # create required directories for tcrun to function self._create_tc_dirs()
[ "def", "profile", "(", "self", ",", "profile", ")", ":", "# clear staging data", "self", ".", "_staging_data", "=", "None", "# retrieve language from install.json or assume Python", "lang", "=", "profile", ".", "get", "(", "'install_json'", ",", "{", "}", ")", ".", "get", "(", "'programLanguage'", ",", "'PYTHON'", ")", "# load instance of ArgBuilder", "profile_args", "=", "ArgBuilder", "(", "lang", ",", "self", ".", "profile_args", "(", "profile", ".", "get", "(", "'args'", ")", ")", ")", "# set current profile", "self", ".", "_profile", "=", "profile", "# attach instance to current profile", "self", ".", "_profile", "[", "'profile_args'", "]", "=", "profile_args", "# load tcex module after current profile is set", "self", ".", "load_tcex", "(", ")", "# select report for current profile", "self", ".", "reports", ".", "profile", "(", "profile", ".", "get", "(", "'profile_name'", ")", ")", "# create required directories for tcrun to function", "self", ".", "_create_tc_dirs", "(", ")" ]
Set the current profile. Args: profile (dict): The profile data.
[ "Set", "the", "current", "profile", "." ]
python
train
google/textfsm
textfsm/parser.py
https://github.com/google/textfsm/blob/63a2aaece33e07947aa80963dca99b893964633b/textfsm/parser.py#L814-L859
def _ValidateFSM(self): """Checks state names and destinations for validity. Each destination state must exist, be a valid name and not be a reserved name. There must be a 'Start' state and if 'EOF' or 'End' states are specified, they must be empty. Returns: True if FSM is valid. Raises: TextFSMTemplateError: If any state definitions are invalid. """ # Must have 'Start' state. if 'Start' not in self.states: raise TextFSMTemplateError("Missing state 'Start'.") # 'End/EOF' state (if specified) must be empty. if self.states.get('End'): raise TextFSMTemplateError("Non-Empty 'End' state.") if self.states.get('EOF'): raise TextFSMTemplateError("Non-Empty 'EOF' state.") # Remove 'End' state. if 'End' in self.states: del self.states['End'] self.state_list.remove('End') # Ensure jump states are all valid. for state in self.states: for rule in self.states[state]: if rule.line_op == 'Error': continue if not rule.new_state or rule.new_state in ('End', 'EOF'): continue if rule.new_state not in self.states: raise TextFSMTemplateError( "State '%s' not found, referenced in state '%s'" % (rule.new_state, state)) return True
[ "def", "_ValidateFSM", "(", "self", ")", ":", "# Must have 'Start' state.", "if", "'Start'", "not", "in", "self", ".", "states", ":", "raise", "TextFSMTemplateError", "(", "\"Missing state 'Start'.\"", ")", "# 'End/EOF' state (if specified) must be empty.", "if", "self", ".", "states", ".", "get", "(", "'End'", ")", ":", "raise", "TextFSMTemplateError", "(", "\"Non-Empty 'End' state.\"", ")", "if", "self", ".", "states", ".", "get", "(", "'EOF'", ")", ":", "raise", "TextFSMTemplateError", "(", "\"Non-Empty 'EOF' state.\"", ")", "# Remove 'End' state.", "if", "'End'", "in", "self", ".", "states", ":", "del", "self", ".", "states", "[", "'End'", "]", "self", ".", "state_list", ".", "remove", "(", "'End'", ")", "# Ensure jump states are all valid.", "for", "state", "in", "self", ".", "states", ":", "for", "rule", "in", "self", ".", "states", "[", "state", "]", ":", "if", "rule", ".", "line_op", "==", "'Error'", ":", "continue", "if", "not", "rule", ".", "new_state", "or", "rule", ".", "new_state", "in", "(", "'End'", ",", "'EOF'", ")", ":", "continue", "if", "rule", ".", "new_state", "not", "in", "self", ".", "states", ":", "raise", "TextFSMTemplateError", "(", "\"State '%s' not found, referenced in state '%s'\"", "%", "(", "rule", ".", "new_state", ",", "state", ")", ")", "return", "True" ]
Checks state names and destinations for validity. Each destination state must exist, be a valid name and not be a reserved name. There must be a 'Start' state and if 'EOF' or 'End' states are specified, they must be empty. Returns: True if FSM is valid. Raises: TextFSMTemplateError: If any state definitions are invalid.
[ "Checks", "state", "names", "and", "destinations", "for", "validity", "." ]
python
train
trailofbits/manticore
manticore/native/cpu/arm.py
https://github.com/trailofbits/manticore/blob/54c5a15b1119c523ae54c09972413e8b97f11629/manticore/native/cpu/arm.py#L270-L280
def _write_APSR(self, apsr): """Auxiliary function - Writes flags from a full APSR (only 4 msb used)""" V = Operators.EXTRACT(apsr, 28, 1) C = Operators.EXTRACT(apsr, 29, 1) Z = Operators.EXTRACT(apsr, 30, 1) N = Operators.EXTRACT(apsr, 31, 1) self.write('APSR_V', V) self.write('APSR_C', C) self.write('APSR_Z', Z) self.write('APSR_N', N)
[ "def", "_write_APSR", "(", "self", ",", "apsr", ")", ":", "V", "=", "Operators", ".", "EXTRACT", "(", "apsr", ",", "28", ",", "1", ")", "C", "=", "Operators", ".", "EXTRACT", "(", "apsr", ",", "29", ",", "1", ")", "Z", "=", "Operators", ".", "EXTRACT", "(", "apsr", ",", "30", ",", "1", ")", "N", "=", "Operators", ".", "EXTRACT", "(", "apsr", ",", "31", ",", "1", ")", "self", ".", "write", "(", "'APSR_V'", ",", "V", ")", "self", ".", "write", "(", "'APSR_C'", ",", "C", ")", "self", ".", "write", "(", "'APSR_Z'", ",", "Z", ")", "self", ".", "write", "(", "'APSR_N'", ",", "N", ")" ]
Auxiliary function - Writes flags from a full APSR (only 4 msb used)
[ "Auxiliary", "function", "-", "Writes", "flags", "from", "a", "full", "APSR", "(", "only", "4", "msb", "used", ")" ]
python
valid
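The APSR handling above packs the N, Z, C and V condition flags into bits 31..28 of a 32-bit word, with Operators.EXTRACT(apsr, offset, 1) reading a single bit. A plain-Python sketch of the same bit arithmetic (the helper name and test values are illustrative, not part of manticore):

def extract_nzcv(apsr):
    # Mirror Operators.EXTRACT(apsr, offset, 1): shift right, mask one bit.
    v = (apsr >> 28) & 1  # overflow
    c = (apsr >> 29) & 1  # carry
    z = (apsr >> 30) & 1  # zero
    n = (apsr >> 31) & 1  # negative
    return n, z, c, v

assert extract_nzcv(0xF0000000) == (1, 1, 1, 1)  # all four flags set
assert extract_nzcv(0x40000000) == (0, 1, 0, 0)  # only Z set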
openego/eDisGo
edisgo/grid/network.py
https://github.com/openego/eDisGo/blob/e6245bdaf236f9c49dbda5a18c1c458290f41e2b/edisgo/grid/network.py#L142-L182
def plot_mv_grid_expansion_costs(self, **kwargs): """ Plots costs per MV line. For more information see :func:`edisgo.tools.plots.mv_grid_topology`. """ if self.network.pypsa is not None and \ self.network.results.grid_expansion_costs is not None: if isinstance(self, EDisGo): # convert index of grid expansion costs to str grid_expansion_costs = \ self.network.results.grid_expansion_costs.reset_index() grid_expansion_costs['index'] = \ grid_expansion_costs['index'].apply(lambda _: repr(_)) grid_expansion_costs.set_index('index', inplace=True) else: grid_expansion_costs = \ self.network.results.grid_expansion_costs plots.mv_grid_topology( self.network.pypsa, self.network.config, line_color='expansion_costs', grid_expansion_costs=grid_expansion_costs, filename=kwargs.get('filename', None), grid_district_geom=kwargs.get('grid_district_geom', True), background_map=kwargs.get('background_map', True), limits_cb_lines=kwargs.get('limits_cb_lines', None), xlim=kwargs.get('xlim', None), ylim=kwargs.get('ylim', None), lines_cmap=kwargs.get('lines_cmap', 'inferno_r'), title=kwargs.get('title', ''), scaling_factor_line_width=kwargs.get( 'scaling_factor_line_width', None) ) else: if self.network.pypsa is None: logging.warning("pypsa representation of MV grid needed to " "plot grid expansion costs.") if self.network.results.grid_expansion_costs is None: logging.warning("Grid expansion cost results needed to plot " "them.")
[ "def", "plot_mv_grid_expansion_costs", "(", "self", ",", "*", "*", "kwargs", ")", ":", "if", "self", ".", "network", ".", "pypsa", "is", "not", "None", "and", "self", ".", "network", ".", "results", ".", "grid_expansion_costs", "is", "not", "None", ":", "if", "isinstance", "(", "self", ",", "EDisGo", ")", ":", "# convert index of grid expansion costs to str", "grid_expansion_costs", "=", "self", ".", "network", ".", "results", ".", "grid_expansion_costs", ".", "reset_index", "(", ")", "grid_expansion_costs", "[", "'index'", "]", "=", "grid_expansion_costs", "[", "'index'", "]", ".", "apply", "(", "lambda", "_", ":", "repr", "(", "_", ")", ")", "grid_expansion_costs", ".", "set_index", "(", "'index'", ",", "inplace", "=", "True", ")", "else", ":", "grid_expansion_costs", "=", "self", ".", "network", ".", "results", ".", "grid_expansion_costs", "plots", ".", "mv_grid_topology", "(", "self", ".", "network", ".", "pypsa", ",", "self", ".", "network", ".", "config", ",", "line_color", "=", "'expansion_costs'", ",", "grid_expansion_costs", "=", "grid_expansion_costs", ",", "filename", "=", "kwargs", ".", "get", "(", "'filename'", ",", "None", ")", ",", "grid_district_geom", "=", "kwargs", ".", "get", "(", "'grid_district_geom'", ",", "True", ")", ",", "background_map", "=", "kwargs", ".", "get", "(", "'background_map'", ",", "True", ")", ",", "limits_cb_lines", "=", "kwargs", ".", "get", "(", "'limits_cb_lines'", ",", "None", ")", ",", "xlim", "=", "kwargs", ".", "get", "(", "'xlim'", ",", "None", ")", ",", "ylim", "=", "kwargs", ".", "get", "(", "'ylim'", ",", "None", ")", ",", "lines_cmap", "=", "kwargs", ".", "get", "(", "'lines_cmap'", ",", "'inferno_r'", ")", ",", "title", "=", "kwargs", ".", "get", "(", "'title'", ",", "''", ")", ",", "scaling_factor_line_width", "=", "kwargs", ".", "get", "(", "'scaling_factor_line_width'", ",", "None", ")", ")", "else", ":", "if", "self", ".", "network", ".", "pypsa", "is", "None", ":", "logging", ".", "warning", "(", "\"pypsa representation of MV grid needed to \"", "\"plot grid expansion costs.\"", ")", "if", "self", ".", "network", ".", "results", ".", "grid_expansion_costs", "is", "None", ":", "logging", ".", "warning", "(", "\"Grid expansion cost results needed to plot \"", "\"them.\"", ")" ]
Plots costs per MV line. For more information see :func:`edisgo.tools.plots.mv_grid_topology`.
[ "Plots", "costs", "per", "MV", "line", "." ]
python
train
alfred82santa/dirty-models
dirty_models/base.py
https://github.com/alfred82santa/dirty-models/blob/354becdb751b21f673515eae928c256c7e923c50/dirty_models/base.py#L61-L70
def is_locked(self): """ Returns whether model is locked """ if not self.__locked__: return False elif self.get_parent(): return self.get_parent().is_locked() return True
[ "def", "is_locked", "(", "self", ")", ":", "if", "not", "self", ".", "__locked__", ":", "return", "False", "elif", "self", ".", "get_parent", "(", ")", ":", "return", "self", ".", "get_parent", "(", ")", ".", "is_locked", "(", ")", "return", "True" ]
Returns whether model is locked
[ "Returns", "whether", "model", "is", "locked" ]
python
train
pantsbuild/pants
src/python/pants/build_graph/target.py
https://github.com/pantsbuild/pants/blob/b72e650da0df685824ffdcc71988b8c282d0962d/src/python/pants/build_graph/target.py#L174-L183
def target_base(self): """ :API: public :returns: the source root path for this target. """ source_root = self._sources_field.source_root if not source_root: raise TargetDefinitionException(self, 'Not under any configured source root.') return source_root.path
[ "def", "target_base", "(", "self", ")", ":", "source_root", "=", "self", ".", "_sources_field", ".", "source_root", "if", "not", "source_root", ":", "raise", "TargetDefinitionException", "(", "self", ",", "'Not under any configured source root.'", ")", "return", "source_root", ".", "path" ]
:API: public :returns: the source root path for this target.
[ ":", "API", ":", "public" ]
python
train
SUSE-Enceladus/ipa
ipa/scripts/cli_utils.py
https://github.com/SUSE-Enceladus/ipa/blob/0845eed0ea25a27dbb059ad1016105fa60002228/ipa/scripts/cli_utils.py#L91-L120
def echo_results(data, no_color, verbose=False): """Print test results in nagios style format.""" try: summary = data['summary'] except KeyError as error: click.secho( 'The results json is missing key: %s' % error, fg='red' ) sys.exit(1) if 'failed' in summary or 'error' in summary: fg = 'red' status = 'FAILED' else: fg = 'green' status = 'PASSED' results = '{} tests={}|pass={}|skip={}|fail={}|error={}'.format( status, str(summary.get('num_tests', 0)), str(summary.get('passed', 0)), str(summary.get('skipped', 0)), str(summary.get('failed', 0)), str(summary.get('error', 0)) ) echo_style(results, no_color, fg=fg) if verbose: echo_verbose_results(data, no_color)
[ "def", "echo_results", "(", "data", ",", "no_color", ",", "verbose", "=", "False", ")", ":", "try", ":", "summary", "=", "data", "[", "'summary'", "]", "except", "KeyError", "as", "error", ":", "click", ".", "secho", "(", "'The results json is missing key: %s'", "%", "error", ",", "fg", "=", "'red'", ")", "sys", ".", "exit", "(", "1", ")", "if", "'failed'", "in", "summary", "or", "'error'", "in", "summary", ":", "fg", "=", "'red'", "status", "=", "'FAILED'", "else", ":", "fg", "=", "'green'", "status", "=", "'PASSED'", "results", "=", "'{} tests={}|pass={}|skip={}|fail={}|error={}'", ".", "format", "(", "status", ",", "str", "(", "summary", ".", "get", "(", "'num_tests'", ",", "0", ")", ")", ",", "str", "(", "summary", ".", "get", "(", "'passed'", ",", "0", ")", ")", ",", "str", "(", "summary", ".", "get", "(", "'skipped'", ",", "0", ")", ")", ",", "str", "(", "summary", ".", "get", "(", "'failed'", ",", "0", ")", ")", ",", "str", "(", "summary", ".", "get", "(", "'error'", ",", "0", ")", ")", ")", "echo_style", "(", "results", ",", "no_color", ",", "fg", "=", "fg", ")", "if", "verbose", ":", "echo_verbose_results", "(", "data", ",", "no_color", ")" ]
Print test results in nagios style format.
[ "Print", "test", "results", "in", "nagios", "style", "format", "." ]
python
train
madmaze/pytesseract
src/pytesseract.py
https://github.com/madmaze/pytesseract/blob/dd7dffc227480e9de024e99a5e10e7664f42b2de/src/pytesseract.py#L281-L292
def get_tesseract_version(): ''' Returns LooseVersion object of the Tesseract version ''' try: return LooseVersion( subprocess.check_output( [tesseract_cmd, '--version'], stderr=subprocess.STDOUT ).decode('utf-8').split()[1].lstrip(string.printable[10:]) ) except OSError: raise TesseractNotFoundError()
[ "def", "get_tesseract_version", "(", ")", ":", "try", ":", "return", "LooseVersion", "(", "subprocess", ".", "check_output", "(", "[", "tesseract_cmd", ",", "'--version'", "]", ",", "stderr", "=", "subprocess", ".", "STDOUT", ")", ".", "decode", "(", "'utf-8'", ")", ".", "split", "(", ")", "[", "1", "]", ".", "lstrip", "(", "string", ".", "printable", "[", "10", ":", "]", ")", ")", "except", "OSError", ":", "raise", "TesseractNotFoundError", "(", ")" ]
Returns a LooseVersion object of the Tesseract version
[ "Returns", "a", "LooseVersion", "object", "of", "the", "Tesseract", "version" ]
python
train
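The lstrip(string.printable[10:]) trick above relies on string.printable beginning with the ten digits, so slicing from index 10 yields every printable character except 0-9; stripping that set removes any leading non-digit prefix from the version token. A small sketch with a hypothetical --version first line:

import string

raw = "tesseract v4.0.0"  # hypothetical output; real builds vary
version = raw.split()[1].lstrip(string.printable[10:])
assert version == "4.0.0"  # the leading non-digit 'v' is stripped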
BerkeleyAutomation/autolab_core
autolab_core/utils.py
https://github.com/BerkeleyAutomation/autolab_core/blob/8f3813f6401972868cc5e3981ba1b4382d4418d5/autolab_core/utils.py#L64-L102
def histogram(values, num_bins, bounds, normalized=True, plot=False, color='b'):
    """Generate a histogram plot.

    Parameters
    ----------
    values : :obj:`numpy.ndarray`
        An array of values to put in the histogram.

    num_bins : int
        The number of equal-width bins in the histogram.

    bounds : :obj:`tuple` of float
        Two floats - a min and a max - that define the lower and upper
        ranges of the histogram, respectively.

    normalized : bool
        If True, the bins will show the percentage of elements they
        contain rather than raw counts.

    plot : bool
        If True, this function uses pyplot to plot the histogram.

    color : :obj:`str`
        The color identifier for the plotted bins.

    Returns
    -------
    :obj:`tuple` of :obj:`numpy.ndarray`
        The values of the histogram and the bin edges as ndarrays.
    """
    hist, bins = np.histogram(values, bins=num_bins, range=bounds)
    width = (bins[1] - bins[0])
    if normalized:
        if np.sum(hist) > 0:
            hist = hist.astype(np.float32) / np.sum(hist)
    if plot:
        import matplotlib.pyplot as plt
        plt.bar(bins[:-1], hist, width=width, color=color)
    return hist, bins
[ "def", "histogram", "(", "values", ",", "num_bins", ",", "bounds", ",", "normalized", "=", "True", ",", "plot", "=", "False", ",", "color", "=", "'b'", ")", ":", "hist", ",", "bins", "=", "np", ".", "histogram", "(", "values", ",", "bins", "=", "num_bins", ",", "range", "=", "bounds", ")", "width", "=", "(", "bins", "[", "1", "]", "-", "bins", "[", "0", "]", ")", "if", "normalized", ":", "if", "np", ".", "sum", "(", "hist", ")", ">", "0", ":", "hist", "=", "hist", ".", "astype", "(", "np", ".", "float32", ")", "/", "np", ".", "sum", "(", "hist", ")", "if", "plot", ":", "import", "matplotlib", ".", "pyplot", "as", "plt", "plt", ".", "bar", "(", "bins", "[", ":", "-", "1", "]", ",", "hist", ",", "width", "=", "width", ",", "color", "=", "color", ")", "return", "hist", ",", "bins" ]
Generate a histogram plot.

    Parameters
    ----------
    values : :obj:`numpy.ndarray`
        An array of values to put in the histogram.

    num_bins : int
        The number of equal-width bins in the histogram.

    bounds : :obj:`tuple` of float
        Two floats - a min and a max - that define the lower and upper
        ranges of the histogram, respectively.

    normalized : bool
        If True, the bins will show the percentage of elements they
        contain rather than raw counts.

    plot : bool
        If True, this function uses pyplot to plot the histogram.

    color : :obj:`str`
        The color identifier for the plotted bins.

    Returns
    -------
    :obj:`tuple` of :obj:`numpy.ndarray`
        The values of the histogram and the bin edges as ndarrays.
[ "Generate", "a", "histogram", "plot", "." ]
python
train
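A minimal usage sketch for the histogram helper above, assuming it is importable; the sample data is arbitrary:

import numpy as np

values = np.random.randn(1000)  # 1000 standard-normal samples
hist, bins = histogram(values, num_bins=20, bounds=(-4.0, 4.0))
assert len(hist) == 20 and len(bins) == 21  # np.histogram returns n+1 edges
assert abs(hist.sum() - 1.0) < 1e-4         # normalized bins sum to one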
mabuchilab/QNET
src/qnet/algebra/core/abstract_algebra.py
https://github.com/mabuchilab/QNET/blob/cc20d26dad78691d34c67173e5cd67dcac94208a/src/qnet/algebra/core/abstract_algebra.py#L298-L320
def del_rules(cls, *names, attr=None):
    """Delete algebraic rules used by :meth:`create`

    Remove the rules with the given `names`, or all rules if no names
    are given

    Args:
        names (str): Names of rules to delete
        attr (None or str): Name of the class attribute from which to
            delete the rules. Cf. :meth:`add_rule`.

    Raises:
        KeyError: If any rule in `names` does not exist
        AttributeError: If invalid `attr`
    """
    if attr is None:
        attr = cls._rules_attr()
    if len(names) == 0:
        getattr(cls, attr)  # raise AttributeError if wrong attr
        setattr(cls, attr, OrderedDict())
    else:
        for name in names:
            del getattr(cls, attr)[name]
[ "def", "del_rules", "(", "cls", ",", "*", "names", ",", "attr", "=", "None", ")", ":", "if", "attr", "is", "None", ":", "attr", "=", "cls", ".", "_rules_attr", "(", ")", "if", "len", "(", "names", ")", "==", "0", ":", "getattr", "(", "cls", ",", "attr", ")", "# raise AttributeError if wrong attr", "setattr", "(", "cls", ",", "attr", ",", "OrderedDict", "(", ")", ")", "else", ":", "for", "name", "in", "names", ":", "del", "getattr", "(", "cls", ",", "attr", ")", "[", "name", "]" ]
Delete algebraic rules used by :meth:`create`

    Remove the rules with the given `names`, or all rules if no names
    are given

    Args:
        names (str): Names of rules to delete
        attr (None or str): Name of the class attribute from which to
            delete the rules. Cf. :meth:`add_rule`.

    Raises:
        KeyError: If any rule in `names` does not exist
        AttributeError: If invalid `attr`
[ "Delete", "algebraic", "rules", "used", "by", ":", "meth", ":", "create" ]
python
train
andrewsnowden/dota2py
dota2py/parser.py
https://github.com/andrewsnowden/dota2py/blob/67637f4b9c160ea90c11b7e81545baf350affa7a/dota2py/parser.py#L262-L280
def parse_game_event(self, event): """ So CSVCMsg_GameEventList is a list of all events that can happen. A game event has an eventid which maps to a type of event that happened """ if event.eventid in self.event_lookup: #Bash this into a nicer data format to work with event_type = self.event_lookup[event.eventid] ge = GameEvent(event_type.name) for i, key in enumerate(event.keys): key_type = event_type.keys[i] ge.keys[key_type.name] = getattr(key, KEY_DATA_TYPES[key.type]) self.debug("|==========> %s" % (ge, )) self.run_hooks(ge)
[ "def", "parse_game_event", "(", "self", ",", "event", ")", ":", "if", "event", ".", "eventid", "in", "self", ".", "event_lookup", ":", "#Bash this into a nicer data format to work with", "event_type", "=", "self", ".", "event_lookup", "[", "event", ".", "eventid", "]", "ge", "=", "GameEvent", "(", "event_type", ".", "name", ")", "for", "i", ",", "key", "in", "enumerate", "(", "event", ".", "keys", ")", ":", "key_type", "=", "event_type", ".", "keys", "[", "i", "]", "ge", ".", "keys", "[", "key_type", ".", "name", "]", "=", "getattr", "(", "key", ",", "KEY_DATA_TYPES", "[", "key", ".", "type", "]", ")", "self", ".", "debug", "(", "\"|==========> %s\"", "%", "(", "ge", ",", ")", ")", "self", ".", "run_hooks", "(", "ge", ")" ]
So CSVCMsg_GameEventList is a list of all events that can happen. A game event has an eventid which maps to a type of event that happened
[ "So", "CSVCMsg_GameEventList", "is", "a", "list", "of", "all", "events", "that", "can", "happen", ".", "A", "game", "event", "has", "an", "eventid", "which", "maps", "to", "a", "type", "of", "event", "that", "happened" ]
python
train
senaite/senaite.core
bika/lims/content/worksheet.py
https://github.com/senaite/senaite.core/blob/7602ce2ea2f9e81eb34e20ce17b98a3e70713f85/bika/lims/content/worksheet.py#L1166-L1174
def getQCAnalyses(self): """ Return the Quality Control analyses. :returns: a list of QC analyses :rtype: List of ReferenceAnalysis/DuplicateAnalysis """ qc_types = ['ReferenceAnalysis', 'DuplicateAnalysis'] analyses = self.getAnalyses() return [a for a in analyses if a.portal_type in qc_types]
[ "def", "getQCAnalyses", "(", "self", ")", ":", "qc_types", "=", "[", "'ReferenceAnalysis'", ",", "'DuplicateAnalysis'", "]", "analyses", "=", "self", ".", "getAnalyses", "(", ")", "return", "[", "a", "for", "a", "in", "analyses", "if", "a", ".", "portal_type", "in", "qc_types", "]" ]
Return the Quality Control analyses. :returns: a list of QC analyses :rtype: List of ReferenceAnalysis/DuplicateAnalysis
[ "Return", "the", "Quality", "Control", "analyses", ".", ":", "returns", ":", "a", "list", "of", "QC", "analyses", ":", "rtype", ":", "List", "of", "ReferenceAnalysis", "/", "DuplicateAnalysis" ]
python
train
HydrelioxGitHub/pybbox
pybbox/__init__.py
https://github.com/HydrelioxGitHub/pybbox/blob/bedcdccab5d18d36890ef8bf414845f2dec18b5c/pybbox/__init__.py#L245-L254
def get_down_used_bandwith(self):
    """
    Return a percentage of the currently used xdsl download bandwidth
    Instant measure, can be very different from one call to another
    :return: 0 no bandwidth is used, 100 all your bandwidth is used
    :rtype: int
    """
    ip_stats_up = self.get_ip_stats()['rx']
    percent = ip_stats_up['bandwidth']*100/ip_stats_up['maxBandwidth']
    return int(percent)
[ "def", "get_down_used_bandwith", "(", "self", ")", ":", "ip_stats_up", "=", "self", ".", "get_ip_stats", "(", ")", "[", "'rx'", "]", "percent", "=", "ip_stats_up", "[", "'bandwidth'", "]", "*", "100", "/", "ip_stats_up", "[", "'maxBandwidth'", "]", "return", "int", "(", "percent", ")" ]
Return a percentage of the current used xdsl download bandwith Instant measure, can be very different from one call to another :return: 0 no bandwith is used, 100 all your bandwith is used :rtype: int
[ "Return", "a", "percentage", "of", "the", "current", "used", "xdsl", "download", "bandwith", "Instant", "measure", "can", "be", "very", "different", "from", "one", "call", "to", "another", ":", "return", ":", "0", "no", "bandwith", "is", "used", "100", "all", "your", "bandwith", "is", "used", ":", "rtype", ":", "int" ]
python
train
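A worked example of the percentage arithmetic above, with hypothetical rx stats: 2,500 kbps in use out of a 10,000 kbps ceiling gives 25%.

rx = {'bandwidth': 2500, 'maxBandwidth': 10000}  # illustrative values
percent = rx['bandwidth'] * 100 / rx['maxBandwidth']
assert int(percent) == 25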
apache/incubator-superset
superset/utils/core.py
https://github.com/apache/incubator-superset/blob/ca2996c78f679260eb79c6008e276733df5fb653/superset/utils/core.py#L403-L423
def error_msg_from_exception(e):
    """Translate exception into error message

    Databases have different ways of handling exceptions. This function attempts
    to make sense of the exception object and construct a human readable
    sentence.

    TODO(bkyryliuk): parse the Presto error message from the connection
        created via create_engine.
    engine = create_engine('presto://localhost:3506/silver') - gives an e.message
    as the str(dict)
    presto.connect('localhost', port=3506, catalog='silver') - as a dict.
    The latter version is parsed correctly by this function.
    """
    msg = ''
    if hasattr(e, 'message'):
        if isinstance(e.message, dict):
            msg = e.message.get('message')
        elif e.message:
            msg = '{}'.format(e.message)
    return msg or '{}'.format(e)
[ "def", "error_msg_from_exception", "(", "e", ")", ":", "msg", "=", "''", "if", "hasattr", "(", "e", ",", "'message'", ")", ":", "if", "isinstance", "(", "e", ".", "message", ",", "dict", ")", ":", "msg", "=", "e", ".", "message", ".", "get", "(", "'message'", ")", "elif", "e", ".", "message", ":", "msg", "=", "'{}'", ".", "format", "(", "e", ".", "message", ")", "return", "msg", "or", "'{}'", ".", "format", "(", "e", ")" ]
Translate exception into error message Database have different ways to handle exception. This function attempts to make sense of the exception object and construct a human readable sentence. TODO(bkyryliuk): parse the Presto error message from the connection created via create_engine. engine = create_engine('presto://localhost:3506/silver') - gives an e.message as the str(dict) presto.connect('localhost', port=3506, catalog='silver') - as a dict. The latter version is parsed correctly by this function.
[ "Translate", "exception", "into", "error", "message" ]
python
train
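A sketch of the three cases the helper above distinguishes, under Python 3 with a hypothetical driver exception that attaches .message itself:

class DBError(Exception):
    def __init__(self, message):
        self.message = message  # some DB drivers set .message themselves

assert error_msg_from_exception(DBError({'message': 'no such table'})) == 'no such table'
assert error_msg_from_exception(DBError('timeout')) == 'timeout'
assert error_msg_from_exception(ValueError('bad value')) == 'bad value'  # falls back to str(e)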
SheffieldML/GPy
GPy/core/symbolic.py
https://github.com/SheffieldML/GPy/blob/54c32d79d289d622fb18b898aee65a2a431d90cf/GPy/core/symbolic.py#L371-L377
def _print_code(self, code): """Prepare code for string writing.""" # This needs a rewrite --- it doesn't check for match clashes! So sub11 would be replaced by sub1 before being replaced with sub11!! for key in self.variables.keys(): for arg in self.variables[key]: code = code.replace(arg.name, 'self.'+arg.name) return code
[ "def", "_print_code", "(", "self", ",", "code", ")", ":", "# This needs a rewrite --- it doesn't check for match clashes! So sub11 would be replaced by sub1 before being replaced with sub11!!", "for", "key", "in", "self", ".", "variables", ".", "keys", "(", ")", ":", "for", "arg", "in", "self", ".", "variables", "[", "key", "]", ":", "code", "=", "code", ".", "replace", "(", "arg", ".", "name", ",", "'self.'", "+", "arg", ".", "name", ")", "return", "code" ]
Prepare code for string writing.
[ "Prepare", "code", "for", "string", "writing", "." ]
python
train
tethysplatform/condorpy
condorpy/job.py
https://github.com/tethysplatform/condorpy/blob/a5aaaef0d73198f7d9756dda7abe98b4e209f1f4/condorpy/job.py#L173-L180
def job_file(self): """The path to the submit description file representing this job. """ job_file_name = '%s.job' % (self.name) job_file_path = os.path.join(self.initial_dir, job_file_name) self._job_file = job_file_path return self._job_file
[ "def", "job_file", "(", "self", ")", ":", "job_file_name", "=", "'%s.job'", "%", "(", "self", ".", "name", ")", "job_file_path", "=", "os", ".", "path", ".", "join", "(", "self", ".", "initial_dir", ",", "job_file_name", ")", "self", ".", "_job_file", "=", "job_file_path", "return", "self", ".", "_job_file" ]
The path to the submit description file representing this job.
[ "The", "path", "to", "the", "submit", "description", "file", "representing", "this", "job", "." ]
python
train
pgmpy/pgmpy
pgmpy/models/MarkovChain.py
https://github.com/pgmpy/pgmpy/blob/9381a66aba3c3871d3ccd00672b148d17d63239e/pgmpy/models/MarkovChain.py#L236-L294
def sample(self, start_state=None, size=1): """ Sample from the Markov Chain. Parameters: ----------- start_state: dict or array-like iterable Representing the starting states of the variables. If None is passed, a random start_state is chosen. size: int Number of samples to be generated. Return Type: ------------ pandas.DataFrame Examples: --------- >>> from pgmpy.models import MarkovChain as MC >>> from pgmpy.factors.discrete import State >>> model = MC(['intel', 'diff'], [2, 3]) >>> model.set_start_state([State('intel', 0), State('diff', 2)]) >>> intel_tm = {0: {0: 0.25, 1: 0.75}, 1: {0: 0.5, 1: 0.5}} >>> model.add_transition_model('intel', intel_tm) >>> diff_tm = {0: {0: 0.1, 1: 0.5, 2: 0.4}, 1: {0: 0.2, 1: 0.2, 2: 0.6 }, 2: {0: 0.7, 1: 0.15, 2: 0.15}} >>> model.add_transition_model('diff', diff_tm) >>> model.sample(size=5) intel diff 0 0 2 1 1 0 2 0 1 3 1 0 4 0 2 """ if start_state is None: if self.state is None: self.state = self.random_state() # else use previously-set state else: self.set_start_state(start_state) sampled = DataFrame(index=range(size), columns=self.variables) sampled.loc[0] = [st for var, st in self.state] var_states = defaultdict(dict) var_values = defaultdict(dict) samples = defaultdict(dict) for var in self.transition_models.keys(): for st in self.transition_models[var]: var_states[var][st] = list(self.transition_models[var][st].keys()) var_values[var][st] = list(self.transition_models[var][st].values()) samples[var][st] = sample_discrete(var_states[var][st], var_values[var][st], size=size) for i in range(size - 1): for j, (var, st) in enumerate(self.state): next_st = samples[var][st][i] self.state[j] = State(var, next_st) sampled.loc[i + 1] = [st for var, st in self.state] return sampled
[ "def", "sample", "(", "self", ",", "start_state", "=", "None", ",", "size", "=", "1", ")", ":", "if", "start_state", "is", "None", ":", "if", "self", ".", "state", "is", "None", ":", "self", ".", "state", "=", "self", ".", "random_state", "(", ")", "# else use previously-set state", "else", ":", "self", ".", "set_start_state", "(", "start_state", ")", "sampled", "=", "DataFrame", "(", "index", "=", "range", "(", "size", ")", ",", "columns", "=", "self", ".", "variables", ")", "sampled", ".", "loc", "[", "0", "]", "=", "[", "st", "for", "var", ",", "st", "in", "self", ".", "state", "]", "var_states", "=", "defaultdict", "(", "dict", ")", "var_values", "=", "defaultdict", "(", "dict", ")", "samples", "=", "defaultdict", "(", "dict", ")", "for", "var", "in", "self", ".", "transition_models", ".", "keys", "(", ")", ":", "for", "st", "in", "self", ".", "transition_models", "[", "var", "]", ":", "var_states", "[", "var", "]", "[", "st", "]", "=", "list", "(", "self", ".", "transition_models", "[", "var", "]", "[", "st", "]", ".", "keys", "(", ")", ")", "var_values", "[", "var", "]", "[", "st", "]", "=", "list", "(", "self", ".", "transition_models", "[", "var", "]", "[", "st", "]", ".", "values", "(", ")", ")", "samples", "[", "var", "]", "[", "st", "]", "=", "sample_discrete", "(", "var_states", "[", "var", "]", "[", "st", "]", ",", "var_values", "[", "var", "]", "[", "st", "]", ",", "size", "=", "size", ")", "for", "i", "in", "range", "(", "size", "-", "1", ")", ":", "for", "j", ",", "(", "var", ",", "st", ")", "in", "enumerate", "(", "self", ".", "state", ")", ":", "next_st", "=", "samples", "[", "var", "]", "[", "st", "]", "[", "i", "]", "self", ".", "state", "[", "j", "]", "=", "State", "(", "var", ",", "next_st", ")", "sampled", ".", "loc", "[", "i", "+", "1", "]", "=", "[", "st", "for", "var", ",", "st", "in", "self", ".", "state", "]", "return", "sampled" ]
Sample from the Markov Chain. Parameters: ----------- start_state: dict or array-like iterable Representing the starting states of the variables. If None is passed, a random start_state is chosen. size: int Number of samples to be generated. Return Type: ------------ pandas.DataFrame Examples: --------- >>> from pgmpy.models import MarkovChain as MC >>> from pgmpy.factors.discrete import State >>> model = MC(['intel', 'diff'], [2, 3]) >>> model.set_start_state([State('intel', 0), State('diff', 2)]) >>> intel_tm = {0: {0: 0.25, 1: 0.75}, 1: {0: 0.5, 1: 0.5}} >>> model.add_transition_model('intel', intel_tm) >>> diff_tm = {0: {0: 0.1, 1: 0.5, 2: 0.4}, 1: {0: 0.2, 1: 0.2, 2: 0.6 }, 2: {0: 0.7, 1: 0.15, 2: 0.15}} >>> model.add_transition_model('diff', diff_tm) >>> model.sample(size=5) intel diff 0 0 2 1 1 0 2 0 1 3 1 0 4 0 2
[ "Sample", "from", "the", "Markov", "Chain", "." ]
python
train
pypa/setuptools
setuptools/command/build_py.py
https://github.com/pypa/setuptools/blob/83c667e0b2a98193851c07115d1af65011ed0fb6/setuptools/command/build_py.py#L220-L235
def _get_platform_patterns(spec, package, src_dir): """ yield platform-specific path patterns (suitable for glob or fn_match) from a glob-based spec (such as self.package_data or self.exclude_package_data) matching package in src_dir. """ raw_patterns = itertools.chain( spec.get('', []), spec.get(package, []), ) return ( # Each pattern has to be converted to a platform-specific path os.path.join(src_dir, convert_path(pattern)) for pattern in raw_patterns )
[ "def", "_get_platform_patterns", "(", "spec", ",", "package", ",", "src_dir", ")", ":", "raw_patterns", "=", "itertools", ".", "chain", "(", "spec", ".", "get", "(", "''", ",", "[", "]", ")", ",", "spec", ".", "get", "(", "package", ",", "[", "]", ")", ",", ")", "return", "(", "# Each pattern has to be converted to a platform-specific path", "os", ".", "path", ".", "join", "(", "src_dir", ",", "convert_path", "(", "pattern", ")", ")", "for", "pattern", "in", "raw_patterns", ")" ]
yield platform-specific path patterns (suitable for glob or fn_match) from a glob-based spec (such as self.package_data or self.exclude_package_data) matching package in src_dir.
[ "yield", "platform", "-", "specific", "path", "patterns", "(", "suitable", "for", "glob", "or", "fn_match", ")", "from", "a", "glob", "-", "based", "spec", "(", "such", "as", "self", ".", "package_data", "or", "self", ".", "exclude_package_data", ")", "matching", "package", "in", "src_dir", "." ]
python
train
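A usage sketch for the helper above, assuming it is importable; the spec is a hypothetical package_data mapping where the '' key applies to every package:

spec = {'': ['*.txt'], 'mypkg': ['data/*.json']}
patterns = list(_get_platform_patterns(spec, 'mypkg', src_dir='src/mypkg'))
assert patterns == ['src/mypkg/*.txt', 'src/mypkg/data/*.json']  # on POSIX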
molmod/molmod
molmod/transformations.py
https://github.com/molmod/molmod/blob/a7b5b4364ed514ad4c465856c05b5eda1cb561e0/molmod/transformations.py#L379-L383
def inv(self): """The inverse transformation""" result = Complete(self.r.transpose(), np.dot(self.r.transpose(), -self.t)) result._cache_inv = self return result
[ "def", "inv", "(", "self", ")", ":", "result", "=", "Complete", "(", "self", ".", "r", ".", "transpose", "(", ")", ",", "np", ".", "dot", "(", "self", ".", "r", ".", "transpose", "(", ")", ",", "-", "self", ".", "t", ")", ")", "result", ".", "_cache_inv", "=", "self", "return", "result" ]
The inverse transformation
[ "The", "inverse", "transformation" ]
python
train
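A numpy sanity check of the algebra above: for x' = R x + t the inverse is x = R.T x' + (-R.T t), so applying both should return the original point (values are arbitrary):

import numpy as np

theta = 0.3
r = np.array([[np.cos(theta), -np.sin(theta), 0.0],
              [np.sin(theta),  np.cos(theta), 0.0],
              [0.0,            0.0,           1.0]])  # rotation about z
t = np.array([1.0, 2.0, 3.0])
x = np.array([0.5, -1.0, 2.0])
forward = np.dot(r, x) + t
back = np.dot(r.T, forward) + np.dot(r.T, -t)
assert np.allclose(back, x)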
OCA/openupgradelib
openupgradelib/openupgrade_70.py
https://github.com/OCA/openupgradelib/blob/b220b6498075d62c1b64073cc934513a465cfd85/openupgradelib/openupgrade_70.py#L29-L53
def set_partner_id_from_partner_address_id( cr, pool, model_name, partner_field, address_field, table=None): """ Set the new partner_id on any table with migrated contact ids :param model_name: the model name of the target table :param partner_field: the column in the target model's table \ that will store the new partner when found :param address_field: the legacy field in the model's table \ that contains the old address in the model's table :param table: override the target model's table name in case it was renamed :returns: nothing """ model = pool.get(model_name) table = table or model._table openupgrade.logged_query( cr, """ UPDATE %(table)s SET %(partner_field)s=address.openupgrade_7_migrated_to_partner_id FROM res_partner_address address WHERE %(table)s.%(address_field)s=address.id """ % {'table': table, 'partner_field': partner_field, 'address_field': address_field})
[ "def", "set_partner_id_from_partner_address_id", "(", "cr", ",", "pool", ",", "model_name", ",", "partner_field", ",", "address_field", ",", "table", "=", "None", ")", ":", "model", "=", "pool", ".", "get", "(", "model_name", ")", "table", "=", "table", "or", "model", ".", "_table", "openupgrade", ".", "logged_query", "(", "cr", ",", "\"\"\"\n UPDATE %(table)s\n SET %(partner_field)s=address.openupgrade_7_migrated_to_partner_id\n FROM res_partner_address address\n WHERE %(table)s.%(address_field)s=address.id\n \"\"\"", "%", "{", "'table'", ":", "table", ",", "'partner_field'", ":", "partner_field", ",", "'address_field'", ":", "address_field", "}", ")" ]
Set the new partner_id on any table with migrated contact ids :param model_name: the model name of the target table :param partner_field: the column in the target model's table \ that will store the new partner when found :param address_field: the legacy field in the model's table \ that contains the old address in the model's table :param table: override the target model's table name in case it was renamed :returns: nothing
[ "Set", "the", "new", "partner_id", "on", "any", "table", "with", "migrated", "contact", "ids" ]
python
train
chrisspen/burlap
burlap/dj.py
https://github.com/chrisspen/burlap/blob/a92b0a8e5206850bb777c74af8421ea8b33779bd/burlap/dj.py#L430-L444
def manage_all(self, *args, **kwargs): """ Runs manage() across all unique site default databases. """ for site, site_data in self.iter_unique_databases(site='all'): if self.verbose: print('-'*80, file=sys.stderr) print('site:', site, file=sys.stderr) if self.env.available_sites_by_host: hostname = self.current_hostname sites_on_host = self.env.available_sites_by_host.get(hostname, []) if sites_on_host and site not in sites_on_host: self.vprint('skipping site:', site, sites_on_host, file=sys.stderr) continue self.manage(*args, **kwargs)
[ "def", "manage_all", "(", "self", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "for", "site", ",", "site_data", "in", "self", ".", "iter_unique_databases", "(", "site", "=", "'all'", ")", ":", "if", "self", ".", "verbose", ":", "print", "(", "'-'", "*", "80", ",", "file", "=", "sys", ".", "stderr", ")", "print", "(", "'site:'", ",", "site", ",", "file", "=", "sys", ".", "stderr", ")", "if", "self", ".", "env", ".", "available_sites_by_host", ":", "hostname", "=", "self", ".", "current_hostname", "sites_on_host", "=", "self", ".", "env", ".", "available_sites_by_host", ".", "get", "(", "hostname", ",", "[", "]", ")", "if", "sites_on_host", "and", "site", "not", "in", "sites_on_host", ":", "self", ".", "vprint", "(", "'skipping site:'", ",", "site", ",", "sites_on_host", ",", "file", "=", "sys", ".", "stderr", ")", "continue", "self", ".", "manage", "(", "*", "args", ",", "*", "*", "kwargs", ")" ]
Runs manage() across all unique site default databases.
[ "Runs", "manage", "()", "across", "all", "unique", "site", "default", "databases", "." ]
python
valid
nerdvegas/rez
src/rez/resolved_context.py
https://github.com/nerdvegas/rez/blob/1d3b846d53b5b5404edfe8ddb9083f9ceec8c5e7/src/rez/resolved_context.py#L558-L563
def load(cls, path): """Load a resolved context from file.""" with open(path) as f: context = cls.read_from_buffer(f, path) context.set_load_path(path) return context
[ "def", "load", "(", "cls", ",", "path", ")", ":", "with", "open", "(", "path", ")", "as", "f", ":", "context", "=", "cls", ".", "read_from_buffer", "(", "f", ",", "path", ")", "context", ".", "set_load_path", "(", "path", ")", "return", "context" ]
Load a resolved context from file.
[ "Load", "a", "resolved", "context", "from", "file", "." ]
python
train
lmcinnes/umap
umap/rp_tree.py
https://github.com/lmcinnes/umap/blob/bbb01c03ba49f7bff8f77fd662d00e50d6686c77/umap/rp_tree.py#L596-L605
def max_sparse_hyperplane_size(tree):
    """Determine the largest number of non-zeros in a hyperplane entry"""
    if tree.is_leaf:
        return 0
    else:
        return max(
            tree.hyperplane.shape[1],
            max_sparse_hyperplane_size(tree.left_child),
            max_sparse_hyperplane_size(tree.right_child),
        )
[ "def", "max_sparse_hyperplane_size", "(", "tree", ")", ":", "if", "tree", ".", "is_leaf", ":", "return", "0", "else", ":", "return", "max", "(", "tree", ".", "hyperplane", ".", "shape", "[", "1", "]", ",", "max_sparse_hyperplane_size", "(", "tree", ".", "left_child", ")", ",", "max_sparse_hyperplane_size", "(", "tree", ".", "right_child", ")", ",", ")" ]
Determine the largest number of non-zeros in a hyperplane entry
[ "Determine", "the", "largest", "number", "of", "non-zeros", "in", "a", "hyperplane", "entry" ]
python
train
senaite/senaite.lims
src/senaite/lims/browser/bootstrap/views.py
https://github.com/senaite/senaite.lims/blob/3c7fc7b462321fb354c478c19b5c20f3014fa398/src/senaite/lims/browser/bootstrap/views.py#L83-L91
def getViewportValues(self, view=None): """Determine the value of the viewport meta-tag """ values = { 'width': 'device-width', 'initial-scale': '1.0', } return ','.join('%s=%s' % (k, v) for k, v in values.items())
[ "def", "getViewportValues", "(", "self", ",", "view", "=", "None", ")", ":", "values", "=", "{", "'width'", ":", "'device-width'", ",", "'initial-scale'", ":", "'1.0'", ",", "}", "return", "','", ".", "join", "(", "'%s=%s'", "%", "(", "k", ",", "v", ")", "for", "k", ",", "v", "in", "values", ".", "items", "(", ")", ")" ]
Determine the value of the viewport meta-tag
[ "Determine", "the", "value", "of", "the", "viewport", "meta", "-", "tag" ]
python
train
ff0000/scarlet
scarlet/cms/item.py
https://github.com/ff0000/scarlet/blob/6c37befd810916a2d7ffff2cdb2dab57bcb6d12e/scarlet/cms/item.py#L442-L455
def render(self, request, **kwargs):
    """
    Renders this view. Adds cancel_url to the context.
    If the request GET parameters contain 'popup' then
    the `render_type` is set to 'popup'.
    """

    if request.GET.get('popup'):
        self.render_type = 'popup'
        kwargs['popup'] = 1

    kwargs['cancel_url'] = self.get_cancel_url()
    if not self.object:
        kwargs['single_title'] = True
    return super(FormView, self).render(request, **kwargs)
[ "def", "render", "(", "self", ",", "request", ",", "*", "*", "kwargs", ")", ":", "if", "request", ".", "GET", ".", "get", "(", "'popup'", ")", ":", "self", ".", "render_type", "=", "'popup'", "kwargs", "[", "'popup'", "]", "=", "1", "kwargs", "[", "'cancel_url'", "]", "=", "self", ".", "get_cancel_url", "(", ")", "if", "not", "self", ".", "object", ":", "kwargs", "[", "'single_title'", "]", "=", "True", "return", "super", "(", "FormView", ",", "self", ")", ".", "render", "(", "request", ",", "*", "*", "kwargs", ")" ]
Renders this view. Adds cancel_url to the context.
    If the request GET parameters contain 'popup' then
    the `render_type` is set to 'popup'.
[ "Renders", "this", "view", ".", "Adds", "cancel_url", "to", "the", "context", ".", "If", "the", "request", "get", "parameters", "contains", "popup", "then", "the", "render_type", "is", "set", "to", "popup", "." ]
python
train
nickmckay/LiPD-utilities
Python/lipd/misc.py
https://github.com/nickmckay/LiPD-utilities/blob/5dab6bbeffc5effd68e3a6beaca6b76aa928e860/Python/lipd/misc.py#L522-L532
def print_filename(path): """ Print out lipd filename that is being read or written :param str path: all file metadata :return str: filename """ if os.path.basename(path): return os.path.basename(path) return path
[ "def", "print_filename", "(", "path", ")", ":", "if", "os", ".", "path", ".", "basename", "(", "path", ")", ":", "return", "os", ".", "path", ".", "basename", "(", "path", ")", "return", "path" ]
Print out lipd filename that is being read or written :param str path: all file metadata :return str: filename
[ "Print", "out", "lipd", "filename", "that", "is", "being", "read", "or", "written" ]
python
train
nfcpy/nfcpy
src/nfc/tag/tt3_sony.py
https://github.com/nfcpy/nfcpy/blob/6649146d1afdd5e82b2b6b1ea00aa58d50785117/src/nfc/tag/tt3_sony.py#L281-L323
def search_service_code(self, service_index):
    """Search for a service code that corresponds to an index.

    The Search Service Code command provides access to the iterable
    list of services and areas within the activated system. The
    *service_index* argument may be any value from 0 to 0xffff. As
    long as there is a service or area found for a given
    *service_index*, the information returned is a tuple with
    either one or two 16-bit integer elements. Two integers are
    returned for an area definition, the first is the area code and
    the second is the largest possible service index for the area.
    One integer, the service code, is returned for a service
    definition. The return value is :const:`None` if the
    *service_index* was not found.

    For example, to print all services and areas of the active
    system: ::

        for i in xrange(0x10000):
            area_or_service = tag.search_service_code(i)
            if area_or_service is None:
                break
            elif len(area_or_service) == 1:
                sc = area_or_service[0]
                print(nfc.tag.tt3.ServiceCode(sc >> 6, sc & 0x3f))
            elif len(area_or_service) == 2:
                area_code, area_last = area_or_service
                print("Area {0:04x}--{0:04x}".format(area_code, area_last))

    Command execution errors raise :exc:`~nfc.tag.TagCommandError`.

    """
    log.debug("search service code index {0}".format(service_index))

    # The maximum response time is given by the value of PMM[3].
    # Some cards (like RC-S860 with IC RC-S915) encode a value
    # that is too short, thus we use at least 2 ms.
    a, e = self.pmm[3] & 7, self.pmm[3] >> 6
    timeout = max(302E-6 * (a + 1) * 4**e, 0.002)

    data = pack("<H", service_index)
    data = self.send_cmd_recv_rsp(0x0A, data, timeout, check_status=False)

    if data != "\xFF\xFF":
        unpack_format = "<H" if len(data) == 2 else "<HH"
        return unpack(unpack_format, data)
[ "def", "search_service_code", "(", "self", ",", "service_index", ")", ":", "log", ".", "debug", "(", "\"search service code index {0}\"", ".", "format", "(", "service_index", ")", ")", "# The maximum response time is given by the value of PMM[3].", "# Some cards (like RC-S860 with IC RC-S915) encode a value", "# that is too short, thus we use at least 2 ms.", "a", ",", "e", "=", "self", ".", "pmm", "[", "3", "]", "&", "7", ",", "self", ".", "pmm", "[", "3", "]", ">>", "6", "timeout", "=", "max", "(", "302E-6", "*", "(", "a", "+", "1", ")", "*", "4", "**", "e", ",", "0.002", ")", "data", "=", "pack", "(", "\"<H\"", ",", "service_index", ")", "data", "=", "self", ".", "send_cmd_recv_rsp", "(", "0x0A", ",", "data", ",", "timeout", ",", "check_status", "=", "False", ")", "if", "data", "!=", "\"\\xFF\\xFF\"", ":", "unpack_format", "=", "\"<H\"", "if", "len", "(", "data", ")", "==", "2", "else", "\"<HH\"", "return", "unpack", "(", "unpack_format", ",", "data", ")" ]
Search for a service code that corresponds to an index. The Search Service Code command provides access to the iterable list of services and areas within the activated system. The *service_index* argument may be any value from 0 to 0xffff. As long as there is a service or area found for a given *service_index*, the information returned is a tuple with either one or two 16-bit integer elements. Two integers are returned for an area definition, the first is the area code and the second is the largest possible service index for the area. One integer, the service code, is returned for a service definition. The return value is :const:`None` if the *service_index* was not found. For example, to print all services and areas of the active system: :: for i in xrange(0x10000): area_or_service = tag.search_service_code(i) if area_or_service is None: break elif len(area_or_service) == 1: sc = area_or_service[0] print(nfc.tag.tt3.ServiceCode(sc >> 6, sc & 0x3f)) elif len(area_or_service) == 2: area_code, area_last = area_or_service print("Area {0:04x}--{0:04x}".format(area_code, area_last)) Command execution errors raise :exc:`~nfc.tag.TagCommandError`.
[ "Search", "for", "a", "service", "code", "that", "corresponds", "to", "an", "index", "." ]
python
train
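A worked instance of the timeout formula in the comments above, for a hypothetical PMM[3] byte of 0x87: a = 0x87 & 7 = 7 and e = 0x87 >> 6 = 2, so the computed timeout clears the 2 ms floor.

a, e = 0x87 & 7, 0x87 >> 6
timeout = max(302E-6 * (a + 1) * 4**e, 0.002)
assert abs(timeout - 0.038656) < 1e-9  # 302 us * 8 * 16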
tanghaibao/jcvi
jcvi/assembly/postprocess.py
https://github.com/tanghaibao/jcvi/blob/d2e31a77b6ade7f41f3b321febc2b4744d1cdeca/jcvi/assembly/postprocess.py#L149-L174
def dust(args): """ %prog dust assembly.fasta Remove low-complexity contigs within assembly. """ p = OptionParser(dust.__doc__) opts, args = p.parse_args(args) if len(args) != 1: sys.exit(not p.print_help()) fastafile, = args dustfastafile = fastafile.rsplit(".", 1)[0] + ".dust.fasta" if need_update(fastafile, dustfastafile): cmd = "dustmasker -in {0}".format(fastafile) cmd += " -out {0} -outfmt fasta".format(dustfastafile) sh(cmd) for name, seq in parse_fasta(dustfastafile): nlow = sum(1 for x in seq if x in "acgtnN") pctlow = nlow * 100. / len(seq) if pctlow < 98: continue #print "{0}\t{1:.1f}".format(name, pctlow) print(name)
[ "def", "dust", "(", "args", ")", ":", "p", "=", "OptionParser", "(", "dust", ".", "__doc__", ")", "opts", ",", "args", "=", "p", ".", "parse_args", "(", "args", ")", "if", "len", "(", "args", ")", "!=", "1", ":", "sys", ".", "exit", "(", "not", "p", ".", "print_help", "(", ")", ")", "fastafile", ",", "=", "args", "dustfastafile", "=", "fastafile", ".", "rsplit", "(", "\".\"", ",", "1", ")", "[", "0", "]", "+", "\".dust.fasta\"", "if", "need_update", "(", "fastafile", ",", "dustfastafile", ")", ":", "cmd", "=", "\"dustmasker -in {0}\"", ".", "format", "(", "fastafile", ")", "cmd", "+=", "\" -out {0} -outfmt fasta\"", ".", "format", "(", "dustfastafile", ")", "sh", "(", "cmd", ")", "for", "name", ",", "seq", "in", "parse_fasta", "(", "dustfastafile", ")", ":", "nlow", "=", "sum", "(", "1", "for", "x", "in", "seq", "if", "x", "in", "\"acgtnN\"", ")", "pctlow", "=", "nlow", "*", "100.", "/", "len", "(", "seq", ")", "if", "pctlow", "<", "98", ":", "continue", "#print \"{0}\\t{1:.1f}\".format(name, pctlow)", "print", "(", "name", ")" ]
%prog dust assembly.fasta Remove low-complexity contigs within assembly.
[ "%prog", "dust", "assembly", ".", "fasta" ]
python
train
SHTOOLS/SHTOOLS
pyshtools/shclasses/shcoeffsgrid.py
https://github.com/SHTOOLS/SHTOOLS/blob/9a115cf83002df2ddec6b7f41aeb6be688e285de/pyshtools/shclasses/shcoeffsgrid.py#L3038-L3113
def _plot(self, xticks=[], yticks=[], minor_xticks=[], minor_yticks=[],
          xlabel='Longitude', ylabel='Latitude', ax=None, ax2=None,
          colorbar=None, cb_label=None, cb_orientation=None,
          grid=False, axes_labelsize=None, tick_labelsize=None, **kwargs):
    """Plot the raw data using a simple cylindrical projection."""
    if ax is None:
        if colorbar is True:
            if cb_orientation == 'horizontal':
                scale = 1.5
            else:
                scale = 1.1
        else:
            scale = 1.2
        figsize = (_mpl.rcParams['figure.figsize'][0],
                   _mpl.rcParams['figure.figsize'][0]*scale)
        fig, axes = _plt.subplots(2, 1, figsize=figsize)
        axreal = axes.flat[0]
        axcomplex = axes.flat[1]
    else:
        axreal = ax
        axcomplex = ax2

    deg = '$^{\circ}$'
    xticklabels = [str(int(y)) + deg for y in xticks]
    yticklabels = [str(int(y)) + deg for y in yticks]

    cim1 = axreal.imshow(self.data.real, origin='upper',
                         extent=(0., 360., -90., 90.), **kwargs)
    axreal.set(title='Real component',
               xticks=xticks, yticks=yticks)
    axreal.set_xlabel(xlabel, fontsize=axes_labelsize)
    axreal.set_ylabel(ylabel, fontsize=axes_labelsize)
    axreal.set_xticklabels(xticklabels, fontsize=tick_labelsize)
    axreal.set_yticklabels(yticklabels, fontsize=tick_labelsize)
    axreal.set_xticks(minor_xticks, minor=True)
    axreal.set_yticks(minor_yticks, minor=True)
    axreal.grid(grid, which='major')

    cim2 = axcomplex.imshow(self.data.imag, origin='upper',
                            extent=(0., 360., -90., 90.), **kwargs)
    axcomplex.set(title='Imaginary component',
                  xticks=xticks, yticks=yticks)
    axcomplex.set_xlabel(xlabel, fontsize=axes_labelsize)
    axcomplex.set_ylabel(ylabel, fontsize=axes_labelsize)
    axcomplex.set_xticklabels(xticklabels, fontsize=tick_labelsize)
    axcomplex.set_yticklabels(yticklabels, fontsize=tick_labelsize)
    axcomplex.set_xticks(minor_xticks, minor=True)
    axcomplex.set_yticks(minor_yticks, minor=True)
    axcomplex.grid(grid, which='major')

    if colorbar is True:
        if cb_orientation == 'vertical':
            divider1 = _make_axes_locatable(axreal)
            cax1 = divider1.append_axes("right", size="2.5%", pad=0.05)
            cbar1 = _plt.colorbar(cim1, cax=cax1,
                                  orientation=cb_orientation)
            divider2 = _make_axes_locatable(axcomplex)
            cax2 = divider2.append_axes("right", size="2.5%", pad=0.05)
            cbar2 = _plt.colorbar(cim2, cax=cax2,
                                  orientation=cb_orientation)
        else:
            divider1 = _make_axes_locatable(axreal)
            cax1 = divider1.append_axes("bottom", size="5%", pad=0.5)
            cbar1 = _plt.colorbar(cim1, cax=cax1,
                                  orientation=cb_orientation)
            divider2 = _make_axes_locatable(axcomplex)
            cax2 = divider2.append_axes("bottom", size="5%", pad=0.5)
            cbar2 = _plt.colorbar(cim2, cax=cax2,
                                  orientation=cb_orientation)
        if cb_label is not None:
            cbar1.set_label(cb_label, fontsize=axes_labelsize)
            cbar2.set_label(cb_label, fontsize=axes_labelsize)
        cbar1.ax.tick_params(labelsize=tick_labelsize)
        cbar2.ax.tick_params(labelsize=tick_labelsize)

    if ax is None:
        return fig, axes
[ "def", "_plot", "(", "self", ",", "xticks", "=", "[", "]", ",", "yticks", "=", "[", "]", ",", "minor_xticks", "=", "[", "]", ",", "minor_yticks", "=", "[", "]", ",", "xlabel", "=", "'Longitude'", ",", "ylabel", "=", "'Latitude'", ",", "ax", "=", "None", ",", "ax2", "=", "None", ",", "colorbar", "=", "None", ",", "cb_label", "=", "None", ",", "cb_orientation", "=", "None", ",", "grid", "=", "False", ",", "axes_labelsize", "=", "None", ",", "tick_labelsize", "=", "None", ",", "*", "*", "kwargs", ")", ":", "if", "ax", "is", "None", ":", "if", "colorbar", "is", "True", ":", "if", "cb_orientation", "==", "'horizontal'", ":", "scale", "=", "1.5", "else", ":", "scale", "=", "1.1", "else", ":", "scale", "=", "1.2", "figsize", "=", "(", "_mpl", ".", "rcParams", "[", "'figure.figsize'", "]", "[", "0", "]", ",", "_mpl", ".", "rcParams", "[", "'figure.figsize'", "]", "[", "0", "]", "*", "scale", ")", "fig", ",", "axes", "=", "_plt", ".", "subplots", "(", "2", ",", "1", ",", "figsize", "=", "figsize", ")", "axreal", "=", "axes", ".", "flat", "[", "0", "]", "axcomplex", "=", "axes", ".", "flat", "[", "1", "]", "else", ":", "axreal", "=", "ax", "axcomplex", "=", "ax2", "deg", "=", "'$^{\\circ}$'", "xticklabels", "=", "[", "str", "(", "int", "(", "y", ")", ")", "+", "deg", "for", "y", "in", "xticks", "]", "yticklabels", "=", "[", "str", "(", "int", "(", "y", ")", ")", "+", "deg", "for", "y", "in", "yticks", "]", "cim1", "=", "axreal", ".", "imshow", "(", "self", ".", "data", ".", "real", ",", "origin", "=", "'upper'", ",", "extent", "=", "(", "0.", ",", "360.", ",", "-", "90.", ",", "90.", ")", ",", "*", "*", "kwargs", ")", "axreal", ".", "set", "(", "title", "=", "'Real component'", ",", "xticks", "=", "xticks", ",", "yticks", "=", "yticks", ")", "axreal", ".", "set_xlabel", "(", "xlabel", ",", "fontsize", "=", "axes_labelsize", ")", "axreal", ".", "set_ylabel", "(", "ylabel", ",", "fontsize", "=", "axes_labelsize", ")", "axreal", ".", "set_xticklabels", "(", "xticklabels", ",", "fontsize", "=", "tick_labelsize", ")", "axreal", ".", "set_yticklabels", "(", "yticklabels", ",", "fontsize", "=", "tick_labelsize", ")", "axreal", ".", "set_xticks", "(", "minor_xticks", ",", "minor", "=", "True", ")", "axreal", ".", "set_yticks", "(", "minor_yticks", ",", "minor", "=", "True", ")", "axreal", ".", "grid", "(", "grid", ",", "which", "=", "'major'", ")", "cim2", "=", "axcomplex", ".", "imshow", "(", "self", ".", "data", ".", "imag", ",", "origin", "=", "'upper'", ",", "extent", "=", "(", "0.", ",", "360.", ",", "-", "90.", ",", "90.", ")", ",", "*", "*", "kwargs", ")", "axcomplex", ".", "set", "(", "title", "=", "'Imaginary component'", ",", "xticks", "=", "xticks", ",", "yticks", "=", "yticks", ")", "axcomplex", ".", "set_xlabel", "(", "xlabel", ",", "fontsize", "=", "axes_labelsize", ")", "axcomplex", ".", "set_ylabel", "(", "ylabel", ",", "fontsize", "=", "axes_labelsize", ")", "axcomplex", ".", "set_xticklabels", "(", "xticklabels", ",", "fontsize", "=", "tick_labelsize", ")", "axcomplex", ".", "set_yticklabels", "(", "yticklabels", ",", "fontsize", "=", "tick_labelsize", ")", "axcomplex", ".", "set_xticks", "(", "minor_xticks", ",", "minor", "=", "True", ")", "axcomplex", ".", "set_yticks", "(", "minor_yticks", ",", "minor", "=", "True", ")", "axcomplex", ".", "grid", "(", "grid", ",", "which", "=", "'major'", ")", "if", "colorbar", "is", "True", ":", "if", "cb_orientation", "==", "'vertical'", ":", "divider1", "=", "_make_axes_locatable", "(", "axreal", ")", "cax1", "=", "divider1", ".", "append_axes", "(", "\"right\"", ",", "size", "=", "\"2.5%\"", ",", "pad", "=", "0.05", ")", "cbar1", "=", "_plt", ".", "colorbar", "(", "cim1", ",", "cax", "=", "cax1", ",", "orientation", "=", "cb_orientation", ")", "divider2", "=", "_make_axes_locatable", "(", "axcomplex", ")", "cax2", "=", "divider2", ".", "append_axes", "(", "\"right\"", ",", "size", "=", "\"2.5%\"", ",", "pad", "=", "0.05", ")", "cbar2", "=", "_plt", ".", "colorbar", "(", "cim2", ",", "cax", "=", "cax2", ",", "orientation", "=", "cb_orientation", ")", "else", ":", "divider1", "=", "_make_axes_locatable", "(", "axreal", ")", "cax1", "=", "divider1", ".", "append_axes", "(", "\"bottom\"", ",", "size", "=", "\"5%\"", ",", "pad", "=", "0.5", ")", "cbar1", "=", "_plt", ".", "colorbar", "(", "cim1", ",", "cax", "=", "cax1", ",", "orientation", "=", "cb_orientation", ")", "divider2", "=", "_make_axes_locatable", "(", "axcomplex", ")", "cax2", "=", "divider2", ".", "append_axes", "(", "\"bottom\"", ",", "size", "=", "\"5%\"", ",", "pad", "=", "0.5", ")", "cbar2", "=", "_plt", ".", "colorbar", "(", "cim2", ",", "cax", "=", "cax2", ",", "orientation", "=", "cb_orientation", ")", "if", "cb_label", "is", "not", "None", ":", "cbar1", ".", "set_label", "(", "cb_label", ",", "fontsize", "=", "axes_labelsize", ")", "cbar2", ".", "set_label", "(", "cb_label", ",", "fontsize", "=", "axes_labelsize", ")", "cbar1", ".", "ax", ".", "tick_params", "(", "labelsize", "=", "tick_labelsize", ")", "cbar2", ".", "ax", ".", "tick_params", "(", "labelsize", "=", "tick_labelsize", ")", "if", "ax", "is", "None", ":", "return", "fig", ",", "axes" ]
Plot the raw data using a simple cylindrical projection.
[ "Plot", "the", "raw", "data", "using", "a", "simple", "cylindrical", "projection", "." ]
python
train
RedHatInsights/insights-core
insights/client/__init__.py
https://github.com/RedHatInsights/insights-core/blob/b57cbf8ed7c089672426ede0441e0a4f789ef4a1/insights/client/__init__.py#L134-L193
def _fetch(self, path, etag_file, target_path, force): """ returns (str): path to new egg. None if no update. """ url = self.connection.base_url + path # Searched for cached etag information current_etag = None if os.path.isfile(etag_file): with open(etag_file, 'r') as fp: current_etag = fp.read().strip() logger.debug('Found etag %s', current_etag) # Setup the new request for core retrieval logger.debug('Making request to %s for new core', url) # If the etag was found and we are not force fetching # Then add it to the request net_logger.info("GET %s", url) if current_etag and not force: logger.debug('Requesting new file with etag %s', current_etag) etag_headers = {'If-None-Match': current_etag} response = self.session.get(url, headers=etag_headers, timeout=self.config.http_timeout) else: logger.debug('Found no etag or forcing fetch') response = self.session.get(url, timeout=self.config.http_timeout) # Debug information logger.debug('Status code: %d', response.status_code) for header, value in response.headers.items(): logger.debug('%s: %s', header, value) # Debug the ETag logger.debug('ETag: %s', response.request.headers.get('If-None-Match')) # If data was received, write the new egg and etag if response.status_code == 200 and len(response.content) > 0: # Write the new core with open(target_path, 'wb') as handle: logger.debug('Data received, writing core to %s', target_path) handle.write(response.content) # Write the new etag with open(etag_file, 'w') as handle: logger.debug('Cacheing etag to %s', etag_file) handle.write(response.headers['etag']) return True # Received a 304 not modified # Return nothing elif response.status_code == 304: logger.debug('No data received') logger.debug('Tags match, not updating core') # Something unexpected received else: logger.debug('Received Code %s', response.status_code) logger.debug('Not writing new core, or updating etag') logger.debug('Please check config, error reaching %s', url)
[ "def", "_fetch", "(", "self", ",", "path", ",", "etag_file", ",", "target_path", ",", "force", ")", ":", "url", "=", "self", ".", "connection", ".", "base_url", "+", "path", "# Searched for cached etag information", "current_etag", "=", "None", "if", "os", ".", "path", ".", "isfile", "(", "etag_file", ")", ":", "with", "open", "(", "etag_file", ",", "'r'", ")", "as", "fp", ":", "current_etag", "=", "fp", ".", "read", "(", ")", ".", "strip", "(", ")", "logger", ".", "debug", "(", "'Found etag %s'", ",", "current_etag", ")", "# Setup the new request for core retrieval", "logger", ".", "debug", "(", "'Making request to %s for new core'", ",", "url", ")", "# If the etag was found and we are not force fetching", "# Then add it to the request", "net_logger", ".", "info", "(", "\"GET %s\"", ",", "url", ")", "if", "current_etag", "and", "not", "force", ":", "logger", ".", "debug", "(", "'Requesting new file with etag %s'", ",", "current_etag", ")", "etag_headers", "=", "{", "'If-None-Match'", ":", "current_etag", "}", "response", "=", "self", ".", "session", ".", "get", "(", "url", ",", "headers", "=", "etag_headers", ",", "timeout", "=", "self", ".", "config", ".", "http_timeout", ")", "else", ":", "logger", ".", "debug", "(", "'Found no etag or forcing fetch'", ")", "response", "=", "self", ".", "session", ".", "get", "(", "url", ",", "timeout", "=", "self", ".", "config", ".", "http_timeout", ")", "# Debug information", "logger", ".", "debug", "(", "'Status code: %d'", ",", "response", ".", "status_code", ")", "for", "header", ",", "value", "in", "response", ".", "headers", ".", "items", "(", ")", ":", "logger", ".", "debug", "(", "'%s: %s'", ",", "header", ",", "value", ")", "# Debug the ETag", "logger", ".", "debug", "(", "'ETag: %s'", ",", "response", ".", "request", ".", "headers", ".", "get", "(", "'If-None-Match'", ")", ")", "# If data was received, write the new egg and etag", "if", "response", ".", "status_code", "==", "200", "and", "len", "(", "response", ".", "content", ")", ">", "0", ":", "# Write the new core", "with", "open", "(", "target_path", ",", "'wb'", ")", "as", "handle", ":", "logger", ".", "debug", "(", "'Data received, writing core to %s'", ",", "target_path", ")", "handle", ".", "write", "(", "response", ".", "content", ")", "# Write the new etag", "with", "open", "(", "etag_file", ",", "'w'", ")", "as", "handle", ":", "logger", ".", "debug", "(", "'Cacheing etag to %s'", ",", "etag_file", ")", "handle", ".", "write", "(", "response", ".", "headers", "[", "'etag'", "]", ")", "return", "True", "# Received a 304 not modified", "# Return nothing", "elif", "response", ".", "status_code", "==", "304", ":", "logger", ".", "debug", "(", "'No data received'", ")", "logger", ".", "debug", "(", "'Tags match, not updating core'", ")", "# Something unexpected received", "else", ":", "logger", ".", "debug", "(", "'Received Code %s'", ",", "response", ".", "status_code", ")", "logger", ".", "debug", "(", "'Not writing new core, or updating etag'", ")", "logger", ".", "debug", "(", "'Please check config, error reaching %s'", ",", "url", ")" ]
returns (bool): True if a new egg was fetched, None if no update.
[ "returns", "(", "str", ")", ":", "path", "to", "new", "egg", ".", "None", "if", "no", "update", "." ]
python
train
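The conditional-GET flow in the record above is a generic HTTP pattern. A minimal standalone sketch using only `requests` (the function name, URL handling, and file paths here are illustrative, not the insights client API):

import os

import requests


def fetch_if_changed(url, etag_file, target_path, timeout=10):
    """Download url to target_path only when its ETag has changed."""
    headers = {}
    # Reuse the cached ETag, if any, to make the request conditional
    if os.path.isfile(etag_file):
        with open(etag_file) as fp:
            headers['If-None-Match'] = fp.read().strip()
    response = requests.get(url, headers=headers, timeout=timeout)
    if response.status_code == 304:
        return False  # server copy unchanged, nothing written
    response.raise_for_status()
    with open(target_path, 'wb') as handle:
        handle.write(response.content)
    # Cache the new ETag for the next call
    if 'etag' in response.headers:
        with open(etag_file, 'w') as handle:
            handle.write(response.headers['etag'])
    return True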
rgs1/zk_shell
zk_shell/xclient.py
https://github.com/rgs1/zk_shell/blob/bbf34fdfcf1f81100e2a5816fad8af6afc782a54/zk_shell/xclient.py#L463-L479
def dump_by_server(self, hosts):
    """Returns the output of dump for each server.

    :param hosts: comma-separated list of members of the ZK ensemble.
    :returns: A dictionary of ((server_ip, port), dump output).
    """
    dump_by_endpoint = {}

    for endpoint in self._to_endpoints(hosts):
        try:
            out = self.cmd([endpoint], "dump")
        except self.CmdFailed as ex:
            out = ""
        dump_by_endpoint[endpoint] = out

    return dump_by_endpoint
[ "def", "dump_by_server", "(", "self", ",", "hosts", ")", ":", "dump_by_endpoint", "=", "{", "}", "for", "endpoint", "in", "self", ".", "_to_endpoints", "(", "hosts", ")", ":", "try", ":", "out", "=", "self", ".", "cmd", "(", "[", "endpoint", "]", ",", "\"dump\"", ")", "except", "self", ".", "CmdFailed", "as", "ex", ":", "out", "=", "\"\"", "dump_by_endpoint", "[", "endpoint", "]", "=", "out", "return", "dump_by_endpoint" ]
Returns the output of dump for each server.

:param hosts: comma-separated list of members of the ZK ensemble.
:returns: A dictionary of ((server_ip, port), dump output).
[ "Returns", "the", "output", "of", "dump", "for", "each", "server", "." ]
python
train
tweepy/tweepy
tweepy/cache.py
https://github.com/tweepy/tweepy/blob/cc3894073905811c4d9fd816202f93454ed932da/tweepy/cache.py#L326-L340
def store(self, key, value):
    """Store the key, value pair in our redis server"""
    # Prepend tweepy to our key,
    # this makes it easier to identify tweepy keys in our redis server
    key = self.pre_identifier + key
    # Get a pipe (to execute several redis commands in one step)
    pipe = self.client.pipeline()
    # Store the pickled (timestamp, value) pair as a plain redis string
    pipe.set(key, pickle.dumps((time.time(), value)))
    # Set the expiration
    pipe.expire(key, self.timeout)
    # Add the key to a set containing all the keys
    pipe.sadd(self.keys_container, key)
    # Execute the instructions in the redis server
    pipe.execute()
[ "def", "store", "(", "self", ",", "key", ",", "value", ")", ":", "# Prepend tweepy to our key,", "# this makes it easier to identify tweepy keys in our redis server", "key", "=", "self", ".", "pre_identifier", "+", "key", "# Get a pipe (to execute several redis commands in one step)", "pipe", "=", "self", ".", "client", ".", "pipeline", "(", ")", "# Set our values in a redis hash (similar to python dict)", "pipe", ".", "set", "(", "key", ",", "pickle", ".", "dumps", "(", "(", "time", ".", "time", "(", ")", ",", "value", ")", ")", ")", "# Set the expiration", "pipe", ".", "expire", "(", "key", ",", "self", ".", "timeout", ")", "# Add the key to a set containing all the keys", "pipe", ".", "sadd", "(", "self", ".", "keys_container", ",", "key", ")", "# Execute the instructions in the redis server", "pipe", ".", "execute", "(", ")" ]
Store the key, value pair in our redis server
[ "Store", "the", "key", "value", "pair", "in", "our", "redis", "server" ]
python
train
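For context, a minimal sketch of the same pipeline pattern against a local redis server (assumes the `redis` package and a server on localhost; the key names and timeout are illustrative):

import pickle
import time

import redis

client = redis.Redis(host='localhost', port=6379)
key = 'tweepy:example'  # illustrative key
pipe = client.pipeline()
# Queue several commands so they are sent in a single round trip
pipe.set(key, pickle.dumps((time.time(), {'data': 1})))
pipe.expire(key, 60)           # expire the entry after 60 seconds
pipe.sadd('tweepy:keys', key)  # track the key in a set
pipe.execute()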
by46/simplekit
simplekit/docker/utils.py
https://github.com/by46/simplekit/blob/33f3ce6de33accc185e1057f096af41859db5976/simplekit/docker/utils.py#L48-L66
def parse_image_name(name):
    """
    parse the image name into a three-element tuple, like below:
    (repository, name, version)

    :param name: :class:`str`, name
    :return: (repository, name, version)
    """
    name = name or ""
    if '/' in name:
        repository, other = name.split('/')
    else:
        repository, other = None, name

    if ':' in other:
        name, version = other.split(':')
    else:
        name, version = other, 'latest'
    return repository, name, version
[ "def", "parse_image_name", "(", "name", ")", ":", "name", "=", "name", "or", "\"\"", "if", "'/'", "in", "name", ":", "repository", ",", "other", "=", "name", ".", "split", "(", "'/'", ")", "else", ":", "repository", ",", "other", "=", "None", ",", "name", "if", "':'", "in", "other", ":", "name", ",", "version", "=", "other", ".", "split", "(", "':'", ")", "else", ":", "name", ",", "version", "=", "other", ",", "'latest'", "return", "repository", ",", "name", ",", "version" ]
parse the image name into a three-element tuple, like below:
(repository, name, version)

:param name: :class:`str`, name
:return: (repository, name, version)
[ "parse", "the", "image", "name", "into", "three", "element", "tuple", "like", "below", ":", "(", "repository", "name", "version", ")", ":", "param", "name", ":", "class", ":", "str", "name", ":", "return", ":", "(", "repository", "name", "version", ")" ]
python
train
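The splitting logic above is plain string handling; exercised on a few sample names (standard library only, expected output in comments):

# Same splitting logic as parse_image_name, on a few sample names
for image in ("ubuntu", "ubuntu:18.04", "library/redis:5"):
    repository, other = image.split('/') if '/' in image else (None, image)
    name, version = other.split(':') if ':' in other else (other, 'latest')
    print(repository, name, version)
# None ubuntu latest
# None ubuntu 18.04
# library redis 5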
vijayvarma392/surfinBH
surfinBH/_lal_spin_evolution.py
https://github.com/vijayvarma392/surfinBH/blob/9f2d25d00f894ee2ce9ffbb02f4e4a41fa7989eb/surfinBH/_lal_spin_evolution.py#L158-L211
def evolve_pn_spins(q, chiA0, chiB0, omega0, omegaTimesM_final,
        approximant='SpinTaylorT4', dt=0.1, spinO=7, phaseO=7):
    """ Evolves PN spins from a starting orbital frequency and spins to a final
    frequency.

    Inputs:
        q:          Mass ratio (q>=1)
        chiA0:      Dimless spin of BhA at initial freq.
        chiB0:      Dimless spin of BhB at initial freq.
        omega0:     Initial orbital frequency in dimless units.
        omegaTimesM_final:  Final orbital frequency in dimless units.
        approximant:        'SpinTaylorT1/T2/T4'. Default: 'SpinTaylorT4'.
        dt:         Dimless step time for evolution. Default: 0.1 .
        spinO:      Twice PN order of spin effects. Default: 7 .
        phaseO:     Twice PN order in phase. Default: 7 .

    Outputs (all are time series):
        chiA_end_copr:  Spin of BhA at final frequency, in coprecessing frame.
        chiB_end_copr:  Spin of BhB at final frequency, in coprecessing frame.
        q_copr_end:     Coprecessing frame quaternion at final frequency.
        phi_end:        Orbital phase in the coprecessing frame at final
            frequency.
        omegaTimesM_end: Dimensionless final frequency. Should agree with
            omegaTimesM_final.

    The inertial frame is assumed to be aligned to the coorbital frame at
    orbital frequency = omega0. chiA0 and chiB0 are the inertial/coorbital
    frame spins at omega0.
    """
    omega, phi, chiA, chiB, lNhat, e1 = lal_spin_evloution_wrapper(approximant,
        q, omega0, chiA0, chiB0, dt, spinO, phaseO)

    # Compute omega, inertial spins, angular momentum direction and orbital
    # phase when omega = omegaTimesM_final
    end_idx = np.argmin(np.abs(omega - omegaTimesM_final))
    omegaTimesM_end = omega[end_idx]
    chiA_end = chiA[end_idx]
    chiB_end = chiB[end_idx]
    lNhat_end = lNhat[end_idx]
    phi_end = phi[end_idx]

    # Align the z-direction along orbital angular momentum direction
    # at end_idx. This moves us in to the coprecessing frame.
    q_copr_end = _utils.alignVec_quat(lNhat_end)
    chiA_end_copr = _utils.transformTimeDependentVector(
        np.array([q_copr_end]).T, np.array([chiA_end]).T, inverse=1).T[0]
    chiB_end_copr = _utils.transformTimeDependentVector(
        np.array([q_copr_end]).T, np.array([chiB_end]).T, inverse=1).T[0]

    return chiA_end_copr, chiB_end_copr, q_copr_end, phi_end, omegaTimesM_end
[ "def", "evolve_pn_spins", "(", "q", ",", "chiA0", ",", "chiB0", ",", "omega0", ",", "omegaTimesM_final", ",", "approximant", "=", "'SpinTaylorT4'", ",", "dt", "=", "0.1", ",", "spinO", "=", "7", ",", "phaseO", "=", "7", ")", ":", "omega", ",", "phi", ",", "chiA", ",", "chiB", ",", "lNhat", ",", "e1", "=", "lal_spin_evloution_wrapper", "(", "approximant", ",", "q", ",", "omega0", ",", "chiA0", ",", "chiB0", ",", "dt", ",", "spinO", ",", "phaseO", ")", "# Compute omega, inertial spins, angular momentum direction and orbital", "# phase when omega = omegaTimesM_final", "end_idx", "=", "np", ".", "argmin", "(", "np", ".", "abs", "(", "omega", "-", "omegaTimesM_final", ")", ")", "omegaTimesM_end", "=", "omega", "[", "end_idx", "]", "chiA_end", "=", "chiA", "[", "end_idx", "]", "chiB_end", "=", "chiB", "[", "end_idx", "]", "lNhat_end", "=", "lNhat", "[", "end_idx", "]", "phi_end", "=", "phi", "[", "end_idx", "]", "# Align the z-direction along orbital angular momentum direction", "# at end_idx. This moves us in to the coprecessing frame.", "q_copr_end", "=", "_utils", ".", "alignVec_quat", "(", "lNhat_end", ")", "chiA_end_copr", "=", "_utils", ".", "transformTimeDependentVector", "(", "np", ".", "array", "(", "[", "q_copr_end", "]", ")", ".", "T", ",", "np", ".", "array", "(", "[", "chiA_end", "]", ")", ".", "T", ",", "inverse", "=", "1", ")", ".", "T", "[", "0", "]", "chiB_end_copr", "=", "_utils", ".", "transformTimeDependentVector", "(", "np", ".", "array", "(", "[", "q_copr_end", "]", ")", ".", "T", ",", "np", ".", "array", "(", "[", "chiB_end", "]", ")", ".", "T", ",", "inverse", "=", "1", ")", ".", "T", "[", "0", "]", "return", "chiA_end_copr", ",", "chiB_end_copr", ",", "q_copr_end", ",", "phi_end", ",", "omegaTimesM_end" ]
Evolves PN spins from a starting orbital frequency and spins to a final
frequency.

Inputs:
    q:          Mass ratio (q>=1)
    chiA0:      Dimless spin of BhA at initial freq.
    chiB0:      Dimless spin of BhB at initial freq.
    omega0:     Initial orbital frequency in dimless units.
    omegaTimesM_final:  Final orbital frequency in dimless units.
    approximant:        'SpinTaylorT1/T2/T4'. Default: 'SpinTaylorT4'.
    dt:         Dimless step time for evolution. Default: 0.1 .
    spinO:      Twice PN order of spin effects. Default: 7 .
    phaseO:     Twice PN order in phase. Default: 7 .

Outputs (all are time series):
    chiA_end_copr:  Spin of BhA at final frequency, in coprecessing frame.
    chiB_end_copr:  Spin of BhB at final frequency, in coprecessing frame.
    q_copr_end:     Coprecessing frame quaternion at final frequency.
    phi_end:        Orbital phase in the coprecessing frame at final
        frequency.
    omegaTimesM_end: Dimensionless final frequency. Should agree with
        omegaTimesM_final.

The inertial frame is assumed to be aligned to the coorbital frame at
orbital frequency = omega0. chiA0 and chiB0 are the inertial/coorbital
frame spins at omega0.
[ "Evolves", "PN", "spins", "from", "a", "starting", "orbital", "frequency", "and", "spins", "to", "a", "final", "frequency", "." ]
python
train
kodexlab/reliure
reliure/utils/log.py
https://github.com/kodexlab/reliure/blob/0450c7a9254c5c003162738458bbe0c49e777ba5/reliure/utils/log.py#L20-L33
def get_basic_logger(level=logging.WARN, scope='reliure'):
    """ return a basic logger that prints reliure lib messages on stdout """
    logger = logging.getLogger(scope)
    logger.setLevel(level)
    # create console handler with a higher log level
    ch = logging.StreamHandler()
    ch.setLevel(level)
    # create formatter and add it to the handlers
    formatter = ColorFormatter('%(asctime)s:%(levelname)s:%(name)s:%(message)s')
    ch.setFormatter(formatter)
    # add the handlers to the logger
    logger.addHandler(ch)
    return logger
[ "def", "get_basic_logger", "(", "level", "=", "logging", ".", "WARN", ",", "scope", "=", "'reliure'", ")", ":", "logger", "=", "logging", ".", "getLogger", "(", "scope", ")", "logger", ".", "setLevel", "(", "level", ")", "# create console handler with a higher log level", "ch", "=", "logging", ".", "StreamHandler", "(", ")", "ch", ".", "setLevel", "(", "level", ")", "# create formatter and add it to the handlers", "formatter", "=", "ColorFormatter", "(", "'%(asctime)s:%(levelname)s:%(name)s:%(message)s'", ")", "ch", ".", "setFormatter", "(", "formatter", ")", "# add the handlers to the logger", "logger", ".", "addHandler", "(", "ch", ")", "return", "logger" ]
return a basic logger that prints reliure lib messages on stdout
[ "return", "a", "basic", "logger", "that", "print", "on", "stdout", "msg", "from", "reliure", "lib" ]
python
train
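A typical usage sketch for the helper above (the import path follows this record's `path` field; assumes reliure is installed):

import logging

from reliure.utils.log import get_basic_logger

logger = get_basic_logger(level=logging.DEBUG)
logger.debug("engine started")  # printed to stdout by the attached handler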
inasafe/inasafe
safe/metadata/base_metadata.py
https://github.com/inasafe/inasafe/blob/831d60abba919f6d481dc94a8d988cc205130724/safe/metadata/base_metadata.py#L531-L565
def set(self, name, value, xml_path): """ Create a new metadata property. The accepted type depends on the property type which is determined by the xml_path :param name: the name of the property :type name: str :param value: the value of the property :type value: :param xml_path: the xml path where the property should be stored. This is split on / and the last element is used to determine the property type :type xml_path: str """ xml_type = xml_path.split('/')[-1] # check if the desired type is supported try: property_class = TYPE_CONVERSIONS[xml_type] except KeyError: raise KeyError('The xml type %s is not supported yet' % xml_type) try: metadata_property = property_class(name, value, xml_path) self._properties[name] = metadata_property self.set_last_update_to_now() except TypeError: if self.reading_ancillary_files: # we are parsing files so we want to accept as much as # possible without raising exceptions pass else: raise
[ "def", "set", "(", "self", ",", "name", ",", "value", ",", "xml_path", ")", ":", "xml_type", "=", "xml_path", ".", "split", "(", "'/'", ")", "[", "-", "1", "]", "# check if the desired type is supported", "try", ":", "property_class", "=", "TYPE_CONVERSIONS", "[", "xml_type", "]", "except", "KeyError", ":", "raise", "KeyError", "(", "'The xml type %s is not supported yet'", "%", "xml_type", ")", "try", ":", "metadata_property", "=", "property_class", "(", "name", ",", "value", ",", "xml_path", ")", "self", ".", "_properties", "[", "name", "]", "=", "metadata_property", "self", ".", "set_last_update_to_now", "(", ")", "except", "TypeError", ":", "if", "self", ".", "reading_ancillary_files", ":", "# we are parsing files so we want to accept as much as", "# possible without raising exceptions", "pass", "else", ":", "raise" ]
Create a new metadata property. The accepted type depends on the property type which is determined by the xml_path :param name: the name of the property :type name: str :param value: the value of the property :type value: :param xml_path: the xml path where the property should be stored. This is split on / and the last element is used to determine the property type :type xml_path: str
[ "Create", "a", "new", "metadata", "property", "." ]
python
train
SuperCowPowers/bat
bat/utils/cache.py
https://github.com/SuperCowPowers/bat/blob/069e6bc52843dc07760969c531cc442ca7da8e0c/bat/utils/cache.py#L66-L75
def _check_limit(self):
    """Internal method: check if current cache size exceeds maximum cache
       size and pop the oldest item in this case"""

    # First compress
    self._compress()

    # Then check the max size
    if len(self._store) >= self._max_size:
        self._store.popitem(last=False)
[ "def", "_check_limit", "(", "self", ")", ":", "# First compress", "self", ".", "_compress", "(", ")", "# Then check the max size", "if", "len", "(", "self", ".", "_store", ")", ">=", "self", ".", "_max_size", ":", "self", ".", "_store", ".", "popitem", "(", "last", "=", "False", ")" ]
Internal method: check if current cache size exceeds maximum cache size and pop the oldest item in this case
[ "Intenal", "method", ":", "check", "if", "current", "cache", "size", "exceeds", "maximum", "cache", "size", "and", "pop", "the", "oldest", "item", "in", "this", "case" ]
python
train
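The eviction in the record above relies on `collections.OrderedDict` insertion order; a self-contained sketch of the same bounded-cache behaviour (class and attribute names are illustrative, not the bat API):

from collections import OrderedDict


class BoundedCache:
    def __init__(self, max_size=3):
        self._store = OrderedDict()
        self._max_size = max_size

    def set(self, key, value):
        self._store[key] = value
        # popitem(last=False) evicts the oldest inserted entry (FIFO)
        if len(self._store) > self._max_size:
            self._store.popitem(last=False)


cache = BoundedCache(max_size=2)
for k, v in [('a', 1), ('b', 2), ('c', 3)]:
    cache.set(k, v)
print(list(cache._store))  # ['b', 'c'] -- 'a' was evicted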
iotile/coretools
iotilebuild/iotile/build/config/scons-local-3.0.1/SCons/Tool/docbook/__init__.py
https://github.com/iotile/coretools/blob/2d794f5f1346b841b0dcd16c9d284e9bf2f3c6ec/iotilebuild/iotile/build/config/scons-local-3.0.1/SCons/Tool/docbook/__init__.py#L221-L272
def __xml_scan(node, env, path, arg): """ Simple XML file scanner, detecting local images and XIncludes as implicit dependencies. """ # Does the node exist yet? if not os.path.isfile(str(node)): return [] if env.get('DOCBOOK_SCANENT',''): # Use simple pattern matching for system entities..., no support # for recursion yet. contents = node.get_text_contents() return sentity_re.findall(contents) xsl_file = os.path.join(scriptpath,'utils','xmldepend.xsl') if not has_libxml2 or prefer_xsltproc: if has_lxml and not prefer_xsltproc: from lxml import etree xsl_tree = etree.parse(xsl_file) doc = etree.parse(str(node)) result = doc.xslt(xsl_tree) depfiles = [x.strip() for x in str(result).splitlines() if x.strip() != "" and not x.startswith("<?xml ")] return depfiles else: # Try to call xsltproc xsltproc = env.subst("$DOCBOOK_XSLTPROC") if xsltproc and xsltproc.endswith('xsltproc'): result = env.backtick(' '.join([xsltproc, xsl_file, str(node)])) depfiles = [x.strip() for x in str(result).splitlines() if x.strip() != "" and not x.startswith("<?xml ")] return depfiles else: # Use simple pattern matching, there is currently no support # for xi:includes... contents = node.get_text_contents() return include_re.findall(contents) styledoc = libxml2.parseFile(xsl_file) style = libxslt.parseStylesheetDoc(styledoc) doc = libxml2.readFile(str(node), None, libxml2.XML_PARSE_NOENT) result = style.applyStylesheet(doc, None) depfiles = [] for x in str(result).splitlines(): if x.strip() != "" and not x.startswith("<?xml "): depfiles.extend(x.strip().split()) style.freeStylesheet() doc.freeDoc() result.freeDoc() return depfiles
[ "def", "__xml_scan", "(", "node", ",", "env", ",", "path", ",", "arg", ")", ":", "# Does the node exist yet?", "if", "not", "os", ".", "path", ".", "isfile", "(", "str", "(", "node", ")", ")", ":", "return", "[", "]", "if", "env", ".", "get", "(", "'DOCBOOK_SCANENT'", ",", "''", ")", ":", "# Use simple pattern matching for system entities..., no support ", "# for recursion yet.", "contents", "=", "node", ".", "get_text_contents", "(", ")", "return", "sentity_re", ".", "findall", "(", "contents", ")", "xsl_file", "=", "os", ".", "path", ".", "join", "(", "scriptpath", ",", "'utils'", ",", "'xmldepend.xsl'", ")", "if", "not", "has_libxml2", "or", "prefer_xsltproc", ":", "if", "has_lxml", "and", "not", "prefer_xsltproc", ":", "from", "lxml", "import", "etree", "xsl_tree", "=", "etree", ".", "parse", "(", "xsl_file", ")", "doc", "=", "etree", ".", "parse", "(", "str", "(", "node", ")", ")", "result", "=", "doc", ".", "xslt", "(", "xsl_tree", ")", "depfiles", "=", "[", "x", ".", "strip", "(", ")", "for", "x", "in", "str", "(", "result", ")", ".", "splitlines", "(", ")", "if", "x", ".", "strip", "(", ")", "!=", "\"\"", "and", "not", "x", ".", "startswith", "(", "\"<?xml \"", ")", "]", "return", "depfiles", "else", ":", "# Try to call xsltproc", "xsltproc", "=", "env", ".", "subst", "(", "\"$DOCBOOK_XSLTPROC\"", ")", "if", "xsltproc", "and", "xsltproc", ".", "endswith", "(", "'xsltproc'", ")", ":", "result", "=", "env", ".", "backtick", "(", "' '", ".", "join", "(", "[", "xsltproc", ",", "xsl_file", ",", "str", "(", "node", ")", "]", ")", ")", "depfiles", "=", "[", "x", ".", "strip", "(", ")", "for", "x", "in", "str", "(", "result", ")", ".", "splitlines", "(", ")", "if", "x", ".", "strip", "(", ")", "!=", "\"\"", "and", "not", "x", ".", "startswith", "(", "\"<?xml \"", ")", "]", "return", "depfiles", "else", ":", "# Use simple pattern matching, there is currently no support", "# for xi:includes...", "contents", "=", "node", ".", "get_text_contents", "(", ")", "return", "include_re", ".", "findall", "(", "contents", ")", "styledoc", "=", "libxml2", ".", "parseFile", "(", "xsl_file", ")", "style", "=", "libxslt", ".", "parseStylesheetDoc", "(", "styledoc", ")", "doc", "=", "libxml2", ".", "readFile", "(", "str", "(", "node", ")", ",", "None", ",", "libxml2", ".", "XML_PARSE_NOENT", ")", "result", "=", "style", ".", "applyStylesheet", "(", "doc", ",", "None", ")", "depfiles", "=", "[", "]", "for", "x", "in", "str", "(", "result", ")", ".", "splitlines", "(", ")", ":", "if", "x", ".", "strip", "(", ")", "!=", "\"\"", "and", "not", "x", ".", "startswith", "(", "\"<?xml \"", ")", ":", "depfiles", ".", "extend", "(", "x", ".", "strip", "(", ")", ".", "split", "(", ")", ")", "style", ".", "freeStylesheet", "(", ")", "doc", ".", "freeDoc", "(", ")", "result", ".", "freeDoc", "(", ")", "return", "depfiles" ]
Simple XML file scanner, detecting local images and XIncludes as implicit dependencies.
[ "Simple", "XML", "file", "scanner", "detecting", "local", "images", "and", "XIncludes", "as", "implicit", "dependencies", "." ]
python
train
fredericklussier/TinyPeriodicTask
tinyPeriodicTask/TinyPeriodicTask.py
https://github.com/fredericklussier/TinyPeriodicTask/blob/be79e349bf6f73c1ba7576eb5acc6e812ffcfe36/tinyPeriodicTask/TinyPeriodicTask.py#L172-L179
def useThis(self, *args, **kwargs):
    """
    Change the parameters of the callback function.

    :param *args, **kwargs: parameter(s) to use when
        executing the callback function.
    """
    self._callback = functools.partial(self._callback, *args, **kwargs)
[ "def", "useThis", "(", "self", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "self", ".", "_callback", "=", "functools", ".", "partial", "(", "self", ".", "_callback", ",", "*", "args", ",", "*", "*", "kwargs", ")" ]
Change the parameters of the callback function.

:param *args, **kwargs: parameter(s) to use when
    executing the callback function.
[ "Change", "parameter", "of", "the", "callback", "function", "." ]
python
train
tensorflow/mesh
mesh_tensorflow/layers.py
https://github.com/tensorflow/mesh/blob/3921196e5e43302e820da0a87329f25d7e2a3016/mesh_tensorflow/layers.py#L963-L989
def attention_bias_local_block(mesh, block_length, memory_length, dtype=tf.int32): """Bias for attention for local blocks where attention to right is disallowed. Create the bias matrix by using two separate masks, one for the memory part which doesn't overlap with the query and second which interacts with the query and should be disallowed to look to the right of the current query position. Args: mesh: a MeshTensorflow object block_length: a mtf.Dimension memory_length: a mtf.Dimension dtype: a tf.dtype Returns: a mtf.Tensor with shape [block_length, memory_length] """ memory_length = mtf.Dimension(memory_length.name, block_length.size) memory_mask = mtf.zeros(mesh, [block_length, memory_length], dtype=dtype) mask = mtf.cast(mtf.less(mtf.range(mesh, block_length, dtype=dtype), mtf.range(mesh, memory_length, dtype=dtype)), dtype=dtype) mask = mtf.cast( mtf.concat([memory_mask, mask], memory_length.name), dtype=tf.float32) * -1e9 return mask
[ "def", "attention_bias_local_block", "(", "mesh", ",", "block_length", ",", "memory_length", ",", "dtype", "=", "tf", ".", "int32", ")", ":", "memory_length", "=", "mtf", ".", "Dimension", "(", "memory_length", ".", "name", ",", "block_length", ".", "size", ")", "memory_mask", "=", "mtf", ".", "zeros", "(", "mesh", ",", "[", "block_length", ",", "memory_length", "]", ",", "dtype", "=", "dtype", ")", "mask", "=", "mtf", ".", "cast", "(", "mtf", ".", "less", "(", "mtf", ".", "range", "(", "mesh", ",", "block_length", ",", "dtype", "=", "dtype", ")", ",", "mtf", ".", "range", "(", "mesh", ",", "memory_length", ",", "dtype", "=", "dtype", ")", ")", ",", "dtype", "=", "dtype", ")", "mask", "=", "mtf", ".", "cast", "(", "mtf", ".", "concat", "(", "[", "memory_mask", ",", "mask", "]", ",", "memory_length", ".", "name", ")", ",", "dtype", "=", "tf", ".", "float32", ")", "*", "-", "1e9", "return", "mask" ]
Bias for attention for local blocks where attention to right is disallowed. Create the bias matrix by using two separate masks, one for the memory part which doesn't overlap with the query and second which interacts with the query and should be disallowed to look to the right of the current query position. Args: mesh: a MeshTensorflow object block_length: a mtf.Dimension memory_length: a mtf.Dimension dtype: a tf.dtype Returns: a mtf.Tensor with shape [block_length, memory_length]
[ "Bias", "for", "attention", "for", "local", "blocks", "where", "attention", "to", "right", "is", "disallowed", "." ]
python
train
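The mask construction is easier to see in plain NumPy; a small sketch of the equivalent bias matrix (NumPy stands in for mesh-tensorflow here, with small shapes for readability):

import numpy as np

block_length, memory_length = 4, 4
# 1.0 where the key position lies to the right of the query position
right_of_query = (np.arange(block_length)[:, None]
                  < np.arange(memory_length)[None, :]).astype(float)
# Zero bias for the non-overlapping memory block, large negative
# bias for disallowed (future) positions in the overlapping block
memory_mask = np.zeros((block_length, memory_length))
bias = np.concatenate([memory_mask, right_of_query], axis=1) * -1e9
print(bias.shape)  # (4, 8)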
kstaniek/condoor
condoor/device.py
https://github.com/kstaniek/condoor/blob/77c054b29d4e286c1d7aca2c74dff86b805e1fae/condoor/device.py#L476-L480
def reload(self, reload_timeout, save_config, no_reload_cmd): """Reload device.""" if not no_reload_cmd: self.ctrl.send_command(self.driver.reload_cmd) return self.driver.reload(reload_timeout, save_config)
[ "def", "reload", "(", "self", ",", "reload_timeout", ",", "save_config", ",", "no_reload_cmd", ")", ":", "if", "not", "no_reload_cmd", ":", "self", ".", "ctrl", ".", "send_command", "(", "self", ".", "driver", ".", "reload_cmd", ")", "return", "self", ".", "driver", ".", "reload", "(", "reload_timeout", ",", "save_config", ")" ]
Reload device.
[ "Reload", "device", "." ]
python
train
aiogram/aiogram
aiogram/bot/bot.py
https://github.com/aiogram/aiogram/blob/2af930149ce2482547721e2c8755c10307295e48/aiogram/bot/bot.py#L809-L848
async def send_contact(self, chat_id: typing.Union[base.Integer, base.String], phone_number: base.String, first_name: base.String, last_name: typing.Union[base.String, None] = None, vcard: typing.Union[base.String, None] = None, disable_notification: typing.Union[base.Boolean, None] = None, reply_to_message_id: typing.Union[base.Integer, None] = None, reply_markup: typing.Union[types.InlineKeyboardMarkup, types.ReplyKeyboardMarkup, types.ReplyKeyboardRemove, types.ForceReply, None] = None) -> types.Message: """ Use this method to send phone contacts. Source: https://core.telegram.org/bots/api#sendcontact :param chat_id: Unique identifier for the target chat or username of the target channel :type chat_id: :obj:`typing.Union[base.Integer, base.String]` :param phone_number: Contact's phone number :type phone_number: :obj:`base.String` :param first_name: Contact's first name :type first_name: :obj:`base.String` :param last_name: Contact's last name :type last_name: :obj:`typing.Union[base.String, None]` :param vcard: vcard :type vcard: :obj:`typing.Union[base.String, None]` :param disable_notification: Sends the message silently. Users will receive a notification with no sound :type disable_notification: :obj:`typing.Union[base.Boolean, None]` :param reply_to_message_id: If the message is a reply, ID of the original message :type reply_to_message_id: :obj:`typing.Union[base.Integer, None]` :param reply_markup: Additional interface options :type reply_markup: :obj:`typing.Union[types.InlineKeyboardMarkup, types.ReplyKeyboardMarkup, types.ReplyKeyboardRemove, types.ForceReply, None]` :return: On success, the sent Message is returned :rtype: :obj:`types.Message` """ reply_markup = prepare_arg(reply_markup) payload = generate_payload(**locals()) result = await self.request(api.Methods.SEND_CONTACT, payload) return types.Message(**result)
[ "async", "def", "send_contact", "(", "self", ",", "chat_id", ":", "typing", ".", "Union", "[", "base", ".", "Integer", ",", "base", ".", "String", "]", ",", "phone_number", ":", "base", ".", "String", ",", "first_name", ":", "base", ".", "String", ",", "last_name", ":", "typing", ".", "Union", "[", "base", ".", "String", ",", "None", "]", "=", "None", ",", "vcard", ":", "typing", ".", "Union", "[", "base", ".", "String", ",", "None", "]", "=", "None", ",", "disable_notification", ":", "typing", ".", "Union", "[", "base", ".", "Boolean", ",", "None", "]", "=", "None", ",", "reply_to_message_id", ":", "typing", ".", "Union", "[", "base", ".", "Integer", ",", "None", "]", "=", "None", ",", "reply_markup", ":", "typing", ".", "Union", "[", "types", ".", "InlineKeyboardMarkup", ",", "types", ".", "ReplyKeyboardMarkup", ",", "types", ".", "ReplyKeyboardRemove", ",", "types", ".", "ForceReply", ",", "None", "]", "=", "None", ")", "->", "types", ".", "Message", ":", "reply_markup", "=", "prepare_arg", "(", "reply_markup", ")", "payload", "=", "generate_payload", "(", "*", "*", "locals", "(", ")", ")", "result", "=", "await", "self", ".", "request", "(", "api", ".", "Methods", ".", "SEND_CONTACT", ",", "payload", ")", "return", "types", ".", "Message", "(", "*", "*", "result", ")" ]
Use this method to send phone contacts. Source: https://core.telegram.org/bots/api#sendcontact :param chat_id: Unique identifier for the target chat or username of the target channel :type chat_id: :obj:`typing.Union[base.Integer, base.String]` :param phone_number: Contact's phone number :type phone_number: :obj:`base.String` :param first_name: Contact's first name :type first_name: :obj:`base.String` :param last_name: Contact's last name :type last_name: :obj:`typing.Union[base.String, None]` :param vcard: vcard :type vcard: :obj:`typing.Union[base.String, None]` :param disable_notification: Sends the message silently. Users will receive a notification with no sound :type disable_notification: :obj:`typing.Union[base.Boolean, None]` :param reply_to_message_id: If the message is a reply, ID of the original message :type reply_to_message_id: :obj:`typing.Union[base.Integer, None]` :param reply_markup: Additional interface options :type reply_markup: :obj:`typing.Union[types.InlineKeyboardMarkup, types.ReplyKeyboardMarkup, types.ReplyKeyboardRemove, types.ForceReply, None]` :return: On success, the sent Message is returned :rtype: :obj:`types.Message`
[ "Use", "this", "method", "to", "send", "phone", "contacts", "." ]
python
train
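A brief usage sketch for the method above (aiogram 2.x style; the token, chat id, and phone number are placeholders):

import asyncio

from aiogram import Bot


async def main():
    bot = Bot(token='BOT_TOKEN')  # placeholder token
    await bot.send_contact(chat_id=12345, phone_number='+10000000000',
                           first_name='Ada')

asyncio.run(main())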
StanfordBioinformatics/loom
utils/loomengine_utils/connection.py
https://github.com/StanfordBioinformatics/loom/blob/db2031a1a87124fee1aeb7414a668c03d774a698/utils/loomengine_utils/connection.py#L108-L134
def _make_request_to_server(self, query_function, raise_for_status=True, time_limit_seconds=2, retry_delay_seconds=0.2): """Retry sending request until timeout or until receiving a response. """ start_time = datetime.datetime.now() while datetime.datetime.now() - start_time < datetime.timedelta( 0, time_limit_seconds): error = None response = None try: response = query_function() except requests.exceptions.ConnectionError as e: error = ServerConnectionError( "No response from server.\n%s" % e) except: if response: logger.info(response.text) raise if response is not None and raise_for_status: # raises requests.exceptions.HTTPError self._raise_for_status(response) if error: time.sleep(retry_delay_seconds) continue else: return response raise error
[ "def", "_make_request_to_server", "(", "self", ",", "query_function", ",", "raise_for_status", "=", "True", ",", "time_limit_seconds", "=", "2", ",", "retry_delay_seconds", "=", "0.2", ")", ":", "start_time", "=", "datetime", ".", "datetime", ".", "now", "(", ")", "while", "datetime", ".", "datetime", ".", "now", "(", ")", "-", "start_time", "<", "datetime", ".", "timedelta", "(", "0", ",", "time_limit_seconds", ")", ":", "error", "=", "None", "response", "=", "None", "try", ":", "response", "=", "query_function", "(", ")", "except", "requests", ".", "exceptions", ".", "ConnectionError", "as", "e", ":", "error", "=", "ServerConnectionError", "(", "\"No response from server.\\n%s\"", "%", "e", ")", "except", ":", "if", "response", ":", "logger", ".", "info", "(", "response", ".", "text", ")", "raise", "if", "response", "is", "not", "None", "and", "raise_for_status", ":", "# raises requests.exceptions.HTTPError", "self", ".", "_raise_for_status", "(", "response", ")", "if", "error", ":", "time", ".", "sleep", "(", "retry_delay_seconds", ")", "continue", "else", ":", "return", "response", "raise", "error" ]
Retry sending request until timeout or until receiving a response.
[ "Retry", "sending", "request", "until", "timeout", "or", "until", "receiving", "a", "response", "." ]
python
train
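The retry-until-deadline structure above is a common idiom; a generic self-contained sketch (no loom imports, names illustrative) that also guards against the deadline expiring before the first attempt:

import time
from datetime import datetime, timedelta


def retry_until(query, time_limit_seconds=2.0, retry_delay_seconds=0.2):
    """Call query() until it succeeds or the deadline passes."""
    deadline = datetime.now() + timedelta(seconds=time_limit_seconds)
    error = None
    while datetime.now() < deadline:
        try:
            return query()
        except ConnectionError as exc:
            error = exc
            time.sleep(retry_delay_seconds)
    if error is None:
        raise TimeoutError('deadline passed before the first attempt')
    raise error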
nrcharles/caelum
caelum/forecast.py
https://github.com/nrcharles/caelum/blob/9a8e65806385978556d7bb2e6870f003ff82023e/caelum/forecast.py#L44-L55
def hourly(place): """return data as list of dicts with all data filled in.""" # time in utc? lat, lon = place url = "https://api.forecast.io/forecast/%s/%s,%s?solar" % (APIKEY, lat, lon) w_data = json.loads(urllib2.urlopen(url).read()) hourly_data = w_data['hourly']['data'] mangled = [] for i in hourly_data: mangled.append(mangle(i)) return mangled
[ "def", "hourly", "(", "place", ")", ":", "# time in utc?", "lat", ",", "lon", "=", "place", "url", "=", "\"https://api.forecast.io/forecast/%s/%s,%s?solar\"", "%", "(", "APIKEY", ",", "lat", ",", "lon", ")", "w_data", "=", "json", ".", "loads", "(", "urllib2", ".", "urlopen", "(", "url", ")", ".", "read", "(", ")", ")", "hourly_data", "=", "w_data", "[", "'hourly'", "]", "[", "'data'", "]", "mangled", "=", "[", "]", "for", "i", "in", "hourly_data", ":", "mangled", ".", "append", "(", "mangle", "(", "i", ")", ")", "return", "mangled" ]
return data as list of dicts with all data filled in.
[ "return", "data", "as", "list", "of", "dicts", "with", "all", "data", "filled", "in", "." ]
python
train
myint/rstcheck
rstcheck.py
https://github.com/myint/rstcheck/blob/2f975906b75f3b88d501ef3b13d213815cf7079a/rstcheck.py#L107-L117
def run(self): """Run directive.""" try: language = self.arguments[0] except IndexError: language = '' code = '\n'.join(self.content) literal = docutils.nodes.literal_block(code, code) literal['classes'].append('code-block') literal['language'] = language return [literal]
[ "def", "run", "(", "self", ")", ":", "try", ":", "language", "=", "self", ".", "arguments", "[", "0", "]", "except", "IndexError", ":", "language", "=", "''", "code", "=", "'\\n'", ".", "join", "(", "self", ".", "content", ")", "literal", "=", "docutils", ".", "nodes", ".", "literal_block", "(", "code", ",", "code", ")", "literal", "[", "'classes'", "]", ".", "append", "(", "'code-block'", ")", "literal", "[", "'language'", "]", "=", "language", "return", "[", "literal", "]" ]
Run directive.
[ "Run", "directive", "." ]
python
train
rambleraptor/amusement
amusement/parks/universal/UniversalPark.py
https://github.com/rambleraptor/amusement/blob/ec850035747a5b0549f9ea2ee4399bff035460be/amusement/parks/universal/UniversalPark.py#L34-L46
def _buildPark(self): token = self._get_token() ride_page = self._get_request(token, self.getUrl()) show_page = self._get_request(token, SHOW_URL) for ride in ride_page['Results']: if ride['VenueId'] == self.getId(): self._make_attraction(ride) """ for show in page['Shows']: if show['VenueId'] == self.getId(): self._make_show(show) """
[ "def", "_buildPark", "(", "self", ")", ":", "token", "=", "self", ".", "_get_token", "(", ")", "ride_page", "=", "self", ".", "_get_request", "(", "token", ",", "self", ".", "getUrl", "(", ")", ")", "show_page", "=", "self", ".", "_get_request", "(", "token", ",", "SHOW_URL", ")", "for", "ride", "in", "ride_page", "[", "'Results'", "]", ":", "if", "ride", "[", "'VenueId'", "]", "==", "self", ".", "getId", "(", ")", ":", "self", ".", "_make_attraction", "(", "ride", ")" ]
for show in page['Shows']: if show['VenueId'] == self.getId(): self._make_show(show)
[ "for", "show", "in", "page", "[", "Shows", "]", ":", "if", "show", "[", "VenueId", "]", "==", "self", ".", "getId", "()", ":", "self", ".", "_make_show", "(", "show", ")" ]
python
train
DarkEnergySurvey/ugali
ugali/utils/stats.py
https://github.com/DarkEnergySurvey/ugali/blob/21e890b4117fc810afb6fb058e8055d564f03382/ugali/utils/stats.py#L286-L292
def mean_interval(self, name, alpha=_alpha, **kwargs):
    """
    Interval assuming Gaussian posterior.
    """
    data = self.get(name,**kwargs)
    #return ugali.utils.stats.mean_interval(data,alpha)
    return mean_interval(data,alpha)
[ "def", "mean_interval", "(", "self", ",", "name", ",", "alpha", "=", "_alpha", ",", "*", "*", "kwargs", ")", ":", "data", "=", "self", ".", "get", "(", "name", ",", "*", "*", "kwargs", ")", "#return ugali.utils.stats.mean_interval(data,alpha)", "return", "mean_interval", "(", "data", ",", "alpha", ")" ]
Interval assuming Gaussian posterior.
[ "Interval", "assuming", "gaussian", "posterior", "." ]
python
train
StackStorm/pybind
pybind/nos/v7_2_0/event_handler/__init__.py
https://github.com/StackStorm/pybind/blob/44c467e71b2b425be63867aba6e6fa28b2cfe7fb/pybind/nos/v7_2_0/event_handler/__init__.py#L94-L115
def _set_event_handler_list(self, v, load=False): """ Setter method for event_handler_list, mapped from YANG variable /event_handler/event_handler_list (list) If this variable is read-only (config: false) in the source YANG file, then _set_event_handler_list is considered as a private method. Backends looking to populate this variable should do so via calling thisObj._set_event_handler_list() directly. """ if hasattr(v, "_utype"): v = v._utype(v) try: t = YANGDynClass(v,base=YANGListType("name",event_handler_list.event_handler_list, yang_name="event-handler-list", rest_name="event-handler-list", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='name', extensions={u'tailf-common': {u'info': u'Event handler configuration', u'cli-no-key-completion': None, u'cli-suppress-list-no': None, u'cli-drop-node-name': None, u'cli-suppress-key-abbreviation': None, u'cli-no-match-completion': None}}), is_container='list', yang_name="event-handler-list", rest_name="event-handler-list", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Event handler configuration', u'cli-no-key-completion': None, u'cli-suppress-list-no': None, u'cli-drop-node-name': None, u'cli-suppress-key-abbreviation': None, u'cli-no-match-completion': None}}, namespace='urn:brocade.com:mgmt:brocade-event-handler', defining_module='brocade-event-handler', yang_type='list', is_config=True) except (TypeError, ValueError): raise ValueError({ 'error-string': """event_handler_list must be of a type compatible with list""", 'defined-type': "list", 'generated-type': """YANGDynClass(base=YANGListType("name",event_handler_list.event_handler_list, yang_name="event-handler-list", rest_name="event-handler-list", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='name', extensions={u'tailf-common': {u'info': u'Event handler configuration', u'cli-no-key-completion': None, u'cli-suppress-list-no': None, u'cli-drop-node-name': None, u'cli-suppress-key-abbreviation': None, u'cli-no-match-completion': None}}), is_container='list', yang_name="event-handler-list", rest_name="event-handler-list", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Event handler configuration', u'cli-no-key-completion': None, u'cli-suppress-list-no': None, u'cli-drop-node-name': None, u'cli-suppress-key-abbreviation': None, u'cli-no-match-completion': None}}, namespace='urn:brocade.com:mgmt:brocade-event-handler', defining_module='brocade-event-handler', yang_type='list', is_config=True)""", }) self.__event_handler_list = t if hasattr(self, '_set'): self._set()
[ "def", "_set_event_handler_list", "(", "self", ",", "v", ",", "load", "=", "False", ")", ":", "if", "hasattr", "(", "v", ",", "\"_utype\"", ")", ":", "v", "=", "v", ".", "_utype", "(", "v", ")", "try", ":", "t", "=", "YANGDynClass", "(", "v", ",", "base", "=", "YANGListType", "(", "\"name\"", ",", "event_handler_list", ".", "event_handler_list", ",", "yang_name", "=", "\"event-handler-list\"", ",", "rest_name", "=", "\"event-handler-list\"", ",", "parent", "=", "self", ",", "is_container", "=", "'list'", ",", "user_ordered", "=", "False", ",", "path_helper", "=", "self", ".", "_path_helper", ",", "yang_keys", "=", "'name'", ",", "extensions", "=", "{", "u'tailf-common'", ":", "{", "u'info'", ":", "u'Event handler configuration'", ",", "u'cli-no-key-completion'", ":", "None", ",", "u'cli-suppress-list-no'", ":", "None", ",", "u'cli-drop-node-name'", ":", "None", ",", "u'cli-suppress-key-abbreviation'", ":", "None", ",", "u'cli-no-match-completion'", ":", "None", "}", "}", ")", ",", "is_container", "=", "'list'", ",", "yang_name", "=", "\"event-handler-list\"", ",", "rest_name", "=", "\"event-handler-list\"", ",", "parent", "=", "self", ",", "path_helper", "=", "self", ".", "_path_helper", ",", "extmethods", "=", "self", ".", "_extmethods", ",", "register_paths", "=", "True", ",", "extensions", "=", "{", "u'tailf-common'", ":", "{", "u'info'", ":", "u'Event handler configuration'", ",", "u'cli-no-key-completion'", ":", "None", ",", "u'cli-suppress-list-no'", ":", "None", ",", "u'cli-drop-node-name'", ":", "None", ",", "u'cli-suppress-key-abbreviation'", ":", "None", ",", "u'cli-no-match-completion'", ":", "None", "}", "}", ",", "namespace", "=", "'urn:brocade.com:mgmt:brocade-event-handler'", ",", "defining_module", "=", "'brocade-event-handler'", ",", "yang_type", "=", "'list'", ",", "is_config", "=", "True", ")", "except", "(", "TypeError", ",", "ValueError", ")", ":", "raise", "ValueError", "(", "{", "'error-string'", ":", "\"\"\"event_handler_list must be of a type compatible with list\"\"\"", ",", "'defined-type'", ":", "\"list\"", ",", "'generated-type'", ":", "\"\"\"YANGDynClass(base=YANGListType(\"name\",event_handler_list.event_handler_list, yang_name=\"event-handler-list\", rest_name=\"event-handler-list\", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='name', extensions={u'tailf-common': {u'info': u'Event handler configuration', u'cli-no-key-completion': None, u'cli-suppress-list-no': None, u'cli-drop-node-name': None, u'cli-suppress-key-abbreviation': None, u'cli-no-match-completion': None}}), is_container='list', yang_name=\"event-handler-list\", rest_name=\"event-handler-list\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Event handler configuration', u'cli-no-key-completion': None, u'cli-suppress-list-no': None, u'cli-drop-node-name': None, u'cli-suppress-key-abbreviation': None, u'cli-no-match-completion': None}}, namespace='urn:brocade.com:mgmt:brocade-event-handler', defining_module='brocade-event-handler', yang_type='list', is_config=True)\"\"\"", ",", "}", ")", "self", ".", "__event_handler_list", "=", "t", "if", "hasattr", "(", "self", ",", "'_set'", ")", ":", "self", ".", "_set", "(", ")" ]
Setter method for event_handler_list, mapped from YANG variable /event_handler/event_handler_list (list) If this variable is read-only (config: false) in the source YANG file, then _set_event_handler_list is considered as a private method. Backends looking to populate this variable should do so via calling thisObj._set_event_handler_list() directly.
[ "Setter", "method", "for", "event_handler_list", "mapped", "from", "YANG", "variable", "/", "event_handler", "/", "event_handler_list", "(", "list", ")", "If", "this", "variable", "is", "read", "-", "only", "(", "config", ":", "false", ")", "in", "the", "source", "YANG", "file", "then", "_set_event_handler_list", "is", "considered", "as", "a", "private", "method", ".", "Backends", "looking", "to", "populate", "this", "variable", "should", "do", "so", "via", "calling", "thisObj", ".", "_set_event_handler_list", "()", "directly", "." ]
python
train
ActivisionGameScience/assertpy
assertpy/assertpy.py
https://github.com/ActivisionGameScience/assertpy/blob/08d799cdb01f9a25d3e20672efac991c7bc26d79/assertpy/assertpy.py#L231-L244
def is_instance_of(self, some_class): """Asserts that val is an instance of the given class.""" try: if not isinstance(self.val, some_class): if hasattr(self.val, '__name__'): t = self.val.__name__ elif hasattr(self.val, '__class__'): t = self.val.__class__.__name__ else: t = 'unknown' self._err('Expected <%s:%s> to be instance of class <%s>, but was not.' % (self.val, t, some_class.__name__)) except TypeError: raise TypeError('given arg must be a class') return self
[ "def", "is_instance_of", "(", "self", ",", "some_class", ")", ":", "try", ":", "if", "not", "isinstance", "(", "self", ".", "val", ",", "some_class", ")", ":", "if", "hasattr", "(", "self", ".", "val", ",", "'__name__'", ")", ":", "t", "=", "self", ".", "val", ".", "__name__", "elif", "hasattr", "(", "self", ".", "val", ",", "'__class__'", ")", ":", "t", "=", "self", ".", "val", ".", "__class__", ".", "__name__", "else", ":", "t", "=", "'unknown'", "self", ".", "_err", "(", "'Expected <%s:%s> to be instance of class <%s>, but was not.'", "%", "(", "self", ".", "val", ",", "t", ",", "some_class", ".", "__name__", ")", ")", "except", "TypeError", ":", "raise", "TypeError", "(", "'given arg must be a class'", ")", "return", "self" ]
Asserts that val is an instance of the given class.
[ "Asserts", "that", "val", "is", "an", "instance", "of", "the", "given", "class", "." ]
python
valid
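Typical usage of this assertion through assertpy's public `assert_that` entry point:

from assertpy import assert_that

assert_that('foo').is_instance_of(str)  # passes
# assertions chain fluently
assert_that(123).is_instance_of(int).is_equal_to(123)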
reingart/pyafipws
wslum.py
https://github.com/reingart/pyafipws/blob/ee87cfe4ac12285ab431df5fec257f103042d1ab/wslum.py#L544-L574
def ConsultarBonificacionesPenalizaciones(self, sep="||"):
    "Returns a list of bonuses/penalties with code and description"
    ret = self.client.consultarBonificacionesPenalizaciones(
                    auth={
                        'token': self.Token, 'sign': self.Sign,
                        'cuit': self.Cuit, },
                        )['respuesta']
    self.__analizar_errores(ret)
    self.XmlResponse = self.client.xml_response
    array = ret.get('tipo', [])
    if sep is None:
        # without a separator, return a dict keyed by cod_variedad
        # with value: {"descripcion": ds_variedad, "clases": lista_clases}
        # where lista_clases = [{'codigo': ..., 'descripcion': ...}]
        return dict([(it['codigo'],
                      {'descripcion': it['descripcion'],
                       'subtipo': it['subtipo']})
                     for it in array])
    else:
        # with a separator, return a list of strings:
        # || variety code || variety desc. || class desc. || class code ||
        ret = []
        for it in array:
            for subtipo in it['subtipo']:
                ret.append(
                    ("%s %%s %s %%s %s %%s %s %%s %s %%s %s %%s %s" %
                        (sep, sep, sep, sep, sep, sep, sep)) %
                    (it['codigo'], it['descripcion'],
                     subtipo['descripcion'], subtipo['codigo'],
                     subtipo['valor'], subtipo['signo'])
                    )
        return ret
[ "def", "ConsultarBonificacionesPenalizaciones", "(", "self", ",", "sep", "=", "\"||\"", ")", ":", "ret", "=", "self", ".", "client", ".", "consultarBonificacionesPenalizaciones", "(", "auth", "=", "{", "'token'", ":", "self", ".", "Token", ",", "'sign'", ":", "self", ".", "Sign", ",", "'cuit'", ":", "self", ".", "Cuit", ",", "}", ",", ")", "[", "'respuesta'", "]", "self", ".", "__analizar_errores", "(", "ret", ")", "self", ".", "XmlResponse", "=", "self", ".", "client", ".", "xml_response", "array", "=", "ret", ".", "get", "(", "'tipo'", ",", "[", "]", ")", "if", "sep", "is", "None", ":", "# sin separador, devuelve un diccionario con clave cod_variadedad", "# y valor: {\"descripcion\": ds_variedad, \"clases\": lista_clases}", "# siendo lista_clases = [{'codigo': ..., 'descripcion': ...}]", "return", "dict", "(", "[", "(", "it", "[", "'codigo'", "]", ",", "{", "'descripcion'", ":", "it", "[", "'descripcion'", "]", ",", "'subtipo'", ":", "it", "[", "'subtipo'", "]", "}", ")", "for", "it", "in", "array", "]", ")", "else", ":", "# con separador, devuelve una lista de strings:", "# || cod.variedad || desc.variedad || desc.clase || cod.clase ||", "ret", "=", "[", "]", "for", "it", "in", "array", ":", "for", "subtipo", "in", "it", "[", "'subtipo'", "]", ":", "ret", ".", "append", "(", "(", "\"%s %%s %s %%s %s %%s %s %%s %s %%s %s %%s %s\"", "%", "(", "sep", ",", "sep", ",", "sep", ",", "sep", ",", "sep", ",", "sep", ",", "sep", ")", ")", "%", "(", "it", "[", "'codigo'", "]", ",", "it", "[", "'descripcion'", "]", ",", "subtipo", "[", "'descripcion'", "]", ",", "subtipo", "[", "'codigo'", "]", ",", "subtipo", "[", "'valor'", "]", ",", "subtipo", "[", "'signo'", "]", ")", ")", "return", "ret" ]
Returns a list of bonuses/penalties with code and description
[ "Retorna", "un", "listado", "de", "bonificaciones", "/", "penalizaciones", "con", "código", "y", "descripción" ]
python
train
goose3/goose3
goose3/extractors/videos.py
https://github.com/goose3/goose3/blob/e6994b1b1826af2720a091d1bff5ca15594f558d/goose3/extractors/videos.py#L67-L78
def get_video(self, node): """ Create a video object from a video embed """ video = Video() video._embed_code = self.get_embed_code(node) video._embed_type = self.get_embed_type(node) video._width = self.get_width(node) video._height = self.get_height(node) video._src = self.get_src(node) video._provider = self.get_provider(video.src) return video
[ "def", "get_video", "(", "self", ",", "node", ")", ":", "video", "=", "Video", "(", ")", "video", ".", "_embed_code", "=", "self", ".", "get_embed_code", "(", "node", ")", "video", ".", "_embed_type", "=", "self", ".", "get_embed_type", "(", "node", ")", "video", ".", "_width", "=", "self", ".", "get_width", "(", "node", ")", "video", ".", "_height", "=", "self", ".", "get_height", "(", "node", ")", "video", ".", "_src", "=", "self", ".", "get_src", "(", "node", ")", "video", ".", "_provider", "=", "self", ".", "get_provider", "(", "video", ".", "src", ")", "return", "video" ]
Create a video object from a video embed
[ "Create", "a", "video", "object", "from", "a", "video", "embed" ]
python
valid
pandas-dev/pandas
pandas/core/sorting.py
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/sorting.py#L406-L507
def safe_sort(values, labels=None, na_sentinel=-1, assume_unique=False):
    """
    Sort ``values`` and reorder corresponding ``labels``.
    ``values`` should be unique if ``labels`` is not None.
    Safe for use with mixed types (int, str), orders ints before strs.

    .. versionadded:: 0.19.0

    Parameters
    ----------
    values : list-like
        Sequence; must be unique if ``labels`` is not None.
    labels : list_like
        Indices to ``values``. All out of bound indices are treated as
        "not found" and will be masked with ``na_sentinel``.
    na_sentinel : int, default -1
        Value in ``labels`` to mark "not found".
        Ignored when ``labels`` is None.
    assume_unique : bool, default False
        When True, ``values`` are assumed to be unique, which can speed up
        the calculation. Ignored when ``labels`` is None.

    Returns
    -------
    ordered : ndarray
        Sorted ``values``
    new_labels : ndarray
        Reordered ``labels``; returned when ``labels`` is not None.

    Raises
    ------
    TypeError
        * If ``values`` is not list-like or if ``labels`` is neither None
        nor list-like
        * If ``values`` cannot be sorted
    ValueError
        * If ``labels`` is not None and ``values`` contain duplicates.
    """
    if not is_list_like(values):
        raise TypeError("Only list-like objects are allowed to be passed to "
                        "safe_sort as values")

    if not isinstance(values, np.ndarray):
        # don't convert to string types
        dtype, _ = infer_dtype_from_array(values)
        values = np.asarray(values, dtype=dtype)

    def sort_mixed(values):
        # order ints before strings, safe in py3
        str_pos = np.array([isinstance(x, str) for x in values],
                           dtype=bool)
        nums = np.sort(values[~str_pos])
        strs = np.sort(values[str_pos])
        return np.concatenate([nums, np.asarray(strs, dtype=object)])

    sorter = None
    if lib.infer_dtype(values, skipna=False) == 'mixed-integer':
        # unorderable in py3 if mixed str/int
        ordered = sort_mixed(values)
    else:
        try:
            sorter = values.argsort()
            ordered = values.take(sorter)
        except TypeError:
            # try this anyway
            ordered = sort_mixed(values)

    # labels:

    if labels is None:
        return ordered

    if not is_list_like(labels):
        raise TypeError("Only list-like objects or None are allowed to be "
                        "passed to safe_sort as labels")
    labels = ensure_platform_int(np.asarray(labels))

    from pandas import Index
    if not assume_unique and not Index(values).is_unique:
        raise ValueError("values should be unique if labels is not None")

    if sorter is None:
        # mixed types
        (hash_klass, _), values = algorithms._get_data_algo(
            values, algorithms._hashtables)
        t = hash_klass(len(values))
        t.map_locations(values)
        sorter = ensure_platform_int(t.lookup(ordered))

    reverse_indexer = np.empty(len(sorter), dtype=np.int_)
    reverse_indexer.put(sorter, np.arange(len(sorter)))

    mask = (labels < -len(values)) | (labels >= len(values)) | \
        (labels == na_sentinel)

    # (Out of bound indices will be masked with `na_sentinel` next, so we may
    # deal with them here without performance loss using `mode='wrap'`.)
    new_labels = reverse_indexer.take(labels, mode='wrap')
    np.putmask(new_labels, mask, na_sentinel)

    return ordered, ensure_platform_int(new_labels)
[ "def", "safe_sort", "(", "values", ",", "labels", "=", "None", ",", "na_sentinel", "=", "-", "1", ",", "assume_unique", "=", "False", ")", ":", "if", "not", "is_list_like", "(", "values", ")", ":", "raise", "TypeError", "(", "\"Only list-like objects are allowed to be passed to\"", "\"safe_sort as values\"", ")", "if", "not", "isinstance", "(", "values", ",", "np", ".", "ndarray", ")", ":", "# don't convert to string types", "dtype", ",", "_", "=", "infer_dtype_from_array", "(", "values", ")", "values", "=", "np", ".", "asarray", "(", "values", ",", "dtype", "=", "dtype", ")", "def", "sort_mixed", "(", "values", ")", ":", "# order ints before strings, safe in py3", "str_pos", "=", "np", ".", "array", "(", "[", "isinstance", "(", "x", ",", "str", ")", "for", "x", "in", "values", "]", ",", "dtype", "=", "bool", ")", "nums", "=", "np", ".", "sort", "(", "values", "[", "~", "str_pos", "]", ")", "strs", "=", "np", ".", "sort", "(", "values", "[", "str_pos", "]", ")", "return", "np", ".", "concatenate", "(", "[", "nums", ",", "np", ".", "asarray", "(", "strs", ",", "dtype", "=", "object", ")", "]", ")", "sorter", "=", "None", "if", "lib", ".", "infer_dtype", "(", "values", ",", "skipna", "=", "False", ")", "==", "'mixed-integer'", ":", "# unorderable in py3 if mixed str/int", "ordered", "=", "sort_mixed", "(", "values", ")", "else", ":", "try", ":", "sorter", "=", "values", ".", "argsort", "(", ")", "ordered", "=", "values", ".", "take", "(", "sorter", ")", "except", "TypeError", ":", "# try this anyway", "ordered", "=", "sort_mixed", "(", "values", ")", "# labels:", "if", "labels", "is", "None", ":", "return", "ordered", "if", "not", "is_list_like", "(", "labels", ")", ":", "raise", "TypeError", "(", "\"Only list-like objects or None are allowed to be\"", "\"passed to safe_sort as labels\"", ")", "labels", "=", "ensure_platform_int", "(", "np", ".", "asarray", "(", "labels", ")", ")", "from", "pandas", "import", "Index", "if", "not", "assume_unique", "and", "not", "Index", "(", "values", ")", ".", "is_unique", ":", "raise", "ValueError", "(", "\"values should be unique if labels is not None\"", ")", "if", "sorter", "is", "None", ":", "# mixed types", "(", "hash_klass", ",", "_", ")", ",", "values", "=", "algorithms", ".", "_get_data_algo", "(", "values", ",", "algorithms", ".", "_hashtables", ")", "t", "=", "hash_klass", "(", "len", "(", "values", ")", ")", "t", ".", "map_locations", "(", "values", ")", "sorter", "=", "ensure_platform_int", "(", "t", ".", "lookup", "(", "ordered", ")", ")", "reverse_indexer", "=", "np", ".", "empty", "(", "len", "(", "sorter", ")", ",", "dtype", "=", "np", ".", "int_", ")", "reverse_indexer", ".", "put", "(", "sorter", ",", "np", ".", "arange", "(", "len", "(", "sorter", ")", ")", ")", "mask", "=", "(", "labels", "<", "-", "len", "(", "values", ")", ")", "|", "(", "labels", ">=", "len", "(", "values", ")", ")", "|", "(", "labels", "==", "na_sentinel", ")", "# (Out of bound indices will be masked with `na_sentinel` next, so we may", "# deal with them here without performance loss using `mode='wrap'`.)", "new_labels", "=", "reverse_indexer", ".", "take", "(", "labels", ",", "mode", "=", "'wrap'", ")", "np", ".", "putmask", "(", "new_labels", ",", "mask", ",", "na_sentinel", ")", "return", "ordered", ",", "ensure_platform_int", "(", "new_labels", ")" ]
Sort ``values`` and reorder corresponding ``labels``. ``values`` should be unique if ``labels`` is not None. Safe for use with mixed types (int, str), orders ints before strs. .. versionadded:: 0.19.0 Parameters ---------- values : list-like Sequence; must be unique if ``labels`` is not None. labels : list_like Indices to ``values``. All out of bound indices are treated as "not found" and will be masked with ``na_sentinel``. na_sentinel : int, default -1 Value in ``labels`` to mark "not found". Ignored when ``labels`` is None. assume_unique : bool, default False When True, ``values`` are assumed to be unique, which can speed up the calculation. Ignored when ``labels`` is None. Returns ------- ordered : ndarray Sorted ``values`` new_labels : ndarray Reordered ``labels``; returned when ``labels`` is not None. Raises ------ TypeError * If ``values`` is not list-like or if ``labels`` is neither None nor list-like * If ``values`` cannot be sorted ValueError * If ``labels`` is not None and ``values`` contain duplicates.
[ "Sort", "values", "and", "reorder", "corresponding", "labels", ".", "values", "should", "be", "unique", "if", "labels", "is", "not", "None", ".", "Safe", "for", "use", "with", "mixed", "types", "(", "int", "str", ")", "orders", "ints", "before", "strs", "." ]
python
train
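A short illustration of the mixed-type ordering and label masking described above (the import path matches this record's `path` field; later pandas versions relocated `safe_sort`):

import numpy as np
from pandas.core.sorting import safe_sort

# ints order before strs, even when mixed in one object array
values = np.array([2, 'b', 1, 'a'], dtype=object)
print(safe_sort(values))  # [1 2 'a' 'b']

# out-of-bound labels are masked with na_sentinel (-1)
ordered, labels = safe_sort(np.array([3, 1, 2]), labels=[0, 2, 99])
print(ordered, labels)  # [1 2 3] [ 2  1 -1]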
rackerlabs/rackspace-python-neutronclient
neutronclient/v2_0/client.py
https://github.com/rackerlabs/rackspace-python-neutronclient/blob/5a5009a8fe078e3aa1d582176669f1b28ab26bef/neutronclient/v2_0/client.py#L1598-L1601
def update_bandwidth_limit_rule(self, rule, policy, body=None): """Updates a bandwidth limit rule.""" return self.put(self.qos_bandwidth_limit_rule_path % (policy, rule), body=body)
[ "def", "update_bandwidth_limit_rule", "(", "self", ",", "rule", ",", "policy", ",", "body", "=", "None", ")", ":", "return", "self", ".", "put", "(", "self", ".", "qos_bandwidth_limit_rule_path", "%", "(", "policy", ",", "rule", ")", ",", "body", "=", "body", ")" ]
Updates a bandwidth limit rule.
[ "Updates", "a", "bandwidth", "limit", "rule", "." ]
python
train
ray-project/ray
python/ray/tune/ray_trial_executor.py
https://github.com/ray-project/ray/blob/4eade036a0505e244c976f36aaa2d64386b5129b/python/ray/tune/ray_trial_executor.py#L188-L221
def start_trial(self, trial, checkpoint=None): """Starts the trial. Will not return resources if trial repeatedly fails on start. Args: trial (Trial): Trial to be started. checkpoint (Checkpoint): A Python object or path storing the state of trial. """ self._commit_resources(trial.resources) try: self._start_trial(trial, checkpoint) except Exception as e: logger.exception("Error starting runner for Trial %s", str(trial)) error_msg = traceback.format_exc() time.sleep(2) self._stop_trial(trial, error=True, error_msg=error_msg) if isinstance(e, AbortTrialExecution): return # don't retry fatal Tune errors try: # This forces the trial to not start from checkpoint. trial.clear_checkpoint() logger.info( "Trying to start runner for Trial %s without checkpoint.", str(trial)) self._start_trial(trial) except Exception: logger.exception( "Error starting runner for Trial %s, aborting!", str(trial)) error_msg = traceback.format_exc() self._stop_trial(trial, error=True, error_msg=error_msg)
[ "def", "start_trial", "(", "self", ",", "trial", ",", "checkpoint", "=", "None", ")", ":", "self", ".", "_commit_resources", "(", "trial", ".", "resources", ")", "try", ":", "self", ".", "_start_trial", "(", "trial", ",", "checkpoint", ")", "except", "Exception", "as", "e", ":", "logger", ".", "exception", "(", "\"Error starting runner for Trial %s\"", ",", "str", "(", "trial", ")", ")", "error_msg", "=", "traceback", ".", "format_exc", "(", ")", "time", ".", "sleep", "(", "2", ")", "self", ".", "_stop_trial", "(", "trial", ",", "error", "=", "True", ",", "error_msg", "=", "error_msg", ")", "if", "isinstance", "(", "e", ",", "AbortTrialExecution", ")", ":", "return", "# don't retry fatal Tune errors", "try", ":", "# This forces the trial to not start from checkpoint.", "trial", ".", "clear_checkpoint", "(", ")", "logger", ".", "info", "(", "\"Trying to start runner for Trial %s without checkpoint.\"", ",", "str", "(", "trial", ")", ")", "self", ".", "_start_trial", "(", "trial", ")", "except", "Exception", ":", "logger", ".", "exception", "(", "\"Error starting runner for Trial %s, aborting!\"", ",", "str", "(", "trial", ")", ")", "error_msg", "=", "traceback", ".", "format_exc", "(", ")", "self", ".", "_stop_trial", "(", "trial", ",", "error", "=", "True", ",", "error_msg", "=", "error_msg", ")" ]
Starts the trial. Will not return resources if trial repeatedly fails on start. Args: trial (Trial): Trial to be started. checkpoint (Checkpoint): A Python object or path storing the state of trial.
[ "Starts", "the", "trial", "." ]
python
train
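A hypothetical driver-side sketch of the executor method above; ``Trial`` construction is elided because its signature varies across Tune versions, so ``trial`` and ``checkpoint`` are assumed to exist already.

from ray.tune.ray_trial_executor import RayTrialExecutor

executor = RayTrialExecutor()
# Commits trial.resources, then launches the runner; on failure it retries once
# without the checkpoint unless the error was an AbortTrialExecution.
executor.start_trial(trial)                         # fresh start
executor.start_trial(trial, checkpoint=checkpoint)  # or restore prior state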
gagneurlab/concise
concise/layers.py
https://github.com/gagneurlab/concise/blob/d15262eb1e590008bc96ba31e93bfbdbfa1a9fd4/concise/layers.py#L73-L78
def InputSplines(seq_length, n_bases=10, name=None, **kwargs): """Input placeholder for array returned by `encodeSplines` Wrapper for: `keras.layers.Input((seq_length, n_bases), name=name, **kwargs)` """ return Input((seq_length, n_bases), name=name, **kwargs)
[ "def", "InputSplines", "(", "seq_length", ",", "n_bases", "=", "10", ",", "name", "=", "None", ",", "*", "*", "kwargs", ")", ":", "return", "Input", "(", "(", "seq_length", ",", "n_bases", ")", ",", "name", "=", "name", ",", "*", "*", "kwargs", ")" ]
Input placeholder for array returned by `encodeSplines` Wrapper for: `keras.layers.Input((seq_length, n_bases), name=name, **kwargs)`
[ "Input", "placeholder", "for", "array", "returned", "by", "encodeSplines" ]
python
train
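A short sketch wiring ``InputSplines`` into a Keras model; the downstream layers are illustrative, and the model expects the ``(n_samples, seq_length, n_bases)`` array produced by ``encodeSplines``.

from keras.layers import Dense, Flatten
from keras.models import Model
from concise.layers import InputSplines

seq_length, n_bases = 100, 10
inp = InputSplines(seq_length, n_bases=n_bases, name="splines")  # shape (100, 10)
out = Dense(1)(Flatten()(inp))
model = Model(inp, out)  # feed it arrays shaped (n_samples, 100, 10)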
atlassian-api/atlassian-python-api
atlassian/confluence.py
https://github.com/atlassian-api/atlassian-python-api/blob/540d269905c3e7547b666fe30c647b2d512cf358/atlassian/confluence.py#L330-L372
def attach_file(self, filename, page_id=None, title=None, space=None, comment=None):
    """
    Attach (upload) a file to a page. If it already exists, it will
    automatically version the new file and keep the old one.
    :param title: The page name
    :type title: ``str``
    :param space: The space name
    :type space: ``str``
    :param page_id: The page id to which we would like to upload the file
    :type page_id: ``str``
    :param filename: The file to upload
    :type filename: ``str``
    :param comment: A comment describing this upload/file
    :type comment: ``str``
    """
    page_id = self.get_page_id(space=space, title=title) if page_id is None else page_id
    type = 'attachment'
    if page_id is not None:
        extension = os.path.splitext(filename)[-1]
        content_type = self.content_types.get(extension, "application/binary")
        comment = comment if comment else "Uploaded {filename}.".format(filename=filename)
        data = {
            'type': type,
            "fileName": filename,
            "contentType": content_type,
            "comment": comment,
            "minorEdit": "true"}
        headers = {
            'X-Atlassian-Token': 'nocheck',
            'Accept': 'application/json'}
        path = 'rest/api/content/{page_id}/child/attachment'.format(page_id=page_id)
        # get base name of the file to get the attachment from confluence.
        file_base_name = os.path.basename(filename)
        # Check if there is already a file with the same name
        attachments = self.get(path=path, headers=headers, params={'filename': file_base_name})
        if attachments['size']:
            path = path + '/' + attachments['results'][0]['id'] + '/data'
        with open(filename, 'rb') as infile:
            return self.post(path=path, data=data, headers=headers,
                             files={'file': (filename, infile, content_type)})
    else:
        log.warning("No 'page_id' found, not uploading attachments")
        return None
[ "def", "attach_file", "(", "self", ",", "filename", ",", "page_id", "=", "None", ",", "title", "=", "None", ",", "space", "=", "None", ",", "comment", "=", "None", ")", ":", "page_id", "=", "self", ".", "get_page_id", "(", "space", "=", "space", ",", "title", "=", "title", ")", "if", "page_id", "is", "None", "else", "page_id", "type", "=", "'attachment'", "if", "page_id", "is", "not", "None", ":", "extension", "=", "os", ".", "path", ".", "splitext", "(", "filename", ")", "[", "-", "1", "]", "content_type", "=", "self", ".", "content_types", ".", "get", "(", "extension", ",", "\"application/binary\"", ")", "comment", "=", "comment", "if", "comment", "else", "\"Uploaded {filename}.\"", ".", "format", "(", "filename", "=", "filename", ")", "data", "=", "{", "'type'", ":", "type", ",", "\"fileName\"", ":", "filename", ",", "\"contentType\"", ":", "content_type", ",", "\"comment\"", ":", "comment", ",", "\"minorEdit\"", ":", "\"true\"", "}", "headers", "=", "{", "'X-Atlassian-Token'", ":", "'nocheck'", ",", "'Accept'", ":", "'application/json'", "}", "path", "=", "'rest/api/content/{page_id}/child/attachment'", ".", "format", "(", "page_id", "=", "page_id", ")", "# get base name of the file to get the attachment from confluence.", "file_base_name", "=", "os", ".", "path", ".", "basename", "(", "filename", ")", "# Check if there is already a file with the same name", "attachments", "=", "self", ".", "get", "(", "path", "=", "path", ",", "headers", "=", "headers", ",", "params", "=", "{", "'filename'", ":", "file_base_name", "}", ")", "if", "attachments", "[", "'size'", "]", ":", "path", "=", "path", "+", "'/'", "+", "attachments", "[", "'results'", "]", "[", "0", "]", "[", "'id'", "]", "+", "'/data'", "with", "open", "(", "filename", ",", "'rb'", ")", "as", "infile", ":", "return", "self", ".", "post", "(", "path", "=", "path", ",", "data", "=", "data", ",", "headers", "=", "headers", ",", "files", "=", "{", "'file'", ":", "(", "filename", ",", "infile", ",", "content_type", ")", "}", ")", "else", ":", "log", ".", "warning", "(", "\"No 'page_id' found, not uploading attachments\"", ")", "return", "None" ]
Attach (upload) a file to a page. If it already exists, it will
automatically version the new file and keep the old one.
:param title: The page name
:type title: ``str``
:param space: The space name
:type space: ``str``
:param page_id: The page id to which we would like to upload the file
:type page_id: ``str``
:param filename: The file to upload
:type filename: ``str``
:param comment: A comment describing this upload/file
:type comment: ``str``
[ "Attach", "(", "upload", ")", "a", "file", "to", "a", "page", "if", "it", "exists", "it", "will", "update", "the", "automatically", "version", "the", "new", "file", "and", "keep", "the", "old", "one", ".", ":", "param", "title", ":", "The", "page", "name", ":", "type", "title", ":", "str", ":", "param", "space", ":", "The", "space", "name", ":", "type", "space", ":", "str", ":", "param", "page_id", ":", "The", "page", "id", "to", "which", "we", "would", "like", "to", "upload", "the", "file", ":", "type", "page_id", ":", "str", ":", "param", "filename", ":", "The", "file", "to", "upload", ":", "type", "filename", ":", "str", ":", "param", "comment", ":", "A", "comment", "describing", "this", "upload", "/", "file", ":", "type", "comment", ":", "str" ]
python
train
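A hedged usage sketch for ``attach_file`` via the package's ``Confluence`` entry point; the URL and credentials are placeholders.

from atlassian import Confluence

confluence = Confluence(url='https://confluence.example.com',  # placeholder
                        username='user', password='pass')      # placeholder
# Resolve the page by space + title, or pass page_id directly.
confluence.attach_file('report.pdf', space='DOC', title='Reports',
                       comment='Quarterly report')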