repo
stringlengths
7
54
path
stringlengths
4
192
url
stringlengths
87
284
code
stringlengths
78
104k
code_tokens
sequence
docstring
stringlengths
1
46.9k
docstring_tokens
sequence
language
stringclasses
1 value
partition
stringclasses
3 values
tanghaibao/jcvi
jcvi/formats/bed.py
https://github.com/tanghaibao/jcvi/blob/d2e31a77b6ade7f41f3b321febc2b4744d1cdeca/jcvi/formats/bed.py#L1274-L1375
def bins(args): """ %prog bins bedfile fastafile Bin bed lengths into each consecutive window. Use --subtract to remove bases from window, e.g. --subtract gaps.bed ignores the gap sequences. """ from jcvi.formats.sizes import Sizes p = OptionParser(bins.__doc__) p.add_option("--binsize", default=100000, type="int", help="Size of the bins [default: %default]") p.add_option("--subtract", help="Subtract bases from window [default: %default]") p.add_option("--mode", default="span", choices=("span", "count", "score"), help="Accumulate feature based on [default: %default]") p.add_option("--nomerge", default=False, action="store_true", help="Do not merge features") opts, args = p.parse_args(args) if len(args) != 2: sys.exit(not p.print_help()) bedfile, fastafile = args subtract = opts.subtract mode = opts.mode assert op.exists(bedfile), "File `{0}` not found".format(bedfile) binsize = opts.binsize binfile = bedfile + ".{0}".format(binsize) binfile += ".{0}.bins".format(mode) if not need_update(bedfile, binfile): return binfile sz = Sizes(fastafile) sizesfile = sz.filename sizes = sz.mapping fw = open(binfile, "w") scores = "median" if mode == "score" else None if not opts.nomerge: bedfile = mergeBed(bedfile, nms=True, scores=scores) if subtract: subtractmerge = mergeBed(subtract) subtract_complement = complementBed(subtractmerge, sizesfile) bedfile = intersectBed(bedfile, subtract_complement) bedfile = sort([bedfile, "-i"]) bed = Bed(bedfile) sbdict = dict(bed.sub_beds()) for chr, chr_len in sorted(sizes.items()): chr_len = sizes[chr] subbeds = sbdict.get(chr, []) nbins = chr_len / binsize last_bin = chr_len % binsize if last_bin: nbins += 1 a = np.zeros(nbins) # values b = np.zeros(nbins, dtype="int") # bases c = np.zeros(nbins, dtype="int") # count b[:-1] = binsize b[-1] = last_bin for bb in subbeds: start, end = bb.start, bb.end startbin = start / binsize endbin = end / binsize assert startbin <= endbin c[startbin:endbin + 1] += 1 if mode == "score": a[startbin:endbin + 1] += float(bb.score) elif mode == "span": if startbin == endbin: a[startbin] += end - start + 1 if startbin < endbin: firstsize = (startbin + 1) * binsize - start + 1 lastsize = end - endbin * binsize a[startbin] += firstsize if startbin + 1 < endbin: a[startbin + 1:endbin] += binsize a[endbin] += lastsize if mode == "count": a = c for xa, xb in zip(a, b): print("\t".join(str(x) for x in (chr, xa, xb)), file=fw) fw.close() if subtract: subtractbinfile = bins([subtract, fastafile, "--binsize={0}".format(binsize)]) binfile = subtractbins(binfile, subtractbinfile) return binfile
[ "def", "bins", "(", "args", ")", ":", "from", "jcvi", ".", "formats", ".", "sizes", "import", "Sizes", "p", "=", "OptionParser", "(", "bins", ".", "__doc__", ")", "p", ".", "add_option", "(", "\"--binsize\"", ",", "default", "=", "100000", ",", "type", "=", "\"int\"", ",", "help", "=", "\"Size of the bins [default: %default]\"", ")", "p", ".", "add_option", "(", "\"--subtract\"", ",", "help", "=", "\"Subtract bases from window [default: %default]\"", ")", "p", ".", "add_option", "(", "\"--mode\"", ",", "default", "=", "\"span\"", ",", "choices", "=", "(", "\"span\"", ",", "\"count\"", ",", "\"score\"", ")", ",", "help", "=", "\"Accumulate feature based on [default: %default]\"", ")", "p", ".", "add_option", "(", "\"--nomerge\"", ",", "default", "=", "False", ",", "action", "=", "\"store_true\"", ",", "help", "=", "\"Do not merge features\"", ")", "opts", ",", "args", "=", "p", ".", "parse_args", "(", "args", ")", "if", "len", "(", "args", ")", "!=", "2", ":", "sys", ".", "exit", "(", "not", "p", ".", "print_help", "(", ")", ")", "bedfile", ",", "fastafile", "=", "args", "subtract", "=", "opts", ".", "subtract", "mode", "=", "opts", ".", "mode", "assert", "op", ".", "exists", "(", "bedfile", ")", ",", "\"File `{0}` not found\"", ".", "format", "(", "bedfile", ")", "binsize", "=", "opts", ".", "binsize", "binfile", "=", "bedfile", "+", "\".{0}\"", ".", "format", "(", "binsize", ")", "binfile", "+=", "\".{0}.bins\"", ".", "format", "(", "mode", ")", "if", "not", "need_update", "(", "bedfile", ",", "binfile", ")", ":", "return", "binfile", "sz", "=", "Sizes", "(", "fastafile", ")", "sizesfile", "=", "sz", ".", "filename", "sizes", "=", "sz", ".", "mapping", "fw", "=", "open", "(", "binfile", ",", "\"w\"", ")", "scores", "=", "\"median\"", "if", "mode", "==", "\"score\"", "else", "None", "if", "not", "opts", ".", "nomerge", ":", "bedfile", "=", "mergeBed", "(", "bedfile", ",", "nms", "=", "True", ",", "scores", "=", "scores", ")", "if", "subtract", ":", "subtractmerge", "=", "mergeBed", "(", "subtract", ")", "subtract_complement", "=", "complementBed", "(", "subtractmerge", ",", "sizesfile", ")", "bedfile", "=", "intersectBed", "(", "bedfile", ",", "subtract_complement", ")", "bedfile", "=", "sort", "(", "[", "bedfile", ",", "\"-i\"", "]", ")", "bed", "=", "Bed", "(", "bedfile", ")", "sbdict", "=", "dict", "(", "bed", ".", "sub_beds", "(", ")", ")", "for", "chr", ",", "chr_len", "in", "sorted", "(", "sizes", ".", "items", "(", ")", ")", ":", "chr_len", "=", "sizes", "[", "chr", "]", "subbeds", "=", "sbdict", ".", "get", "(", "chr", ",", "[", "]", ")", "nbins", "=", "chr_len", "/", "binsize", "last_bin", "=", "chr_len", "%", "binsize", "if", "last_bin", ":", "nbins", "+=", "1", "a", "=", "np", ".", "zeros", "(", "nbins", ")", "# values", "b", "=", "np", ".", "zeros", "(", "nbins", ",", "dtype", "=", "\"int\"", ")", "# bases", "c", "=", "np", ".", "zeros", "(", "nbins", ",", "dtype", "=", "\"int\"", ")", "# count", "b", "[", ":", "-", "1", "]", "=", "binsize", "b", "[", "-", "1", "]", "=", "last_bin", "for", "bb", "in", "subbeds", ":", "start", ",", "end", "=", "bb", ".", "start", ",", "bb", ".", "end", "startbin", "=", "start", "/", "binsize", "endbin", "=", "end", "/", "binsize", "assert", "startbin", "<=", "endbin", "c", "[", "startbin", ":", "endbin", "+", "1", "]", "+=", "1", "if", "mode", "==", "\"score\"", ":", "a", "[", "startbin", ":", "endbin", "+", "1", "]", "+=", "float", "(", "bb", ".", "score", ")", "elif", "mode", "==", "\"span\"", ":", "if", "startbin", "==", "endbin", ":", "a", "[", "startbin", "]", "+=", "end", "-", "start", "+", "1", "if", "startbin", "<", "endbin", ":", "firstsize", "=", "(", "startbin", "+", "1", ")", "*", "binsize", "-", "start", "+", "1", "lastsize", "=", "end", "-", "endbin", "*", "binsize", "a", "[", "startbin", "]", "+=", "firstsize", "if", "startbin", "+", "1", "<", "endbin", ":", "a", "[", "startbin", "+", "1", ":", "endbin", "]", "+=", "binsize", "a", "[", "endbin", "]", "+=", "lastsize", "if", "mode", "==", "\"count\"", ":", "a", "=", "c", "for", "xa", ",", "xb", "in", "zip", "(", "a", ",", "b", ")", ":", "print", "(", "\"\\t\"", ".", "join", "(", "str", "(", "x", ")", "for", "x", "in", "(", "chr", ",", "xa", ",", "xb", ")", ")", ",", "file", "=", "fw", ")", "fw", ".", "close", "(", ")", "if", "subtract", ":", "subtractbinfile", "=", "bins", "(", "[", "subtract", ",", "fastafile", ",", "\"--binsize={0}\"", ".", "format", "(", "binsize", ")", "]", ")", "binfile", "=", "subtractbins", "(", "binfile", ",", "subtractbinfile", ")", "return", "binfile" ]
%prog bins bedfile fastafile Bin bed lengths into each consecutive window. Use --subtract to remove bases from window, e.g. --subtract gaps.bed ignores the gap sequences.
[ "%prog", "bins", "bedfile", "fastafile" ]
python
train
fedora-infra/fedmsg
fedmsg/crypto/utils.py
https://github.com/fedora-infra/fedmsg/blob/c21d6b3ce023fc3c0e881c704f5b55fb6e6392d7/fedmsg/crypto/utils.py#L11-L54
def fix_datagrepper_message(message): """ See if a message is (probably) a datagrepper message and attempt to mutate it to pass signature validation. Datagrepper adds the 'source_name' and 'source_version' keys. If messages happen to use those keys, they will fail message validation. Additionally, a 'headers' dictionary is present on all responses, regardless of whether it was in the original message or not. This is deleted if it's null, which won't be correct in all cases. Finally, datagrepper turns the 'timestamp' field into a float, but it might have been an integer when the message was signed. A copy of the dictionary is made and returned if altering the message is necessary. I'm so sorry. Args: message (dict): A message to clean up. Returns: dict: A copy of the provided message, with the datagrepper-related keys removed if they were present. """ if not ('source_name' in message and 'source_version' in message): return message # Don't mutate the original message message = message.copy() del message['source_name'] del message['source_version'] # datanommer adds the headers field to the message in all cases. # This is a huge problem because if the signature was generated with a 'headers' # key set and we delete it here, messages will fail validation, but if we don't # messages will fail validation if they didn't have a 'headers' key set. # # There's no way to know whether or not the headers field was part of the signed # message or not. Generally, the problem is datanommer is mutating messages. if 'headers' in message and not message['headers']: del message['headers'] if 'timestamp' in message: message['timestamp'] = int(message['timestamp']) return message
[ "def", "fix_datagrepper_message", "(", "message", ")", ":", "if", "not", "(", "'source_name'", "in", "message", "and", "'source_version'", "in", "message", ")", ":", "return", "message", "# Don't mutate the original message", "message", "=", "message", ".", "copy", "(", ")", "del", "message", "[", "'source_name'", "]", "del", "message", "[", "'source_version'", "]", "# datanommer adds the headers field to the message in all cases.", "# This is a huge problem because if the signature was generated with a 'headers'", "# key set and we delete it here, messages will fail validation, but if we don't", "# messages will fail validation if they didn't have a 'headers' key set.", "#", "# There's no way to know whether or not the headers field was part of the signed", "# message or not. Generally, the problem is datanommer is mutating messages.", "if", "'headers'", "in", "message", "and", "not", "message", "[", "'headers'", "]", ":", "del", "message", "[", "'headers'", "]", "if", "'timestamp'", "in", "message", ":", "message", "[", "'timestamp'", "]", "=", "int", "(", "message", "[", "'timestamp'", "]", ")", "return", "message" ]
See if a message is (probably) a datagrepper message and attempt to mutate it to pass signature validation. Datagrepper adds the 'source_name' and 'source_version' keys. If messages happen to use those keys, they will fail message validation. Additionally, a 'headers' dictionary is present on all responses, regardless of whether it was in the original message or not. This is deleted if it's null, which won't be correct in all cases. Finally, datagrepper turns the 'timestamp' field into a float, but it might have been an integer when the message was signed. A copy of the dictionary is made and returned if altering the message is necessary. I'm so sorry. Args: message (dict): A message to clean up. Returns: dict: A copy of the provided message, with the datagrepper-related keys removed if they were present.
[ "See", "if", "a", "message", "is", "(", "probably", ")", "a", "datagrepper", "message", "and", "attempt", "to", "mutate", "it", "to", "pass", "signature", "validation", "." ]
python
train
wakatime/wakatime
wakatime/packages/pygments/regexopt.py
https://github.com/wakatime/wakatime/blob/74519ace04e8472f3a3993269963732b9946a01d/wakatime/packages/pygments/regexopt.py#L27-L80
def regex_opt_inner(strings, open_paren): """Return a regex that matches any string in the sorted list of strings.""" close_paren = open_paren and ')' or '' # print strings, repr(open_paren) if not strings: # print '-> nothing left' return '' first = strings[0] if len(strings) == 1: # print '-> only 1 string' return open_paren + escape(first) + close_paren if not first: # print '-> first string empty' return open_paren + regex_opt_inner(strings[1:], '(?:') \ + '?' + close_paren if len(first) == 1: # multiple one-char strings? make a charset oneletter = [] rest = [] for s in strings: if len(s) == 1: oneletter.append(s) else: rest.append(s) if len(oneletter) > 1: # do we have more than one oneletter string? if rest: # print '-> 1-character + rest' return open_paren + regex_opt_inner(rest, '') + '|' \ + make_charset(oneletter) + close_paren # print '-> only 1-character' return open_paren + make_charset(oneletter) + close_paren prefix = commonprefix(strings) if prefix: plen = len(prefix) # we have a prefix for all strings # print '-> prefix:', prefix return open_paren + escape(prefix) \ + regex_opt_inner([s[plen:] for s in strings], '(?:') \ + close_paren # is there a suffix? strings_rev = [s[::-1] for s in strings] suffix = commonprefix(strings_rev) if suffix: slen = len(suffix) # print '-> suffix:', suffix[::-1] return open_paren \ + regex_opt_inner(sorted(s[:-slen] for s in strings), '(?:') \ + escape(suffix[::-1]) + close_paren # recurse on common 1-string prefixes # print '-> last resort' return open_paren + \ '|'.join(regex_opt_inner(list(group[1]), '') for group in groupby(strings, lambda s: s[0] == first[0])) \ + close_paren
[ "def", "regex_opt_inner", "(", "strings", ",", "open_paren", ")", ":", "close_paren", "=", "open_paren", "and", "')'", "or", "''", "# print strings, repr(open_paren)", "if", "not", "strings", ":", "# print '-> nothing left'", "return", "''", "first", "=", "strings", "[", "0", "]", "if", "len", "(", "strings", ")", "==", "1", ":", "# print '-> only 1 string'", "return", "open_paren", "+", "escape", "(", "first", ")", "+", "close_paren", "if", "not", "first", ":", "# print '-> first string empty'", "return", "open_paren", "+", "regex_opt_inner", "(", "strings", "[", "1", ":", "]", ",", "'(?:'", ")", "+", "'?'", "+", "close_paren", "if", "len", "(", "first", ")", "==", "1", ":", "# multiple one-char strings? make a charset", "oneletter", "=", "[", "]", "rest", "=", "[", "]", "for", "s", "in", "strings", ":", "if", "len", "(", "s", ")", "==", "1", ":", "oneletter", ".", "append", "(", "s", ")", "else", ":", "rest", ".", "append", "(", "s", ")", "if", "len", "(", "oneletter", ")", ">", "1", ":", "# do we have more than one oneletter string?", "if", "rest", ":", "# print '-> 1-character + rest'", "return", "open_paren", "+", "regex_opt_inner", "(", "rest", ",", "''", ")", "+", "'|'", "+", "make_charset", "(", "oneletter", ")", "+", "close_paren", "# print '-> only 1-character'", "return", "open_paren", "+", "make_charset", "(", "oneletter", ")", "+", "close_paren", "prefix", "=", "commonprefix", "(", "strings", ")", "if", "prefix", ":", "plen", "=", "len", "(", "prefix", ")", "# we have a prefix for all strings", "# print '-> prefix:', prefix", "return", "open_paren", "+", "escape", "(", "prefix", ")", "+", "regex_opt_inner", "(", "[", "s", "[", "plen", ":", "]", "for", "s", "in", "strings", "]", ",", "'(?:'", ")", "+", "close_paren", "# is there a suffix?", "strings_rev", "=", "[", "s", "[", ":", ":", "-", "1", "]", "for", "s", "in", "strings", "]", "suffix", "=", "commonprefix", "(", "strings_rev", ")", "if", "suffix", ":", "slen", "=", "len", "(", "suffix", ")", "# print '-> suffix:', suffix[::-1]", "return", "open_paren", "+", "regex_opt_inner", "(", "sorted", "(", "s", "[", ":", "-", "slen", "]", "for", "s", "in", "strings", ")", ",", "'(?:'", ")", "+", "escape", "(", "suffix", "[", ":", ":", "-", "1", "]", ")", "+", "close_paren", "# recurse on common 1-string prefixes", "# print '-> last resort'", "return", "open_paren", "+", "'|'", ".", "join", "(", "regex_opt_inner", "(", "list", "(", "group", "[", "1", "]", ")", ",", "''", ")", "for", "group", "in", "groupby", "(", "strings", ",", "lambda", "s", ":", "s", "[", "0", "]", "==", "first", "[", "0", "]", ")", ")", "+", "close_paren" ]
Return a regex that matches any string in the sorted list of strings.
[ "Return", "a", "regex", "that", "matches", "any", "string", "in", "the", "sorted", "list", "of", "strings", "." ]
python
train
saltstack/salt
salt/version.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/version.py#L627-L704
def system_information(): ''' Report system versions. ''' def system_version(): ''' Return host system version. ''' lin_ver = linux_distribution() mac_ver = platform.mac_ver() win_ver = platform.win32_ver() if lin_ver[0]: return ' '.join(lin_ver) elif mac_ver[0]: if isinstance(mac_ver[1], (tuple, list)) and ''.join(mac_ver[1]): return ' '.join([mac_ver[0], '.'.join(mac_ver[1]), mac_ver[2]]) else: return ' '.join([mac_ver[0], mac_ver[2]]) elif win_ver[0]: return ' '.join(win_ver) else: return '' if platform.win32_ver()[0]: # Get the version and release info based on the Windows Operating # System Product Name. As long as Microsoft maintains a similar format # this should be future proof import win32api # pylint: disable=3rd-party-module-not-gated import win32con # pylint: disable=3rd-party-module-not-gated # Get the product name from the registry hkey = win32con.HKEY_LOCAL_MACHINE key = 'SOFTWARE\\Microsoft\\Windows NT\\CurrentVersion' value_name = 'ProductName' reg_handle = win32api.RegOpenKey(hkey, key) # Returns a tuple of (product_name, value_type) product_name, _ = win32api.RegQueryValueEx(reg_handle, value_name) version = 'Unknown' release = '' if 'Server' in product_name: for item in product_name.split(' '): # If it's all digits, then it's version if re.match(r'\d+', item): version = item # If it starts with R and then numbers, it's the release # ie: R2 if re.match(r'^R\d+$', item): release = item release = '{0}Server{1}'.format(version, release) else: for item in product_name.split(' '): # If it's a number, decimal number, Thin or Vista, then it's the # version if re.match(r'^(\d+(\.\d+)?)|Thin|Vista$', item): version = item release = version _, ver, sp, extra = platform.win32_ver() version = ' '.join([release, ver, sp, extra]) else: version = system_version() release = platform.release() system = [ ('system', platform.system()), ('dist', ' '.join(linux_distribution(full_distribution_name=False))), ('release', release), ('machine', platform.machine()), ('version', version), ('locale', __salt_system_encoding__), ] for name, attr in system: yield name, attr continue
[ "def", "system_information", "(", ")", ":", "def", "system_version", "(", ")", ":", "'''\n Return host system version.\n '''", "lin_ver", "=", "linux_distribution", "(", ")", "mac_ver", "=", "platform", ".", "mac_ver", "(", ")", "win_ver", "=", "platform", ".", "win32_ver", "(", ")", "if", "lin_ver", "[", "0", "]", ":", "return", "' '", ".", "join", "(", "lin_ver", ")", "elif", "mac_ver", "[", "0", "]", ":", "if", "isinstance", "(", "mac_ver", "[", "1", "]", ",", "(", "tuple", ",", "list", ")", ")", "and", "''", ".", "join", "(", "mac_ver", "[", "1", "]", ")", ":", "return", "' '", ".", "join", "(", "[", "mac_ver", "[", "0", "]", ",", "'.'", ".", "join", "(", "mac_ver", "[", "1", "]", ")", ",", "mac_ver", "[", "2", "]", "]", ")", "else", ":", "return", "' '", ".", "join", "(", "[", "mac_ver", "[", "0", "]", ",", "mac_ver", "[", "2", "]", "]", ")", "elif", "win_ver", "[", "0", "]", ":", "return", "' '", ".", "join", "(", "win_ver", ")", "else", ":", "return", "''", "if", "platform", ".", "win32_ver", "(", ")", "[", "0", "]", ":", "# Get the version and release info based on the Windows Operating", "# System Product Name. As long as Microsoft maintains a similar format", "# this should be future proof", "import", "win32api", "# pylint: disable=3rd-party-module-not-gated", "import", "win32con", "# pylint: disable=3rd-party-module-not-gated", "# Get the product name from the registry", "hkey", "=", "win32con", ".", "HKEY_LOCAL_MACHINE", "key", "=", "'SOFTWARE\\\\Microsoft\\\\Windows NT\\\\CurrentVersion'", "value_name", "=", "'ProductName'", "reg_handle", "=", "win32api", ".", "RegOpenKey", "(", "hkey", ",", "key", ")", "# Returns a tuple of (product_name, value_type)", "product_name", ",", "_", "=", "win32api", ".", "RegQueryValueEx", "(", "reg_handle", ",", "value_name", ")", "version", "=", "'Unknown'", "release", "=", "''", "if", "'Server'", "in", "product_name", ":", "for", "item", "in", "product_name", ".", "split", "(", "' '", ")", ":", "# If it's all digits, then it's version", "if", "re", ".", "match", "(", "r'\\d+'", ",", "item", ")", ":", "version", "=", "item", "# If it starts with R and then numbers, it's the release", "# ie: R2", "if", "re", ".", "match", "(", "r'^R\\d+$'", ",", "item", ")", ":", "release", "=", "item", "release", "=", "'{0}Server{1}'", ".", "format", "(", "version", ",", "release", ")", "else", ":", "for", "item", "in", "product_name", ".", "split", "(", "' '", ")", ":", "# If it's a number, decimal number, Thin or Vista, then it's the", "# version", "if", "re", ".", "match", "(", "r'^(\\d+(\\.\\d+)?)|Thin|Vista$'", ",", "item", ")", ":", "version", "=", "item", "release", "=", "version", "_", ",", "ver", ",", "sp", ",", "extra", "=", "platform", ".", "win32_ver", "(", ")", "version", "=", "' '", ".", "join", "(", "[", "release", ",", "ver", ",", "sp", ",", "extra", "]", ")", "else", ":", "version", "=", "system_version", "(", ")", "release", "=", "platform", ".", "release", "(", ")", "system", "=", "[", "(", "'system'", ",", "platform", ".", "system", "(", ")", ")", ",", "(", "'dist'", ",", "' '", ".", "join", "(", "linux_distribution", "(", "full_distribution_name", "=", "False", ")", ")", ")", ",", "(", "'release'", ",", "release", ")", ",", "(", "'machine'", ",", "platform", ".", "machine", "(", ")", ")", ",", "(", "'version'", ",", "version", ")", ",", "(", "'locale'", ",", "__salt_system_encoding__", ")", ",", "]", "for", "name", ",", "attr", "in", "system", ":", "yield", "name", ",", "attr", "continue" ]
Report system versions.
[ "Report", "system", "versions", "." ]
python
train
jadolg/rocketchat_API
rocketchat_API/rocketchat.py
https://github.com/jadolg/rocketchat_API/blob/f220d094434991cb9892418245f054ea06f28aad/rocketchat_API/rocketchat.py#L147-L154
def users_get_presence(self, user_id=None, username=None, **kwargs): """Gets the online presence of the a user.""" if user_id: return self.__call_api_get('users.getPresence', userId=user_id, kwargs=kwargs) elif username: return self.__call_api_get('users.getPresence', username=username, kwargs=kwargs) else: raise RocketMissingParamException('userID or username required')
[ "def", "users_get_presence", "(", "self", ",", "user_id", "=", "None", ",", "username", "=", "None", ",", "*", "*", "kwargs", ")", ":", "if", "user_id", ":", "return", "self", ".", "__call_api_get", "(", "'users.getPresence'", ",", "userId", "=", "user_id", ",", "kwargs", "=", "kwargs", ")", "elif", "username", ":", "return", "self", ".", "__call_api_get", "(", "'users.getPresence'", ",", "username", "=", "username", ",", "kwargs", "=", "kwargs", ")", "else", ":", "raise", "RocketMissingParamException", "(", "'userID or username required'", ")" ]
Gets the online presence of the a user.
[ "Gets", "the", "online", "presence", "of", "the", "a", "user", "." ]
python
train
cloudsigma/cgroupspy
cgroupspy/utils.py
https://github.com/cloudsigma/cgroupspy/blob/e705ac4ccdfe33d8ecc700e9a35a9556084449ca/cgroupspy/utils.py#L41-L47
def walk_up_tree(root): """Post-order depth-first""" for child in root.children: for el in walk_up_tree(child): yield el yield root
[ "def", "walk_up_tree", "(", "root", ")", ":", "for", "child", "in", "root", ".", "children", ":", "for", "el", "in", "walk_up_tree", "(", "child", ")", ":", "yield", "el", "yield", "root" ]
Post-order depth-first
[ "Post", "-", "order", "depth", "-", "first" ]
python
train
cwacek/python-jsonschema-objects
python_jsonschema_objects/classbuilder.py
https://github.com/cwacek/python-jsonschema-objects/blob/54c82bfaec9c099c472663742abfc7de373a5e49/python_jsonschema_objects/classbuilder.py#L293-L327
def missing_property_names(self): """ Returns a list of properties which are required and missing. Properties are excluded from this list if they are allowed to be null. :return: list of missing properties. """ propname = lambda x: self.__prop_names__[x] missing = [] for x in self.__required__: # Allow the null type propinfo = self.propinfo(propname(x)) null_type = False if 'type' in propinfo: type_info = propinfo['type'] null_type = (type_info == 'null' or isinstance(type_info, (list, tuple)) and 'null' in type_info) elif 'oneOf' in propinfo: for o in propinfo['oneOf']: type_info = o.get('type') if type_info and type_info == 'null' \ or isinstance(type_info, (list, tuple)) \ and 'null' in type_info: null_type = True break if (propname(x) not in self._properties and null_type) or \ (self._properties[propname(x)] is None and not null_type): missing.append(x) return missing
[ "def", "missing_property_names", "(", "self", ")", ":", "propname", "=", "lambda", "x", ":", "self", ".", "__prop_names__", "[", "x", "]", "missing", "=", "[", "]", "for", "x", "in", "self", ".", "__required__", ":", "# Allow the null type", "propinfo", "=", "self", ".", "propinfo", "(", "propname", "(", "x", ")", ")", "null_type", "=", "False", "if", "'type'", "in", "propinfo", ":", "type_info", "=", "propinfo", "[", "'type'", "]", "null_type", "=", "(", "type_info", "==", "'null'", "or", "isinstance", "(", "type_info", ",", "(", "list", ",", "tuple", ")", ")", "and", "'null'", "in", "type_info", ")", "elif", "'oneOf'", "in", "propinfo", ":", "for", "o", "in", "propinfo", "[", "'oneOf'", "]", ":", "type_info", "=", "o", ".", "get", "(", "'type'", ")", "if", "type_info", "and", "type_info", "==", "'null'", "or", "isinstance", "(", "type_info", ",", "(", "list", ",", "tuple", ")", ")", "and", "'null'", "in", "type_info", ":", "null_type", "=", "True", "break", "if", "(", "propname", "(", "x", ")", "not", "in", "self", ".", "_properties", "and", "null_type", ")", "or", "(", "self", ".", "_properties", "[", "propname", "(", "x", ")", "]", "is", "None", "and", "not", "null_type", ")", ":", "missing", ".", "append", "(", "x", ")", "return", "missing" ]
Returns a list of properties which are required and missing. Properties are excluded from this list if they are allowed to be null. :return: list of missing properties.
[ "Returns", "a", "list", "of", "properties", "which", "are", "required", "and", "missing", "." ]
python
train
svartalf/python-opus
opus/api/decoder.py
https://github.com/svartalf/python-opus/blob/a3c1d556d2772b5be659ddd08c033ddd4d566b3a/opus/api/decoder.py#L32-L41
def create(fs, channels): """Allocates and initializes a decoder state""" result_code = ctypes.c_int() result = _create(fs, channels, ctypes.byref(result_code)) if result_code.value is not 0: raise OpusError(result_code.value) return result
[ "def", "create", "(", "fs", ",", "channels", ")", ":", "result_code", "=", "ctypes", ".", "c_int", "(", ")", "result", "=", "_create", "(", "fs", ",", "channels", ",", "ctypes", ".", "byref", "(", "result_code", ")", ")", "if", "result_code", ".", "value", "is", "not", "0", ":", "raise", "OpusError", "(", "result_code", ".", "value", ")", "return", "result" ]
Allocates and initializes a decoder state
[ "Allocates", "and", "initializes", "a", "decoder", "state" ]
python
train
shoebot/shoebot
shoebot/grammar/nodebox.py
https://github.com/shoebot/shoebot/blob/d554c1765c1899fa25727c9fc6805d221585562b/shoebot/grammar/nodebox.py#L157-L165
def circle(self, x, y, diameter, draw=True, **kwargs): '''Draw a circle :param x: x-coordinate of the top left corner :param y: y-coordinate of the top left corner :param diameter: Diameter of circle. :param draw: Draw immediately (defaults to True, set to False to inhibit drawing) :return: Path object representing circle ''' return self.ellipse(x, y, diameter, diameter, draw, **kwargs)
[ "def", "circle", "(", "self", ",", "x", ",", "y", ",", "diameter", ",", "draw", "=", "True", ",", "*", "*", "kwargs", ")", ":", "return", "self", ".", "ellipse", "(", "x", ",", "y", ",", "diameter", ",", "diameter", ",", "draw", ",", "*", "*", "kwargs", ")" ]
Draw a circle :param x: x-coordinate of the top left corner :param y: y-coordinate of the top left corner :param diameter: Diameter of circle. :param draw: Draw immediately (defaults to True, set to False to inhibit drawing) :return: Path object representing circle
[ "Draw", "a", "circle", ":", "param", "x", ":", "x", "-", "coordinate", "of", "the", "top", "left", "corner", ":", "param", "y", ":", "y", "-", "coordinate", "of", "the", "top", "left", "corner", ":", "param", "diameter", ":", "Diameter", "of", "circle", ".", ":", "param", "draw", ":", "Draw", "immediately", "(", "defaults", "to", "True", "set", "to", "False", "to", "inhibit", "drawing", ")", ":", "return", ":", "Path", "object", "representing", "circle" ]
python
valid
guaix-ucm/pyemir
emirdrp/processing/wavecal/rectwv_coeff_to_ds9.py
https://github.com/guaix-ucm/pyemir/blob/fef6bbabcb13f80123cafd1800a0f508a3c21702/emirdrp/processing/wavecal/rectwv_coeff_to_ds9.py#L59-L84
def save_four_ds9(rectwv_coeff, debugplot=0): """Save the 4 possible ds9 region files. Parameters ---------- rectwv_coeff : RectWaveCoeff instance Rectification and wavelength calibration coefficients for the particular CSU configuration. debugplot : int Debugging level for messages and plots. For details see 'numina.array.display.pause_debugplot.py'. """ for limits, rectified, suffix in zip( ['frontiers', 'frontiers', 'boundaries', 'boundaries'], [False, True, False, True], ['rawimage', 'rectified', 'rawimage', 'rectified'] ): output = rectwv_coeff_to_ds9(rectwv_coeff=rectwv_coeff, limits=limits, rectified=rectified) filename = 'ds9_' + limits + '_' + suffix + '.reg' if abs(debugplot) >= 10: print('>>> Saving: ', filename) save_ds9(output, filename)
[ "def", "save_four_ds9", "(", "rectwv_coeff", ",", "debugplot", "=", "0", ")", ":", "for", "limits", ",", "rectified", ",", "suffix", "in", "zip", "(", "[", "'frontiers'", ",", "'frontiers'", ",", "'boundaries'", ",", "'boundaries'", "]", ",", "[", "False", ",", "True", ",", "False", ",", "True", "]", ",", "[", "'rawimage'", ",", "'rectified'", ",", "'rawimage'", ",", "'rectified'", "]", ")", ":", "output", "=", "rectwv_coeff_to_ds9", "(", "rectwv_coeff", "=", "rectwv_coeff", ",", "limits", "=", "limits", ",", "rectified", "=", "rectified", ")", "filename", "=", "'ds9_'", "+", "limits", "+", "'_'", "+", "suffix", "+", "'.reg'", "if", "abs", "(", "debugplot", ")", ">=", "10", ":", "print", "(", "'>>> Saving: '", ",", "filename", ")", "save_ds9", "(", "output", ",", "filename", ")" ]
Save the 4 possible ds9 region files. Parameters ---------- rectwv_coeff : RectWaveCoeff instance Rectification and wavelength calibration coefficients for the particular CSU configuration. debugplot : int Debugging level for messages and plots. For details see 'numina.array.display.pause_debugplot.py'.
[ "Save", "the", "4", "possible", "ds9", "region", "files", "." ]
python
train
jtwhite79/pyemu
pyemu/utils/gw_utils.py
https://github.com/jtwhite79/pyemu/blob/c504d8e7a4097cec07655a6318d275739bd8148a/pyemu/utils/gw_utils.py#L1813-L1852
def apply_sfr_obs(): """apply the sfr observation process - pairs with setup_sfr_obs(). requires sfr_obs.config. Writes <sfr_out_file>.processed, where <sfr_out_file> is defined in "sfr_obs.config" Parameters ---------- None Returns ------- df : pd.DataFrame a dataframe of aggregrated sfr segment aquifer and outflow """ assert os.path.exists("sfr_obs.config") df_key = pd.read_csv("sfr_obs.config",index_col=0) assert df_key.iloc[0,0] == "sfr_out_file",df_key.iloc[0,:] sfr_out_file = df_key.iloc[0,1] df_key = df_key.iloc[1:,:] df_key.loc[:, "segment"] = df_key.segment.apply(np.int) df_key.index = df_key.segment seg_group_dict = df_key.groupby(df_key.obs_base).groups sfr_kper = load_sfr_out(sfr_out_file) kpers = list(sfr_kper.keys()) kpers.sort() #results = {o:[] for o in seg_group_dict.keys()} results = [] for kper in kpers: df = sfr_kper[kper] for obs_base,segs in seg_group_dict.items(): agg = df.loc[segs.values,:].sum() # still agg flout where seg groups are passed! #print(obs_base,agg) results.append([kper,obs_base,agg["flaqx"],agg["flout"]]) df = pd.DataFrame(data=results,columns=["kper","obs_base","flaqx","flout"]) df.sort_values(by=["kper","obs_base"],inplace=True) df.to_csv(sfr_out_file+".processed",sep=' ',index=False) return df
[ "def", "apply_sfr_obs", "(", ")", ":", "assert", "os", ".", "path", ".", "exists", "(", "\"sfr_obs.config\"", ")", "df_key", "=", "pd", ".", "read_csv", "(", "\"sfr_obs.config\"", ",", "index_col", "=", "0", ")", "assert", "df_key", ".", "iloc", "[", "0", ",", "0", "]", "==", "\"sfr_out_file\"", ",", "df_key", ".", "iloc", "[", "0", ",", ":", "]", "sfr_out_file", "=", "df_key", ".", "iloc", "[", "0", ",", "1", "]", "df_key", "=", "df_key", ".", "iloc", "[", "1", ":", ",", ":", "]", "df_key", ".", "loc", "[", ":", ",", "\"segment\"", "]", "=", "df_key", ".", "segment", ".", "apply", "(", "np", ".", "int", ")", "df_key", ".", "index", "=", "df_key", ".", "segment", "seg_group_dict", "=", "df_key", ".", "groupby", "(", "df_key", ".", "obs_base", ")", ".", "groups", "sfr_kper", "=", "load_sfr_out", "(", "sfr_out_file", ")", "kpers", "=", "list", "(", "sfr_kper", ".", "keys", "(", ")", ")", "kpers", ".", "sort", "(", ")", "#results = {o:[] for o in seg_group_dict.keys()}", "results", "=", "[", "]", "for", "kper", "in", "kpers", ":", "df", "=", "sfr_kper", "[", "kper", "]", "for", "obs_base", ",", "segs", "in", "seg_group_dict", ".", "items", "(", ")", ":", "agg", "=", "df", ".", "loc", "[", "segs", ".", "values", ",", ":", "]", ".", "sum", "(", ")", "# still agg flout where seg groups are passed!", "#print(obs_base,agg)", "results", ".", "append", "(", "[", "kper", ",", "obs_base", ",", "agg", "[", "\"flaqx\"", "]", ",", "agg", "[", "\"flout\"", "]", "]", ")", "df", "=", "pd", ".", "DataFrame", "(", "data", "=", "results", ",", "columns", "=", "[", "\"kper\"", ",", "\"obs_base\"", ",", "\"flaqx\"", ",", "\"flout\"", "]", ")", "df", ".", "sort_values", "(", "by", "=", "[", "\"kper\"", ",", "\"obs_base\"", "]", ",", "inplace", "=", "True", ")", "df", ".", "to_csv", "(", "sfr_out_file", "+", "\".processed\"", ",", "sep", "=", "' '", ",", "index", "=", "False", ")", "return", "df" ]
apply the sfr observation process - pairs with setup_sfr_obs(). requires sfr_obs.config. Writes <sfr_out_file>.processed, where <sfr_out_file> is defined in "sfr_obs.config" Parameters ---------- None Returns ------- df : pd.DataFrame a dataframe of aggregrated sfr segment aquifer and outflow
[ "apply", "the", "sfr", "observation", "process", "-", "pairs", "with", "setup_sfr_obs", "()", ".", "requires", "sfr_obs", ".", "config", ".", "Writes", "<sfr_out_file", ">", ".", "processed", "where", "<sfr_out_file", ">", "is", "defined", "in", "sfr_obs", ".", "config" ]
python
train
cloud9ers/gurumate
environment/lib/python2.7/site-packages/IPython/core/prefilter.py
https://github.com/cloud9ers/gurumate/blob/075dc74d1ee62a8c6b7a8bf2b271364f01629d1e/environment/lib/python2.7/site-packages/IPython/core/prefilter.py#L174-L178
def register_transformer(self, transformer): """Register a transformer instance.""" if transformer not in self._transformers: self._transformers.append(transformer) self.sort_transformers()
[ "def", "register_transformer", "(", "self", ",", "transformer", ")", ":", "if", "transformer", "not", "in", "self", ".", "_transformers", ":", "self", ".", "_transformers", ".", "append", "(", "transformer", ")", "self", ".", "sort_transformers", "(", ")" ]
Register a transformer instance.
[ "Register", "a", "transformer", "instance", "." ]
python
test
scour-project/scour
scour/scour.py
https://github.com/scour-project/scour/blob/049264eba6b1a54ae5ba1d6a5077d8e7b80e8835/scour/scour.py#L2061-L2555
def cleanPath(element, options): """ Cleans the path string (d attribute) of the element """ global _num_bytes_saved_in_path_data global _num_path_segments_removed # this gets the parser object from svg_regex.py oldPathStr = element.getAttribute('d') path = svg_parser.parse(oldPathStr) style = _getStyle(element) # This determines whether the stroke has round or square linecaps. If it does, we do not want to collapse empty # segments, as they are actually rendered (as circles or squares with diameter/dimension matching the path-width). has_round_or_square_linecaps = ( element.getAttribute('stroke-linecap') in ['round', 'square'] or 'stroke-linecap' in style and style['stroke-linecap'] in ['round', 'square'] ) # This determines whether the stroke has intermediate markers. If it does, we do not want to collapse # straight segments running in the same direction, as markers are rendered on the intermediate nodes. has_intermediate_markers = ( element.hasAttribute('marker') or element.hasAttribute('marker-mid') or 'marker' in style or 'marker-mid' in style ) # The first command must be a moveto, and whether it's relative (m) # or absolute (M), the first set of coordinates *is* absolute. So # the first iteration of the loop below will get x,y and startx,starty. # convert absolute coordinates into relative ones. # Reuse the data structure 'path', since we're not adding or removing subcommands. # Also reuse the coordinate lists since we're not adding or removing any. x = y = 0 for pathIndex in range(len(path)): cmd, data = path[pathIndex] # Changes to cmd don't get through to the data structure i = 0 # adjust abs to rel # only the A command has some values that we don't want to adjust (radii, rotation, flags) if cmd == 'A': for i in range(i, len(data), 7): data[i + 5] -= x data[i + 6] -= y x += data[i + 5] y += data[i + 6] path[pathIndex] = ('a', data) elif cmd == 'a': x += sum(data[5::7]) y += sum(data[6::7]) elif cmd == 'H': for i in range(i, len(data)): data[i] -= x x += data[i] path[pathIndex] = ('h', data) elif cmd == 'h': x += sum(data) elif cmd == 'V': for i in range(i, len(data)): data[i] -= y y += data[i] path[pathIndex] = ('v', data) elif cmd == 'v': y += sum(data) elif cmd == 'M': startx, starty = data[0], data[1] # If this is a path starter, don't convert its first # coordinate to relative; that would just make it (0, 0) if pathIndex != 0: data[0] -= x data[1] -= y x, y = startx, starty i = 2 for i in range(i, len(data), 2): data[i] -= x data[i + 1] -= y x += data[i] y += data[i + 1] path[pathIndex] = ('m', data) elif cmd in ['L', 'T']: for i in range(i, len(data), 2): data[i] -= x data[i + 1] -= y x += data[i] y += data[i + 1] path[pathIndex] = (cmd.lower(), data) elif cmd in ['m']: if pathIndex == 0: # START OF PATH - this is an absolute moveto # followed by relative linetos startx, starty = data[0], data[1] x, y = startx, starty i = 2 else: startx = x + data[0] starty = y + data[1] for i in range(i, len(data), 2): x += data[i] y += data[i + 1] elif cmd in ['l', 't']: x += sum(data[0::2]) y += sum(data[1::2]) elif cmd in ['S', 'Q']: for i in range(i, len(data), 4): data[i] -= x data[i + 1] -= y data[i + 2] -= x data[i + 3] -= y x += data[i + 2] y += data[i + 3] path[pathIndex] = (cmd.lower(), data) elif cmd in ['s', 'q']: x += sum(data[2::4]) y += sum(data[3::4]) elif cmd == 'C': for i in range(i, len(data), 6): data[i] -= x data[i + 1] -= y data[i + 2] -= x data[i + 3] -= y data[i + 4] -= x data[i + 5] -= y x += data[i + 4] y += data[i + 5] path[pathIndex] = ('c', data) elif cmd == 'c': x += sum(data[4::6]) y += sum(data[5::6]) elif cmd in ['z', 'Z']: x, y = startx, starty path[pathIndex] = ('z', data) # remove empty segments and redundant commands # Reuse the data structure 'path' and the coordinate lists, even if we're # deleting items, because these deletions are relatively cheap. if not has_round_or_square_linecaps: # remove empty path segments for pathIndex in range(len(path)): cmd, data = path[pathIndex] i = 0 if cmd in ['m', 'l', 't']: if cmd == 'm': # It might be tempting to rewrite "m0 0 ..." into # "l..." here. However, this is an unsound # optimization in general as "m0 0 ... z" is # different from "l...z". # # To do such a rewrite, we need to understand the # full subpath. This logic happens after this # loop. i = 2 while i < len(data): if data[i] == data[i + 1] == 0: del data[i:i + 2] _num_path_segments_removed += 1 else: i += 2 elif cmd == 'c': while i < len(data): if data[i] == data[i + 1] == data[i + 2] == data[i + 3] == data[i + 4] == data[i + 5] == 0: del data[i:i + 6] _num_path_segments_removed += 1 else: i += 6 elif cmd == 'a': while i < len(data): if data[i + 5] == data[i + 6] == 0: del data[i:i + 7] _num_path_segments_removed += 1 else: i += 7 elif cmd == 'q': while i < len(data): if data[i] == data[i + 1] == data[i + 2] == data[i + 3] == 0: del data[i:i + 4] _num_path_segments_removed += 1 else: i += 4 elif cmd in ['h', 'v']: oldLen = len(data) path[pathIndex] = (cmd, [coord for coord in data if coord != 0]) _num_path_segments_removed += len(path[pathIndex][1]) - oldLen # remove no-op commands pathIndex = len(path) subpath_needs_anchor = False # NB: We can never rewrite the first m/M command (expect if it # is the only command) while pathIndex > 1: pathIndex -= 1 cmd, data = path[pathIndex] if cmd == 'z': next_cmd, next_data = path[pathIndex - 1] if next_cmd == 'm' and len(next_data) == 2: # mX Yz -> mX Y # note the len check on next_data as it is not # safe to rewrite "m0 0 1 1z" in general (it is a # question of where the "pen" ends - you can # continue a draw on the same subpath after a # "z"). del path[pathIndex] _num_path_segments_removed += 1 else: # it is not safe to rewrite "m0 0 ..." to "l..." # because of this "z" command. subpath_needs_anchor = True elif cmd == 'm': if len(path) - 1 == pathIndex and len(data) == 2: # Ends with an empty move (but no line/draw # following it) del path[pathIndex] _num_path_segments_removed += 1 continue if subpath_needs_anchor: subpath_needs_anchor = False elif data[0] == data[1] == 0: # unanchored, i.e. we can replace "m0 0 ..." with # "l..." as there is no "z" after it. path[pathIndex] = ('l', data[2:]) _num_path_segments_removed += 1 # fixup: Delete subcommands having no coordinates. path = [elem for elem in path if len(elem[1]) > 0 or elem[0] == 'z'] # convert straight curves into lines newPath = [path[0]] for (cmd, data) in path[1:]: i = 0 newData = data if cmd == 'c': newData = [] while i < len(data): # since all commands are now relative, we can think of previous point as (0,0) # and new point (dx,dy) is (data[i+4],data[i+5]) # eqn of line will be y = (dy/dx)*x or if dx=0 then eqn of line is x=0 (p1x, p1y) = (data[i], data[i + 1]) (p2x, p2y) = (data[i + 2], data[i + 3]) dx = data[i + 4] dy = data[i + 5] foundStraightCurve = False if dx == 0: if p1x == 0 and p2x == 0: foundStraightCurve = True else: m = dy / dx if p1y == m * p1x and p2y == m * p2x: foundStraightCurve = True if foundStraightCurve: # flush any existing curve coords first if newData: newPath.append((cmd, newData)) newData = [] # now create a straight line segment newPath.append(('l', [dx, dy])) else: newData.extend(data[i:i + 6]) i += 6 if newData or cmd == 'z' or cmd == 'Z': newPath.append((cmd, newData)) path = newPath # collapse all consecutive commands of the same type into one command prevCmd = '' prevData = [] newPath = [] for (cmd, data) in path: if prevCmd == '': # initialize with current path cmd and data prevCmd = cmd prevData = data else: # collapse if # - cmd is not moveto (explicit moveto commands are not drawn) # - the previous and current commands are the same type, # - the previous command is moveto and the current is lineto # (subsequent moveto pairs are treated as implicit lineto commands) if cmd != 'm' and (cmd == prevCmd or (cmd == 'l' and prevCmd == 'm')): prevData.extend(data) # else flush the previous command if it is not the same type as the current command else: newPath.append((prevCmd, prevData)) prevCmd = cmd prevData = data # flush last command and data newPath.append((prevCmd, prevData)) path = newPath # convert to shorthand path segments where possible newPath = [] for (cmd, data) in path: # convert line segments into h,v where possible if cmd == 'l': i = 0 lineTuples = [] while i < len(data): if data[i] == 0: # vertical if lineTuples: # flush the existing line command newPath.append(('l', lineTuples)) lineTuples = [] # append the v and then the remaining line coords newPath.append(('v', [data[i + 1]])) _num_path_segments_removed += 1 elif data[i + 1] == 0: if lineTuples: # flush the line command, then append the h and then the remaining line coords newPath.append(('l', lineTuples)) lineTuples = [] newPath.append(('h', [data[i]])) _num_path_segments_removed += 1 else: lineTuples.extend(data[i:i + 2]) i += 2 if lineTuples: newPath.append(('l', lineTuples)) # also handle implied relative linetos elif cmd == 'm': i = 2 lineTuples = [data[0], data[1]] while i < len(data): if data[i] == 0: # vertical if lineTuples: # flush the existing m/l command newPath.append((cmd, lineTuples)) lineTuples = [] cmd = 'l' # dealing with linetos now # append the v and then the remaining line coords newPath.append(('v', [data[i + 1]])) _num_path_segments_removed += 1 elif data[i + 1] == 0: if lineTuples: # flush the m/l command, then append the h and then the remaining line coords newPath.append((cmd, lineTuples)) lineTuples = [] cmd = 'l' # dealing with linetos now newPath.append(('h', [data[i]])) _num_path_segments_removed += 1 else: lineTuples.extend(data[i:i + 2]) i += 2 if lineTuples: newPath.append((cmd, lineTuples)) # convert Bézier curve segments into s where possible elif cmd == 'c': # set up the assumed bezier control point as the current point, # i.e. (0,0) since we're using relative coords bez_ctl_pt = (0, 0) # however if the previous command was 's' # the assumed control point is a reflection of the previous control point at the current point if len(newPath): (prevCmd, prevData) = newPath[-1] if prevCmd == 's': bez_ctl_pt = (prevData[-2] - prevData[-4], prevData[-1] - prevData[-3]) i = 0 curveTuples = [] while i < len(data): # rotate by 180deg means negate both coordinates # if the previous control point is equal then we can substitute a # shorthand bezier command if bez_ctl_pt[0] == data[i] and bez_ctl_pt[1] == data[i + 1]: if curveTuples: newPath.append(('c', curveTuples)) curveTuples = [] # append the s command newPath.append(('s', [data[i + 2], data[i + 3], data[i + 4], data[i + 5]])) _num_path_segments_removed += 1 else: j = 0 while j <= 5: curveTuples.append(data[i + j]) j += 1 # set up control point for next curve segment bez_ctl_pt = (data[i + 4] - data[i + 2], data[i + 5] - data[i + 3]) i += 6 if curveTuples: newPath.append(('c', curveTuples)) # convert quadratic curve segments into t where possible elif cmd == 'q': quad_ctl_pt = (0, 0) i = 0 curveTuples = [] while i < len(data): if quad_ctl_pt[0] == data[i] and quad_ctl_pt[1] == data[i + 1]: if curveTuples: newPath.append(('q', curveTuples)) curveTuples = [] # append the t command newPath.append(('t', [data[i + 2], data[i + 3]])) _num_path_segments_removed += 1 else: j = 0 while j <= 3: curveTuples.append(data[i + j]) j += 1 quad_ctl_pt = (data[i + 2] - data[i], data[i + 3] - data[i + 1]) i += 4 if curveTuples: newPath.append(('q', curveTuples)) else: newPath.append((cmd, data)) path = newPath # For each m, l, h or v, collapse unnecessary coordinates that run in the same direction # i.e. "h-100-100" becomes "h-200" but "h300-100" does not change. # If the path has intermediate markers we have to preserve intermediate nodes, though. # Reuse the data structure 'path', since we're not adding or removing subcommands. # Also reuse the coordinate lists, even if we're deleting items, because these # deletions are relatively cheap. if not has_intermediate_markers: for pathIndex in range(len(path)): cmd, data = path[pathIndex] # h / v expects only one parameter and we start drawing with the first (so we need at least 2) if cmd in ['h', 'v'] and len(data) >= 2: coordIndex = 0 while coordIndex+1 < len(data): if is_same_sign(data[coordIndex], data[coordIndex+1]): data[coordIndex] += data[coordIndex+1] del data[coordIndex+1] _num_path_segments_removed += 1 else: coordIndex += 1 # l expects two parameters and we start drawing with the first (so we need at least 4) elif cmd == 'l' and len(data) >= 4: coordIndex = 0 while coordIndex+2 < len(data): if is_same_direction(*data[coordIndex:coordIndex+4]): data[coordIndex] += data[coordIndex+2] data[coordIndex+1] += data[coordIndex+3] del data[coordIndex+2] # delete the next two elements del data[coordIndex+2] _num_path_segments_removed += 1 else: coordIndex += 2 # m expects two parameters but we have to skip the first pair as it's not drawn (so we need at least 6) elif cmd == 'm' and len(data) >= 6: coordIndex = 2 while coordIndex+2 < len(data): if is_same_direction(*data[coordIndex:coordIndex+4]): data[coordIndex] += data[coordIndex+2] data[coordIndex+1] += data[coordIndex+3] del data[coordIndex+2] # delete the next two elements del data[coordIndex+2] _num_path_segments_removed += 1 else: coordIndex += 2 # it is possible that we have consecutive h, v, c, t commands now # so again collapse all consecutive commands of the same type into one command prevCmd = '' prevData = [] newPath = [path[0]] for (cmd, data) in path[1:]: # flush the previous command if it is not the same type as the current command if prevCmd != '': if cmd != prevCmd or cmd == 'm': newPath.append((prevCmd, prevData)) prevCmd = '' prevData = [] # if the previous and current commands are the same type, collapse if cmd == prevCmd and cmd != 'm': prevData.extend(data) # save last command and data else: prevCmd = cmd prevData = data # flush last command and data if prevCmd != '': newPath.append((prevCmd, prevData)) path = newPath newPathStr = serializePath(path, options) # if for whatever reason we actually made the path longer don't use it # TODO: maybe we could compare path lengths after each optimization step and use the shortest if len(newPathStr) <= len(oldPathStr): _num_bytes_saved_in_path_data += (len(oldPathStr) - len(newPathStr)) element.setAttribute('d', newPathStr)
[ "def", "cleanPath", "(", "element", ",", "options", ")", ":", "global", "_num_bytes_saved_in_path_data", "global", "_num_path_segments_removed", "# this gets the parser object from svg_regex.py", "oldPathStr", "=", "element", ".", "getAttribute", "(", "'d'", ")", "path", "=", "svg_parser", ".", "parse", "(", "oldPathStr", ")", "style", "=", "_getStyle", "(", "element", ")", "# This determines whether the stroke has round or square linecaps. If it does, we do not want to collapse empty", "# segments, as they are actually rendered (as circles or squares with diameter/dimension matching the path-width).", "has_round_or_square_linecaps", "=", "(", "element", ".", "getAttribute", "(", "'stroke-linecap'", ")", "in", "[", "'round'", ",", "'square'", "]", "or", "'stroke-linecap'", "in", "style", "and", "style", "[", "'stroke-linecap'", "]", "in", "[", "'round'", ",", "'square'", "]", ")", "# This determines whether the stroke has intermediate markers. If it does, we do not want to collapse", "# straight segments running in the same direction, as markers are rendered on the intermediate nodes.", "has_intermediate_markers", "=", "(", "element", ".", "hasAttribute", "(", "'marker'", ")", "or", "element", ".", "hasAttribute", "(", "'marker-mid'", ")", "or", "'marker'", "in", "style", "or", "'marker-mid'", "in", "style", ")", "# The first command must be a moveto, and whether it's relative (m)", "# or absolute (M), the first set of coordinates *is* absolute. So", "# the first iteration of the loop below will get x,y and startx,starty.", "# convert absolute coordinates into relative ones.", "# Reuse the data structure 'path', since we're not adding or removing subcommands.", "# Also reuse the coordinate lists since we're not adding or removing any.", "x", "=", "y", "=", "0", "for", "pathIndex", "in", "range", "(", "len", "(", "path", ")", ")", ":", "cmd", ",", "data", "=", "path", "[", "pathIndex", "]", "# Changes to cmd don't get through to the data structure", "i", "=", "0", "# adjust abs to rel", "# only the A command has some values that we don't want to adjust (radii, rotation, flags)", "if", "cmd", "==", "'A'", ":", "for", "i", "in", "range", "(", "i", ",", "len", "(", "data", ")", ",", "7", ")", ":", "data", "[", "i", "+", "5", "]", "-=", "x", "data", "[", "i", "+", "6", "]", "-=", "y", "x", "+=", "data", "[", "i", "+", "5", "]", "y", "+=", "data", "[", "i", "+", "6", "]", "path", "[", "pathIndex", "]", "=", "(", "'a'", ",", "data", ")", "elif", "cmd", "==", "'a'", ":", "x", "+=", "sum", "(", "data", "[", "5", ":", ":", "7", "]", ")", "y", "+=", "sum", "(", "data", "[", "6", ":", ":", "7", "]", ")", "elif", "cmd", "==", "'H'", ":", "for", "i", "in", "range", "(", "i", ",", "len", "(", "data", ")", ")", ":", "data", "[", "i", "]", "-=", "x", "x", "+=", "data", "[", "i", "]", "path", "[", "pathIndex", "]", "=", "(", "'h'", ",", "data", ")", "elif", "cmd", "==", "'h'", ":", "x", "+=", "sum", "(", "data", ")", "elif", "cmd", "==", "'V'", ":", "for", "i", "in", "range", "(", "i", ",", "len", "(", "data", ")", ")", ":", "data", "[", "i", "]", "-=", "y", "y", "+=", "data", "[", "i", "]", "path", "[", "pathIndex", "]", "=", "(", "'v'", ",", "data", ")", "elif", "cmd", "==", "'v'", ":", "y", "+=", "sum", "(", "data", ")", "elif", "cmd", "==", "'M'", ":", "startx", ",", "starty", "=", "data", "[", "0", "]", ",", "data", "[", "1", "]", "# If this is a path starter, don't convert its first", "# coordinate to relative; that would just make it (0, 0)", "if", "pathIndex", "!=", "0", ":", "data", "[", "0", "]", "-=", "x", "data", "[", "1", "]", "-=", "y", "x", ",", "y", "=", "startx", ",", "starty", "i", "=", "2", "for", "i", "in", "range", "(", "i", ",", "len", "(", "data", ")", ",", "2", ")", ":", "data", "[", "i", "]", "-=", "x", "data", "[", "i", "+", "1", "]", "-=", "y", "x", "+=", "data", "[", "i", "]", "y", "+=", "data", "[", "i", "+", "1", "]", "path", "[", "pathIndex", "]", "=", "(", "'m'", ",", "data", ")", "elif", "cmd", "in", "[", "'L'", ",", "'T'", "]", ":", "for", "i", "in", "range", "(", "i", ",", "len", "(", "data", ")", ",", "2", ")", ":", "data", "[", "i", "]", "-=", "x", "data", "[", "i", "+", "1", "]", "-=", "y", "x", "+=", "data", "[", "i", "]", "y", "+=", "data", "[", "i", "+", "1", "]", "path", "[", "pathIndex", "]", "=", "(", "cmd", ".", "lower", "(", ")", ",", "data", ")", "elif", "cmd", "in", "[", "'m'", "]", ":", "if", "pathIndex", "==", "0", ":", "# START OF PATH - this is an absolute moveto", "# followed by relative linetos", "startx", ",", "starty", "=", "data", "[", "0", "]", ",", "data", "[", "1", "]", "x", ",", "y", "=", "startx", ",", "starty", "i", "=", "2", "else", ":", "startx", "=", "x", "+", "data", "[", "0", "]", "starty", "=", "y", "+", "data", "[", "1", "]", "for", "i", "in", "range", "(", "i", ",", "len", "(", "data", ")", ",", "2", ")", ":", "x", "+=", "data", "[", "i", "]", "y", "+=", "data", "[", "i", "+", "1", "]", "elif", "cmd", "in", "[", "'l'", ",", "'t'", "]", ":", "x", "+=", "sum", "(", "data", "[", "0", ":", ":", "2", "]", ")", "y", "+=", "sum", "(", "data", "[", "1", ":", ":", "2", "]", ")", "elif", "cmd", "in", "[", "'S'", ",", "'Q'", "]", ":", "for", "i", "in", "range", "(", "i", ",", "len", "(", "data", ")", ",", "4", ")", ":", "data", "[", "i", "]", "-=", "x", "data", "[", "i", "+", "1", "]", "-=", "y", "data", "[", "i", "+", "2", "]", "-=", "x", "data", "[", "i", "+", "3", "]", "-=", "y", "x", "+=", "data", "[", "i", "+", "2", "]", "y", "+=", "data", "[", "i", "+", "3", "]", "path", "[", "pathIndex", "]", "=", "(", "cmd", ".", "lower", "(", ")", ",", "data", ")", "elif", "cmd", "in", "[", "'s'", ",", "'q'", "]", ":", "x", "+=", "sum", "(", "data", "[", "2", ":", ":", "4", "]", ")", "y", "+=", "sum", "(", "data", "[", "3", ":", ":", "4", "]", ")", "elif", "cmd", "==", "'C'", ":", "for", "i", "in", "range", "(", "i", ",", "len", "(", "data", ")", ",", "6", ")", ":", "data", "[", "i", "]", "-=", "x", "data", "[", "i", "+", "1", "]", "-=", "y", "data", "[", "i", "+", "2", "]", "-=", "x", "data", "[", "i", "+", "3", "]", "-=", "y", "data", "[", "i", "+", "4", "]", "-=", "x", "data", "[", "i", "+", "5", "]", "-=", "y", "x", "+=", "data", "[", "i", "+", "4", "]", "y", "+=", "data", "[", "i", "+", "5", "]", "path", "[", "pathIndex", "]", "=", "(", "'c'", ",", "data", ")", "elif", "cmd", "==", "'c'", ":", "x", "+=", "sum", "(", "data", "[", "4", ":", ":", "6", "]", ")", "y", "+=", "sum", "(", "data", "[", "5", ":", ":", "6", "]", ")", "elif", "cmd", "in", "[", "'z'", ",", "'Z'", "]", ":", "x", ",", "y", "=", "startx", ",", "starty", "path", "[", "pathIndex", "]", "=", "(", "'z'", ",", "data", ")", "# remove empty segments and redundant commands", "# Reuse the data structure 'path' and the coordinate lists, even if we're", "# deleting items, because these deletions are relatively cheap.", "if", "not", "has_round_or_square_linecaps", ":", "# remove empty path segments", "for", "pathIndex", "in", "range", "(", "len", "(", "path", ")", ")", ":", "cmd", ",", "data", "=", "path", "[", "pathIndex", "]", "i", "=", "0", "if", "cmd", "in", "[", "'m'", ",", "'l'", ",", "'t'", "]", ":", "if", "cmd", "==", "'m'", ":", "# It might be tempting to rewrite \"m0 0 ...\" into", "# \"l...\" here. However, this is an unsound", "# optimization in general as \"m0 0 ... z\" is", "# different from \"l...z\".", "#", "# To do such a rewrite, we need to understand the", "# full subpath. This logic happens after this", "# loop.", "i", "=", "2", "while", "i", "<", "len", "(", "data", ")", ":", "if", "data", "[", "i", "]", "==", "data", "[", "i", "+", "1", "]", "==", "0", ":", "del", "data", "[", "i", ":", "i", "+", "2", "]", "_num_path_segments_removed", "+=", "1", "else", ":", "i", "+=", "2", "elif", "cmd", "==", "'c'", ":", "while", "i", "<", "len", "(", "data", ")", ":", "if", "data", "[", "i", "]", "==", "data", "[", "i", "+", "1", "]", "==", "data", "[", "i", "+", "2", "]", "==", "data", "[", "i", "+", "3", "]", "==", "data", "[", "i", "+", "4", "]", "==", "data", "[", "i", "+", "5", "]", "==", "0", ":", "del", "data", "[", "i", ":", "i", "+", "6", "]", "_num_path_segments_removed", "+=", "1", "else", ":", "i", "+=", "6", "elif", "cmd", "==", "'a'", ":", "while", "i", "<", "len", "(", "data", ")", ":", "if", "data", "[", "i", "+", "5", "]", "==", "data", "[", "i", "+", "6", "]", "==", "0", ":", "del", "data", "[", "i", ":", "i", "+", "7", "]", "_num_path_segments_removed", "+=", "1", "else", ":", "i", "+=", "7", "elif", "cmd", "==", "'q'", ":", "while", "i", "<", "len", "(", "data", ")", ":", "if", "data", "[", "i", "]", "==", "data", "[", "i", "+", "1", "]", "==", "data", "[", "i", "+", "2", "]", "==", "data", "[", "i", "+", "3", "]", "==", "0", ":", "del", "data", "[", "i", ":", "i", "+", "4", "]", "_num_path_segments_removed", "+=", "1", "else", ":", "i", "+=", "4", "elif", "cmd", "in", "[", "'h'", ",", "'v'", "]", ":", "oldLen", "=", "len", "(", "data", ")", "path", "[", "pathIndex", "]", "=", "(", "cmd", ",", "[", "coord", "for", "coord", "in", "data", "if", "coord", "!=", "0", "]", ")", "_num_path_segments_removed", "+=", "len", "(", "path", "[", "pathIndex", "]", "[", "1", "]", ")", "-", "oldLen", "# remove no-op commands", "pathIndex", "=", "len", "(", "path", ")", "subpath_needs_anchor", "=", "False", "# NB: We can never rewrite the first m/M command (expect if it", "# is the only command)", "while", "pathIndex", ">", "1", ":", "pathIndex", "-=", "1", "cmd", ",", "data", "=", "path", "[", "pathIndex", "]", "if", "cmd", "==", "'z'", ":", "next_cmd", ",", "next_data", "=", "path", "[", "pathIndex", "-", "1", "]", "if", "next_cmd", "==", "'m'", "and", "len", "(", "next_data", ")", "==", "2", ":", "# mX Yz -> mX Y", "# note the len check on next_data as it is not", "# safe to rewrite \"m0 0 1 1z\" in general (it is a", "# question of where the \"pen\" ends - you can", "# continue a draw on the same subpath after a", "# \"z\").", "del", "path", "[", "pathIndex", "]", "_num_path_segments_removed", "+=", "1", "else", ":", "# it is not safe to rewrite \"m0 0 ...\" to \"l...\"", "# because of this \"z\" command.", "subpath_needs_anchor", "=", "True", "elif", "cmd", "==", "'m'", ":", "if", "len", "(", "path", ")", "-", "1", "==", "pathIndex", "and", "len", "(", "data", ")", "==", "2", ":", "# Ends with an empty move (but no line/draw", "# following it)", "del", "path", "[", "pathIndex", "]", "_num_path_segments_removed", "+=", "1", "continue", "if", "subpath_needs_anchor", ":", "subpath_needs_anchor", "=", "False", "elif", "data", "[", "0", "]", "==", "data", "[", "1", "]", "==", "0", ":", "# unanchored, i.e. we can replace \"m0 0 ...\" with", "# \"l...\" as there is no \"z\" after it.", "path", "[", "pathIndex", "]", "=", "(", "'l'", ",", "data", "[", "2", ":", "]", ")", "_num_path_segments_removed", "+=", "1", "# fixup: Delete subcommands having no coordinates.", "path", "=", "[", "elem", "for", "elem", "in", "path", "if", "len", "(", "elem", "[", "1", "]", ")", ">", "0", "or", "elem", "[", "0", "]", "==", "'z'", "]", "# convert straight curves into lines", "newPath", "=", "[", "path", "[", "0", "]", "]", "for", "(", "cmd", ",", "data", ")", "in", "path", "[", "1", ":", "]", ":", "i", "=", "0", "newData", "=", "data", "if", "cmd", "==", "'c'", ":", "newData", "=", "[", "]", "while", "i", "<", "len", "(", "data", ")", ":", "# since all commands are now relative, we can think of previous point as (0,0)", "# and new point (dx,dy) is (data[i+4],data[i+5])", "# eqn of line will be y = (dy/dx)*x or if dx=0 then eqn of line is x=0", "(", "p1x", ",", "p1y", ")", "=", "(", "data", "[", "i", "]", ",", "data", "[", "i", "+", "1", "]", ")", "(", "p2x", ",", "p2y", ")", "=", "(", "data", "[", "i", "+", "2", "]", ",", "data", "[", "i", "+", "3", "]", ")", "dx", "=", "data", "[", "i", "+", "4", "]", "dy", "=", "data", "[", "i", "+", "5", "]", "foundStraightCurve", "=", "False", "if", "dx", "==", "0", ":", "if", "p1x", "==", "0", "and", "p2x", "==", "0", ":", "foundStraightCurve", "=", "True", "else", ":", "m", "=", "dy", "/", "dx", "if", "p1y", "==", "m", "*", "p1x", "and", "p2y", "==", "m", "*", "p2x", ":", "foundStraightCurve", "=", "True", "if", "foundStraightCurve", ":", "# flush any existing curve coords first", "if", "newData", ":", "newPath", ".", "append", "(", "(", "cmd", ",", "newData", ")", ")", "newData", "=", "[", "]", "# now create a straight line segment", "newPath", ".", "append", "(", "(", "'l'", ",", "[", "dx", ",", "dy", "]", ")", ")", "else", ":", "newData", ".", "extend", "(", "data", "[", "i", ":", "i", "+", "6", "]", ")", "i", "+=", "6", "if", "newData", "or", "cmd", "==", "'z'", "or", "cmd", "==", "'Z'", ":", "newPath", ".", "append", "(", "(", "cmd", ",", "newData", ")", ")", "path", "=", "newPath", "# collapse all consecutive commands of the same type into one command", "prevCmd", "=", "''", "prevData", "=", "[", "]", "newPath", "=", "[", "]", "for", "(", "cmd", ",", "data", ")", "in", "path", ":", "if", "prevCmd", "==", "''", ":", "# initialize with current path cmd and data", "prevCmd", "=", "cmd", "prevData", "=", "data", "else", ":", "# collapse if", "# - cmd is not moveto (explicit moveto commands are not drawn)", "# - the previous and current commands are the same type,", "# - the previous command is moveto and the current is lineto", "# (subsequent moveto pairs are treated as implicit lineto commands)", "if", "cmd", "!=", "'m'", "and", "(", "cmd", "==", "prevCmd", "or", "(", "cmd", "==", "'l'", "and", "prevCmd", "==", "'m'", ")", ")", ":", "prevData", ".", "extend", "(", "data", ")", "# else flush the previous command if it is not the same type as the current command", "else", ":", "newPath", ".", "append", "(", "(", "prevCmd", ",", "prevData", ")", ")", "prevCmd", "=", "cmd", "prevData", "=", "data", "# flush last command and data", "newPath", ".", "append", "(", "(", "prevCmd", ",", "prevData", ")", ")", "path", "=", "newPath", "# convert to shorthand path segments where possible", "newPath", "=", "[", "]", "for", "(", "cmd", ",", "data", ")", "in", "path", ":", "# convert line segments into h,v where possible", "if", "cmd", "==", "'l'", ":", "i", "=", "0", "lineTuples", "=", "[", "]", "while", "i", "<", "len", "(", "data", ")", ":", "if", "data", "[", "i", "]", "==", "0", ":", "# vertical", "if", "lineTuples", ":", "# flush the existing line command", "newPath", ".", "append", "(", "(", "'l'", ",", "lineTuples", ")", ")", "lineTuples", "=", "[", "]", "# append the v and then the remaining line coords", "newPath", ".", "append", "(", "(", "'v'", ",", "[", "data", "[", "i", "+", "1", "]", "]", ")", ")", "_num_path_segments_removed", "+=", "1", "elif", "data", "[", "i", "+", "1", "]", "==", "0", ":", "if", "lineTuples", ":", "# flush the line command, then append the h and then the remaining line coords", "newPath", ".", "append", "(", "(", "'l'", ",", "lineTuples", ")", ")", "lineTuples", "=", "[", "]", "newPath", ".", "append", "(", "(", "'h'", ",", "[", "data", "[", "i", "]", "]", ")", ")", "_num_path_segments_removed", "+=", "1", "else", ":", "lineTuples", ".", "extend", "(", "data", "[", "i", ":", "i", "+", "2", "]", ")", "i", "+=", "2", "if", "lineTuples", ":", "newPath", ".", "append", "(", "(", "'l'", ",", "lineTuples", ")", ")", "# also handle implied relative linetos", "elif", "cmd", "==", "'m'", ":", "i", "=", "2", "lineTuples", "=", "[", "data", "[", "0", "]", ",", "data", "[", "1", "]", "]", "while", "i", "<", "len", "(", "data", ")", ":", "if", "data", "[", "i", "]", "==", "0", ":", "# vertical", "if", "lineTuples", ":", "# flush the existing m/l command", "newPath", ".", "append", "(", "(", "cmd", ",", "lineTuples", ")", ")", "lineTuples", "=", "[", "]", "cmd", "=", "'l'", "# dealing with linetos now", "# append the v and then the remaining line coords", "newPath", ".", "append", "(", "(", "'v'", ",", "[", "data", "[", "i", "+", "1", "]", "]", ")", ")", "_num_path_segments_removed", "+=", "1", "elif", "data", "[", "i", "+", "1", "]", "==", "0", ":", "if", "lineTuples", ":", "# flush the m/l command, then append the h and then the remaining line coords", "newPath", ".", "append", "(", "(", "cmd", ",", "lineTuples", ")", ")", "lineTuples", "=", "[", "]", "cmd", "=", "'l'", "# dealing with linetos now", "newPath", ".", "append", "(", "(", "'h'", ",", "[", "data", "[", "i", "]", "]", ")", ")", "_num_path_segments_removed", "+=", "1", "else", ":", "lineTuples", ".", "extend", "(", "data", "[", "i", ":", "i", "+", "2", "]", ")", "i", "+=", "2", "if", "lineTuples", ":", "newPath", ".", "append", "(", "(", "cmd", ",", "lineTuples", ")", ")", "# convert Bézier curve segments into s where possible", "elif", "cmd", "==", "'c'", ":", "# set up the assumed bezier control point as the current point,", "# i.e. (0,0) since we're using relative coords", "bez_ctl_pt", "=", "(", "0", ",", "0", ")", "# however if the previous command was 's'", "# the assumed control point is a reflection of the previous control point at the current point", "if", "len", "(", "newPath", ")", ":", "(", "prevCmd", ",", "prevData", ")", "=", "newPath", "[", "-", "1", "]", "if", "prevCmd", "==", "'s'", ":", "bez_ctl_pt", "=", "(", "prevData", "[", "-", "2", "]", "-", "prevData", "[", "-", "4", "]", ",", "prevData", "[", "-", "1", "]", "-", "prevData", "[", "-", "3", "]", ")", "i", "=", "0", "curveTuples", "=", "[", "]", "while", "i", "<", "len", "(", "data", ")", ":", "# rotate by 180deg means negate both coordinates", "# if the previous control point is equal then we can substitute a", "# shorthand bezier command", "if", "bez_ctl_pt", "[", "0", "]", "==", "data", "[", "i", "]", "and", "bez_ctl_pt", "[", "1", "]", "==", "data", "[", "i", "+", "1", "]", ":", "if", "curveTuples", ":", "newPath", ".", "append", "(", "(", "'c'", ",", "curveTuples", ")", ")", "curveTuples", "=", "[", "]", "# append the s command", "newPath", ".", "append", "(", "(", "'s'", ",", "[", "data", "[", "i", "+", "2", "]", ",", "data", "[", "i", "+", "3", "]", ",", "data", "[", "i", "+", "4", "]", ",", "data", "[", "i", "+", "5", "]", "]", ")", ")", "_num_path_segments_removed", "+=", "1", "else", ":", "j", "=", "0", "while", "j", "<=", "5", ":", "curveTuples", ".", "append", "(", "data", "[", "i", "+", "j", "]", ")", "j", "+=", "1", "# set up control point for next curve segment", "bez_ctl_pt", "=", "(", "data", "[", "i", "+", "4", "]", "-", "data", "[", "i", "+", "2", "]", ",", "data", "[", "i", "+", "5", "]", "-", "data", "[", "i", "+", "3", "]", ")", "i", "+=", "6", "if", "curveTuples", ":", "newPath", ".", "append", "(", "(", "'c'", ",", "curveTuples", ")", ")", "# convert quadratic curve segments into t where possible", "elif", "cmd", "==", "'q'", ":", "quad_ctl_pt", "=", "(", "0", ",", "0", ")", "i", "=", "0", "curveTuples", "=", "[", "]", "while", "i", "<", "len", "(", "data", ")", ":", "if", "quad_ctl_pt", "[", "0", "]", "==", "data", "[", "i", "]", "and", "quad_ctl_pt", "[", "1", "]", "==", "data", "[", "i", "+", "1", "]", ":", "if", "curveTuples", ":", "newPath", ".", "append", "(", "(", "'q'", ",", "curveTuples", ")", ")", "curveTuples", "=", "[", "]", "# append the t command", "newPath", ".", "append", "(", "(", "'t'", ",", "[", "data", "[", "i", "+", "2", "]", ",", "data", "[", "i", "+", "3", "]", "]", ")", ")", "_num_path_segments_removed", "+=", "1", "else", ":", "j", "=", "0", "while", "j", "<=", "3", ":", "curveTuples", ".", "append", "(", "data", "[", "i", "+", "j", "]", ")", "j", "+=", "1", "quad_ctl_pt", "=", "(", "data", "[", "i", "+", "2", "]", "-", "data", "[", "i", "]", ",", "data", "[", "i", "+", "3", "]", "-", "data", "[", "i", "+", "1", "]", ")", "i", "+=", "4", "if", "curveTuples", ":", "newPath", ".", "append", "(", "(", "'q'", ",", "curveTuples", ")", ")", "else", ":", "newPath", ".", "append", "(", "(", "cmd", ",", "data", ")", ")", "path", "=", "newPath", "# For each m, l, h or v, collapse unnecessary coordinates that run in the same direction", "# i.e. \"h-100-100\" becomes \"h-200\" but \"h300-100\" does not change.", "# If the path has intermediate markers we have to preserve intermediate nodes, though.", "# Reuse the data structure 'path', since we're not adding or removing subcommands.", "# Also reuse the coordinate lists, even if we're deleting items, because these", "# deletions are relatively cheap.", "if", "not", "has_intermediate_markers", ":", "for", "pathIndex", "in", "range", "(", "len", "(", "path", ")", ")", ":", "cmd", ",", "data", "=", "path", "[", "pathIndex", "]", "# h / v expects only one parameter and we start drawing with the first (so we need at least 2)", "if", "cmd", "in", "[", "'h'", ",", "'v'", "]", "and", "len", "(", "data", ")", ">=", "2", ":", "coordIndex", "=", "0", "while", "coordIndex", "+", "1", "<", "len", "(", "data", ")", ":", "if", "is_same_sign", "(", "data", "[", "coordIndex", "]", ",", "data", "[", "coordIndex", "+", "1", "]", ")", ":", "data", "[", "coordIndex", "]", "+=", "data", "[", "coordIndex", "+", "1", "]", "del", "data", "[", "coordIndex", "+", "1", "]", "_num_path_segments_removed", "+=", "1", "else", ":", "coordIndex", "+=", "1", "# l expects two parameters and we start drawing with the first (so we need at least 4)", "elif", "cmd", "==", "'l'", "and", "len", "(", "data", ")", ">=", "4", ":", "coordIndex", "=", "0", "while", "coordIndex", "+", "2", "<", "len", "(", "data", ")", ":", "if", "is_same_direction", "(", "*", "data", "[", "coordIndex", ":", "coordIndex", "+", "4", "]", ")", ":", "data", "[", "coordIndex", "]", "+=", "data", "[", "coordIndex", "+", "2", "]", "data", "[", "coordIndex", "+", "1", "]", "+=", "data", "[", "coordIndex", "+", "3", "]", "del", "data", "[", "coordIndex", "+", "2", "]", "# delete the next two elements", "del", "data", "[", "coordIndex", "+", "2", "]", "_num_path_segments_removed", "+=", "1", "else", ":", "coordIndex", "+=", "2", "# m expects two parameters but we have to skip the first pair as it's not drawn (so we need at least 6)", "elif", "cmd", "==", "'m'", "and", "len", "(", "data", ")", ">=", "6", ":", "coordIndex", "=", "2", "while", "coordIndex", "+", "2", "<", "len", "(", "data", ")", ":", "if", "is_same_direction", "(", "*", "data", "[", "coordIndex", ":", "coordIndex", "+", "4", "]", ")", ":", "data", "[", "coordIndex", "]", "+=", "data", "[", "coordIndex", "+", "2", "]", "data", "[", "coordIndex", "+", "1", "]", "+=", "data", "[", "coordIndex", "+", "3", "]", "del", "data", "[", "coordIndex", "+", "2", "]", "# delete the next two elements", "del", "data", "[", "coordIndex", "+", "2", "]", "_num_path_segments_removed", "+=", "1", "else", ":", "coordIndex", "+=", "2", "# it is possible that we have consecutive h, v, c, t commands now", "# so again collapse all consecutive commands of the same type into one command", "prevCmd", "=", "''", "prevData", "=", "[", "]", "newPath", "=", "[", "path", "[", "0", "]", "]", "for", "(", "cmd", ",", "data", ")", "in", "path", "[", "1", ":", "]", ":", "# flush the previous command if it is not the same type as the current command", "if", "prevCmd", "!=", "''", ":", "if", "cmd", "!=", "prevCmd", "or", "cmd", "==", "'m'", ":", "newPath", ".", "append", "(", "(", "prevCmd", ",", "prevData", ")", ")", "prevCmd", "=", "''", "prevData", "=", "[", "]", "# if the previous and current commands are the same type, collapse", "if", "cmd", "==", "prevCmd", "and", "cmd", "!=", "'m'", ":", "prevData", ".", "extend", "(", "data", ")", "# save last command and data", "else", ":", "prevCmd", "=", "cmd", "prevData", "=", "data", "# flush last command and data", "if", "prevCmd", "!=", "''", ":", "newPath", ".", "append", "(", "(", "prevCmd", ",", "prevData", ")", ")", "path", "=", "newPath", "newPathStr", "=", "serializePath", "(", "path", ",", "options", ")", "# if for whatever reason we actually made the path longer don't use it", "# TODO: maybe we could compare path lengths after each optimization step and use the shortest", "if", "len", "(", "newPathStr", ")", "<=", "len", "(", "oldPathStr", ")", ":", "_num_bytes_saved_in_path_data", "+=", "(", "len", "(", "oldPathStr", ")", "-", "len", "(", "newPathStr", ")", ")", "element", ".", "setAttribute", "(", "'d'", ",", "newPathStr", ")" ]
Cleans the path string (d attribute) of the element
[ "Cleans", "the", "path", "string", "(", "d", "attribute", ")", "of", "the", "element" ]
python
train
ryanleland/Akispy
akispy/__init__.py
https://github.com/ryanleland/Akispy/blob/dbbb85a1d1b027051e11179289cc9067cb90baf6/akispy/__init__.py#L29-L44
def verify_key(self, url): """For verifying your API key. Provide the URL of your site or blog you will be checking spam from. """ response = self._request('verify-key', { 'blog': url, 'key': self._key }) if response.status is 200: # Read response (trimmed of whitespace) return response.read().strip() == "valid" return False
[ "def", "verify_key", "(", "self", ",", "url", ")", ":", "response", "=", "self", ".", "_request", "(", "'verify-key'", ",", "{", "'blog'", ":", "url", ",", "'key'", ":", "self", ".", "_key", "}", ")", "if", "response", ".", "status", "is", "200", ":", "# Read response (trimmed of whitespace)", "return", "response", ".", "read", "(", ")", ".", "strip", "(", ")", "==", "\"valid\"", "return", "False" ]
For verifying your API key. Provide the URL of your site or blog you will be checking spam from.
[ "For", "verifying", "your", "API", "key", ".", "Provide", "the", "URL", "of", "your", "site", "or", "blog", "you", "will", "be", "checking", "spam", "from", "." ]
python
train
jaredLunde/redis_structures
redis_structures/__init__.py
https://github.com/jaredLunde/redis_structures/blob/b9cce5f5c85db5e12c292633ff8d04e3ae053294/redis_structures/__init__.py#L1689-L1704
def update(self, members): """ Adds @members to the set @members: a :class:RedisSet object or #set -> #int the number of @members that were added to the set, excluding pre-existing members """ if isinstance(members, RedisSet): size = self.size return (self.unionstore( self.key_prefix, members.key_prefix) - size) if self.serialized: members = list(map(self._dumps, members)) if members: return self._client.sadd(self.key_prefix, *members) return 0
[ "def", "update", "(", "self", ",", "members", ")", ":", "if", "isinstance", "(", "members", ",", "RedisSet", ")", ":", "size", "=", "self", ".", "size", "return", "(", "self", ".", "unionstore", "(", "self", ".", "key_prefix", ",", "members", ".", "key_prefix", ")", "-", "size", ")", "if", "self", ".", "serialized", ":", "members", "=", "list", "(", "map", "(", "self", ".", "_dumps", ",", "members", ")", ")", "if", "members", ":", "return", "self", ".", "_client", ".", "sadd", "(", "self", ".", "key_prefix", ",", "*", "members", ")", "return", "0" ]
Adds @members to the set @members: a :class:RedisSet object or #set -> #int the number of @members that were added to the set, excluding pre-existing members
[ "Adds", "@members", "to", "the", "set", "@members", ":", "a", ":", "class", ":", "RedisSet", "object", "or", "#set" ]
python
train
LCAV/pylocus
pylocus/simulation.py
https://github.com/LCAV/pylocus/blob/c56a38c251d8a435caf4641a8ae6027ecba2c8c6/pylocus/simulation.py#L67-L99
def create_mask(N, method='all', nmissing=0): """ Create weight mask according to method. :param N: Dimension of square weight matrix. :param method: Method to use (default: 'all'). - none: no missing entries (only diagonal is set to 0 for dwMDS) - first: only randomly delete measurements to first point (zeros in first row/column of matrix) - all: randomly delete measurements in whole matrix :param nmissing: Number of deleted measurements, used by methods 'first' and 'all' :return: Binary weight mask. :rtype: numpy.ndarray """ weights = np.ones((N, N)) weights[range(N), range(N)] = 0 if method == 'none': return weights # create indices object to choose from elif method == 'all': all_indices = np.triu_indices(N, 1) elif method == 'first': all_indices = [np.zeros(N - 1).astype(np.int), np.arange(1, N).astype(np.int)] ntotal = len(all_indices[0]) # randomly choose from indices and set to 0 choice = np.random.choice(ntotal, nmissing, replace=False) chosen = [all_indices[0][choice], all_indices[1][choice]] weights[chosen] = 0 weights[chosen[1], chosen[0]] = 0 return weights
[ "def", "create_mask", "(", "N", ",", "method", "=", "'all'", ",", "nmissing", "=", "0", ")", ":", "weights", "=", "np", ".", "ones", "(", "(", "N", ",", "N", ")", ")", "weights", "[", "range", "(", "N", ")", ",", "range", "(", "N", ")", "]", "=", "0", "if", "method", "==", "'none'", ":", "return", "weights", "# create indices object to choose from", "elif", "method", "==", "'all'", ":", "all_indices", "=", "np", ".", "triu_indices", "(", "N", ",", "1", ")", "elif", "method", "==", "'first'", ":", "all_indices", "=", "[", "np", ".", "zeros", "(", "N", "-", "1", ")", ".", "astype", "(", "np", ".", "int", ")", ",", "np", ".", "arange", "(", "1", ",", "N", ")", ".", "astype", "(", "np", ".", "int", ")", "]", "ntotal", "=", "len", "(", "all_indices", "[", "0", "]", ")", "# randomly choose from indices and set to 0", "choice", "=", "np", ".", "random", ".", "choice", "(", "ntotal", ",", "nmissing", ",", "replace", "=", "False", ")", "chosen", "=", "[", "all_indices", "[", "0", "]", "[", "choice", "]", ",", "all_indices", "[", "1", "]", "[", "choice", "]", "]", "weights", "[", "chosen", "]", "=", "0", "weights", "[", "chosen", "[", "1", "]", ",", "chosen", "[", "0", "]", "]", "=", "0", "return", "weights" ]
Create weight mask according to method. :param N: Dimension of square weight matrix. :param method: Method to use (default: 'all'). - none: no missing entries (only diagonal is set to 0 for dwMDS) - first: only randomly delete measurements to first point (zeros in first row/column of matrix) - all: randomly delete measurements in whole matrix :param nmissing: Number of deleted measurements, used by methods 'first' and 'all' :return: Binary weight mask. :rtype: numpy.ndarray
[ "Create", "weight", "mask", "according", "to", "method", "." ]
python
train
flowersteam/explauto
explauto/models/dataset.py
https://github.com/flowersteam/explauto/blob/cf0f81ecb9f6412f7276a95bd27359000e1e26b6/explauto/models/dataset.py#L39-L54
def nn(self, x, k = 1, radius = np.inf, eps = 0.0, p = 2): """Find the k nearest neighbors of x in the observed input data :arg x: center :arg k: the number of nearest neighbors to return (default: 1) :arg eps: approximate nearest neighbors. the k-th returned value is guaranteed to be no further than (1 + eps) times the distance to the real k-th nearest neighbor. :arg p: Which Minkowski p-norm to use. (default: 2, euclidean) :arg radius: the maximum radius (default: +inf) :return: distance and indexes of found nearest neighbors. """ assert len(x) == self.dim, 'dimension of input {} does not match expected dimension {}.'.format(len(x), self.dim) k_x = min(k, self.size) # Because linear models requires x vector to be extended to [1.0]+x # to accomodate a constant, we store them that way. return self._nn(np.array(x), k_x, radius = radius, eps = eps, p = p)
[ "def", "nn", "(", "self", ",", "x", ",", "k", "=", "1", ",", "radius", "=", "np", ".", "inf", ",", "eps", "=", "0.0", ",", "p", "=", "2", ")", ":", "assert", "len", "(", "x", ")", "==", "self", ".", "dim", ",", "'dimension of input {} does not match expected dimension {}.'", ".", "format", "(", "len", "(", "x", ")", ",", "self", ".", "dim", ")", "k_x", "=", "min", "(", "k", ",", "self", ".", "size", ")", "# Because linear models requires x vector to be extended to [1.0]+x", "# to accomodate a constant, we store them that way.", "return", "self", ".", "_nn", "(", "np", ".", "array", "(", "x", ")", ",", "k_x", ",", "radius", "=", "radius", ",", "eps", "=", "eps", ",", "p", "=", "p", ")" ]
Find the k nearest neighbors of x in the observed input data :arg x: center :arg k: the number of nearest neighbors to return (default: 1) :arg eps: approximate nearest neighbors. the k-th returned value is guaranteed to be no further than (1 + eps) times the distance to the real k-th nearest neighbor. :arg p: Which Minkowski p-norm to use. (default: 2, euclidean) :arg radius: the maximum radius (default: +inf) :return: distance and indexes of found nearest neighbors.
[ "Find", "the", "k", "nearest", "neighbors", "of", "x", "in", "the", "observed", "input", "data", ":", "arg", "x", ":", "center", ":", "arg", "k", ":", "the", "number", "of", "nearest", "neighbors", "to", "return", "(", "default", ":", "1", ")", ":", "arg", "eps", ":", "approximate", "nearest", "neighbors", ".", "the", "k", "-", "th", "returned", "value", "is", "guaranteed", "to", "be", "no", "further", "than", "(", "1", "+", "eps", ")", "times", "the", "distance", "to", "the", "real", "k", "-", "th", "nearest", "neighbor", ".", ":", "arg", "p", ":", "Which", "Minkowski", "p", "-", "norm", "to", "use", ".", "(", "default", ":", "2", "euclidean", ")", ":", "arg", "radius", ":", "the", "maximum", "radius", "(", "default", ":", "+", "inf", ")", ":", "return", ":", "distance", "and", "indexes", "of", "found", "nearest", "neighbors", "." ]
python
train
MillionIntegrals/vel
vel/rl/models/deterministic_policy_model.py
https://github.com/MillionIntegrals/vel/blob/e0726e1f63742b728966ccae0c8b825ea0ba491a/vel/rl/models/deterministic_policy_model.py#L156-L164
def create(policy_backbone: ModelFactory, value_backbone: ModelFactory, input_block: typing.Optional[ModelFactory]=None): """ Vel factory function """ if input_block is None: input_block = IdentityFactory() return DeterministicPolicyModelFactory( input_block=input_block, policy_backbone=policy_backbone, value_backbone=value_backbone )
[ "def", "create", "(", "policy_backbone", ":", "ModelFactory", ",", "value_backbone", ":", "ModelFactory", ",", "input_block", ":", "typing", ".", "Optional", "[", "ModelFactory", "]", "=", "None", ")", ":", "if", "input_block", "is", "None", ":", "input_block", "=", "IdentityFactory", "(", ")", "return", "DeterministicPolicyModelFactory", "(", "input_block", "=", "input_block", ",", "policy_backbone", "=", "policy_backbone", ",", "value_backbone", "=", "value_backbone", ")" ]
Vel factory function
[ "Vel", "factory", "function" ]
python
train
mozilla/python-zeppelin
zeppelin/converters/markdown.py
https://github.com/mozilla/python-zeppelin/blob/76ce6b7608ef6cf7b807bd5d850a58ea6a59ef07/zeppelin/converters/markdown.py#L147-L174
def build_markdown_body(self, text): """Generate the body for the Markdown file. - processes each json block one by one - for each block, process: - the creator of the notebook (user) - the date the notebook was created - the date the notebook was last updated - the input by detecting the editor language - the output by detecting the output format """ key_options = { 'dateCreated': self.process_date_created, 'dateUpdated': self.process_date_updated, 'title': self.process_title, 'text': self.process_input } for paragraph in text['paragraphs']: if 'user' in paragraph: self.user = paragraph['user'] for key, handler in key_options.items(): if key in paragraph: handler(paragraph[key]) if self._RESULT_KEY in paragraph: self.process_results(paragraph)
[ "def", "build_markdown_body", "(", "self", ",", "text", ")", ":", "key_options", "=", "{", "'dateCreated'", ":", "self", ".", "process_date_created", ",", "'dateUpdated'", ":", "self", ".", "process_date_updated", ",", "'title'", ":", "self", ".", "process_title", ",", "'text'", ":", "self", ".", "process_input", "}", "for", "paragraph", "in", "text", "[", "'paragraphs'", "]", ":", "if", "'user'", "in", "paragraph", ":", "self", ".", "user", "=", "paragraph", "[", "'user'", "]", "for", "key", ",", "handler", "in", "key_options", ".", "items", "(", ")", ":", "if", "key", "in", "paragraph", ":", "handler", "(", "paragraph", "[", "key", "]", ")", "if", "self", ".", "_RESULT_KEY", "in", "paragraph", ":", "self", ".", "process_results", "(", "paragraph", ")" ]
Generate the body for the Markdown file. - processes each json block one by one - for each block, process: - the creator of the notebook (user) - the date the notebook was created - the date the notebook was last updated - the input by detecting the editor language - the output by detecting the output format
[ "Generate", "the", "body", "for", "the", "Markdown", "file", "." ]
python
train
googleapis/google-cloud-python
logging/google/cloud/logging/_gapic.py
https://github.com/googleapis/google-cloud-python/blob/85e80125a59cb10f8cb105f25ecc099e4b940b50/logging/google/cloud/logging/_gapic.py#L518-L534
def _item_to_metric(iterator, log_metric_pb): """Convert a metric protobuf to the native object. :type iterator: :class:`~google.api_core.page_iterator.Iterator` :param iterator: The iterator that is currently in use. :type log_metric_pb: :class:`.logging_metrics_pb2.LogMetric` :param log_metric_pb: Metric protobuf returned from the API. :rtype: :class:`~google.cloud.logging.metric.Metric` :returns: The next metric in the page. """ # NOTE: LogMetric message type does not have an ``Any`` field # so `MessageToDict`` can safely be used. resource = MessageToDict(log_metric_pb) return Metric.from_api_repr(resource, iterator.client)
[ "def", "_item_to_metric", "(", "iterator", ",", "log_metric_pb", ")", ":", "# NOTE: LogMetric message type does not have an ``Any`` field", "# so `MessageToDict`` can safely be used.", "resource", "=", "MessageToDict", "(", "log_metric_pb", ")", "return", "Metric", ".", "from_api_repr", "(", "resource", ",", "iterator", ".", "client", ")" ]
Convert a metric protobuf to the native object. :type iterator: :class:`~google.api_core.page_iterator.Iterator` :param iterator: The iterator that is currently in use. :type log_metric_pb: :class:`.logging_metrics_pb2.LogMetric` :param log_metric_pb: Metric protobuf returned from the API. :rtype: :class:`~google.cloud.logging.metric.Metric` :returns: The next metric in the page.
[ "Convert", "a", "metric", "protobuf", "to", "the", "native", "object", "." ]
python
train
django-fluent/django-fluent-blogs
fluent_blogs/models/query.py
https://github.com/django-fluent/django-fluent-blogs/blob/86b148549a010eaca9a2ea987fe43be250e06c50/fluent_blogs/models/query.py#L72-L144
def query_entries( queryset=None, year=None, month=None, day=None, category=None, category_slug=None, tag=None, tag_slug=None, author=None, author_slug=None, future=False, order=None, orderby=None, limit=None, ): """ Query the entries using a set of predefined filters. This interface is mainly used by the ``get_entries`` template tag. """ if queryset is None: queryset = get_entry_model().objects.all() if appsettings.FLUENT_BLOGS_FILTER_SITE_ID: queryset = queryset.parent_site(settings.SITE_ID) if not future: queryset = queryset.published() if year: queryset = queryset.filter(publication_date__year=year) if month: queryset = queryset.filter(publication_date__month=month) if day: queryset = queryset.filter(publication_date__day=day) # The main category/tag/author filters if category: if isinstance(category, basestring): queryset = queryset.categories(category) elif isinstance(category, (int, long)): queryset = queryset.filter(categories=category) else: raise ValueError("Expected slug or ID for the 'category' parameter") if category_slug: queryset = queryset.categories(category) if tag: if isinstance(tag, basestring): queryset = queryset.tagged(tag) elif isinstance(tag, (int, long)): queryset = queryset.filter(tags=tag) else: raise ValueError("Expected slug or ID for 'tag' parameter.") if tag_slug: queryset = queryset.tagged(tag) if author: if isinstance(author, basestring): queryset = queryset.authors(author) elif isinstance(author, (int, long)): queryset = queryset.filter(author=author) else: raise ValueError("Expected slug or ID for 'author' parameter.") if author_slug: queryset = queryset.authors(author_slug) # Ordering if orderby: queryset = queryset.order_by(*_get_order_by(order, orderby, ENTRY_ORDER_BY_FIELDS)) else: queryset = queryset.order_by('-publication_date') # Limit if limit: queryset = queryset[:limit] return queryset
[ "def", "query_entries", "(", "queryset", "=", "None", ",", "year", "=", "None", ",", "month", "=", "None", ",", "day", "=", "None", ",", "category", "=", "None", ",", "category_slug", "=", "None", ",", "tag", "=", "None", ",", "tag_slug", "=", "None", ",", "author", "=", "None", ",", "author_slug", "=", "None", ",", "future", "=", "False", ",", "order", "=", "None", ",", "orderby", "=", "None", ",", "limit", "=", "None", ",", ")", ":", "if", "queryset", "is", "None", ":", "queryset", "=", "get_entry_model", "(", ")", ".", "objects", ".", "all", "(", ")", "if", "appsettings", ".", "FLUENT_BLOGS_FILTER_SITE_ID", ":", "queryset", "=", "queryset", ".", "parent_site", "(", "settings", ".", "SITE_ID", ")", "if", "not", "future", ":", "queryset", "=", "queryset", ".", "published", "(", ")", "if", "year", ":", "queryset", "=", "queryset", ".", "filter", "(", "publication_date__year", "=", "year", ")", "if", "month", ":", "queryset", "=", "queryset", ".", "filter", "(", "publication_date__month", "=", "month", ")", "if", "day", ":", "queryset", "=", "queryset", ".", "filter", "(", "publication_date__day", "=", "day", ")", "# The main category/tag/author filters", "if", "category", ":", "if", "isinstance", "(", "category", ",", "basestring", ")", ":", "queryset", "=", "queryset", ".", "categories", "(", "category", ")", "elif", "isinstance", "(", "category", ",", "(", "int", ",", "long", ")", ")", ":", "queryset", "=", "queryset", ".", "filter", "(", "categories", "=", "category", ")", "else", ":", "raise", "ValueError", "(", "\"Expected slug or ID for the 'category' parameter\"", ")", "if", "category_slug", ":", "queryset", "=", "queryset", ".", "categories", "(", "category", ")", "if", "tag", ":", "if", "isinstance", "(", "tag", ",", "basestring", ")", ":", "queryset", "=", "queryset", ".", "tagged", "(", "tag", ")", "elif", "isinstance", "(", "tag", ",", "(", "int", ",", "long", ")", ")", ":", "queryset", "=", "queryset", ".", "filter", "(", "tags", "=", "tag", ")", "else", ":", "raise", "ValueError", "(", "\"Expected slug or ID for 'tag' parameter.\"", ")", "if", "tag_slug", ":", "queryset", "=", "queryset", ".", "tagged", "(", "tag", ")", "if", "author", ":", "if", "isinstance", "(", "author", ",", "basestring", ")", ":", "queryset", "=", "queryset", ".", "authors", "(", "author", ")", "elif", "isinstance", "(", "author", ",", "(", "int", ",", "long", ")", ")", ":", "queryset", "=", "queryset", ".", "filter", "(", "author", "=", "author", ")", "else", ":", "raise", "ValueError", "(", "\"Expected slug or ID for 'author' parameter.\"", ")", "if", "author_slug", ":", "queryset", "=", "queryset", ".", "authors", "(", "author_slug", ")", "# Ordering", "if", "orderby", ":", "queryset", "=", "queryset", ".", "order_by", "(", "*", "_get_order_by", "(", "order", ",", "orderby", ",", "ENTRY_ORDER_BY_FIELDS", ")", ")", "else", ":", "queryset", "=", "queryset", ".", "order_by", "(", "'-publication_date'", ")", "# Limit", "if", "limit", ":", "queryset", "=", "queryset", "[", ":", "limit", "]", "return", "queryset" ]
Query the entries using a set of predefined filters. This interface is mainly used by the ``get_entries`` template tag.
[ "Query", "the", "entries", "using", "a", "set", "of", "predefined", "filters", ".", "This", "interface", "is", "mainly", "used", "by", "the", "get_entries", "template", "tag", "." ]
python
train
sibson/vncdotool
vncdotool/rfb.py
https://github.com/sibson/vncdotool/blob/e133a8916efaa0f5ed421e0aa737196624635b0c/vncdotool/rfb.py#L586-L590
def clientCutText(self, message): """The client has new ASCII text in its cut buffer. (aka clipboard) """ self.transport.write(pack("!BxxxI", 6, len(message)) + message)
[ "def", "clientCutText", "(", "self", ",", "message", ")", ":", "self", ".", "transport", ".", "write", "(", "pack", "(", "\"!BxxxI\"", ",", "6", ",", "len", "(", "message", ")", ")", "+", "message", ")" ]
The client has new ASCII text in its cut buffer. (aka clipboard)
[ "The", "client", "has", "new", "ASCII", "text", "in", "its", "cut", "buffer", ".", "(", "aka", "clipboard", ")" ]
python
train
jay-johnson/spylunking
spylunking/scripts/search_splunk.py
https://github.com/jay-johnson/spylunking/blob/95cc86776f04ec5935cf04e291cf18798345d6cb/spylunking/scripts/search_splunk.py#L254-L285
def show_non_search_results( log_rec, code_view=True, json_view=False, show_message_details=False): """show_non_search_results Show non-search results for search jobs like: ``index="antinex" | stats count`` :param log_rec: log record from splunk :param code_view: show as a normal tail -f <log file> view :param json_view: pretty print each log's dictionary :param show_message_details """ log_dict = None try: log_dict = json.loads( log_rec) except Exception as e: log_dict = None # end of try/ex if not log_dict: log.info(( '{}').format( ppj(log_rec))) else: log.info(( '{}').format( ppj(log_dict)))
[ "def", "show_non_search_results", "(", "log_rec", ",", "code_view", "=", "True", ",", "json_view", "=", "False", ",", "show_message_details", "=", "False", ")", ":", "log_dict", "=", "None", "try", ":", "log_dict", "=", "json", ".", "loads", "(", "log_rec", ")", "except", "Exception", "as", "e", ":", "log_dict", "=", "None", "# end of try/ex", "if", "not", "log_dict", ":", "log", ".", "info", "(", "(", "'{}'", ")", ".", "format", "(", "ppj", "(", "log_rec", ")", ")", ")", "else", ":", "log", ".", "info", "(", "(", "'{}'", ")", ".", "format", "(", "ppj", "(", "log_dict", ")", ")", ")" ]
show_non_search_results Show non-search results for search jobs like: ``index="antinex" | stats count`` :param log_rec: log record from splunk :param code_view: show as a normal tail -f <log file> view :param json_view: pretty print each log's dictionary :param show_message_details
[ "show_non_search_results" ]
python
train
iotile/coretools
iotilebuild/iotile/build/config/scons-local-3.0.1/SCons/SConf.py
https://github.com/iotile/coretools/blob/2d794f5f1346b841b0dcd16c9d284e9bf2f3c6ec/iotilebuild/iotile/build/config/scons-local-3.0.1/SCons/SConf.py#L478-L529
def BuildNodes(self, nodes): """ Tries to build the given nodes immediately. Returns 1 on success, 0 on error. """ if self.logstream is not None: # override stdout / stderr to write in log file oldStdout = sys.stdout sys.stdout = self.logstream oldStderr = sys.stderr sys.stderr = self.logstream # the engine assumes the current path is the SConstruct directory ... old_fs_dir = SConfFS.getcwd() old_os_dir = os.getcwd() SConfFS.chdir(SConfFS.Top, change_os_dir=1) # Because we take responsibility here for writing out our # own .sconsign info (see SConfBuildTask.execute(), above), # we override the store_info() method with a null place-holder # so we really control how it gets written. for n in nodes: n.store_info = 0 if not hasattr(n, 'attributes'): n.attributes = SCons.Node.Node.Attrs() n.attributes.keep_targetinfo = 1 ret = 1 try: # ToDo: use user options for calc save_max_drift = SConfFS.get_max_drift() SConfFS.set_max_drift(0) tm = SCons.Taskmaster.Taskmaster(nodes, SConfBuildTask) # we don't want to build tests in parallel jobs = SCons.Job.Jobs(1, tm ) jobs.run() for n in nodes: state = n.get_state() if (state != SCons.Node.executed and state != SCons.Node.up_to_date): # the node could not be built. we return 0 in this case ret = 0 finally: SConfFS.set_max_drift(save_max_drift) os.chdir(old_os_dir) SConfFS.chdir(old_fs_dir, change_os_dir=0) if self.logstream is not None: # restore stdout / stderr sys.stdout = oldStdout sys.stderr = oldStderr return ret
[ "def", "BuildNodes", "(", "self", ",", "nodes", ")", ":", "if", "self", ".", "logstream", "is", "not", "None", ":", "# override stdout / stderr to write in log file", "oldStdout", "=", "sys", ".", "stdout", "sys", ".", "stdout", "=", "self", ".", "logstream", "oldStderr", "=", "sys", ".", "stderr", "sys", ".", "stderr", "=", "self", ".", "logstream", "# the engine assumes the current path is the SConstruct directory ...", "old_fs_dir", "=", "SConfFS", ".", "getcwd", "(", ")", "old_os_dir", "=", "os", ".", "getcwd", "(", ")", "SConfFS", ".", "chdir", "(", "SConfFS", ".", "Top", ",", "change_os_dir", "=", "1", ")", "# Because we take responsibility here for writing out our", "# own .sconsign info (see SConfBuildTask.execute(), above),", "# we override the store_info() method with a null place-holder", "# so we really control how it gets written.", "for", "n", "in", "nodes", ":", "n", ".", "store_info", "=", "0", "if", "not", "hasattr", "(", "n", ",", "'attributes'", ")", ":", "n", ".", "attributes", "=", "SCons", ".", "Node", ".", "Node", ".", "Attrs", "(", ")", "n", ".", "attributes", ".", "keep_targetinfo", "=", "1", "ret", "=", "1", "try", ":", "# ToDo: use user options for calc", "save_max_drift", "=", "SConfFS", ".", "get_max_drift", "(", ")", "SConfFS", ".", "set_max_drift", "(", "0", ")", "tm", "=", "SCons", ".", "Taskmaster", ".", "Taskmaster", "(", "nodes", ",", "SConfBuildTask", ")", "# we don't want to build tests in parallel", "jobs", "=", "SCons", ".", "Job", ".", "Jobs", "(", "1", ",", "tm", ")", "jobs", ".", "run", "(", ")", "for", "n", "in", "nodes", ":", "state", "=", "n", ".", "get_state", "(", ")", "if", "(", "state", "!=", "SCons", ".", "Node", ".", "executed", "and", "state", "!=", "SCons", ".", "Node", ".", "up_to_date", ")", ":", "# the node could not be built. we return 0 in this case", "ret", "=", "0", "finally", ":", "SConfFS", ".", "set_max_drift", "(", "save_max_drift", ")", "os", ".", "chdir", "(", "old_os_dir", ")", "SConfFS", ".", "chdir", "(", "old_fs_dir", ",", "change_os_dir", "=", "0", ")", "if", "self", ".", "logstream", "is", "not", "None", ":", "# restore stdout / stderr", "sys", ".", "stdout", "=", "oldStdout", "sys", ".", "stderr", "=", "oldStderr", "return", "ret" ]
Tries to build the given nodes immediately. Returns 1 on success, 0 on error.
[ "Tries", "to", "build", "the", "given", "nodes", "immediately", ".", "Returns", "1", "on", "success", "0", "on", "error", "." ]
python
train
jaraco/wolframalpha
wolframalpha/__init__.py
https://github.com/jaraco/wolframalpha/blob/50bf2e047b698e308a9a88770a23e7e210aa5bcb/wolframalpha/__init__.py#L181-L185
def info(self): """ The pods, assumptions, and warnings of this result. """ return itertools.chain(self.pods, self.assumptions, self.warnings)
[ "def", "info", "(", "self", ")", ":", "return", "itertools", ".", "chain", "(", "self", ".", "pods", ",", "self", ".", "assumptions", ",", "self", ".", "warnings", ")" ]
The pods, assumptions, and warnings of this result.
[ "The", "pods", "assumptions", "and", "warnings", "of", "this", "result", "." ]
python
test
opencobra/cobrapy
cobra/io/sbml.py
https://github.com/opencobra/cobrapy/blob/9d1987cdb3a395cf4125a3439c3b002ff2be2009/cobra/io/sbml.py#L1106-L1146
def _create_bound(model, reaction, bound_type, f_replace, units=None, flux_udef=None): """Creates bound in model for given reaction. Adds the parameters for the bounds to the SBML model. Parameters ---------- model : libsbml.Model SBML model instance reaction : cobra.core.Reaction Cobra reaction instance from which the bounds are read. bound_type : {LOWER_BOUND, UPPER_BOUND} Type of bound f_replace : dict of id replacement functions units : flux units Returns ------- Id of bound parameter. """ value = getattr(reaction, bound_type) if value == config.lower_bound: return LOWER_BOUND_ID elif value == 0: return ZERO_BOUND_ID elif value == config.upper_bound: return UPPER_BOUND_ID elif value == -float("Inf"): return BOUND_MINUS_INF elif value == float("Inf"): return BOUND_PLUS_INF else: # new parameter rid = reaction.id if f_replace and F_REACTION_REV in f_replace: rid = f_replace[F_REACTION_REV](rid) pid = rid + "_" + bound_type _create_parameter(model, pid=pid, value=value, sbo=SBO_FLUX_BOUND, units=units, flux_udef=flux_udef) return pid
[ "def", "_create_bound", "(", "model", ",", "reaction", ",", "bound_type", ",", "f_replace", ",", "units", "=", "None", ",", "flux_udef", "=", "None", ")", ":", "value", "=", "getattr", "(", "reaction", ",", "bound_type", ")", "if", "value", "==", "config", ".", "lower_bound", ":", "return", "LOWER_BOUND_ID", "elif", "value", "==", "0", ":", "return", "ZERO_BOUND_ID", "elif", "value", "==", "config", ".", "upper_bound", ":", "return", "UPPER_BOUND_ID", "elif", "value", "==", "-", "float", "(", "\"Inf\"", ")", ":", "return", "BOUND_MINUS_INF", "elif", "value", "==", "float", "(", "\"Inf\"", ")", ":", "return", "BOUND_PLUS_INF", "else", ":", "# new parameter", "rid", "=", "reaction", ".", "id", "if", "f_replace", "and", "F_REACTION_REV", "in", "f_replace", ":", "rid", "=", "f_replace", "[", "F_REACTION_REV", "]", "(", "rid", ")", "pid", "=", "rid", "+", "\"_\"", "+", "bound_type", "_create_parameter", "(", "model", ",", "pid", "=", "pid", ",", "value", "=", "value", ",", "sbo", "=", "SBO_FLUX_BOUND", ",", "units", "=", "units", ",", "flux_udef", "=", "flux_udef", ")", "return", "pid" ]
Creates bound in model for given reaction. Adds the parameters for the bounds to the SBML model. Parameters ---------- model : libsbml.Model SBML model instance reaction : cobra.core.Reaction Cobra reaction instance from which the bounds are read. bound_type : {LOWER_BOUND, UPPER_BOUND} Type of bound f_replace : dict of id replacement functions units : flux units Returns ------- Id of bound parameter.
[ "Creates", "bound", "in", "model", "for", "given", "reaction", "." ]
python
valid
openstack/networking-cisco
networking_cisco/apps/saf/agent/iptables_driver.py
https://github.com/openstack/networking-cisco/blob/aa58a30aec25b86f9aa5952b0863045975debfa9/networking_cisco/apps/saf/agent/iptables_driver.py#L155-L192
def update_iptables(self): """Update iptables based on information in the rule_info.""" # Read the iptables iptables_cmds = ['iptables-save', '-c'] all_rules = dsl.execute(iptables_cmds, root_helper=self._root_helper, log_output=False) # For each rule in rule_info update the rule if necessary. new_rules = [] is_modified = False for line in all_rules.split('\n'): new_line = line line_content = line.split() # The spoofing rule which includes mac and ip should have # -s cidr/32 option for ip address. Otherwise no rule # will be modified. if '-s' in line_content: tmp_rule_info = list(self.rule_info) for rule in tmp_rule_info: if (rule.mac in line.lower() and rule.chain.lower() in line.lower() and not self._is_ip_in_rule(rule.ip, line_content)): ip_loc = line_content.index('-s') + 1 line_content[ip_loc] = rule.ip + '/32' new_line = ' '.join(line_content) LOG.debug('Modified %(old_rule)s. ' 'New rule is %(new_rule)s.' % ( {'old_rule': line, 'new_rule': new_line})) is_modified = True new_rules.append(new_line) if is_modified and new_rules: # Updated all the rules. Now commit the new rules. iptables_cmds = ['iptables-restore', '-c'] dsl.execute(iptables_cmds, process_input='\n'.join(new_rules), root_helper=self._root_helper, log_output=False)
[ "def", "update_iptables", "(", "self", ")", ":", "# Read the iptables", "iptables_cmds", "=", "[", "'iptables-save'", ",", "'-c'", "]", "all_rules", "=", "dsl", ".", "execute", "(", "iptables_cmds", ",", "root_helper", "=", "self", ".", "_root_helper", ",", "log_output", "=", "False", ")", "# For each rule in rule_info update the rule if necessary.", "new_rules", "=", "[", "]", "is_modified", "=", "False", "for", "line", "in", "all_rules", ".", "split", "(", "'\\n'", ")", ":", "new_line", "=", "line", "line_content", "=", "line", ".", "split", "(", ")", "# The spoofing rule which includes mac and ip should have", "# -s cidr/32 option for ip address. Otherwise no rule", "# will be modified.", "if", "'-s'", "in", "line_content", ":", "tmp_rule_info", "=", "list", "(", "self", ".", "rule_info", ")", "for", "rule", "in", "tmp_rule_info", ":", "if", "(", "rule", ".", "mac", "in", "line", ".", "lower", "(", ")", "and", "rule", ".", "chain", ".", "lower", "(", ")", "in", "line", ".", "lower", "(", ")", "and", "not", "self", ".", "_is_ip_in_rule", "(", "rule", ".", "ip", ",", "line_content", ")", ")", ":", "ip_loc", "=", "line_content", ".", "index", "(", "'-s'", ")", "+", "1", "line_content", "[", "ip_loc", "]", "=", "rule", ".", "ip", "+", "'/32'", "new_line", "=", "' '", ".", "join", "(", "line_content", ")", "LOG", ".", "debug", "(", "'Modified %(old_rule)s. '", "'New rule is %(new_rule)s.'", "%", "(", "{", "'old_rule'", ":", "line", ",", "'new_rule'", ":", "new_line", "}", ")", ")", "is_modified", "=", "True", "new_rules", ".", "append", "(", "new_line", ")", "if", "is_modified", "and", "new_rules", ":", "# Updated all the rules. Now commit the new rules.", "iptables_cmds", "=", "[", "'iptables-restore'", ",", "'-c'", "]", "dsl", ".", "execute", "(", "iptables_cmds", ",", "process_input", "=", "'\\n'", ".", "join", "(", "new_rules", ")", ",", "root_helper", "=", "self", ".", "_root_helper", ",", "log_output", "=", "False", ")" ]
Update iptables based on information in the rule_info.
[ "Update", "iptables", "based", "on", "information", "in", "the", "rule_info", "." ]
python
train
SmileyChris/easy-thumbnails
easy_thumbnails/management/__init__.py
https://github.com/SmileyChris/easy-thumbnails/blob/b08ab44883bf7b221a98dadb9b589cb95d35b0bf/easy_thumbnails/management/__init__.py#L94-L101
def delete_thumbnails(relative_source_path, root=None, basedir=None, subdir=None, prefix=None): """ Delete all thumbnails for a source image. """ thumbs = thumbnails_for_file(relative_source_path, root, basedir, subdir, prefix) return _delete_using_thumbs_list(thumbs)
[ "def", "delete_thumbnails", "(", "relative_source_path", ",", "root", "=", "None", ",", "basedir", "=", "None", ",", "subdir", "=", "None", ",", "prefix", "=", "None", ")", ":", "thumbs", "=", "thumbnails_for_file", "(", "relative_source_path", ",", "root", ",", "basedir", ",", "subdir", ",", "prefix", ")", "return", "_delete_using_thumbs_list", "(", "thumbs", ")" ]
Delete all thumbnails for a source image.
[ "Delete", "all", "thumbnails", "for", "a", "source", "image", "." ]
python
train
soravux/scoop
scoop/discovery/minusconf.py
https://github.com/soravux/scoop/blob/d391dfa62f47e49d48328ee9cf08aa114256fd33/scoop/discovery/minusconf.py#L243-L248
def start_blocking(self): """ Start the advertiser in the background, but wait until it is ready """ self._cav_started.clear() self.start() self._cav_started.wait()
[ "def", "start_blocking", "(", "self", ")", ":", "self", ".", "_cav_started", ".", "clear", "(", ")", "self", ".", "start", "(", ")", "self", ".", "_cav_started", ".", "wait", "(", ")" ]
Start the advertiser in the background, but wait until it is ready
[ "Start", "the", "advertiser", "in", "the", "background", "but", "wait", "until", "it", "is", "ready" ]
python
train
Ex-Mente/auxi.0
auxi/core/helpers.py
https://github.com/Ex-Mente/auxi.0/blob/2dcdae74154f136f8ca58289fe5b20772f215046/auxi/core/helpers.py#L23-L34
def get_date(date): """ Get the date from a value that could be a date object or a string. :param date: The date object or string. :returns: The date object. """ if type(date) is str: return datetime.strptime(date, '%Y-%m-%d').date() else: return date
[ "def", "get_date", "(", "date", ")", ":", "if", "type", "(", "date", ")", "is", "str", ":", "return", "datetime", ".", "strptime", "(", "date", ",", "'%Y-%m-%d'", ")", ".", "date", "(", ")", "else", ":", "return", "date" ]
Get the date from a value that could be a date object or a string. :param date: The date object or string. :returns: The date object.
[ "Get", "the", "date", "from", "a", "value", "that", "could", "be", "a", "date", "object", "or", "a", "string", "." ]
python
valid
pandas-dev/pandas
pandas/core/arrays/datetimes.py
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/arrays/datetimes.py#L2114-L2140
def _maybe_localize_point(ts, is_none, is_not_none, freq, tz): """ Localize a start or end Timestamp to the timezone of the corresponding start or end Timestamp Parameters ---------- ts : start or end Timestamp to potentially localize is_none : argument that should be None is_not_none : argument that should not be None freq : Tick, DateOffset, or None tz : str, timezone object or None Returns ------- ts : Timestamp """ # Make sure start and end are timezone localized if: # 1) freq = a Timedelta-like frequency (Tick) # 2) freq = None i.e. generating a linspaced range if isinstance(freq, Tick) or freq is None: localize_args = {'tz': tz, 'ambiguous': False} else: localize_args = {'tz': None} if is_none is None and is_not_none is not None: ts = ts.tz_localize(**localize_args) return ts
[ "def", "_maybe_localize_point", "(", "ts", ",", "is_none", ",", "is_not_none", ",", "freq", ",", "tz", ")", ":", "# Make sure start and end are timezone localized if:", "# 1) freq = a Timedelta-like frequency (Tick)", "# 2) freq = None i.e. generating a linspaced range", "if", "isinstance", "(", "freq", ",", "Tick", ")", "or", "freq", "is", "None", ":", "localize_args", "=", "{", "'tz'", ":", "tz", ",", "'ambiguous'", ":", "False", "}", "else", ":", "localize_args", "=", "{", "'tz'", ":", "None", "}", "if", "is_none", "is", "None", "and", "is_not_none", "is", "not", "None", ":", "ts", "=", "ts", ".", "tz_localize", "(", "*", "*", "localize_args", ")", "return", "ts" ]
Localize a start or end Timestamp to the timezone of the corresponding start or end Timestamp Parameters ---------- ts : start or end Timestamp to potentially localize is_none : argument that should be None is_not_none : argument that should not be None freq : Tick, DateOffset, or None tz : str, timezone object or None Returns ------- ts : Timestamp
[ "Localize", "a", "start", "or", "end", "Timestamp", "to", "the", "timezone", "of", "the", "corresponding", "start", "or", "end", "Timestamp" ]
python
train
Nike-Inc/cerberus-python-client
cerberus/client.py
https://github.com/Nike-Inc/cerberus-python-client/blob/ef38356822e722fcb6a6ed4a1b38a5b493e753ae/cerberus/client.py#L242-L253
def get_sdb_by_id(self, sdb_id): """ Return the details for the given safe deposit box id Keyword arguments: sdb_id -- this is the id of the safe deposit box, not the path. """ sdb_resp = get_with_retry(self.cerberus_url + '/v2/safe-deposit-box/' + sdb_id, headers=self.HEADERS) throw_if_bad_response(sdb_resp) return sdb_resp.json()
[ "def", "get_sdb_by_id", "(", "self", ",", "sdb_id", ")", ":", "sdb_resp", "=", "get_with_retry", "(", "self", ".", "cerberus_url", "+", "'/v2/safe-deposit-box/'", "+", "sdb_id", ",", "headers", "=", "self", ".", "HEADERS", ")", "throw_if_bad_response", "(", "sdb_resp", ")", "return", "sdb_resp", ".", "json", "(", ")" ]
Return the details for the given safe deposit box id Keyword arguments: sdb_id -- this is the id of the safe deposit box, not the path.
[ "Return", "the", "details", "for", "the", "given", "safe", "deposit", "box", "id" ]
python
train
stevearc/dql
dql/util.py
https://github.com/stevearc/dql/blob/e9d3aa22873076dae5ebd02e35318aa996b1e56a/dql/util.py#L56-L88
def resolve(val): """ Convert a pyparsing value to the python type """ name = val.getName() if name == "number": try: return int(val.number) except ValueError: return Decimal(val.number) elif name == "str": return unwrap(val.str) elif name == "null": return None elif name == "binary": return Binary(val.binary[2:-1]) elif name == "set": if val.set == "()": return set() return set([resolve(v) for v in val.set]) elif name == "bool": return val.bool == "TRUE" elif name == "list": return [resolve(v) for v in val.list] elif name == "dict": dict_val = {} for k, v in val.dict: dict_val[resolve(k)] = resolve(v) return dict_val elif name == "ts_function": return dt_to_ts(eval_function(val.ts_function)) elif name == "ts_expression": return dt_to_ts(eval_expression(val)) else: raise SyntaxError("Unable to resolve value '%s'" % val)
[ "def", "resolve", "(", "val", ")", ":", "name", "=", "val", ".", "getName", "(", ")", "if", "name", "==", "\"number\"", ":", "try", ":", "return", "int", "(", "val", ".", "number", ")", "except", "ValueError", ":", "return", "Decimal", "(", "val", ".", "number", ")", "elif", "name", "==", "\"str\"", ":", "return", "unwrap", "(", "val", ".", "str", ")", "elif", "name", "==", "\"null\"", ":", "return", "None", "elif", "name", "==", "\"binary\"", ":", "return", "Binary", "(", "val", ".", "binary", "[", "2", ":", "-", "1", "]", ")", "elif", "name", "==", "\"set\"", ":", "if", "val", ".", "set", "==", "\"()\"", ":", "return", "set", "(", ")", "return", "set", "(", "[", "resolve", "(", "v", ")", "for", "v", "in", "val", ".", "set", "]", ")", "elif", "name", "==", "\"bool\"", ":", "return", "val", ".", "bool", "==", "\"TRUE\"", "elif", "name", "==", "\"list\"", ":", "return", "[", "resolve", "(", "v", ")", "for", "v", "in", "val", ".", "list", "]", "elif", "name", "==", "\"dict\"", ":", "dict_val", "=", "{", "}", "for", "k", ",", "v", "in", "val", ".", "dict", ":", "dict_val", "[", "resolve", "(", "k", ")", "]", "=", "resolve", "(", "v", ")", "return", "dict_val", "elif", "name", "==", "\"ts_function\"", ":", "return", "dt_to_ts", "(", "eval_function", "(", "val", ".", "ts_function", ")", ")", "elif", "name", "==", "\"ts_expression\"", ":", "return", "dt_to_ts", "(", "eval_expression", "(", "val", ")", ")", "else", ":", "raise", "SyntaxError", "(", "\"Unable to resolve value '%s'\"", "%", "val", ")" ]
Convert a pyparsing value to the python type
[ "Convert", "a", "pyparsing", "value", "to", "the", "python", "type" ]
python
train
rodynnz/xccdf
src/xccdf/models/description.py
https://github.com/rodynnz/xccdf/blob/1b9dc2f06b5cce8db2a54c5f95a8f6bcf5cb6981/src/xccdf/models/description.py#L51-L67
def update_xml_element(self): """ Updates the xml element contents to matches the instance contents. :returns: Updated XML element. :rtype: lxml.etree._Element """ super(Description, self).update_xml_element() if hasattr(self, 'lang'): self.xml_element.set( '{http://www.w3.org/XML/1998/namespace}lang', self.lang) if hasattr(self, 'override'): self.xml_element.set('override', str(self.override)) return self.xml_element
[ "def", "update_xml_element", "(", "self", ")", ":", "super", "(", "Description", ",", "self", ")", ".", "update_xml_element", "(", ")", "if", "hasattr", "(", "self", ",", "'lang'", ")", ":", "self", ".", "xml_element", ".", "set", "(", "'{http://www.w3.org/XML/1998/namespace}lang'", ",", "self", ".", "lang", ")", "if", "hasattr", "(", "self", ",", "'override'", ")", ":", "self", ".", "xml_element", ".", "set", "(", "'override'", ",", "str", "(", "self", ".", "override", ")", ")", "return", "self", ".", "xml_element" ]
Updates the xml element contents to matches the instance contents. :returns: Updated XML element. :rtype: lxml.etree._Element
[ "Updates", "the", "xml", "element", "contents", "to", "matches", "the", "instance", "contents", "." ]
python
train
dustinmm80/healthy
checks.py
https://github.com/dustinmm80/healthy/blob/b59016c3f578ca45b6ce857a2d5c4584b8542288/checks.py#L94-L107
def check_python_classifiers(package_info, *args): """ Does the package have Python classifiers? :param package_info: package_info dictionary :return: Tuple (is the condition True or False?, reason if it is False else None, score to be applied) """ classifiers = package_info.get('classifiers') reason = "Python classifiers missing" result = False if len([c for c in classifiers if c.startswith('Programming Language :: Python ::')]) > 0: result = True return result, reason, HAS_PYTHON_CLASSIFIERS
[ "def", "check_python_classifiers", "(", "package_info", ",", "*", "args", ")", ":", "classifiers", "=", "package_info", ".", "get", "(", "'classifiers'", ")", "reason", "=", "\"Python classifiers missing\"", "result", "=", "False", "if", "len", "(", "[", "c", "for", "c", "in", "classifiers", "if", "c", ".", "startswith", "(", "'Programming Language :: Python ::'", ")", "]", ")", ">", "0", ":", "result", "=", "True", "return", "result", ",", "reason", ",", "HAS_PYTHON_CLASSIFIERS" ]
Does the package have Python classifiers? :param package_info: package_info dictionary :return: Tuple (is the condition True or False?, reason if it is False else None, score to be applied)
[ "Does", "the", "package", "have", "Python", "classifiers?", ":", "param", "package_info", ":", "package_info", "dictionary", ":", "return", ":", "Tuple", "(", "is", "the", "condition", "True", "or", "False?", "reason", "if", "it", "is", "False", "else", "None", "score", "to", "be", "applied", ")" ]
python
train
shakefu/pyconfig
pyconfig/__init__.py
https://github.com/shakefu/pyconfig/blob/000cb127db51e03cb4070aae6943e956193cbad5/pyconfig/__init__.py#L414-L472
def load(self, prefix=None, depth=None): """ Return a dictionary of settings loaded from etcd. """ prefix = prefix or self.prefix prefix = '/' + prefix.strip('/') + '/' if depth is None: depth = self.inherit_depth if not self.configured: log.debug("etcd not available") return if self.watching: log.info("Starting watcher for %r", prefix) self.start_watching() log.info("Loading from etcd %r", prefix) try: result = self.client.get(prefix) except self.module.EtcdKeyNotFound: result = None if not result: log.info("No configuration found") return {} # Iterate over the returned keys from etcd update = {} for item in result.children: key = item.key value = item.value # Try to parse them as JSON strings, just in case it works try: value = pytool.json.from_json(value) except: pass # Make the key lower-case if we're not case-sensitive if not self.case_sensitive: key = key.lower() # Strip off the prefix that we're using if key.startswith(prefix): key = key[len(prefix):] # Store the key/value to update the config update[key] = value # Access cached settings directly to avoid recursion inherited = Config().settings.get(self.inherit_key, update.get(self.inherit_key, None)) if depth > 0 and inherited: log.info(" ... inheriting ...") inherited = self.load(inherited, depth - 1) or {} inherited.update(update) update = inherited return update
[ "def", "load", "(", "self", ",", "prefix", "=", "None", ",", "depth", "=", "None", ")", ":", "prefix", "=", "prefix", "or", "self", ".", "prefix", "prefix", "=", "'/'", "+", "prefix", ".", "strip", "(", "'/'", ")", "+", "'/'", "if", "depth", "is", "None", ":", "depth", "=", "self", ".", "inherit_depth", "if", "not", "self", ".", "configured", ":", "log", ".", "debug", "(", "\"etcd not available\"", ")", "return", "if", "self", ".", "watching", ":", "log", ".", "info", "(", "\"Starting watcher for %r\"", ",", "prefix", ")", "self", ".", "start_watching", "(", ")", "log", ".", "info", "(", "\"Loading from etcd %r\"", ",", "prefix", ")", "try", ":", "result", "=", "self", ".", "client", ".", "get", "(", "prefix", ")", "except", "self", ".", "module", ".", "EtcdKeyNotFound", ":", "result", "=", "None", "if", "not", "result", ":", "log", ".", "info", "(", "\"No configuration found\"", ")", "return", "{", "}", "# Iterate over the returned keys from etcd", "update", "=", "{", "}", "for", "item", "in", "result", ".", "children", ":", "key", "=", "item", ".", "key", "value", "=", "item", ".", "value", "# Try to parse them as JSON strings, just in case it works", "try", ":", "value", "=", "pytool", ".", "json", ".", "from_json", "(", "value", ")", "except", ":", "pass", "# Make the key lower-case if we're not case-sensitive", "if", "not", "self", ".", "case_sensitive", ":", "key", "=", "key", ".", "lower", "(", ")", "# Strip off the prefix that we're using", "if", "key", ".", "startswith", "(", "prefix", ")", ":", "key", "=", "key", "[", "len", "(", "prefix", ")", ":", "]", "# Store the key/value to update the config", "update", "[", "key", "]", "=", "value", "# Access cached settings directly to avoid recursion", "inherited", "=", "Config", "(", ")", ".", "settings", ".", "get", "(", "self", ".", "inherit_key", ",", "update", ".", "get", "(", "self", ".", "inherit_key", ",", "None", ")", ")", "if", "depth", ">", "0", "and", "inherited", ":", "log", ".", "info", "(", "\" ... inheriting ...\"", ")", "inherited", "=", "self", ".", "load", "(", "inherited", ",", "depth", "-", "1", ")", "or", "{", "}", "inherited", ".", "update", "(", "update", ")", "update", "=", "inherited", "return", "update" ]
Return a dictionary of settings loaded from etcd.
[ "Return", "a", "dictionary", "of", "settings", "loaded", "from", "etcd", "." ]
python
valid
ralphbean/taskw
taskw/utils.py
https://github.com/ralphbean/taskw/blob/11e2f9132eaedd157f514538de9b5f3b69c30a52/taskw/utils.py#L125-L140
def encode_task_experimental(task): """ Convert a dict-like task to its string representation Used for adding a task via `task add` """ # First, clean the task: task = task.copy() if 'tags' in task: task['tags'] = ','.join(task['tags']) for k in task: task[k] = encode_task_value(k, task[k]) # Then, format it as a string return [ "%s:\"%s\"" % (k, v) if v else "%s:" % (k, ) for k, v in sorted(task.items(), key=itemgetter(0)) ]
[ "def", "encode_task_experimental", "(", "task", ")", ":", "# First, clean the task:", "task", "=", "task", ".", "copy", "(", ")", "if", "'tags'", "in", "task", ":", "task", "[", "'tags'", "]", "=", "','", ".", "join", "(", "task", "[", "'tags'", "]", ")", "for", "k", "in", "task", ":", "task", "[", "k", "]", "=", "encode_task_value", "(", "k", ",", "task", "[", "k", "]", ")", "# Then, format it as a string", "return", "[", "\"%s:\\\"%s\\\"\"", "%", "(", "k", ",", "v", ")", "if", "v", "else", "\"%s:\"", "%", "(", "k", ",", ")", "for", "k", ",", "v", "in", "sorted", "(", "task", ".", "items", "(", ")", ",", "key", "=", "itemgetter", "(", "0", ")", ")", "]" ]
Convert a dict-like task to its string representation Used for adding a task via `task add`
[ "Convert", "a", "dict", "-", "like", "task", "to", "its", "string", "representation", "Used", "for", "adding", "a", "task", "via", "task", "add" ]
python
train
IdentityPython/SATOSA
src/satosa/frontends/saml2.py
https://github.com/IdentityPython/SATOSA/blob/49da5d4c0ac1a5ebf1a71b4f7aaf04f0e52d8fdb/src/satosa/frontends/saml2.py#L522-L544
def _build_idp_config_endpoints(self, config, providers): """ Builds the final frontend module config :type config: dict[str, Any] :type providers: list[str] :rtype: dict[str, Any] :param config: The module config :param providers: A list of backend names :return: The final config """ # Add an endpoint to each provider idp_endpoints = [] for endp_category in self.endpoints: for func, endpoint in self.endpoints[endp_category].items(): for provider in providers: _endpoint = "{base}/{provider}/{endpoint}".format( base=self.base_url, provider=provider, endpoint=endpoint) idp_endpoints.append((_endpoint, func)) config["service"]["idp"]["endpoints"][endp_category] = idp_endpoints return config
[ "def", "_build_idp_config_endpoints", "(", "self", ",", "config", ",", "providers", ")", ":", "# Add an endpoint to each provider", "idp_endpoints", "=", "[", "]", "for", "endp_category", "in", "self", ".", "endpoints", ":", "for", "func", ",", "endpoint", "in", "self", ".", "endpoints", "[", "endp_category", "]", ".", "items", "(", ")", ":", "for", "provider", "in", "providers", ":", "_endpoint", "=", "\"{base}/{provider}/{endpoint}\"", ".", "format", "(", "base", "=", "self", ".", "base_url", ",", "provider", "=", "provider", ",", "endpoint", "=", "endpoint", ")", "idp_endpoints", ".", "append", "(", "(", "_endpoint", ",", "func", ")", ")", "config", "[", "\"service\"", "]", "[", "\"idp\"", "]", "[", "\"endpoints\"", "]", "[", "endp_category", "]", "=", "idp_endpoints", "return", "config" ]
Builds the final frontend module config :type config: dict[str, Any] :type providers: list[str] :rtype: dict[str, Any] :param config: The module config :param providers: A list of backend names :return: The final config
[ "Builds", "the", "final", "frontend", "module", "config" ]
python
train
saltstack/salt
salt/master.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/master.py#L1794-L1818
def revoke_auth(self, load): ''' Allow a minion to request revocation of its own key :param dict load: The minion payload :rtype: dict :return: If the load is invalid, it may be returned. No key operation is performed. :rtype: bool :return: True if key was revoked, False if not ''' load = self.__verify_load(load, ('id', 'tok')) if not self.opts.get('allow_minion_key_revoke', False): log.warning( 'Minion %s requested key revoke, but allow_minion_key_revoke ' 'is set to False', load['id'] ) return load if load is False: return load else: return self.masterapi.revoke_auth(load)
[ "def", "revoke_auth", "(", "self", ",", "load", ")", ":", "load", "=", "self", ".", "__verify_load", "(", "load", ",", "(", "'id'", ",", "'tok'", ")", ")", "if", "not", "self", ".", "opts", ".", "get", "(", "'allow_minion_key_revoke'", ",", "False", ")", ":", "log", ".", "warning", "(", "'Minion %s requested key revoke, but allow_minion_key_revoke '", "'is set to False'", ",", "load", "[", "'id'", "]", ")", "return", "load", "if", "load", "is", "False", ":", "return", "load", "else", ":", "return", "self", ".", "masterapi", ".", "revoke_auth", "(", "load", ")" ]
Allow a minion to request revocation of its own key :param dict load: The minion payload :rtype: dict :return: If the load is invalid, it may be returned. No key operation is performed. :rtype: bool :return: True if key was revoked, False if not
[ "Allow", "a", "minion", "to", "request", "revocation", "of", "its", "own", "key" ]
python
train
fulfilio/python-magento
magento/catalog.py
https://github.com/fulfilio/python-magento/blob/720ec136a6e438a9ee4ee92848a9820b91732750/magento/catalog.py#L272-L286
def create(self, product_type, attribute_set_id, sku, data): """ Create Product and return ID :param product_type: String type of product :param attribute_set_id: ID of attribute set :param sku: SKU of the product :param data: Dictionary of data :return: INT id of product created """ return int(self.call( 'catalog_product.create', [product_type, attribute_set_id, sku, data] ) )
[ "def", "create", "(", "self", ",", "product_type", ",", "attribute_set_id", ",", "sku", ",", "data", ")", ":", "return", "int", "(", "self", ".", "call", "(", "'catalog_product.create'", ",", "[", "product_type", ",", "attribute_set_id", ",", "sku", ",", "data", "]", ")", ")" ]
Create Product and return ID :param product_type: String type of product :param attribute_set_id: ID of attribute set :param sku: SKU of the product :param data: Dictionary of data :return: INT id of product created
[ "Create", "Product", "and", "return", "ID" ]
python
train
Ex-Mente/auxi.0
auxi/modelling/business/models.py
https://github.com/Ex-Mente/auxi.0/blob/2dcdae74154f136f8ca58289fe5b20772f215046/auxi/modelling/business/models.py#L65-L78
def create_entity(self, name, gl_structure, description=None): """ Create an entity and add it to the model. :param name: The entity name. :param gl_structure: The entity's general ledger structure. :param description: The entity description. :returns: The created entity. """ new_entity = Entity(name, gl_structure, description=description) self.entities.append(new_entity) return new_entity
[ "def", "create_entity", "(", "self", ",", "name", ",", "gl_structure", ",", "description", "=", "None", ")", ":", "new_entity", "=", "Entity", "(", "name", ",", "gl_structure", ",", "description", "=", "description", ")", "self", ".", "entities", ".", "append", "(", "new_entity", ")", "return", "new_entity" ]
Create an entity and add it to the model. :param name: The entity name. :param gl_structure: The entity's general ledger structure. :param description: The entity description. :returns: The created entity.
[ "Create", "an", "entity", "and", "add", "it", "to", "the", "model", "." ]
python
valid
fermiPy/fermipy
fermipy/fits_utils.py
https://github.com/fermiPy/fermipy/blob/9df5e7e3728307fd58c5bba36fd86783c39fbad4/fermipy/fits_utils.py#L125-L176
def read_projection_from_fits(fitsfile, extname=None): """ Load a WCS or HPX projection. """ f = fits.open(fitsfile) nhdu = len(f) # Try and get the energy bounds try: ebins = find_and_read_ebins(f) except: ebins = None if extname is None: # If there is an image in the Primary HDU we can return a WCS-based # projection if f[0].header['NAXIS'] != 0: proj = WCS(f[0].header) return proj, f, f[0] else: if f[extname].header['XTENSION'] == 'IMAGE': proj = WCS(f[extname].header) return proj, f, f[extname] elif extname in ['SKYMAP', 'SKYMAP2']: proj = HPX.create_from_hdu(f[extname], ebins) return proj, f, f[extname] elif f[extname].header['XTENSION'] == 'BINTABLE': try: if f[extname].header['PIXTYPE'] == 'HEALPIX': proj = HPX.create_from_hdu(f[extname], ebins) return proj, f, f[extname] except: pass return None, f, None # Loop on HDU and look for either an image or a table with HEALPix data for i in range(1, nhdu): # if there is an image we can return a WCS-based projection if f[i].header['XTENSION'] == 'IMAGE': proj = WCS(f[i].header) return proj, f, f[i] elif f[i].header['XTENSION'] == 'BINTABLE': if f[i].name in ['SKYMAP', 'SKYMAP2']: proj = HPX.create_from_hdu(f[i], ebins) return proj, f, f[i] try: if f[i].header['PIXTYPE'] == 'HEALPIX': proj = HPX.create_from_hdu(f[i], ebins) return proj, f, f[i] except: pass return None, f, None
[ "def", "read_projection_from_fits", "(", "fitsfile", ",", "extname", "=", "None", ")", ":", "f", "=", "fits", ".", "open", "(", "fitsfile", ")", "nhdu", "=", "len", "(", "f", ")", "# Try and get the energy bounds", "try", ":", "ebins", "=", "find_and_read_ebins", "(", "f", ")", "except", ":", "ebins", "=", "None", "if", "extname", "is", "None", ":", "# If there is an image in the Primary HDU we can return a WCS-based", "# projection", "if", "f", "[", "0", "]", ".", "header", "[", "'NAXIS'", "]", "!=", "0", ":", "proj", "=", "WCS", "(", "f", "[", "0", "]", ".", "header", ")", "return", "proj", ",", "f", ",", "f", "[", "0", "]", "else", ":", "if", "f", "[", "extname", "]", ".", "header", "[", "'XTENSION'", "]", "==", "'IMAGE'", ":", "proj", "=", "WCS", "(", "f", "[", "extname", "]", ".", "header", ")", "return", "proj", ",", "f", ",", "f", "[", "extname", "]", "elif", "extname", "in", "[", "'SKYMAP'", ",", "'SKYMAP2'", "]", ":", "proj", "=", "HPX", ".", "create_from_hdu", "(", "f", "[", "extname", "]", ",", "ebins", ")", "return", "proj", ",", "f", ",", "f", "[", "extname", "]", "elif", "f", "[", "extname", "]", ".", "header", "[", "'XTENSION'", "]", "==", "'BINTABLE'", ":", "try", ":", "if", "f", "[", "extname", "]", ".", "header", "[", "'PIXTYPE'", "]", "==", "'HEALPIX'", ":", "proj", "=", "HPX", ".", "create_from_hdu", "(", "f", "[", "extname", "]", ",", "ebins", ")", "return", "proj", ",", "f", ",", "f", "[", "extname", "]", "except", ":", "pass", "return", "None", ",", "f", ",", "None", "# Loop on HDU and look for either an image or a table with HEALPix data", "for", "i", "in", "range", "(", "1", ",", "nhdu", ")", ":", "# if there is an image we can return a WCS-based projection", "if", "f", "[", "i", "]", ".", "header", "[", "'XTENSION'", "]", "==", "'IMAGE'", ":", "proj", "=", "WCS", "(", "f", "[", "i", "]", ".", "header", ")", "return", "proj", ",", "f", ",", "f", "[", "i", "]", "elif", "f", "[", "i", "]", ".", "header", "[", "'XTENSION'", "]", "==", "'BINTABLE'", ":", "if", "f", "[", "i", "]", ".", "name", "in", "[", "'SKYMAP'", ",", "'SKYMAP2'", "]", ":", "proj", "=", "HPX", ".", "create_from_hdu", "(", "f", "[", "i", "]", ",", "ebins", ")", "return", "proj", ",", "f", ",", "f", "[", "i", "]", "try", ":", "if", "f", "[", "i", "]", ".", "header", "[", "'PIXTYPE'", "]", "==", "'HEALPIX'", ":", "proj", "=", "HPX", ".", "create_from_hdu", "(", "f", "[", "i", "]", ",", "ebins", ")", "return", "proj", ",", "f", ",", "f", "[", "i", "]", "except", ":", "pass", "return", "None", ",", "f", ",", "None" ]
Load a WCS or HPX projection.
[ "Load", "a", "WCS", "or", "HPX", "projection", "." ]
python
train
nerdvegas/rez
src/rez/vendor/amqp/connection.py
https://github.com/nerdvegas/rez/blob/1d3b846d53b5b5404edfe8ddb9083f9ceec8c5e7/src/rez/vendor/amqp/connection.py#L532-L536
def _blocked(self, args): """RabbitMQ Extension.""" reason = args.read_shortstr() if self.on_blocked: return self.on_blocked(reason)
[ "def", "_blocked", "(", "self", ",", "args", ")", ":", "reason", "=", "args", ".", "read_shortstr", "(", ")", "if", "self", ".", "on_blocked", ":", "return", "self", ".", "on_blocked", "(", "reason", ")" ]
RabbitMQ Extension.
[ "RabbitMQ", "Extension", "." ]
python
train
a1ezzz/wasp-general
wasp_general/network/messenger/coders.py
https://github.com/a1ezzz/wasp-general/blob/1029839d33eb663f8dec76c1c46754d53c1de4a9/wasp_general/network/messenger/coders.py#L128-L157
def decode(self, envelope, session, target=None, modification_code=None, **kwargs): """ Methods checks envelope for 'modification_code' existence and removes it. :param envelope: original envelope :param session: original session :param target: flag, that specifies whether code must be searched and removed at the start or at the end :param modification_code: code to search/remove :param kwargs: additional arguments :return: WMessengerTextEnvelope or WMessengerBytesEnvelope (depends on the original envelope) """ self.__args_check(envelope, target, modification_code) message = envelope.message() if len(message) < len(modification_code): raise ValueError('Invalid message length') if isinstance(envelope, WMessengerTextEnvelope): target_envelope_cls = WMessengerTextEnvelope else: # isinstance(envelope, WMessengerBytesEnvelope) target_envelope_cls = WMessengerBytesEnvelope if target == WMessengerFixedModificationLayer.Target.head: if message[:len(modification_code)] != modification_code: raise ValueError('Invalid header in message') return target_envelope_cls(message[len(modification_code):], meta=envelope) else: # target == WMessengerFixedModificationLayer.Target.tail if message[-len(modification_code):] != modification_code: raise ValueError('Invalid tail in message') return target_envelope_cls(message[:-len(modification_code)], meta=envelope)
[ "def", "decode", "(", "self", ",", "envelope", ",", "session", ",", "target", "=", "None", ",", "modification_code", "=", "None", ",", "*", "*", "kwargs", ")", ":", "self", ".", "__args_check", "(", "envelope", ",", "target", ",", "modification_code", ")", "message", "=", "envelope", ".", "message", "(", ")", "if", "len", "(", "message", ")", "<", "len", "(", "modification_code", ")", ":", "raise", "ValueError", "(", "'Invalid message length'", ")", "if", "isinstance", "(", "envelope", ",", "WMessengerTextEnvelope", ")", ":", "target_envelope_cls", "=", "WMessengerTextEnvelope", "else", ":", "# isinstance(envelope, WMessengerBytesEnvelope)", "target_envelope_cls", "=", "WMessengerBytesEnvelope", "if", "target", "==", "WMessengerFixedModificationLayer", ".", "Target", ".", "head", ":", "if", "message", "[", ":", "len", "(", "modification_code", ")", "]", "!=", "modification_code", ":", "raise", "ValueError", "(", "'Invalid header in message'", ")", "return", "target_envelope_cls", "(", "message", "[", "len", "(", "modification_code", ")", ":", "]", ",", "meta", "=", "envelope", ")", "else", ":", "# target == WMessengerFixedModificationLayer.Target.tail", "if", "message", "[", "-", "len", "(", "modification_code", ")", ":", "]", "!=", "modification_code", ":", "raise", "ValueError", "(", "'Invalid tail in message'", ")", "return", "target_envelope_cls", "(", "message", "[", ":", "-", "len", "(", "modification_code", ")", "]", ",", "meta", "=", "envelope", ")" ]
Methods checks envelope for 'modification_code' existence and removes it. :param envelope: original envelope :param session: original session :param target: flag, that specifies whether code must be searched and removed at the start or at the end :param modification_code: code to search/remove :param kwargs: additional arguments :return: WMessengerTextEnvelope or WMessengerBytesEnvelope (depends on the original envelope)
[ "Methods", "checks", "envelope", "for", "modification_code", "existence", "and", "removes", "it", "." ]
python
train
oasiswork/zimsoap
zimsoap/zobjects.py
https://github.com/oasiswork/zimsoap/blob/d1ea2eb4d50f263c9a16e5549af03f1eff3e295e/zimsoap/zobjects.py#L364-L380
def to_selector(self): """ For some reason, the selector for <identity> is <identity id="1234" /> rather than <identity by="id"></identity> """ for i in self.SELECTORS: if hasattr(self, i): val = getattr(self, i) selector = i break return {selector: val}
[ "def", "to_selector", "(", "self", ")", ":", "for", "i", "in", "self", ".", "SELECTORS", ":", "if", "hasattr", "(", "self", ",", "i", ")", ":", "val", "=", "getattr", "(", "self", ",", "i", ")", "selector", "=", "i", "break", "return", "{", "selector", ":", "val", "}" ]
For some reason, the selector for <identity> is <identity id="1234" /> rather than <identity by="id"></identity>
[ "For", "some", "reason", "the", "selector", "for", "<identity", ">", "is" ]
python
train
fracpete/python-weka-wrapper3
python/weka/classifiers.py
https://github.com/fracpete/python-weka-wrapper3/blob/d850ab1bdb25fbd5a8d86e99f34a397975425838/python/weka/classifiers.py#L2027-L2040
def print_classification(self, cls, inst, index): """ Prints the classification to the buffer. :param cls: the classifier :type cls: Classifier :param inst: the test instance :type inst: Instance :param index: the 0-based index of the test instance :type index: int """ javabridge.call( self.jobject, "printClassification", "(Lweka/classifiers/Classifier;Lweka/core/Instance;I)V", cls.jobject, inst.jobject, index)
[ "def", "print_classification", "(", "self", ",", "cls", ",", "inst", ",", "index", ")", ":", "javabridge", ".", "call", "(", "self", ".", "jobject", ",", "\"printClassification\"", ",", "\"(Lweka/classifiers/Classifier;Lweka/core/Instance;I)V\"", ",", "cls", ".", "jobject", ",", "inst", ".", "jobject", ",", "index", ")" ]
Prints the classification to the buffer. :param cls: the classifier :type cls: Classifier :param inst: the test instance :type inst: Instance :param index: the 0-based index of the test instance :type index: int
[ "Prints", "the", "classification", "to", "the", "buffer", "." ]
python
train
snare/voltron
voltron/api.py
https://github.com/snare/voltron/blob/4ee3cbe6f7c1e38303f5dc6114c48b60217253c3/voltron/api.py#L236-L244
def from_json(self, data): """ Initialise an API message from a JSON representation. """ try: d = json.loads(data) except ValueError: raise InvalidMessageException() self.from_dict(d)
[ "def", "from_json", "(", "self", ",", "data", ")", ":", "try", ":", "d", "=", "json", ".", "loads", "(", "data", ")", "except", "ValueError", ":", "raise", "InvalidMessageException", "(", ")", "self", ".", "from_dict", "(", "d", ")" ]
Initialise an API message from a JSON representation.
[ "Initialise", "an", "API", "message", "from", "a", "JSON", "representation", "." ]
python
train
gwastro/pycbc
pycbc/workflow/core.py
https://github.com/gwastro/pycbc/blob/7a64cdd104d263f1b6ea0b01e6841837d05a4cb3/pycbc/workflow/core.py#L800-L825
def save_config(self, fname, output_dir, cp=None): """ Writes configuration file to disk and returns a pycbc.workflow.File instance for the configuration file. Parameters ----------- fname : string The filename of the configuration file written to disk. output_dir : string The directory where the file is written to disk. cp : ConfigParser object The ConfigParser object to write. If None then uses self.cp. Returns ------- FileList The FileList object with the configuration file. """ cp = self.cp if cp is None else cp ini_file_path = os.path.join(output_dir, fname) with open(ini_file_path, "wb") as fp: cp.write(fp) ini_file = FileList([File(self.ifos, "", self.analysis_time, file_url="file://" + ini_file_path)]) return ini_file
[ "def", "save_config", "(", "self", ",", "fname", ",", "output_dir", ",", "cp", "=", "None", ")", ":", "cp", "=", "self", ".", "cp", "if", "cp", "is", "None", "else", "cp", "ini_file_path", "=", "os", ".", "path", ".", "join", "(", "output_dir", ",", "fname", ")", "with", "open", "(", "ini_file_path", ",", "\"wb\"", ")", "as", "fp", ":", "cp", ".", "write", "(", "fp", ")", "ini_file", "=", "FileList", "(", "[", "File", "(", "self", ".", "ifos", ",", "\"\"", ",", "self", ".", "analysis_time", ",", "file_url", "=", "\"file://\"", "+", "ini_file_path", ")", "]", ")", "return", "ini_file" ]
Writes configuration file to disk and returns a pycbc.workflow.File instance for the configuration file. Parameters ----------- fname : string The filename of the configuration file written to disk. output_dir : string The directory where the file is written to disk. cp : ConfigParser object The ConfigParser object to write. If None then uses self.cp. Returns ------- FileList The FileList object with the configuration file.
[ "Writes", "configuration", "file", "to", "disk", "and", "returns", "a", "pycbc", ".", "workflow", ".", "File", "instance", "for", "the", "configuration", "file", "." ]
python
train
yhat/pandasql
pandasql/sqldf.py
https://github.com/yhat/pandasql/blob/e799c6f53be9653e8998a25adb5e2f1643442699/pandasql/sqldf.py#L110-L117
def extract_table_names(query): """ Extract table names from an SQL query. """ # a good old fashioned regex. turns out this worked better than actually parsing the code tables_blocks = re.findall(r'(?:FROM|JOIN)\s+(\w+(?:\s*,\s*\w+)*)', query, re.IGNORECASE) tables = [tbl for block in tables_blocks for tbl in re.findall(r'\w+', block)] return set(tables)
[ "def", "extract_table_names", "(", "query", ")", ":", "# a good old fashioned regex. turns out this worked better than actually parsing the code", "tables_blocks", "=", "re", ".", "findall", "(", "r'(?:FROM|JOIN)\\s+(\\w+(?:\\s*,\\s*\\w+)*)'", ",", "query", ",", "re", ".", "IGNORECASE", ")", "tables", "=", "[", "tbl", "for", "block", "in", "tables_blocks", "for", "tbl", "in", "re", ".", "findall", "(", "r'\\w+'", ",", "block", ")", "]", "return", "set", "(", "tables", ")" ]
Extract table names from an SQL query.
[ "Extract", "table", "names", "from", "an", "SQL", "query", "." ]
python
train
Microsoft/azure-devops-python-api
azure-devops/azure/devops/v5_0/security/security_client.py
https://github.com/Microsoft/azure-devops-python-api/blob/4777ffda2f5052fabbaddb2abe9cb434e0cf1aa8/azure-devops/azure/devops/v5_0/security/security_client.py#L98-L119
def remove_access_control_lists(self, security_namespace_id, tokens=None, recurse=None): """RemoveAccessControlLists. Remove access control lists under the specfied security namespace. :param str security_namespace_id: Security namespace identifier. :param str tokens: One or more comma-separated security tokens :param bool recurse: If true and this is a hierarchical namespace, also remove child ACLs of the specified tokens. :rtype: bool """ route_values = {} if security_namespace_id is not None: route_values['securityNamespaceId'] = self._serialize.url('security_namespace_id', security_namespace_id, 'str') query_parameters = {} if tokens is not None: query_parameters['tokens'] = self._serialize.query('tokens', tokens, 'str') if recurse is not None: query_parameters['recurse'] = self._serialize.query('recurse', recurse, 'bool') response = self._send(http_method='DELETE', location_id='18a2ad18-7571-46ae-bec7-0c7da1495885', version='5.0', route_values=route_values, query_parameters=query_parameters) return self._deserialize('bool', response)
[ "def", "remove_access_control_lists", "(", "self", ",", "security_namespace_id", ",", "tokens", "=", "None", ",", "recurse", "=", "None", ")", ":", "route_values", "=", "{", "}", "if", "security_namespace_id", "is", "not", "None", ":", "route_values", "[", "'securityNamespaceId'", "]", "=", "self", ".", "_serialize", ".", "url", "(", "'security_namespace_id'", ",", "security_namespace_id", ",", "'str'", ")", "query_parameters", "=", "{", "}", "if", "tokens", "is", "not", "None", ":", "query_parameters", "[", "'tokens'", "]", "=", "self", ".", "_serialize", ".", "query", "(", "'tokens'", ",", "tokens", ",", "'str'", ")", "if", "recurse", "is", "not", "None", ":", "query_parameters", "[", "'recurse'", "]", "=", "self", ".", "_serialize", ".", "query", "(", "'recurse'", ",", "recurse", ",", "'bool'", ")", "response", "=", "self", ".", "_send", "(", "http_method", "=", "'DELETE'", ",", "location_id", "=", "'18a2ad18-7571-46ae-bec7-0c7da1495885'", ",", "version", "=", "'5.0'", ",", "route_values", "=", "route_values", ",", "query_parameters", "=", "query_parameters", ")", "return", "self", ".", "_deserialize", "(", "'bool'", ",", "response", ")" ]
RemoveAccessControlLists. Remove access control lists under the specfied security namespace. :param str security_namespace_id: Security namespace identifier. :param str tokens: One or more comma-separated security tokens :param bool recurse: If true and this is a hierarchical namespace, also remove child ACLs of the specified tokens. :rtype: bool
[ "RemoveAccessControlLists", ".", "Remove", "access", "control", "lists", "under", "the", "specfied", "security", "namespace", ".", ":", "param", "str", "security_namespace_id", ":", "Security", "namespace", "identifier", ".", ":", "param", "str", "tokens", ":", "One", "or", "more", "comma", "-", "separated", "security", "tokens", ":", "param", "bool", "recurse", ":", "If", "true", "and", "this", "is", "a", "hierarchical", "namespace", "also", "remove", "child", "ACLs", "of", "the", "specified", "tokens", ".", ":", "rtype", ":", "bool" ]
python
train
boriel/zxbasic
zxbparser.py
https://github.com/boriel/zxbasic/blob/23b28db10e41117805bdb3c0f78543590853b132/zxbparser.py#L1948-L1960
def p_return(p): """ statement : RETURN """ if not FUNCTION_LEVEL: # At less one level, otherwise, this return is from a GOSUB p[0] = make_sentence('RETURN') return if FUNCTION_LEVEL[-1].kind != KIND.sub: syntax_error(p.lineno(1), 'Syntax Error: Functions must RETURN a value, or use EXIT FUNCTION instead.') p[0] = None return p[0] = make_sentence('RETURN', FUNCTION_LEVEL[-1])
[ "def", "p_return", "(", "p", ")", ":", "if", "not", "FUNCTION_LEVEL", ":", "# At less one level, otherwise, this return is from a GOSUB", "p", "[", "0", "]", "=", "make_sentence", "(", "'RETURN'", ")", "return", "if", "FUNCTION_LEVEL", "[", "-", "1", "]", ".", "kind", "!=", "KIND", ".", "sub", ":", "syntax_error", "(", "p", ".", "lineno", "(", "1", ")", ",", "'Syntax Error: Functions must RETURN a value, or use EXIT FUNCTION instead.'", ")", "p", "[", "0", "]", "=", "None", "return", "p", "[", "0", "]", "=", "make_sentence", "(", "'RETURN'", ",", "FUNCTION_LEVEL", "[", "-", "1", "]", ")" ]
statement : RETURN
[ "statement", ":", "RETURN" ]
python
train
jstitch/MambuPy
MambuPy/mambuutil.py
https://github.com/jstitch/MambuPy/blob/2af98cc12e7ed5ec183b3e97644e880e70b79ee8/MambuPy/mambuutil.py#L831-L959
def backup_db(callback, bool_func, output_fname, *args, **kwargs): """Backup Mambu Database via REST API. Makes two calls to Mambu API: - a POST to request a backup to be made - a GET, once the backup is ready, to download the latest backup * callback is a string to a callback URL Mambu will internally call when the backup is ready to download. You should have a webservice there to warn you when the backup is ready. * bool_func is a function you use against your own code to test if the said backup is ready. This function backup_db manages both the logic of the request of a backup and the downloading of it too, so bool_func allows you to have some way on your side to know when this function will download the backup. The thing is you have to build a webservice (for the callback) making some kind of flag turn that your bool_func will read and know when to say True, telling backup_db to begin the download of the backup. * output_fname the name of the file that will hold the downloaded backup. PLEASE MIND that Mambu sends a ZIP file here. * user, pwd and url allow you to change the Mambu permissions for the getmambuurl internally called here. * verbose is a boolean flag for verbosity. * retries number of retries for bool_func or -1 for keep waiting. * force_download_latest boolean, True to force download even if no callback is called. False to throw error if callback isn't received after retries. * returns a dictionary with info about the download -latest boolean flag, if the db downloaded was the latest or not """ from datetime import datetime try: verbose = kwargs['verbose'] except KeyError: verbose = False try: retries = kwargs['retries'] except KeyError: retries = -1 try: force_download_latest = bool(kwargs['force_download_latest']) except KeyError: force_download_latest = False if verbose: log = open('/tmp/log_mambu_backup','a') log.write(datetime.now().strftime('%Y-%m-%d %H:%M:%S') + " - Mambu DB Backup\n") log.flush() user = kwargs.pop('user', apiuser) pwd = kwargs.pop('pwd', apipwd) data = {'callback' : callback} try: posturl = iriToUri(getmambuurl(*args, **kwargs) + "database/backup") if verbose: log.write("open url: "+posturl+"\n") log.flush() resp = requests.post(posturl, data=data, headers={'content-type': 'application/json'}, auth=(apiuser, apipwd)) except Exception as ex: mess = "Error requesting backup: %s" % repr(ex) if verbose: log.write(mess + "\n") log.close() raise MambuError(mess) if resp.status_code != 200: mess = "Error posting request for backup: %s" % resp.content if verbose: log.write(mess + "\n") log.close() raise MambuCommError(mess) data['latest'] = True while retries and not bool_func(): if verbose: log.write("waiting...\n") log.flush() sleep(10) retries -= 1 if retries < 0: retries = -1 if not retries: mess = "Tired of waiting, giving up..." if verbose: log.write(mess + "\n") log.flush() if not force_download_latest: if verbose: log.close() raise MambuError(mess) else: data['latest'] = False sleep(30) geturl = iriToUri(getmambuurl(*args, **kwargs) + "database/backup/LATEST") if verbose: log.write("open url: "+geturl+"\n") log.flush() resp = requests.get(geturl, auth=(apiuser, apipwd)) if resp.status_code != 200: mess = "Error getting database backup: %s" % resp.content if verbose: log.write(mess + "\n") log.close() raise MambuCommError(mess) if verbose: log.write("saving...\n") log.flush() with open(output_fname, "w") as fw: fw.write(resp.content) if verbose: log.write("DONE!\n") log.close() return data
[ "def", "backup_db", "(", "callback", ",", "bool_func", ",", "output_fname", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "from", "datetime", "import", "datetime", "try", ":", "verbose", "=", "kwargs", "[", "'verbose'", "]", "except", "KeyError", ":", "verbose", "=", "False", "try", ":", "retries", "=", "kwargs", "[", "'retries'", "]", "except", "KeyError", ":", "retries", "=", "-", "1", "try", ":", "force_download_latest", "=", "bool", "(", "kwargs", "[", "'force_download_latest'", "]", ")", "except", "KeyError", ":", "force_download_latest", "=", "False", "if", "verbose", ":", "log", "=", "open", "(", "'/tmp/log_mambu_backup'", ",", "'a'", ")", "log", ".", "write", "(", "datetime", ".", "now", "(", ")", ".", "strftime", "(", "'%Y-%m-%d %H:%M:%S'", ")", "+", "\" - Mambu DB Backup\\n\"", ")", "log", ".", "flush", "(", ")", "user", "=", "kwargs", ".", "pop", "(", "'user'", ",", "apiuser", ")", "pwd", "=", "kwargs", ".", "pop", "(", "'pwd'", ",", "apipwd", ")", "data", "=", "{", "'callback'", ":", "callback", "}", "try", ":", "posturl", "=", "iriToUri", "(", "getmambuurl", "(", "*", "args", ",", "*", "*", "kwargs", ")", "+", "\"database/backup\"", ")", "if", "verbose", ":", "log", ".", "write", "(", "\"open url: \"", "+", "posturl", "+", "\"\\n\"", ")", "log", ".", "flush", "(", ")", "resp", "=", "requests", ".", "post", "(", "posturl", ",", "data", "=", "data", ",", "headers", "=", "{", "'content-type'", ":", "'application/json'", "}", ",", "auth", "=", "(", "apiuser", ",", "apipwd", ")", ")", "except", "Exception", "as", "ex", ":", "mess", "=", "\"Error requesting backup: %s\"", "%", "repr", "(", "ex", ")", "if", "verbose", ":", "log", ".", "write", "(", "mess", "+", "\"\\n\"", ")", "log", ".", "close", "(", ")", "raise", "MambuError", "(", "mess", ")", "if", "resp", ".", "status_code", "!=", "200", ":", "mess", "=", "\"Error posting request for backup: %s\"", "%", "resp", ".", "content", "if", "verbose", ":", "log", ".", "write", "(", "mess", "+", "\"\\n\"", ")", "log", ".", "close", "(", ")", "raise", "MambuCommError", "(", "mess", ")", "data", "[", "'latest'", "]", "=", "True", "while", "retries", "and", "not", "bool_func", "(", ")", ":", "if", "verbose", ":", "log", ".", "write", "(", "\"waiting...\\n\"", ")", "log", ".", "flush", "(", ")", "sleep", "(", "10", ")", "retries", "-=", "1", "if", "retries", "<", "0", ":", "retries", "=", "-", "1", "if", "not", "retries", ":", "mess", "=", "\"Tired of waiting, giving up...\"", "if", "verbose", ":", "log", ".", "write", "(", "mess", "+", "\"\\n\"", ")", "log", ".", "flush", "(", ")", "if", "not", "force_download_latest", ":", "if", "verbose", ":", "log", ".", "close", "(", ")", "raise", "MambuError", "(", "mess", ")", "else", ":", "data", "[", "'latest'", "]", "=", "False", "sleep", "(", "30", ")", "geturl", "=", "iriToUri", "(", "getmambuurl", "(", "*", "args", ",", "*", "*", "kwargs", ")", "+", "\"database/backup/LATEST\"", ")", "if", "verbose", ":", "log", ".", "write", "(", "\"open url: \"", "+", "geturl", "+", "\"\\n\"", ")", "log", ".", "flush", "(", ")", "resp", "=", "requests", ".", "get", "(", "geturl", ",", "auth", "=", "(", "apiuser", ",", "apipwd", ")", ")", "if", "resp", ".", "status_code", "!=", "200", ":", "mess", "=", "\"Error getting database backup: %s\"", "%", "resp", ".", "content", "if", "verbose", ":", "log", ".", "write", "(", "mess", "+", "\"\\n\"", ")", "log", ".", "close", "(", ")", "raise", "MambuCommError", "(", "mess", ")", "if", "verbose", ":", "log", ".", "write", "(", "\"saving...\\n\"", ")", "log", ".", "flush", "(", ")", "with", "open", "(", "output_fname", ",", "\"w\"", ")", "as", "fw", ":", "fw", ".", "write", "(", "resp", ".", "content", ")", "if", "verbose", ":", "log", ".", "write", "(", "\"DONE!\\n\"", ")", "log", ".", "close", "(", ")", "return", "data" ]
Backup Mambu Database via REST API. Makes two calls to Mambu API: - a POST to request a backup to be made - a GET, once the backup is ready, to download the latest backup * callback is a string to a callback URL Mambu will internally call when the backup is ready to download. You should have a webservice there to warn you when the backup is ready. * bool_func is a function you use against your own code to test if the said backup is ready. This function backup_db manages both the logic of the request of a backup and the downloading of it too, so bool_func allows you to have some way on your side to know when this function will download the backup. The thing is you have to build a webservice (for the callback) making some kind of flag turn that your bool_func will read and know when to say True, telling backup_db to begin the download of the backup. * output_fname the name of the file that will hold the downloaded backup. PLEASE MIND that Mambu sends a ZIP file here. * user, pwd and url allow you to change the Mambu permissions for the getmambuurl internally called here. * verbose is a boolean flag for verbosity. * retries number of retries for bool_func or -1 for keep waiting. * force_download_latest boolean, True to force download even if no callback is called. False to throw error if callback isn't received after retries. * returns a dictionary with info about the download -latest boolean flag, if the db downloaded was the latest or not
[ "Backup", "Mambu", "Database", "via", "REST", "API", "." ]
python
train
petrjasek/eve-elastic
eve_elastic/elastic.py
https://github.com/petrjasek/eve-elastic/blob/f146f31b348d22ac5559cf78717b3bb02efcb2d7/eve_elastic/elastic.py#L418-L429
def get_index_by_alias(self, alias): """Get index name for given alias. If there is no alias assume it's an index. :param alias: alias name """ try: info = self.es.indices.get_alias(name=alias) return next(iter(info.keys())) except elasticsearch.exceptions.NotFoundError: return alias
[ "def", "get_index_by_alias", "(", "self", ",", "alias", ")", ":", "try", ":", "info", "=", "self", ".", "es", ".", "indices", ".", "get_alias", "(", "name", "=", "alias", ")", "return", "next", "(", "iter", "(", "info", ".", "keys", "(", ")", ")", ")", "except", "elasticsearch", ".", "exceptions", ".", "NotFoundError", ":", "return", "alias" ]
Get index name for given alias. If there is no alias assume it's an index. :param alias: alias name
[ "Get", "index", "name", "for", "given", "alias", "." ]
python
train
klahnakoski/pyLibrary
jx_base/query.py
https://github.com/klahnakoski/pyLibrary/blob/fa2dcbc48fda8d26999baef400e9a98149e0b982/jx_base/query.py#L373-L421
def _normalize_select_no_context(select, schema=None): """ SAME NORMALIZE, BUT NO SOURCE OF COLUMNS """ if not _Column: _late_import() if is_text(select): select = Data(value=select) else: select = wrap(select) output = select.copy() if not select.value: output.name = coalesce(select.name, select.aggregate) if output.name: output.value = jx_expression(".", schema=schema) else: return Null elif is_text(select.value): if select.value.endswith(".*"): name = select.value[:-2].lstrip(".") output.name = coalesce(select.name, name) output.value = LeavesOp(Variable(name), prefix=coalesce(select.prefix, name)) else: if select.value == ".": output.name = coalesce(select.name, select.aggregate, ".") output.value = jx_expression(select.value, schema=schema) elif select.value == "*": output.name = coalesce(select.name, select.aggregate, ".") output.value = LeavesOp(Variable(".")) else: output.name = coalesce(select.name, select.value.lstrip("."), select.aggregate) output.value = jx_expression(select.value, schema=schema) elif is_number(output.value): if not output.name: output.name = text_type(output.value) output.value = jx_expression(select.value, schema=schema) else: output.value = jx_expression(select.value, schema=schema) if not output.name: Log.error("expecting select to have a name: {{select}}", select= select) if output.name.endswith(".*"): Log.error("{{name|quote}} is invalid select", name=output.name) output.aggregate = coalesce(canonical_aggregates[select.aggregate].name, select.aggregate, "none") output.default = coalesce(select.default, canonical_aggregates[output.aggregate].default) return output
[ "def", "_normalize_select_no_context", "(", "select", ",", "schema", "=", "None", ")", ":", "if", "not", "_Column", ":", "_late_import", "(", ")", "if", "is_text", "(", "select", ")", ":", "select", "=", "Data", "(", "value", "=", "select", ")", "else", ":", "select", "=", "wrap", "(", "select", ")", "output", "=", "select", ".", "copy", "(", ")", "if", "not", "select", ".", "value", ":", "output", ".", "name", "=", "coalesce", "(", "select", ".", "name", ",", "select", ".", "aggregate", ")", "if", "output", ".", "name", ":", "output", ".", "value", "=", "jx_expression", "(", "\".\"", ",", "schema", "=", "schema", ")", "else", ":", "return", "Null", "elif", "is_text", "(", "select", ".", "value", ")", ":", "if", "select", ".", "value", ".", "endswith", "(", "\".*\"", ")", ":", "name", "=", "select", ".", "value", "[", ":", "-", "2", "]", ".", "lstrip", "(", "\".\"", ")", "output", ".", "name", "=", "coalesce", "(", "select", ".", "name", ",", "name", ")", "output", ".", "value", "=", "LeavesOp", "(", "Variable", "(", "name", ")", ",", "prefix", "=", "coalesce", "(", "select", ".", "prefix", ",", "name", ")", ")", "else", ":", "if", "select", ".", "value", "==", "\".\"", ":", "output", ".", "name", "=", "coalesce", "(", "select", ".", "name", ",", "select", ".", "aggregate", ",", "\".\"", ")", "output", ".", "value", "=", "jx_expression", "(", "select", ".", "value", ",", "schema", "=", "schema", ")", "elif", "select", ".", "value", "==", "\"*\"", ":", "output", ".", "name", "=", "coalesce", "(", "select", ".", "name", ",", "select", ".", "aggregate", ",", "\".\"", ")", "output", ".", "value", "=", "LeavesOp", "(", "Variable", "(", "\".\"", ")", ")", "else", ":", "output", ".", "name", "=", "coalesce", "(", "select", ".", "name", ",", "select", ".", "value", ".", "lstrip", "(", "\".\"", ")", ",", "select", ".", "aggregate", ")", "output", ".", "value", "=", "jx_expression", "(", "select", ".", "value", ",", "schema", "=", "schema", ")", "elif", "is_number", "(", "output", ".", "value", ")", ":", "if", "not", "output", ".", "name", ":", "output", ".", "name", "=", "text_type", "(", "output", ".", "value", ")", "output", ".", "value", "=", "jx_expression", "(", "select", ".", "value", ",", "schema", "=", "schema", ")", "else", ":", "output", ".", "value", "=", "jx_expression", "(", "select", ".", "value", ",", "schema", "=", "schema", ")", "if", "not", "output", ".", "name", ":", "Log", ".", "error", "(", "\"expecting select to have a name: {{select}}\"", ",", "select", "=", "select", ")", "if", "output", ".", "name", ".", "endswith", "(", "\".*\"", ")", ":", "Log", ".", "error", "(", "\"{{name|quote}} is invalid select\"", ",", "name", "=", "output", ".", "name", ")", "output", ".", "aggregate", "=", "coalesce", "(", "canonical_aggregates", "[", "select", ".", "aggregate", "]", ".", "name", ",", "select", ".", "aggregate", ",", "\"none\"", ")", "output", ".", "default", "=", "coalesce", "(", "select", ".", "default", ",", "canonical_aggregates", "[", "output", ".", "aggregate", "]", ".", "default", ")", "return", "output" ]
SAME NORMALIZE, BUT NO SOURCE OF COLUMNS
[ "SAME", "NORMALIZE", "BUT", "NO", "SOURCE", "OF", "COLUMNS" ]
python
train
Capitains/MyCapytain
MyCapytain/resources/texts/local/capitains/cts.py
https://github.com/Capitains/MyCapytain/blob/b11bbf6b6ae141fc02be70471e3fbf6907be6593/MyCapytain/resources/texts/local/capitains/cts.py#L349-L361
def childIds(self): """ Children of the passage :rtype: None, CtsReference :returns: Dictionary of chidren, where key are subreferences """ if self.depth >= len(self.citation.root): return [] elif self._children is not None: return self._children else: self._children = self.getReffs() return self._children
[ "def", "childIds", "(", "self", ")", ":", "if", "self", ".", "depth", ">=", "len", "(", "self", ".", "citation", ".", "root", ")", ":", "return", "[", "]", "elif", "self", ".", "_children", "is", "not", "None", ":", "return", "self", ".", "_children", "else", ":", "self", ".", "_children", "=", "self", ".", "getReffs", "(", ")", "return", "self", ".", "_children" ]
Children of the passage :rtype: None, CtsReference :returns: Dictionary of chidren, where key are subreferences
[ "Children", "of", "the", "passage" ]
python
train
openthread/openthread
tools/harness-thci/OpenThread_WpanCtl.py
https://github.com/openthread/openthread/blob/0208d10563aa21c518092985c78ecf9cd223ab74/tools/harness-thci/OpenThread_WpanCtl.py#L995-L1027
def setNetworkKey(self, key): """set Thread Network master key Args: key: Thread Network master key used in secure the MLE/802.15.4 packet Returns: True: successful to set the Thread Network master key False: fail to set the Thread Network master key """ masterKey = '' print '%s call setNetworkKey' % self.port try: if not isinstance(key, str): masterKey = self.__convertLongToString(key) # prpend '0' at the beginning if len(masterKey) < 32: masterKey = masterKey.zfill(32) cmd = WPANCTL_CMD + 'setprop Network:Key %s' % masterKey datasetCmd = WPANCTL_CMD + 'setprop Dataset:MasterKey %s' % masterKey else: masterKey = key cmd = WPANCTL_CMD + 'setprop Network:Key %s' % masterKey datasetCmd = WPANCTL_CMD + 'setprop Dataset:MasterKey %s' % masterKey self.networkKey = masterKey self.hasActiveDatasetToCommit = True return self.__sendCommand(cmd)[0] != 'Fail' and self.__sendCommand(datasetCmd)[0] != 'Fail' except Exception, e: ModuleHelper.WriteIntoDebugLogger('setNetworkkey() Error: ' + str(e))
[ "def", "setNetworkKey", "(", "self", ",", "key", ")", ":", "masterKey", "=", "''", "print", "'%s call setNetworkKey'", "%", "self", ".", "port", "try", ":", "if", "not", "isinstance", "(", "key", ",", "str", ")", ":", "masterKey", "=", "self", ".", "__convertLongToString", "(", "key", ")", "# prpend '0' at the beginning", "if", "len", "(", "masterKey", ")", "<", "32", ":", "masterKey", "=", "masterKey", ".", "zfill", "(", "32", ")", "cmd", "=", "WPANCTL_CMD", "+", "'setprop Network:Key %s'", "%", "masterKey", "datasetCmd", "=", "WPANCTL_CMD", "+", "'setprop Dataset:MasterKey %s'", "%", "masterKey", "else", ":", "masterKey", "=", "key", "cmd", "=", "WPANCTL_CMD", "+", "'setprop Network:Key %s'", "%", "masterKey", "datasetCmd", "=", "WPANCTL_CMD", "+", "'setprop Dataset:MasterKey %s'", "%", "masterKey", "self", ".", "networkKey", "=", "masterKey", "self", ".", "hasActiveDatasetToCommit", "=", "True", "return", "self", ".", "__sendCommand", "(", "cmd", ")", "[", "0", "]", "!=", "'Fail'", "and", "self", ".", "__sendCommand", "(", "datasetCmd", ")", "[", "0", "]", "!=", "'Fail'", "except", "Exception", ",", "e", ":", "ModuleHelper", ".", "WriteIntoDebugLogger", "(", "'setNetworkkey() Error: '", "+", "str", "(", "e", ")", ")" ]
set Thread Network master key Args: key: Thread Network master key used in secure the MLE/802.15.4 packet Returns: True: successful to set the Thread Network master key False: fail to set the Thread Network master key
[ "set", "Thread", "Network", "master", "key" ]
python
train
mozilla-b2g/fxos-certsuite
mcts/securitysuite/ssl.py
https://github.com/mozilla-b2g/fxos-certsuite/blob/152a76c7c4c9d908524cf6e6fc25a498058f363d/mcts/securitysuite/ssl.py#L308-L353
def run(cls, version=None): """ Test runner method; is called by parent class defined in suite.py. :param version: B2G version string to test against :return: bool PASS/FAIL status """ try: dumper = certdump() versions = dumper.nssversion_via_marionette() except Exception as e: # TODO: too broad exception cls.log_status('FAIL', 'Failed to gather information from the device via Marionette: %s' % e) return False if version is None: cls.log_status('FAIL', 'NSS version check requires a B2G version.\nReported component versions:\n%s' % ( '\n'.join(["%s: %s" % (k, versions[k]) for k in versions]))) return False reported_version = versions['NSS_Version'] if version not in nssversion.b2g_version_to_hginfo: cls.log_status('FAIL', 'No version comparison data for B2G %s.\nReported NSS component versions:\n%s' % ( version, '\n'.join(["%s: %s" % (k, versions[k]) for k in versions]))) return False expected_version = nssversion.b2g_version_to_hginfo[version]['release_nss_version'] # Fail if reported version is a downgrade if nssversion.first_older_than_second(reported_version, expected_version): cls.log_status('FAIL', 'NSS downgrade detected. Expecting at least version %s.\n' 'Reported versions:\n%s' % ( expected_version, '\n'.join(["%s: %s" % (k, versions[k]) for k in versions]))) return False # Pass if NSS version was upgraded. if nssversion.first_older_than_second(expected_version, reported_version): cls.log_status('PASS', 'NSS more recent than release version %s. Reported component versions:\n%s' % ( expected_version, '\n'.join(["%s: %s" % (k, versions[k]) for k in versions]))) return True # Else device has reported the expected version. cls.log_status('PASS', 'NSS version reported as expected. Reported component versions:\n%s' % ( '\n'.join(["%s: %s" % (k, versions[k]) for k in versions]))) return True
[ "def", "run", "(", "cls", ",", "version", "=", "None", ")", ":", "try", ":", "dumper", "=", "certdump", "(", ")", "versions", "=", "dumper", ".", "nssversion_via_marionette", "(", ")", "except", "Exception", "as", "e", ":", "# TODO: too broad exception", "cls", ".", "log_status", "(", "'FAIL'", ",", "'Failed to gather information from the device via Marionette: %s'", "%", "e", ")", "return", "False", "if", "version", "is", "None", ":", "cls", ".", "log_status", "(", "'FAIL'", ",", "'NSS version check requires a B2G version.\\nReported component versions:\\n%s'", "%", "(", "'\\n'", ".", "join", "(", "[", "\"%s: %s\"", "%", "(", "k", ",", "versions", "[", "k", "]", ")", "for", "k", "in", "versions", "]", ")", ")", ")", "return", "False", "reported_version", "=", "versions", "[", "'NSS_Version'", "]", "if", "version", "not", "in", "nssversion", ".", "b2g_version_to_hginfo", ":", "cls", ".", "log_status", "(", "'FAIL'", ",", "'No version comparison data for B2G %s.\\nReported NSS component versions:\\n%s'", "%", "(", "version", ",", "'\\n'", ".", "join", "(", "[", "\"%s: %s\"", "%", "(", "k", ",", "versions", "[", "k", "]", ")", "for", "k", "in", "versions", "]", ")", ")", ")", "return", "False", "expected_version", "=", "nssversion", ".", "b2g_version_to_hginfo", "[", "version", "]", "[", "'release_nss_version'", "]", "# Fail if reported version is a downgrade", "if", "nssversion", ".", "first_older_than_second", "(", "reported_version", ",", "expected_version", ")", ":", "cls", ".", "log_status", "(", "'FAIL'", ",", "'NSS downgrade detected. Expecting at least version %s.\\n'", "'Reported versions:\\n%s'", "%", "(", "expected_version", ",", "'\\n'", ".", "join", "(", "[", "\"%s: %s\"", "%", "(", "k", ",", "versions", "[", "k", "]", ")", "for", "k", "in", "versions", "]", ")", ")", ")", "return", "False", "# Pass if NSS version was upgraded.", "if", "nssversion", ".", "first_older_than_second", "(", "expected_version", ",", "reported_version", ")", ":", "cls", ".", "log_status", "(", "'PASS'", ",", "'NSS more recent than release version %s. Reported component versions:\\n%s'", "%", "(", "expected_version", ",", "'\\n'", ".", "join", "(", "[", "\"%s: %s\"", "%", "(", "k", ",", "versions", "[", "k", "]", ")", "for", "k", "in", "versions", "]", ")", ")", ")", "return", "True", "# Else device has reported the expected version.", "cls", ".", "log_status", "(", "'PASS'", ",", "'NSS version reported as expected. Reported component versions:\\n%s'", "%", "(", "'\\n'", ".", "join", "(", "[", "\"%s: %s\"", "%", "(", "k", ",", "versions", "[", "k", "]", ")", "for", "k", "in", "versions", "]", ")", ")", ")", "return", "True" ]
Test runner method; is called by parent class defined in suite.py. :param version: B2G version string to test against :return: bool PASS/FAIL status
[ "Test", "runner", "method", ";", "is", "called", "by", "parent", "class", "defined", "in", "suite", ".", "py", ".", ":", "param", "version", ":", "B2G", "version", "string", "to", "test", "against", ":", "return", ":", "bool", "PASS", "/", "FAIL", "status" ]
python
train
elastic/elasticsearch-py
elasticsearch/client/cluster.py
https://github.com/elastic/elasticsearch-py/blob/2aab285c8f506f3863cbdaba3c90a685c510ba00/elasticsearch/client/cluster.py#L53-L77
def state(self, metric=None, index=None, params=None): """ Get a comprehensive state information of the whole cluster. `<http://www.elastic.co/guide/en/elasticsearch/reference/current/cluster-state.html>`_ :arg metric: Limit the information returned to the specified metrics :arg index: A comma-separated list of index names; use `_all` or empty string to perform the operation on all indices :arg allow_no_indices: Whether to ignore if a wildcard indices expression resolves into no concrete indices. (This includes `_all` string or when no indices have been specified) :arg expand_wildcards: Whether to expand wildcard expression to concrete indices that are open, closed or both., default 'open', valid choices are: 'open', 'closed', 'none', 'all' :arg flat_settings: Return settings in flat format (default: false) :arg ignore_unavailable: Whether specified concrete indices should be ignored when unavailable (missing or closed) :arg local: Return local information, do not retrieve the state from master node (default: false) :arg master_timeout: Specify timeout for connection to master """ if index and not metric: metric = '_all' return self.transport.perform_request('GET', _make_path('_cluster', 'state', metric, index), params=params)
[ "def", "state", "(", "self", ",", "metric", "=", "None", ",", "index", "=", "None", ",", "params", "=", "None", ")", ":", "if", "index", "and", "not", "metric", ":", "metric", "=", "'_all'", "return", "self", ".", "transport", ".", "perform_request", "(", "'GET'", ",", "_make_path", "(", "'_cluster'", ",", "'state'", ",", "metric", ",", "index", ")", ",", "params", "=", "params", ")" ]
Get a comprehensive state information of the whole cluster. `<http://www.elastic.co/guide/en/elasticsearch/reference/current/cluster-state.html>`_ :arg metric: Limit the information returned to the specified metrics :arg index: A comma-separated list of index names; use `_all` or empty string to perform the operation on all indices :arg allow_no_indices: Whether to ignore if a wildcard indices expression resolves into no concrete indices. (This includes `_all` string or when no indices have been specified) :arg expand_wildcards: Whether to expand wildcard expression to concrete indices that are open, closed or both., default 'open', valid choices are: 'open', 'closed', 'none', 'all' :arg flat_settings: Return settings in flat format (default: false) :arg ignore_unavailable: Whether specified concrete indices should be ignored when unavailable (missing or closed) :arg local: Return local information, do not retrieve the state from master node (default: false) :arg master_timeout: Specify timeout for connection to master
[ "Get", "a", "comprehensive", "state", "information", "of", "the", "whole", "cluster", ".", "<http", ":", "//", "www", ".", "elastic", ".", "co", "/", "guide", "/", "en", "/", "elasticsearch", "/", "reference", "/", "current", "/", "cluster", "-", "state", ".", "html", ">", "_" ]
python
train
scikit-umfpack/scikit-umfpack
scikits/umfpack/umfpack.py
https://github.com/scikit-umfpack/scikit-umfpack/blob/a2102ef92f4dd060138e72bb5d7c444f8ec49cbc/scikits/umfpack/umfpack.py#L532-L577
def numeric(self, mtx): """ Perform numeric object (LU decomposition) computation using the symbolic decomposition. The symbolic decomposition is (re)computed if necessary. """ self.free_numeric() if self._symbolic is None: self.symbolic(mtx) indx = self._getIndx(mtx) failCount = 0 while 1: if self.isReal: status, self._numeric\ = self.funs.numeric(mtx.indptr, indx, mtx.data, self._symbolic, self.control, self.info) else: real, imag = mtx.data.real.copy(), mtx.data.imag.copy() status, self._numeric\ = self.funs.numeric(mtx.indptr, indx, real, imag, self._symbolic, self.control, self.info) if status != UMFPACK_OK: if status == UMFPACK_WARNING_singular_matrix: warnings.warn('Singular matrix', UmfpackWarning) break elif status in (UMFPACK_ERROR_different_pattern, UMFPACK_ERROR_invalid_Symbolic_object): # Try again. warnings.warn('Recomputing symbolic', UmfpackWarning) self.symbolic(mtx) failCount += 1 else: failCount += 100 else: break if failCount >= 2: raise RuntimeError('%s failed with %s' % (self.funs.numeric, umfStatus[status]))
[ "def", "numeric", "(", "self", ",", "mtx", ")", ":", "self", ".", "free_numeric", "(", ")", "if", "self", ".", "_symbolic", "is", "None", ":", "self", ".", "symbolic", "(", "mtx", ")", "indx", "=", "self", ".", "_getIndx", "(", "mtx", ")", "failCount", "=", "0", "while", "1", ":", "if", "self", ".", "isReal", ":", "status", ",", "self", ".", "_numeric", "=", "self", ".", "funs", ".", "numeric", "(", "mtx", ".", "indptr", ",", "indx", ",", "mtx", ".", "data", ",", "self", ".", "_symbolic", ",", "self", ".", "control", ",", "self", ".", "info", ")", "else", ":", "real", ",", "imag", "=", "mtx", ".", "data", ".", "real", ".", "copy", "(", ")", ",", "mtx", ".", "data", ".", "imag", ".", "copy", "(", ")", "status", ",", "self", ".", "_numeric", "=", "self", ".", "funs", ".", "numeric", "(", "mtx", ".", "indptr", ",", "indx", ",", "real", ",", "imag", ",", "self", ".", "_symbolic", ",", "self", ".", "control", ",", "self", ".", "info", ")", "if", "status", "!=", "UMFPACK_OK", ":", "if", "status", "==", "UMFPACK_WARNING_singular_matrix", ":", "warnings", ".", "warn", "(", "'Singular matrix'", ",", "UmfpackWarning", ")", "break", "elif", "status", "in", "(", "UMFPACK_ERROR_different_pattern", ",", "UMFPACK_ERROR_invalid_Symbolic_object", ")", ":", "# Try again.", "warnings", ".", "warn", "(", "'Recomputing symbolic'", ",", "UmfpackWarning", ")", "self", ".", "symbolic", "(", "mtx", ")", "failCount", "+=", "1", "else", ":", "failCount", "+=", "100", "else", ":", "break", "if", "failCount", ">=", "2", ":", "raise", "RuntimeError", "(", "'%s failed with %s'", "%", "(", "self", ".", "funs", ".", "numeric", ",", "umfStatus", "[", "status", "]", ")", ")" ]
Perform numeric object (LU decomposition) computation using the symbolic decomposition. The symbolic decomposition is (re)computed if necessary.
[ "Perform", "numeric", "object", "(", "LU", "decomposition", ")", "computation", "using", "the", "symbolic", "decomposition", ".", "The", "symbolic", "decomposition", "is", "(", "re", ")", "computed", "if", "necessary", "." ]
python
train
pivotal-energy-solutions/django-datatable-view
datatableview/columns.py
https://github.com/pivotal-energy-solutions/django-datatable-view/blob/00b77a9b5051c34e258c51b06c020e92edf15034/datatableview/columns.py#L562-L570
def _get_flat_db_sources(self, model): """ Return a flattened representation of the individual ``sources`` lists. """ sources = [] for source in self.sources: for sub_source in self.expand_source(source): target_field = self.resolve_source(model, sub_source) if target_field: sources.append(sub_source) return sources
[ "def", "_get_flat_db_sources", "(", "self", ",", "model", ")", ":", "sources", "=", "[", "]", "for", "source", "in", "self", ".", "sources", ":", "for", "sub_source", "in", "self", ".", "expand_source", "(", "source", ")", ":", "target_field", "=", "self", ".", "resolve_source", "(", "model", ",", "sub_source", ")", "if", "target_field", ":", "sources", ".", "append", "(", "sub_source", ")", "return", "sources" ]
Return a flattened representation of the individual ``sources`` lists.
[ "Return", "a", "flattened", "representation", "of", "the", "individual", "sources", "lists", "." ]
python
train
openvax/mhctools
mhctools/base_predictor.py
https://github.com/openvax/mhctools/blob/b329b4dccd60fae41296816b8cbfe15d6ca07e67/mhctools/base_predictor.py#L151-L176
def _check_peptide_inputs(self, peptides): """ Check peptide sequences to make sure they are valid for this predictor. """ require_iterable_of(peptides, string_types) check_X = not self.allow_X_in_peptides check_lower = not self.allow_lowercase_in_peptides check_min_length = self.min_peptide_length is not None min_length = self.min_peptide_length check_max_length = self.max_peptide_length is not None max_length = self.max_peptide_length for p in peptides: if not p.isalpha(): raise ValueError("Invalid characters in peptide '%s'" % p) elif check_X and "X" in p: raise ValueError("Invalid character 'X' in peptide '%s'" % p) elif check_lower and not p.isupper(): raise ValueError("Invalid lowercase letters in peptide '%s'" % p) elif check_min_length and len(p) < min_length: raise ValueError( "Peptide '%s' too short (%d chars), must be at least %d" % ( p, len(p), min_length)) elif check_max_length and len(p) > max_length: raise ValueError( "Peptide '%s' too long (%d chars), must be at least %d" % ( p, len(p), max_length))
[ "def", "_check_peptide_inputs", "(", "self", ",", "peptides", ")", ":", "require_iterable_of", "(", "peptides", ",", "string_types", ")", "check_X", "=", "not", "self", ".", "allow_X_in_peptides", "check_lower", "=", "not", "self", ".", "allow_lowercase_in_peptides", "check_min_length", "=", "self", ".", "min_peptide_length", "is", "not", "None", "min_length", "=", "self", ".", "min_peptide_length", "check_max_length", "=", "self", ".", "max_peptide_length", "is", "not", "None", "max_length", "=", "self", ".", "max_peptide_length", "for", "p", "in", "peptides", ":", "if", "not", "p", ".", "isalpha", "(", ")", ":", "raise", "ValueError", "(", "\"Invalid characters in peptide '%s'\"", "%", "p", ")", "elif", "check_X", "and", "\"X\"", "in", "p", ":", "raise", "ValueError", "(", "\"Invalid character 'X' in peptide '%s'\"", "%", "p", ")", "elif", "check_lower", "and", "not", "p", ".", "isupper", "(", ")", ":", "raise", "ValueError", "(", "\"Invalid lowercase letters in peptide '%s'\"", "%", "p", ")", "elif", "check_min_length", "and", "len", "(", "p", ")", "<", "min_length", ":", "raise", "ValueError", "(", "\"Peptide '%s' too short (%d chars), must be at least %d\"", "%", "(", "p", ",", "len", "(", "p", ")", ",", "min_length", ")", ")", "elif", "check_max_length", "and", "len", "(", "p", ")", ">", "max_length", ":", "raise", "ValueError", "(", "\"Peptide '%s' too long (%d chars), must be at least %d\"", "%", "(", "p", ",", "len", "(", "p", ")", ",", "max_length", ")", ")" ]
Check peptide sequences to make sure they are valid for this predictor.
[ "Check", "peptide", "sequences", "to", "make", "sure", "they", "are", "valid", "for", "this", "predictor", "." ]
python
valid
juicer/juicer
juicer/utils/__init__.py
https://github.com/juicer/juicer/blob/0c9f0fd59e293d45df6b46e81f675d33221c600d/juicer/utils/__init__.py#L436-L449
def save_url_as(url, save_as): """ Download the file `url` and save it to the local disk as `save_as`. """ remote = requests.get(url, verify=False) if not remote.status_code == Constants.PULP_GET_OK: raise JuicerPulpError("A %s error occurred trying to get %s" % (remote.status_code, url)) with open(save_as, 'wb') as data: data.write(remote.content)
[ "def", "save_url_as", "(", "url", ",", "save_as", ")", ":", "remote", "=", "requests", ".", "get", "(", "url", ",", "verify", "=", "False", ")", "if", "not", "remote", ".", "status_code", "==", "Constants", ".", "PULP_GET_OK", ":", "raise", "JuicerPulpError", "(", "\"A %s error occurred trying to get %s\"", "%", "(", "remote", ".", "status_code", ",", "url", ")", ")", "with", "open", "(", "save_as", ",", "'wb'", ")", "as", "data", ":", "data", ".", "write", "(", "remote", ".", "content", ")" ]
Download the file `url` and save it to the local disk as `save_as`.
[ "Download", "the", "file", "url", "and", "save", "it", "to", "the", "local", "disk", "as", "save_as", "." ]
python
train
blockstack/blockstack-core
blockstack/lib/operations/__init__.py
https://github.com/blockstack/blockstack-core/blob/1dcfdd39b152d29ce13e736a6a1a0981401a0505/blockstack/lib/operations/__init__.py#L212-L223
def op_decanonicalize(op_name, canonical_op): """ Get the current representation of a parsed operation's data, given the canonical representation Meant for backwards-compatibility """ global DECANONICALIZE_METHODS if op_name not in DECANONICALIZE_METHODS: # no decanonicalization needed return canonical_op else: return DECANONICALIZE_METHODS[op_name](canonical_op)
[ "def", "op_decanonicalize", "(", "op_name", ",", "canonical_op", ")", ":", "global", "DECANONICALIZE_METHODS", "if", "op_name", "not", "in", "DECANONICALIZE_METHODS", ":", "# no decanonicalization needed", "return", "canonical_op", "else", ":", "return", "DECANONICALIZE_METHODS", "[", "op_name", "]", "(", "canonical_op", ")" ]
Get the current representation of a parsed operation's data, given the canonical representation Meant for backwards-compatibility
[ "Get", "the", "current", "representation", "of", "a", "parsed", "operation", "s", "data", "given", "the", "canonical", "representation", "Meant", "for", "backwards", "-", "compatibility" ]
python
train
sony/nnabla
python/src/nnabla/utils/image_utils/pypng_utils.py
https://github.com/sony/nnabla/blob/aaf3d33b7cbb38f2a03aa754178ba8f7c8481320/python/src/nnabla/utils/image_utils/pypng_utils.py#L79-L122
def imread(path, grayscale=False, size=None, interpolate="bilinear", channel_first=False, as_uint16=False, num_channels=-1): """ Read image by pypng module. Args: path (str or 'file object'): File path or object to read. grayscale (bool): size (tupple of int): (width, height). If None, output img shape depends on the files to read. channel_first (bool): This argument specifies the shape of img is whether (height, width, channel) or (channel, height, width). Default value is False, which means the img shape is (height, width, channel). interpolate (str): must be one of ["nearest", "box", "bilinear", "hamming", "bicubic", "lanczos"]. as_uint16 (bool): If True, this function reads image as uint16. num_channels (int): channel size of output array. Default is -1 which preserves raw image shape. Returns: numpy.ndarray """ _imread_before(grayscale, num_channels) f = path if hasattr(path, "read") else open(path, "rb") r = png.Reader(file=f) width, height, pixels, metadata = r.asDirect() bit_depth = metadata.get("bitdepth") if bit_depth not in [8, 16]: raise ValueError("The bit-depth of the image you want to read is unsupported ({}bit)." "Currently, pypng backend`s imread supports only [8, 16] bit-depth." "the path for this image is {}".format(bit_depth, path)) img = read_result_to_ndarray( pixels, width, height, metadata, grayscale, as_uint16, num_channels) return _imread_after(img, size, interpolate, channel_first, imresize)
[ "def", "imread", "(", "path", ",", "grayscale", "=", "False", ",", "size", "=", "None", ",", "interpolate", "=", "\"bilinear\"", ",", "channel_first", "=", "False", ",", "as_uint16", "=", "False", ",", "num_channels", "=", "-", "1", ")", ":", "_imread_before", "(", "grayscale", ",", "num_channels", ")", "f", "=", "path", "if", "hasattr", "(", "path", ",", "\"read\"", ")", "else", "open", "(", "path", ",", "\"rb\"", ")", "r", "=", "png", ".", "Reader", "(", "file", "=", "f", ")", "width", ",", "height", ",", "pixels", ",", "metadata", "=", "r", ".", "asDirect", "(", ")", "bit_depth", "=", "metadata", ".", "get", "(", "\"bitdepth\"", ")", "if", "bit_depth", "not", "in", "[", "8", ",", "16", "]", ":", "raise", "ValueError", "(", "\"The bit-depth of the image you want to read is unsupported ({}bit).\"", "\"Currently, pypng backend`s imread supports only [8, 16] bit-depth.\"", "\"the path for this image is {}\"", ".", "format", "(", "bit_depth", ",", "path", ")", ")", "img", "=", "read_result_to_ndarray", "(", "pixels", ",", "width", ",", "height", ",", "metadata", ",", "grayscale", ",", "as_uint16", ",", "num_channels", ")", "return", "_imread_after", "(", "img", ",", "size", ",", "interpolate", ",", "channel_first", ",", "imresize", ")" ]
Read image by pypng module. Args: path (str or 'file object'): File path or object to read. grayscale (bool): size (tupple of int): (width, height). If None, output img shape depends on the files to read. channel_first (bool): This argument specifies the shape of img is whether (height, width, channel) or (channel, height, width). Default value is False, which means the img shape is (height, width, channel). interpolate (str): must be one of ["nearest", "box", "bilinear", "hamming", "bicubic", "lanczos"]. as_uint16 (bool): If True, this function reads image as uint16. num_channels (int): channel size of output array. Default is -1 which preserves raw image shape. Returns: numpy.ndarray
[ "Read", "image", "by", "pypng", "module", "." ]
python
train
ninuxorg/nodeshot
nodeshot/networking/connectors/models/device_connector.py
https://github.com/ninuxorg/nodeshot/blob/2466f0a55f522b2696026f196436ce7ba3f1e5c6/nodeshot/networking/connectors/models/device_connector.py#L131-L156
def _validate_config(self): """ ensure REQUIRED_CONFIG_KEYS are filled """ # exit if no backend specified if not self.backend: return # exit if no required config keys if len(self.REQUIRED_CONFIG_KEYS) < 1: return self.config = self.config or {} # default to empty dict of no config required_keys_set = set(self.REQUIRED_CONFIG_KEYS) config_keys_set = set(self.config.keys()) missing_required_keys = required_keys_set - config_keys_set unrecognized_keys = config_keys_set - required_keys_set # if any missing required key raise ValidationError if len(missing_required_keys) > 0: # converts list in comma separated string missing_keys_string = ', '.join(missing_required_keys) # django error raise ValidationError(_('Missing required config keys: "%s"') % missing_keys_string) elif len(unrecognized_keys) > 0: # converts list in comma separated string unrecognized_keys_string = ', '.join(unrecognized_keys) # django error raise ValidationError(_('Unrecognized config keys: "%s"') % unrecognized_keys_string)
[ "def", "_validate_config", "(", "self", ")", ":", "# exit if no backend specified", "if", "not", "self", ".", "backend", ":", "return", "# exit if no required config keys", "if", "len", "(", "self", ".", "REQUIRED_CONFIG_KEYS", ")", "<", "1", ":", "return", "self", ".", "config", "=", "self", ".", "config", "or", "{", "}", "# default to empty dict of no config", "required_keys_set", "=", "set", "(", "self", ".", "REQUIRED_CONFIG_KEYS", ")", "config_keys_set", "=", "set", "(", "self", ".", "config", ".", "keys", "(", ")", ")", "missing_required_keys", "=", "required_keys_set", "-", "config_keys_set", "unrecognized_keys", "=", "config_keys_set", "-", "required_keys_set", "# if any missing required key raise ValidationError", "if", "len", "(", "missing_required_keys", ")", ">", "0", ":", "# converts list in comma separated string", "missing_keys_string", "=", "', '", ".", "join", "(", "missing_required_keys", ")", "# django error", "raise", "ValidationError", "(", "_", "(", "'Missing required config keys: \"%s\"'", ")", "%", "missing_keys_string", ")", "elif", "len", "(", "unrecognized_keys", ")", ">", "0", ":", "# converts list in comma separated string", "unrecognized_keys_string", "=", "', '", ".", "join", "(", "unrecognized_keys", ")", "# django error", "raise", "ValidationError", "(", "_", "(", "'Unrecognized config keys: \"%s\"'", ")", "%", "unrecognized_keys_string", ")" ]
ensure REQUIRED_CONFIG_KEYS are filled
[ "ensure", "REQUIRED_CONFIG_KEYS", "are", "filled" ]
python
train
frasertweedale/ledgertools
ltlib/ui.py
https://github.com/frasertweedale/ledgertools/blob/a695f8667d72253e5448693c12f0282d09902aaa/ltlib/ui.py#L102-L148
def filter_pastdate(string, default=None): """Coerce to a date not beyond the current date If only a day is given, assumes the current month if that day has passed or is the current day, otherwise assumes the previous month. If a day and month are given, but no year, assumes the current year if the given date has passed (or is today), otherwise the previous year. """ if not string and default is not None: return default today = datetime.date.today() # split the string try: parts = map(int, re.split('\D+', string)) # split the string except ValueError: raise InvalidInputError("invalid date; use format: DD [MM [YYYY]]") if len(parts) < 1 or len(parts) > 3: raise InvalidInputError("invalid date; use format: DD [MM [YYYY]]") if len(parts) == 1: # no month or year given; append month parts.append(today.month - 1 if parts[0] > today.day else today.month) if parts[1] < 1: parts[1] = 12 if len(parts) == 2: # no year given; append year if parts[1] > today.month \ or parts[1] == today.month and parts[0] > today.day: parts.append(today.year - 1) else: parts.append(today.year) parts.reverse() try: date = datetime.date(*parts) if date > today: raise InvalidInputError("cannot choose a date in the future") return date except ValueError: print parts raise InvalidInputError("invalid date; use format: DD [MM [YYYY]]")
[ "def", "filter_pastdate", "(", "string", ",", "default", "=", "None", ")", ":", "if", "not", "string", "and", "default", "is", "not", "None", ":", "return", "default", "today", "=", "datetime", ".", "date", ".", "today", "(", ")", "# split the string", "try", ":", "parts", "=", "map", "(", "int", ",", "re", ".", "split", "(", "'\\D+'", ",", "string", ")", ")", "# split the string", "except", "ValueError", ":", "raise", "InvalidInputError", "(", "\"invalid date; use format: DD [MM [YYYY]]\"", ")", "if", "len", "(", "parts", ")", "<", "1", "or", "len", "(", "parts", ")", ">", "3", ":", "raise", "InvalidInputError", "(", "\"invalid date; use format: DD [MM [YYYY]]\"", ")", "if", "len", "(", "parts", ")", "==", "1", ":", "# no month or year given; append month", "parts", ".", "append", "(", "today", ".", "month", "-", "1", "if", "parts", "[", "0", "]", ">", "today", ".", "day", "else", "today", ".", "month", ")", "if", "parts", "[", "1", "]", "<", "1", ":", "parts", "[", "1", "]", "=", "12", "if", "len", "(", "parts", ")", "==", "2", ":", "# no year given; append year", "if", "parts", "[", "1", "]", ">", "today", ".", "month", "or", "parts", "[", "1", "]", "==", "today", ".", "month", "and", "parts", "[", "0", "]", ">", "today", ".", "day", ":", "parts", ".", "append", "(", "today", ".", "year", "-", "1", ")", "else", ":", "parts", ".", "append", "(", "today", ".", "year", ")", "parts", ".", "reverse", "(", ")", "try", ":", "date", "=", "datetime", ".", "date", "(", "*", "parts", ")", "if", "date", ">", "today", ":", "raise", "InvalidInputError", "(", "\"cannot choose a date in the future\"", ")", "return", "date", "except", "ValueError", ":", "print", "parts", "raise", "InvalidInputError", "(", "\"invalid date; use format: DD [MM [YYYY]]\"", ")" ]
Coerce to a date not beyond the current date If only a day is given, assumes the current month if that day has passed or is the current day, otherwise assumes the previous month. If a day and month are given, but no year, assumes the current year if the given date has passed (or is today), otherwise the previous year.
[ "Coerce", "to", "a", "date", "not", "beyond", "the", "current", "date" ]
python
train
pypa/pipenv
pipenv/patched/pipfile/api.py
https://github.com/pypa/pipenv/blob/cae8d76c210b9777e90aab76e9c4b0e53bb19cde/pipenv/patched/pipfile/api.py#L176-L181
def lock(self): """Returns a JSON representation of the Pipfile.""" data = self.data data['_meta']['hash'] = {"sha256": self.hash} data['_meta']['pipfile-spec'] = 6 return json.dumps(data, indent=4, separators=(',', ': '))
[ "def", "lock", "(", "self", ")", ":", "data", "=", "self", ".", "data", "data", "[", "'_meta'", "]", "[", "'hash'", "]", "=", "{", "\"sha256\"", ":", "self", ".", "hash", "}", "data", "[", "'_meta'", "]", "[", "'pipfile-spec'", "]", "=", "6", "return", "json", ".", "dumps", "(", "data", ",", "indent", "=", "4", ",", "separators", "=", "(", "','", ",", "': '", ")", ")" ]
Returns a JSON representation of the Pipfile.
[ "Returns", "a", "JSON", "representation", "of", "the", "Pipfile", "." ]
python
train
DLR-RM/RAFCON
source/rafcon/core/states/container_state.py
https://github.com/DLR-RM/RAFCON/blob/24942ef1a904531f49ab8830a1dbb604441be498/source/rafcon/core/states/container_state.py#L1503-L1533
def get_inputs_for_state(self, state): """Retrieves all input data of a state. If several data flows are connected to an input port the most current data is used for the specific input port. :param state: the state of which the input data is determined :return: the input data of the target state """ result_dict = {} tmp_dict = self.get_default_input_values_for_state(state) result_dict.update(tmp_dict) for input_port_key, value in state.input_data_ports.items(): # for all input keys fetch the correct data_flow connection and read data into the result_dict actual_value = None actual_value_time = 0 for data_flow_key, data_flow in self.data_flows.items(): if data_flow.to_key == input_port_key: if data_flow.to_state == state.state_id: # fetch data from the scoped_data list: the key is the data_port_key + the state_id key = str(data_flow.from_key) + data_flow.from_state if key in self.scoped_data: if actual_value is None or actual_value_time < self.scoped_data[key].timestamp: actual_value = deepcopy(self.scoped_data[key].value) actual_value_time = self.scoped_data[key].timestamp if actual_value is not None: result_dict[value.name] = actual_value return result_dict
[ "def", "get_inputs_for_state", "(", "self", ",", "state", ")", ":", "result_dict", "=", "{", "}", "tmp_dict", "=", "self", ".", "get_default_input_values_for_state", "(", "state", ")", "result_dict", ".", "update", "(", "tmp_dict", ")", "for", "input_port_key", ",", "value", "in", "state", ".", "input_data_ports", ".", "items", "(", ")", ":", "# for all input keys fetch the correct data_flow connection and read data into the result_dict", "actual_value", "=", "None", "actual_value_time", "=", "0", "for", "data_flow_key", ",", "data_flow", "in", "self", ".", "data_flows", ".", "items", "(", ")", ":", "if", "data_flow", ".", "to_key", "==", "input_port_key", ":", "if", "data_flow", ".", "to_state", "==", "state", ".", "state_id", ":", "# fetch data from the scoped_data list: the key is the data_port_key + the state_id", "key", "=", "str", "(", "data_flow", ".", "from_key", ")", "+", "data_flow", ".", "from_state", "if", "key", "in", "self", ".", "scoped_data", ":", "if", "actual_value", "is", "None", "or", "actual_value_time", "<", "self", ".", "scoped_data", "[", "key", "]", ".", "timestamp", ":", "actual_value", "=", "deepcopy", "(", "self", ".", "scoped_data", "[", "key", "]", ".", "value", ")", "actual_value_time", "=", "self", ".", "scoped_data", "[", "key", "]", ".", "timestamp", "if", "actual_value", "is", "not", "None", ":", "result_dict", "[", "value", ".", "name", "]", "=", "actual_value", "return", "result_dict" ]
Retrieves all input data of a state. If several data flows are connected to an input port the most current data is used for the specific input port. :param state: the state of which the input data is determined :return: the input data of the target state
[ "Retrieves", "all", "input", "data", "of", "a", "state", ".", "If", "several", "data", "flows", "are", "connected", "to", "an", "input", "port", "the", "most", "current", "data", "is", "used", "for", "the", "specific", "input", "port", "." ]
python
train
cloud-custodian/cloud-custodian
c7n/policy.py
https://github.com/cloud-custodian/cloud-custodian/blob/52ef732eb3d7bc939d1579faf519314814695c08/c7n/policy.py#L426-L481
def run(self, event, lambda_context): """Run policy in push mode against given event. Lambda automatically generates cloud watch logs, and metrics for us, albeit with some deficienies, metrics no longer count against valid resources matches, but against execution. If metrics execution option is enabled, custodian will generate metrics per normal. """ from c7n.actions import EventAction mode = self.policy.data.get('mode', {}) if not bool(mode.get("log", True)): root = logging.getLogger() map(root.removeHandler, root.handlers[:]) root.handlers = [logging.NullHandler()] resources = self.resolve_resources(event) if not resources: return resources resources = self.policy.resource_manager.filter_resources( resources, event) if 'debug' in event: self.policy.log.info("Filtered resources %d" % len(resources)) if not resources: self.policy.log.info( "policy: %s resources: %s no resources matched" % ( self.policy.name, self.policy.resource_type)) return with self.policy.ctx: self.policy.ctx.metrics.put_metric( 'ResourceCount', len(resources), 'Count', Scope="Policy", buffer=False) if 'debug' in event: self.policy.log.info( "Invoking actions %s", self.policy.resource_manager.actions) self.policy._write_file( 'resources.json', utils.dumps(resources, indent=2)) for action in self.policy.resource_manager.actions: self.policy.log.info( "policy: %s invoking action: %s resources: %d", self.policy.name, action.name, len(resources)) if isinstance(action, EventAction): results = action.process(resources, event) else: results = action.process(resources) self.policy._write_file( "action-%s" % action.name, utils.dumps(results)) return resources
[ "def", "run", "(", "self", ",", "event", ",", "lambda_context", ")", ":", "from", "c7n", ".", "actions", "import", "EventAction", "mode", "=", "self", ".", "policy", ".", "data", ".", "get", "(", "'mode'", ",", "{", "}", ")", "if", "not", "bool", "(", "mode", ".", "get", "(", "\"log\"", ",", "True", ")", ")", ":", "root", "=", "logging", ".", "getLogger", "(", ")", "map", "(", "root", ".", "removeHandler", ",", "root", ".", "handlers", "[", ":", "]", ")", "root", ".", "handlers", "=", "[", "logging", ".", "NullHandler", "(", ")", "]", "resources", "=", "self", ".", "resolve_resources", "(", "event", ")", "if", "not", "resources", ":", "return", "resources", "resources", "=", "self", ".", "policy", ".", "resource_manager", ".", "filter_resources", "(", "resources", ",", "event", ")", "if", "'debug'", "in", "event", ":", "self", ".", "policy", ".", "log", ".", "info", "(", "\"Filtered resources %d\"", "%", "len", "(", "resources", ")", ")", "if", "not", "resources", ":", "self", ".", "policy", ".", "log", ".", "info", "(", "\"policy: %s resources: %s no resources matched\"", "%", "(", "self", ".", "policy", ".", "name", ",", "self", ".", "policy", ".", "resource_type", ")", ")", "return", "with", "self", ".", "policy", ".", "ctx", ":", "self", ".", "policy", ".", "ctx", ".", "metrics", ".", "put_metric", "(", "'ResourceCount'", ",", "len", "(", "resources", ")", ",", "'Count'", ",", "Scope", "=", "\"Policy\"", ",", "buffer", "=", "False", ")", "if", "'debug'", "in", "event", ":", "self", ".", "policy", ".", "log", ".", "info", "(", "\"Invoking actions %s\"", ",", "self", ".", "policy", ".", "resource_manager", ".", "actions", ")", "self", ".", "policy", ".", "_write_file", "(", "'resources.json'", ",", "utils", ".", "dumps", "(", "resources", ",", "indent", "=", "2", ")", ")", "for", "action", "in", "self", ".", "policy", ".", "resource_manager", ".", "actions", ":", "self", ".", "policy", ".", "log", ".", "info", "(", "\"policy: %s invoking action: %s resources: %d\"", ",", "self", ".", "policy", ".", "name", ",", "action", ".", "name", ",", "len", "(", "resources", ")", ")", "if", "isinstance", "(", "action", ",", "EventAction", ")", ":", "results", "=", "action", ".", "process", "(", "resources", ",", "event", ")", "else", ":", "results", "=", "action", ".", "process", "(", "resources", ")", "self", ".", "policy", ".", "_write_file", "(", "\"action-%s\"", "%", "action", ".", "name", ",", "utils", ".", "dumps", "(", "results", ")", ")", "return", "resources" ]
Run policy in push mode against given event. Lambda automatically generates cloud watch logs, and metrics for us, albeit with some deficienies, metrics no longer count against valid resources matches, but against execution. If metrics execution option is enabled, custodian will generate metrics per normal.
[ "Run", "policy", "in", "push", "mode", "against", "given", "event", "." ]
python
train
Rapptz/discord.py
discord/ext/tasks/__init__.py
https://github.com/Rapptz/discord.py/blob/05d4f7f9620ef33635d6ac965b26528e09cdaf5b/discord/ext/tasks/__init__.py#L218-L236
def after_loop(self, coro): """A function that also acts as a decorator to register a coroutine to be called after the loop finished running. Parameters ------------ coro: :term:`py:awaitable` The coroutine to register after the loop finishes. Raises ------- TypeError The function was not a coroutine. """ if not (inspect.iscoroutinefunction(coro) or inspect.isawaitable(coro)): raise TypeError('Expected coroutine or awaitable, received {0.__name__!r}.'.format(type(coro))) self._after_loop = coro
[ "def", "after_loop", "(", "self", ",", "coro", ")", ":", "if", "not", "(", "inspect", ".", "iscoroutinefunction", "(", "coro", ")", "or", "inspect", ".", "isawaitable", "(", "coro", ")", ")", ":", "raise", "TypeError", "(", "'Expected coroutine or awaitable, received {0.__name__!r}.'", ".", "format", "(", "type", "(", "coro", ")", ")", ")", "self", ".", "_after_loop", "=", "coro" ]
A function that also acts as a decorator to register a coroutine to be called after the loop finished running. Parameters ------------ coro: :term:`py:awaitable` The coroutine to register after the loop finishes. Raises ------- TypeError The function was not a coroutine.
[ "A", "function", "that", "also", "acts", "as", "a", "decorator", "to", "register", "a", "coroutine", "to", "be", "called", "after", "the", "loop", "finished", "running", "." ]
python
train
dacut/python-aws-sig
awssig/sigv4.py
https://github.com/dacut/python-aws-sig/blob/7f6054dca4b32e67ca3d39db31c1b4be5efe54bd/awssig/sigv4.py#L496-L532
def normalize_query_parameters(query_string): """ normalize_query_parameters(query_string) -> dict Converts a query string into a dictionary mapping parameter names to a list of the sorted values. This ensurses that the query string follows % encoding rules according to RFC 3986 and checks for duplicate keys. A ValueError exception is raised if a percent encoding is invalid. """ if query_string == "": return {} components = query_string.split("&") result = {} for component in components: try: key, value = component.split("=", 1) except ValueError: key = component value = "" if component == "": # Empty component; skip it. continue key = normalize_uri_path_component(key) value = normalize_uri_path_component(value) if key in result: result[key].append(value) else: result[key] = [value] return dict([(key, sorted(values)) for key, values in iteritems(result)])
[ "def", "normalize_query_parameters", "(", "query_string", ")", ":", "if", "query_string", "==", "\"\"", ":", "return", "{", "}", "components", "=", "query_string", ".", "split", "(", "\"&\"", ")", "result", "=", "{", "}", "for", "component", "in", "components", ":", "try", ":", "key", ",", "value", "=", "component", ".", "split", "(", "\"=\"", ",", "1", ")", "except", "ValueError", ":", "key", "=", "component", "value", "=", "\"\"", "if", "component", "==", "\"\"", ":", "# Empty component; skip it.", "continue", "key", "=", "normalize_uri_path_component", "(", "key", ")", "value", "=", "normalize_uri_path_component", "(", "value", ")", "if", "key", "in", "result", ":", "result", "[", "key", "]", ".", "append", "(", "value", ")", "else", ":", "result", "[", "key", "]", "=", "[", "value", "]", "return", "dict", "(", "[", "(", "key", ",", "sorted", "(", "values", ")", ")", "for", "key", ",", "values", "in", "iteritems", "(", "result", ")", "]", ")" ]
normalize_query_parameters(query_string) -> dict Converts a query string into a dictionary mapping parameter names to a list of the sorted values. This ensurses that the query string follows % encoding rules according to RFC 3986 and checks for duplicate keys. A ValueError exception is raised if a percent encoding is invalid.
[ "normalize_query_parameters", "(", "query_string", ")", "-", ">", "dict" ]
python
train
bpannier/simpletr64
simpletr64/actions/fritz.py
https://github.com/bpannier/simpletr64/blob/31081139f4e6c85084a56de1617df73927135466/simpletr64/actions/fritz.py#L161-L221
def getCallList(self, timeout=1): """Get the list of phone calls made Example of a phone call result: :: [{'Count': None, 'Name': None, 'CalledNumber': '030868709971', 'Numbertype': 'sip', 'Duration': '0:01', 'Caller': '015155255399', 'Called': 'SIP: 030868729971', 'Date': '02.01.14 13:14', 'Device': 'Anrufbeantworter','Path': None, 'Port': '40', 'Type': '1', 'Id': '15'}] Types: * 1 - answered * 2 - missed * 3 - outgoing :param float timeout: the timeout to wait for the action to be executed :return: the list of made phone calls :rtype: list[dict[str: str]] """ namespace = Fritz.getServiceType("getCallList") uri = self.getControlURL(namespace) results = self.execute(uri, namespace, "GetCallList") # setup proxies proxies = {} if self.httpProxy: proxies = {"https": self.httpProxy} if self.httpsProxy: proxies = {"http": self.httpsProxy} # get the content request = requests.get(results["NewCallListURL"], proxies=proxies, timeout=float(timeout)) if request.status_code != 200: errorStr = DeviceTR64._extractErrorString(request) raise ValueError('Could not get CPE definitions "' + results["NewCallListURL"] + '" : ' + str(request.status_code) + ' - ' + request.reason + " -- " + errorStr) # parse xml try: root = ET.fromstring(request.text.encode('utf-8')) except Exception as e: raise ValueError("Could not parse call list '" + results["NewCallListURL"] + "': " + str(e)) calls = [] for child in root.getchildren(): if child.tag.lower() == "call": callParameters = {} for callChild in child.getchildren(): callParameters[callChild.tag] = callChild.text calls.append(callParameters) return calls
[ "def", "getCallList", "(", "self", ",", "timeout", "=", "1", ")", ":", "namespace", "=", "Fritz", ".", "getServiceType", "(", "\"getCallList\"", ")", "uri", "=", "self", ".", "getControlURL", "(", "namespace", ")", "results", "=", "self", ".", "execute", "(", "uri", ",", "namespace", ",", "\"GetCallList\"", ")", "# setup proxies", "proxies", "=", "{", "}", "if", "self", ".", "httpProxy", ":", "proxies", "=", "{", "\"https\"", ":", "self", ".", "httpProxy", "}", "if", "self", ".", "httpsProxy", ":", "proxies", "=", "{", "\"http\"", ":", "self", ".", "httpsProxy", "}", "# get the content", "request", "=", "requests", ".", "get", "(", "results", "[", "\"NewCallListURL\"", "]", ",", "proxies", "=", "proxies", ",", "timeout", "=", "float", "(", "timeout", ")", ")", "if", "request", ".", "status_code", "!=", "200", ":", "errorStr", "=", "DeviceTR64", ".", "_extractErrorString", "(", "request", ")", "raise", "ValueError", "(", "'Could not get CPE definitions \"'", "+", "results", "[", "\"NewCallListURL\"", "]", "+", "'\" : '", "+", "str", "(", "request", ".", "status_code", ")", "+", "' - '", "+", "request", ".", "reason", "+", "\" -- \"", "+", "errorStr", ")", "# parse xml", "try", ":", "root", "=", "ET", ".", "fromstring", "(", "request", ".", "text", ".", "encode", "(", "'utf-8'", ")", ")", "except", "Exception", "as", "e", ":", "raise", "ValueError", "(", "\"Could not parse call list '\"", "+", "results", "[", "\"NewCallListURL\"", "]", "+", "\"': \"", "+", "str", "(", "e", ")", ")", "calls", "=", "[", "]", "for", "child", "in", "root", ".", "getchildren", "(", ")", ":", "if", "child", ".", "tag", ".", "lower", "(", ")", "==", "\"call\"", ":", "callParameters", "=", "{", "}", "for", "callChild", "in", "child", ".", "getchildren", "(", ")", ":", "callParameters", "[", "callChild", ".", "tag", "]", "=", "callChild", ".", "text", "calls", ".", "append", "(", "callParameters", ")", "return", "calls" ]
Get the list of phone calls made Example of a phone call result: :: [{'Count': None, 'Name': None, 'CalledNumber': '030868709971', 'Numbertype': 'sip', 'Duration': '0:01', 'Caller': '015155255399', 'Called': 'SIP: 030868729971', 'Date': '02.01.14 13:14', 'Device': 'Anrufbeantworter','Path': None, 'Port': '40', 'Type': '1', 'Id': '15'}] Types: * 1 - answered * 2 - missed * 3 - outgoing :param float timeout: the timeout to wait for the action to be executed :return: the list of made phone calls :rtype: list[dict[str: str]]
[ "Get", "the", "list", "of", "phone", "calls", "made" ]
python
train
markokr/rarfile
dumprar.py
https://github.com/markokr/rarfile/blob/2704344e8d7a1658c96c8ed8f449d7ba01bedea3/dumprar.py#L223-L266
def show_item_v3(h): """Show any RAR3 record. """ st = rar3_type(h.type) xprint("%s: hdrlen=%d datlen=%d", st, h.header_size, h.add_size) if h.type in (rf.RAR_BLOCK_FILE, rf.RAR_BLOCK_SUB): if h.host_os == rf.RAR_OS_UNIX: s_mode = "0%o" % h.mode else: s_mode = "0x%x" % h.mode xprint(" flags=0x%04x:%s", h.flags, get_file_flags(h.flags)) if h.host_os >= 0 and h.host_os < len(os_list): s_os = os_list[h.host_os] else: s_os = "?" xprint(" os=%d:%s ver=%d mode=%s meth=%c cmp=%d dec=%d vol=%d", h.host_os, s_os, h.extract_version, s_mode, h.compress_type, h.compress_size, h.file_size, h.volume) ucrc = (h.CRC + (1 << 32)) & ((1 << 32) - 1) xprint(" crc=0x%08x (%d) date_time=%s", ucrc, h.CRC, fmt_time(h.date_time)) xprint(" name=%s", h.filename) if h.mtime: xprint(" mtime=%s", fmt_time(h.mtime)) if h.ctime: xprint(" ctime=%s", fmt_time(h.ctime)) if h.atime: xprint(" atime=%s", fmt_time(h.atime)) if h.arctime: xprint(" arctime=%s", fmt_time(h.arctime)) elif h.type == rf.RAR_BLOCK_MAIN: xprint(" flags=0x%04x:%s", h.flags, render_flags(h.flags, main_bits)) elif h.type == rf.RAR_BLOCK_ENDARC: xprint(" flags=0x%04x:%s", h.flags, render_flags(h.flags, endarc_bits)) elif h.type == rf.RAR_BLOCK_MARK: xprint(" flags=0x%04x:", h.flags) else: xprint(" flags=0x%04x:%s", h.flags, render_flags(h.flags, generic_bits)) if h.comment is not None: cm = repr(h.comment) if cm[0] == 'u': cm = cm[1:] xprint(" comment=%s", cm)
[ "def", "show_item_v3", "(", "h", ")", ":", "st", "=", "rar3_type", "(", "h", ".", "type", ")", "xprint", "(", "\"%s: hdrlen=%d datlen=%d\"", ",", "st", ",", "h", ".", "header_size", ",", "h", ".", "add_size", ")", "if", "h", ".", "type", "in", "(", "rf", ".", "RAR_BLOCK_FILE", ",", "rf", ".", "RAR_BLOCK_SUB", ")", ":", "if", "h", ".", "host_os", "==", "rf", ".", "RAR_OS_UNIX", ":", "s_mode", "=", "\"0%o\"", "%", "h", ".", "mode", "else", ":", "s_mode", "=", "\"0x%x\"", "%", "h", ".", "mode", "xprint", "(", "\" flags=0x%04x:%s\"", ",", "h", ".", "flags", ",", "get_file_flags", "(", "h", ".", "flags", ")", ")", "if", "h", ".", "host_os", ">=", "0", "and", "h", ".", "host_os", "<", "len", "(", "os_list", ")", ":", "s_os", "=", "os_list", "[", "h", ".", "host_os", "]", "else", ":", "s_os", "=", "\"?\"", "xprint", "(", "\" os=%d:%s ver=%d mode=%s meth=%c cmp=%d dec=%d vol=%d\"", ",", "h", ".", "host_os", ",", "s_os", ",", "h", ".", "extract_version", ",", "s_mode", ",", "h", ".", "compress_type", ",", "h", ".", "compress_size", ",", "h", ".", "file_size", ",", "h", ".", "volume", ")", "ucrc", "=", "(", "h", ".", "CRC", "+", "(", "1", "<<", "32", ")", ")", "&", "(", "(", "1", "<<", "32", ")", "-", "1", ")", "xprint", "(", "\" crc=0x%08x (%d) date_time=%s\"", ",", "ucrc", ",", "h", ".", "CRC", ",", "fmt_time", "(", "h", ".", "date_time", ")", ")", "xprint", "(", "\" name=%s\"", ",", "h", ".", "filename", ")", "if", "h", ".", "mtime", ":", "xprint", "(", "\" mtime=%s\"", ",", "fmt_time", "(", "h", ".", "mtime", ")", ")", "if", "h", ".", "ctime", ":", "xprint", "(", "\" ctime=%s\"", ",", "fmt_time", "(", "h", ".", "ctime", ")", ")", "if", "h", ".", "atime", ":", "xprint", "(", "\" atime=%s\"", ",", "fmt_time", "(", "h", ".", "atime", ")", ")", "if", "h", ".", "arctime", ":", "xprint", "(", "\" arctime=%s\"", ",", "fmt_time", "(", "h", ".", "arctime", ")", ")", "elif", "h", ".", "type", "==", "rf", ".", "RAR_BLOCK_MAIN", ":", "xprint", "(", "\" flags=0x%04x:%s\"", ",", "h", ".", "flags", ",", "render_flags", "(", "h", ".", "flags", ",", "main_bits", ")", ")", "elif", "h", ".", "type", "==", "rf", ".", "RAR_BLOCK_ENDARC", ":", "xprint", "(", "\" flags=0x%04x:%s\"", ",", "h", ".", "flags", ",", "render_flags", "(", "h", ".", "flags", ",", "endarc_bits", ")", ")", "elif", "h", ".", "type", "==", "rf", ".", "RAR_BLOCK_MARK", ":", "xprint", "(", "\" flags=0x%04x:\"", ",", "h", ".", "flags", ")", "else", ":", "xprint", "(", "\" flags=0x%04x:%s\"", ",", "h", ".", "flags", ",", "render_flags", "(", "h", ".", "flags", ",", "generic_bits", ")", ")", "if", "h", ".", "comment", "is", "not", "None", ":", "cm", "=", "repr", "(", "h", ".", "comment", ")", "if", "cm", "[", "0", "]", "==", "'u'", ":", "cm", "=", "cm", "[", "1", ":", "]", "xprint", "(", "\" comment=%s\"", ",", "cm", ")" ]
Show any RAR3 record.
[ "Show", "any", "RAR3", "record", "." ]
python
train
avelino/bottle-auth
bottle_auth/core/escape.py
https://github.com/avelino/bottle-auth/blob/db07e526864aeac05ee68444b47e5db29540ce18/bottle_auth/core/escape.py#L188-L200
def to_basestring(value): """Converts a string argument to a subclass of basestring. In python2, byte and unicode strings are mostly interchangeable, so functions that deal with a user-supplied argument in combination with ascii string constants can use either and should return the type the user supplied. In python3, the two types are not interchangeable, so this method is needed to convert byte strings to unicode. """ if isinstance(value, _BASESTRING_TYPES): return value assert isinstance(value, bytes) return value.decode("utf-8")
[ "def", "to_basestring", "(", "value", ")", ":", "if", "isinstance", "(", "value", ",", "_BASESTRING_TYPES", ")", ":", "return", "value", "assert", "isinstance", "(", "value", ",", "bytes", ")", "return", "value", ".", "decode", "(", "\"utf-8\"", ")" ]
Converts a string argument to a subclass of basestring. In python2, byte and unicode strings are mostly interchangeable, so functions that deal with a user-supplied argument in combination with ascii string constants can use either and should return the type the user supplied. In python3, the two types are not interchangeable, so this method is needed to convert byte strings to unicode.
[ "Converts", "a", "string", "argument", "to", "a", "subclass", "of", "basestring", "." ]
python
test
ofa/django-bouncy
django_bouncy/utils.py
https://github.com/ofa/django-bouncy/blob/a386dfa8c4ce59bd18978a3537c03cd6ad07bf06/django_bouncy/utils.py#L117-L150
def approve_subscription(data): """ Function to approve a SNS subscription with Amazon We don't do a ton of verification here, past making sure that the endpoint we're told to go to to verify the subscription is on the correct host """ url = data['SubscribeURL'] domain = urlparse(url).netloc pattern = getattr( settings, 'BOUNCY_SUBSCRIBE_DOMAIN_REGEX', r"sns.[a-z0-9\-]+.amazonaws.com$" ) if not re.search(pattern, domain): logger.error('Invalid Subscription Domain %s', url) return HttpResponseBadRequest('Improper Subscription Domain') try: result = urlopen(url).read() logger.info('Subscription Request Sent %s', url) except urllib.HTTPError as error: result = error.read() logger.warning('HTTP Error Creating Subscription %s', str(result)) signals.subscription.send( sender='bouncy_approve_subscription', result=result, notification=data ) # Return a 200 Status Code return HttpResponse(six.u(result))
[ "def", "approve_subscription", "(", "data", ")", ":", "url", "=", "data", "[", "'SubscribeURL'", "]", "domain", "=", "urlparse", "(", "url", ")", ".", "netloc", "pattern", "=", "getattr", "(", "settings", ",", "'BOUNCY_SUBSCRIBE_DOMAIN_REGEX'", ",", "r\"sns.[a-z0-9\\-]+.amazonaws.com$\"", ")", "if", "not", "re", ".", "search", "(", "pattern", ",", "domain", ")", ":", "logger", ".", "error", "(", "'Invalid Subscription Domain %s'", ",", "url", ")", "return", "HttpResponseBadRequest", "(", "'Improper Subscription Domain'", ")", "try", ":", "result", "=", "urlopen", "(", "url", ")", ".", "read", "(", ")", "logger", ".", "info", "(", "'Subscription Request Sent %s'", ",", "url", ")", "except", "urllib", ".", "HTTPError", "as", "error", ":", "result", "=", "error", ".", "read", "(", ")", "logger", ".", "warning", "(", "'HTTP Error Creating Subscription %s'", ",", "str", "(", "result", ")", ")", "signals", ".", "subscription", ".", "send", "(", "sender", "=", "'bouncy_approve_subscription'", ",", "result", "=", "result", ",", "notification", "=", "data", ")", "# Return a 200 Status Code", "return", "HttpResponse", "(", "six", ".", "u", "(", "result", ")", ")" ]
Function to approve a SNS subscription with Amazon We don't do a ton of verification here, past making sure that the endpoint we're told to go to to verify the subscription is on the correct host
[ "Function", "to", "approve", "a", "SNS", "subscription", "with", "Amazon" ]
python
train
jasonrbriggs/stomp.py
stomp/adapter/multicast.py
https://github.com/jasonrbriggs/stomp.py/blob/643843c5fbf25fd24339dd0e69a9411c3d8b94c7/stomp/adapter/multicast.py#L62-L86
def process_frame(self, f, frame_str): """ :param Frame f: Frame object :param bytes frame_str: Raw frame content """ frame_type = f.cmd.lower() if frame_type in ['disconnect']: return if frame_type == 'send': frame_type = 'message' f.cmd = 'MESSAGE' if frame_type in ['connected', 'message', 'receipt', 'error', 'heartbeat']: if frame_type == 'message': if f.headers['destination'] not in self.subscriptions.values(): return (f.headers, f.body) = self.notify('before_message', f.headers, f.body) self.notify(frame_type, f.headers, f.body) if 'receipt' in f.headers: receipt_frame = Frame('RECEIPT', {'receipt-id': f.headers['receipt']}) lines = convert_frame(receipt_frame) self.send(encode(pack(lines))) log.debug("Received frame: %r, headers=%r, body=%r", f.cmd, f.headers, f.body)
[ "def", "process_frame", "(", "self", ",", "f", ",", "frame_str", ")", ":", "frame_type", "=", "f", ".", "cmd", ".", "lower", "(", ")", "if", "frame_type", "in", "[", "'disconnect'", "]", ":", "return", "if", "frame_type", "==", "'send'", ":", "frame_type", "=", "'message'", "f", ".", "cmd", "=", "'MESSAGE'", "if", "frame_type", "in", "[", "'connected'", ",", "'message'", ",", "'receipt'", ",", "'error'", ",", "'heartbeat'", "]", ":", "if", "frame_type", "==", "'message'", ":", "if", "f", ".", "headers", "[", "'destination'", "]", "not", "in", "self", ".", "subscriptions", ".", "values", "(", ")", ":", "return", "(", "f", ".", "headers", ",", "f", ".", "body", ")", "=", "self", ".", "notify", "(", "'before_message'", ",", "f", ".", "headers", ",", "f", ".", "body", ")", "self", ".", "notify", "(", "frame_type", ",", "f", ".", "headers", ",", "f", ".", "body", ")", "if", "'receipt'", "in", "f", ".", "headers", ":", "receipt_frame", "=", "Frame", "(", "'RECEIPT'", ",", "{", "'receipt-id'", ":", "f", ".", "headers", "[", "'receipt'", "]", "}", ")", "lines", "=", "convert_frame", "(", "receipt_frame", ")", "self", ".", "send", "(", "encode", "(", "pack", "(", "lines", ")", ")", ")", "log", ".", "debug", "(", "\"Received frame: %r, headers=%r, body=%r\"", ",", "f", ".", "cmd", ",", "f", ".", "headers", ",", "f", ".", "body", ")" ]
:param Frame f: Frame object :param bytes frame_str: Raw frame content
[ ":", "param", "Frame", "f", ":", "Frame", "object", ":", "param", "bytes", "frame_str", ":", "Raw", "frame", "content" ]
python
train
h2oai/h2o-3
h2o-py/h2o/estimators/estimator_base.py
https://github.com/h2oai/h2o-3/blob/dd62aaa1e7f680a8b16ee14bc66b0fb5195c2ad8/h2o-py/h2o/estimators/estimator_base.py#L79-L86
def join(self): """Wait until job's completion.""" self._future = False self._job.poll() model_key = self._job.dest_key self._job = None model_json = h2o.api("GET /%d/Models/%s" % (self._rest_version, model_key))["models"][0] self._resolve_model(model_key, model_json)
[ "def", "join", "(", "self", ")", ":", "self", ".", "_future", "=", "False", "self", ".", "_job", ".", "poll", "(", ")", "model_key", "=", "self", ".", "_job", ".", "dest_key", "self", ".", "_job", "=", "None", "model_json", "=", "h2o", ".", "api", "(", "\"GET /%d/Models/%s\"", "%", "(", "self", ".", "_rest_version", ",", "model_key", ")", ")", "[", "\"models\"", "]", "[", "0", "]", "self", ".", "_resolve_model", "(", "model_key", ",", "model_json", ")" ]
Wait until job's completion.
[ "Wait", "until", "job", "s", "completion", "." ]
python
test
CZ-NIC/yangson
yangson/schemadata.py
https://github.com/CZ-NIC/yangson/blob/a4b9464041fa8b28f6020a420ababf18fddf5d4a/yangson/schemadata.py#L449-L457
def derived_from(self, identity: QualName) -> MutableSet[QualName]: """Return list of identities transitively derived from `identity`.""" try: res = self.identity_adjs[identity].derivs except KeyError: return set() for id in res.copy(): res |= self.derived_from(id) return res
[ "def", "derived_from", "(", "self", ",", "identity", ":", "QualName", ")", "->", "MutableSet", "[", "QualName", "]", ":", "try", ":", "res", "=", "self", ".", "identity_adjs", "[", "identity", "]", ".", "derivs", "except", "KeyError", ":", "return", "set", "(", ")", "for", "id", "in", "res", ".", "copy", "(", ")", ":", "res", "|=", "self", ".", "derived_from", "(", "id", ")", "return", "res" ]
Return list of identities transitively derived from `identity`.
[ "Return", "list", "of", "identities", "transitively", "derived", "from", "identity", "." ]
python
train
mitsei/dlkit
dlkit/runtime/impls/proxy/managers.py
https://github.com/mitsei/dlkit/blob/445f968a175d61c8d92c0f617a3c17dc1dc7c584/dlkit/runtime/impls/proxy/managers.py#L93-L114
def get_proxy_session(self): """Gets a ``ProxySession`` which is responsible for acquiring authentication credentials on behalf of a service client. :return: a proxy session for this service :rtype: ``osid.proxy.ProxySession`` :raise: ``OperationFailed`` -- unable to complete request :raise: ``Unimplemented`` -- ``supports_proxy()`` is ``false`` *compliance: optional -- This method must be implemented if ``supports_proxy()`` is ``true``.* """ if not self.supports_proxy(): raise Unimplemented() try: from . import sessions except ImportError: raise # OperationFailed() try: session = sessions.ProxySession() except AttributeError: raise # OperationFailed() return session
[ "def", "get_proxy_session", "(", "self", ")", ":", "if", "not", "self", ".", "supports_proxy", "(", ")", ":", "raise", "Unimplemented", "(", ")", "try", ":", "from", ".", "import", "sessions", "except", "ImportError", ":", "raise", "# OperationFailed()", "try", ":", "session", "=", "sessions", ".", "ProxySession", "(", ")", "except", "AttributeError", ":", "raise", "# OperationFailed()", "return", "session" ]
Gets a ``ProxySession`` which is responsible for acquiring authentication credentials on behalf of a service client. :return: a proxy session for this service :rtype: ``osid.proxy.ProxySession`` :raise: ``OperationFailed`` -- unable to complete request :raise: ``Unimplemented`` -- ``supports_proxy()`` is ``false`` *compliance: optional -- This method must be implemented if ``supports_proxy()`` is ``true``.*
[ "Gets", "a", "ProxySession", "which", "is", "responsible", "for", "acquiring", "authentication", "credentials", "on", "behalf", "of", "a", "service", "client", "." ]
python
train
roycoding/slots
slots/slots.py
https://github.com/roycoding/slots/blob/1ed9b203fa02002c09b9dad73e2a97c04a45ef20/slots/slots.py#L345-L358
def regret(self): ''' Calculate expected regret, where expected regret is maximum optimal reward - sum of collected rewards, i.e. expected regret = T*max_k(mean_k) - sum_(t=1-->T) (reward_t) Returns ------- float ''' return (sum(self.pulls)*np.max(np.nan_to_num(self.wins/self.pulls)) - sum(self.wins)) / sum(self.pulls)
[ "def", "regret", "(", "self", ")", ":", "return", "(", "sum", "(", "self", ".", "pulls", ")", "*", "np", ".", "max", "(", "np", ".", "nan_to_num", "(", "self", ".", "wins", "/", "self", ".", "pulls", ")", ")", "-", "sum", "(", "self", ".", "wins", ")", ")", "/", "sum", "(", "self", ".", "pulls", ")" ]
Calculate expected regret, where expected regret is maximum optimal reward - sum of collected rewards, i.e. expected regret = T*max_k(mean_k) - sum_(t=1-->T) (reward_t) Returns ------- float
[ "Calculate", "expected", "regret", "where", "expected", "regret", "is", "maximum", "optimal", "reward", "-", "sum", "of", "collected", "rewards", "i", ".", "e", "." ]
python
train
spulec/moto
moto/kms/responses.py
https://github.com/spulec/moto/blob/4a286c4bc288933bb023396e2784a6fdbb966bc9/moto/kms/responses.py#L225-L233
def encrypt(self): """ We perform no encryption, we just encode the value as base64 and then decode it in decrypt(). """ value = self.parameters.get("Plaintext") if isinstance(value, six.text_type): value = value.encode('utf-8') return json.dumps({"CiphertextBlob": base64.b64encode(value).decode("utf-8"), 'KeyId': 'key_id'})
[ "def", "encrypt", "(", "self", ")", ":", "value", "=", "self", ".", "parameters", ".", "get", "(", "\"Plaintext\"", ")", "if", "isinstance", "(", "value", ",", "six", ".", "text_type", ")", ":", "value", "=", "value", ".", "encode", "(", "'utf-8'", ")", "return", "json", ".", "dumps", "(", "{", "\"CiphertextBlob\"", ":", "base64", ".", "b64encode", "(", "value", ")", ".", "decode", "(", "\"utf-8\"", ")", ",", "'KeyId'", ":", "'key_id'", "}", ")" ]
We perform no encryption, we just encode the value as base64 and then decode it in decrypt().
[ "We", "perform", "no", "encryption", "we", "just", "encode", "the", "value", "as", "base64", "and", "then", "decode", "it", "in", "decrypt", "()", "." ]
python
train
rwl/godot
godot/dot_data_parser.py
https://github.com/rwl/godot/blob/013687c9e8983d2aa2ceebb8a76c5c4f1e37c90f/godot/dot_data_parser.py#L51-L66
def _proc_node_stmt(self, toks): """ Return (ADD_NODE, node_name, options) """ opts = toks[1] dummy_node = Node("dummy") # Coerce attribute types. for key, value in opts.iteritems(): trait = dummy_node.trait(key) if trait is not None: if trait.is_trait_type( Float ): opts[key] = float( value ) elif trait.is_trait_type( Tuple ): opts[key] = tuple( [float(c) for c in value.split(",")] ) return super(GodotDataParser, self)._proc_node_stmt(toks)
[ "def", "_proc_node_stmt", "(", "self", ",", "toks", ")", ":", "opts", "=", "toks", "[", "1", "]", "dummy_node", "=", "Node", "(", "\"dummy\"", ")", "# Coerce attribute types.", "for", "key", ",", "value", "in", "opts", ".", "iteritems", "(", ")", ":", "trait", "=", "dummy_node", ".", "trait", "(", "key", ")", "if", "trait", "is", "not", "None", ":", "if", "trait", ".", "is_trait_type", "(", "Float", ")", ":", "opts", "[", "key", "]", "=", "float", "(", "value", ")", "elif", "trait", ".", "is_trait_type", "(", "Tuple", ")", ":", "opts", "[", "key", "]", "=", "tuple", "(", "[", "float", "(", "c", ")", "for", "c", "in", "value", ".", "split", "(", "\",\"", ")", "]", ")", "return", "super", "(", "GodotDataParser", ",", "self", ")", ".", "_proc_node_stmt", "(", "toks", ")" ]
Return (ADD_NODE, node_name, options)
[ "Return", "(", "ADD_NODE", "node_name", "options", ")" ]
python
test
cariad/py-wpconfigr
wpconfigr/__main__.py
https://github.com/cariad/py-wpconfigr/blob/8f25bb849b72ce95957566544a2be8445316c818/wpconfigr/__main__.py#L12-L76
def run_from_cli(): """ Perform an update instigated from a CLI. """ arg_parser = argparse.ArgumentParser( description='Read and write properties in a wp-config.php file. ' 'Include a --value argument to set the value, omit it to ' 'read the value of the specified key.', prog='python -m wpconfigr') arg_parser.add_argument('--filename', help='wp-config.php filename', required=True) arg_parser.add_argument('--key', help='Property key', required=True) arg_parser.add_argument('--value', help='New property value', required=False) arg_parser.add_argument('--log-level', default='CRITICAL', help='Log level', required=False) arg_parser.add_argument('--set-true', action='store_true', help='Set the value as boolean true') arg_parser.add_argument('--set-false', action='store_true', help='Set the value as boolean false') args = arg_parser.parse_args() if args.set_true and args.set_false: arg_parser.error('Cannot set --set-true and --set-false.') if args.value and args.set_true: arg_parser.error('Cannot set --value and --set-true.') if args.value and args.set_false: arg_parser.error('Cannot set --value and --set-false.') basicConfig(level=str(args.log_level).upper()) updater = WpConfigFile(filename=args.filename) if args.set_true: value = True elif args.set_false: value = False else: value = args.value if value is not None: updater.set(key=args.key, value=value) else: got = updater.get(key=args.key) if got: print(got)
[ "def", "run_from_cli", "(", ")", ":", "arg_parser", "=", "argparse", ".", "ArgumentParser", "(", "description", "=", "'Read and write properties in a wp-config.php file. '", "'Include a --value argument to set the value, omit it to '", "'read the value of the specified key.'", ",", "prog", "=", "'python -m wpconfigr'", ")", "arg_parser", ".", "add_argument", "(", "'--filename'", ",", "help", "=", "'wp-config.php filename'", ",", "required", "=", "True", ")", "arg_parser", ".", "add_argument", "(", "'--key'", ",", "help", "=", "'Property key'", ",", "required", "=", "True", ")", "arg_parser", ".", "add_argument", "(", "'--value'", ",", "help", "=", "'New property value'", ",", "required", "=", "False", ")", "arg_parser", ".", "add_argument", "(", "'--log-level'", ",", "default", "=", "'CRITICAL'", ",", "help", "=", "'Log level'", ",", "required", "=", "False", ")", "arg_parser", ".", "add_argument", "(", "'--set-true'", ",", "action", "=", "'store_true'", ",", "help", "=", "'Set the value as boolean true'", ")", "arg_parser", ".", "add_argument", "(", "'--set-false'", ",", "action", "=", "'store_true'", ",", "help", "=", "'Set the value as boolean false'", ")", "args", "=", "arg_parser", ".", "parse_args", "(", ")", "if", "args", ".", "set_true", "and", "args", ".", "set_false", ":", "arg_parser", ".", "error", "(", "'Cannot set --set-true and --set-false.'", ")", "if", "args", ".", "value", "and", "args", ".", "set_true", ":", "arg_parser", ".", "error", "(", "'Cannot set --value and --set-true.'", ")", "if", "args", ".", "value", "and", "args", ".", "set_false", ":", "arg_parser", ".", "error", "(", "'Cannot set --value and --set-false.'", ")", "basicConfig", "(", "level", "=", "str", "(", "args", ".", "log_level", ")", ".", "upper", "(", ")", ")", "updater", "=", "WpConfigFile", "(", "filename", "=", "args", ".", "filename", ")", "if", "args", ".", "set_true", ":", "value", "=", "True", "elif", "args", ".", "set_false", ":", "value", "=", "False", "else", ":", "value", "=", "args", ".", "value", "if", "value", "is", "not", "None", ":", "updater", ".", "set", "(", "key", "=", "args", ".", "key", ",", "value", "=", "value", ")", "else", ":", "got", "=", "updater", ".", "get", "(", "key", "=", "args", ".", "key", ")", "if", "got", ":", "print", "(", "got", ")" ]
Perform an update instigated from a CLI.
[ "Perform", "an", "update", "instigated", "from", "a", "CLI", "." ]
python
train
ArchiveTeam/wpull
wpull/util.py
https://github.com/ArchiveTeam/wpull/blob/ddf051aa3322479325ba20aa778cb2cb97606bf5/wpull/util.py#L64-L73
def seek_file_end(file): '''Seek to the end of the file.''' try: file.seek(0, 2) except ValueError: # gzip files don't support seek from end while True: data = file.read(4096) if not data: break
[ "def", "seek_file_end", "(", "file", ")", ":", "try", ":", "file", ".", "seek", "(", "0", ",", "2", ")", "except", "ValueError", ":", "# gzip files don't support seek from end", "while", "True", ":", "data", "=", "file", ".", "read", "(", "4096", ")", "if", "not", "data", ":", "break" ]
Seek to the end of the file.
[ "Seek", "to", "the", "end", "of", "the", "file", "." ]
python
train
DataDog/integrations-core
sqlserver/datadog_checks/sqlserver/sqlserver.py
https://github.com/DataDog/integrations-core/blob/ebd41c873cf9f97a8c51bf9459bc6a7536af8acd/sqlserver/datadog_checks/sqlserver/sqlserver.py#L566-L584
def proc_check_guard(self, instance, sql): """ check to see if the guard SQL returns a single column containing 0 or 1 We return true if 1, else False """ self.open_db_connections(instance, self.PROC_GUARD_DB_KEY) cursor = self.get_cursor(instance, self.PROC_GUARD_DB_KEY) should_run = False try: cursor.execute(sql, ()) result = cursor.fetchone() should_run = result[0] == 1 except Exception as e: self.log.error("Failed to run proc_only_if sql {} : {}".format(sql, e)) self.close_cursor(cursor) self.close_db_connections(instance, self.PROC_GUARD_DB_KEY) return should_run
[ "def", "proc_check_guard", "(", "self", ",", "instance", ",", "sql", ")", ":", "self", ".", "open_db_connections", "(", "instance", ",", "self", ".", "PROC_GUARD_DB_KEY", ")", "cursor", "=", "self", ".", "get_cursor", "(", "instance", ",", "self", ".", "PROC_GUARD_DB_KEY", ")", "should_run", "=", "False", "try", ":", "cursor", ".", "execute", "(", "sql", ",", "(", ")", ")", "result", "=", "cursor", ".", "fetchone", "(", ")", "should_run", "=", "result", "[", "0", "]", "==", "1", "except", "Exception", "as", "e", ":", "self", ".", "log", ".", "error", "(", "\"Failed to run proc_only_if sql {} : {}\"", ".", "format", "(", "sql", ",", "e", ")", ")", "self", ".", "close_cursor", "(", "cursor", ")", "self", ".", "close_db_connections", "(", "instance", ",", "self", ".", "PROC_GUARD_DB_KEY", ")", "return", "should_run" ]
check to see if the guard SQL returns a single column containing 0 or 1 We return true if 1, else False
[ "check", "to", "see", "if", "the", "guard", "SQL", "returns", "a", "single", "column", "containing", "0", "or", "1", "We", "return", "true", "if", "1", "else", "False" ]
python
train
cgarciae/dataget
dataget/__coconut__.py
https://github.com/cgarciae/dataget/blob/04d3d9c68ebdcbed103605731a1be0f26e1c36fa/dataget/__coconut__.py#L244-L246
def index(self, elem): """Find the index of elem in the reversed iterator.""" return _coconut.len(self._iter) - self._iter.index(elem) - 1
[ "def", "index", "(", "self", ",", "elem", ")", ":", "return", "_coconut", ".", "len", "(", "self", ".", "_iter", ")", "-", "self", ".", "_iter", ".", "index", "(", "elem", ")", "-", "1" ]
Find the index of elem in the reversed iterator.
[ "Find", "the", "index", "of", "elem", "in", "the", "reversed", "iterator", "." ]
python
train
zblz/naima
naima/radiative.py
https://github.com/zblz/naima/blob/d6a6781d73bf58fd8269e8b0e3b70be22723cd5b/naima/radiative.py#L945-L958
def _F(x, gam): """ Eqs. A6, A7 of Baring et al. (1999) """ beta = np.sqrt(1 - gam ** -2) B = 1 + 0.5 * (gam ** 2 - 1) C = 10 * x * gam * beta * (2 + gam * beta) C /= 1 + x ** 2 * (gam ** 2 - 1) F_1 = (17 - 3 * x ** 2 / (2 - x) ** 2 - C) * np.sqrt(1 - x) F_2 = 12 * (2 - x) - 7 * x ** 2 / (2 - x) - 3 * x ** 4 / (2 - x) ** 3 F_3 = np.log((1 + np.sqrt(1 - x)) / np.sqrt(x)) return B * F_1 + F_2 * F_3
[ "def", "_F", "(", "x", ",", "gam", ")", ":", "beta", "=", "np", ".", "sqrt", "(", "1", "-", "gam", "**", "-", "2", ")", "B", "=", "1", "+", "0.5", "*", "(", "gam", "**", "2", "-", "1", ")", "C", "=", "10", "*", "x", "*", "gam", "*", "beta", "*", "(", "2", "+", "gam", "*", "beta", ")", "C", "/=", "1", "+", "x", "**", "2", "*", "(", "gam", "**", "2", "-", "1", ")", "F_1", "=", "(", "17", "-", "3", "*", "x", "**", "2", "/", "(", "2", "-", "x", ")", "**", "2", "-", "C", ")", "*", "np", ".", "sqrt", "(", "1", "-", "x", ")", "F_2", "=", "12", "*", "(", "2", "-", "x", ")", "-", "7", "*", "x", "**", "2", "/", "(", "2", "-", "x", ")", "-", "3", "*", "x", "**", "4", "/", "(", "2", "-", "x", ")", "**", "3", "F_3", "=", "np", ".", "log", "(", "(", "1", "+", "np", ".", "sqrt", "(", "1", "-", "x", ")", ")", "/", "np", ".", "sqrt", "(", "x", ")", ")", "return", "B", "*", "F_1", "+", "F_2", "*", "F_3" ]
Eqs. A6, A7 of Baring et al. (1999)
[ "Eqs", ".", "A6", "A7", "of", "Baring", "et", "al", ".", "(", "1999", ")" ]
python
train
ThreatConnect-Inc/tcex
tcex/tcex_bin_package.py
https://github.com/ThreatConnect-Inc/tcex/blob/dd4d7a1ef723af1561687120191886b9a2fd4b47/tcex/tcex_bin_package.py#L110-L132
def bundle_apps(self, bundle_name, bundle_apps): """Bundle multiple Job or Playbook Apps (.tcx files) into a single zip file. Args: bundle_name (str): The output name of the bundle zip file. bundle_apps (list): A list of Apps to include in the bundle. """ bundle_file = os.path.join( self.app_path, self.args.outdir, '{}-bundle.zip'.format(bundle_name) ) z = zipfile.ZipFile(bundle_file, 'w') for app in bundle_apps: # update package data self.package_data['bundle'].append( {'action': 'Adding App:', 'output': os.path.basename(app)} ) z.write(app, os.path.basename(app)) # update package data self.package_data['bundle'].append( {'action': 'Created Bundle:', 'output': os.path.basename(bundle_file)} ) z.close()
[ "def", "bundle_apps", "(", "self", ",", "bundle_name", ",", "bundle_apps", ")", ":", "bundle_file", "=", "os", ".", "path", ".", "join", "(", "self", ".", "app_path", ",", "self", ".", "args", ".", "outdir", ",", "'{}-bundle.zip'", ".", "format", "(", "bundle_name", ")", ")", "z", "=", "zipfile", ".", "ZipFile", "(", "bundle_file", ",", "'w'", ")", "for", "app", "in", "bundle_apps", ":", "# update package data", "self", ".", "package_data", "[", "'bundle'", "]", ".", "append", "(", "{", "'action'", ":", "'Adding App:'", ",", "'output'", ":", "os", ".", "path", ".", "basename", "(", "app", ")", "}", ")", "z", ".", "write", "(", "app", ",", "os", ".", "path", ".", "basename", "(", "app", ")", ")", "# update package data", "self", ".", "package_data", "[", "'bundle'", "]", ".", "append", "(", "{", "'action'", ":", "'Created Bundle:'", ",", "'output'", ":", "os", ".", "path", ".", "basename", "(", "bundle_file", ")", "}", ")", "z", ".", "close", "(", ")" ]
Bundle multiple Job or Playbook Apps (.tcx files) into a single zip file. Args: bundle_name (str): The output name of the bundle zip file. bundle_apps (list): A list of Apps to include in the bundle.
[ "Bundle", "multiple", "Job", "or", "Playbook", "Apps", "(", ".", "tcx", "files", ")", "into", "a", "single", "zip", "file", "." ]
python
train
DistrictDataLabs/yellowbrick
yellowbrick/utils/types.py
https://github.com/DistrictDataLabs/yellowbrick/blob/59b67236a3862c73363e8edad7cd86da5b69e3b2/yellowbrick/utils/types.py#L202-L214
def is_structured_array(obj): """ Returns True if the given object is a Numpy Structured Array. Parameters ---------- obj: instance The object to test whether or not is a Numpy Structured Array. """ if isinstance(obj, np.ndarray) and hasattr(obj, 'dtype'): if obj.dtype.names is not None: return True return False
[ "def", "is_structured_array", "(", "obj", ")", ":", "if", "isinstance", "(", "obj", ",", "np", ".", "ndarray", ")", "and", "hasattr", "(", "obj", ",", "'dtype'", ")", ":", "if", "obj", ".", "dtype", ".", "names", "is", "not", "None", ":", "return", "True", "return", "False" ]
Returns True if the given object is a Numpy Structured Array. Parameters ---------- obj: instance The object to test whether or not is a Numpy Structured Array.
[ "Returns", "True", "if", "the", "given", "object", "is", "a", "Numpy", "Structured", "Array", "." ]
python
train
NikolayDachev/jadm
lib/paramiko-1.14.1/paramiko/channel.py
https://github.com/NikolayDachev/jadm/blob/12bb550445edfcd87506f7cba7a6a35d413c5511/lib/paramiko-1.14.1/paramiko/channel.py#L700-L734
def send_stderr(self, s): """ Send data to the channel on the "stderr" stream. This is normally only used by servers to send output from shell commands -- clients won't use this. Returns the number of bytes sent, or 0 if the channel stream is closed. Applications are responsible for checking that all data has been sent: if only some of the data was transmitted, the application needs to attempt delivery of the remaining data. :param str s: data to send. :return: number of bytes actually sent, as an `int`. :raises socket.timeout: if no data could be sent before the timeout set by `settimeout`. .. versionadded:: 1.1 """ size = len(s) self.lock.acquire() try: size = self._wait_for_send_window(size) if size == 0: # eof or similar return 0 m = Message() m.add_byte(cMSG_CHANNEL_EXTENDED_DATA) m.add_int(self.remote_chanid) m.add_int(1) m.add_string(s[:size]) finally: self.lock.release() # Note: We release self.lock before calling _send_user_message. # Otherwise, we can deadlock during re-keying. self.transport._send_user_message(m) return size
[ "def", "send_stderr", "(", "self", ",", "s", ")", ":", "size", "=", "len", "(", "s", ")", "self", ".", "lock", ".", "acquire", "(", ")", "try", ":", "size", "=", "self", ".", "_wait_for_send_window", "(", "size", ")", "if", "size", "==", "0", ":", "# eof or similar", "return", "0", "m", "=", "Message", "(", ")", "m", ".", "add_byte", "(", "cMSG_CHANNEL_EXTENDED_DATA", ")", "m", ".", "add_int", "(", "self", ".", "remote_chanid", ")", "m", ".", "add_int", "(", "1", ")", "m", ".", "add_string", "(", "s", "[", ":", "size", "]", ")", "finally", ":", "self", ".", "lock", ".", "release", "(", ")", "# Note: We release self.lock before calling _send_user_message.", "# Otherwise, we can deadlock during re-keying.", "self", ".", "transport", ".", "_send_user_message", "(", "m", ")", "return", "size" ]
Send data to the channel on the "stderr" stream. This is normally only used by servers to send output from shell commands -- clients won't use this. Returns the number of bytes sent, or 0 if the channel stream is closed. Applications are responsible for checking that all data has been sent: if only some of the data was transmitted, the application needs to attempt delivery of the remaining data. :param str s: data to send. :return: number of bytes actually sent, as an `int`. :raises socket.timeout: if no data could be sent before the timeout set by `settimeout`. .. versionadded:: 1.1
[ "Send", "data", "to", "the", "channel", "on", "the", "stderr", "stream", ".", "This", "is", "normally", "only", "used", "by", "servers", "to", "send", "output", "from", "shell", "commands", "--", "clients", "won", "t", "use", "this", ".", "Returns", "the", "number", "of", "bytes", "sent", "or", "0", "if", "the", "channel", "stream", "is", "closed", ".", "Applications", "are", "responsible", "for", "checking", "that", "all", "data", "has", "been", "sent", ":", "if", "only", "some", "of", "the", "data", "was", "transmitted", "the", "application", "needs", "to", "attempt", "delivery", "of", "the", "remaining", "data", ".", ":", "param", "str", "s", ":", "data", "to", "send", ".", ":", "return", ":", "number", "of", "bytes", "actually", "sent", "as", "an", "int", ".", ":", "raises", "socket", ".", "timeout", ":", "if", "no", "data", "could", "be", "sent", "before", "the", "timeout", "set", "by", "settimeout", ".", "..", "versionadded", "::", "1", ".", "1" ]
python
train
20tab/twentytab-treeeditor
treeeditor/admin.py
https://github.com/20tab/twentytab-treeeditor/blob/f89d459b1348961880cd488df95690e68529f96b/treeeditor/admin.py#L96-L115
def ajax_editable_boolean(attr, short_description): """ Convenience function: Assign the return value of this method to a variable of your ModelAdmin class and put the variable name into list_display. Example:: class MyTreeEditor(TreeEditor): list_display = ('__unicode__', 'active_toggle') active_toggle = ajax_editable_boolean('active', _('is active')) """ def _fn(self, item): return ajax_editable_boolean_cell(item, attr) _fn.allow_tags = True _fn.short_description = short_description _fn.editable_boolean_field = attr return _fn
[ "def", "ajax_editable_boolean", "(", "attr", ",", "short_description", ")", ":", "def", "_fn", "(", "self", ",", "item", ")", ":", "return", "ajax_editable_boolean_cell", "(", "item", ",", "attr", ")", "_fn", ".", "allow_tags", "=", "True", "_fn", ".", "short_description", "=", "short_description", "_fn", ".", "editable_boolean_field", "=", "attr", "return", "_fn" ]
Convenience function: Assign the return value of this method to a variable of your ModelAdmin class and put the variable name into list_display. Example:: class MyTreeEditor(TreeEditor): list_display = ('__unicode__', 'active_toggle') active_toggle = ajax_editable_boolean('active', _('is active'))
[ "Convenience", "function", ":", "Assign", "the", "return", "value", "of", "this", "method", "to", "a", "variable", "of", "your", "ModelAdmin", "class", "and", "put", "the", "variable", "name", "into", "list_display", "." ]
python
test
Kitware/tangelo
tangelo/tangelo/pkgdata/plugin/watch/python/__init__.py
https://github.com/Kitware/tangelo/blob/470034ee9b3d7a01becc1ce5fddc7adc1d5263ef/tangelo/tangelo/pkgdata/plugin/watch/python/__init__.py#L237-L282
def watch_module_cache_get(cache, module): """ When we ask to fetch a module with optional config file, check time stamps and dependencies to determine if it should be reloaded or not. :param cache: the cache object that stores whether to check for config files and which files have been loaded. :param module: the path of the module to load. :returns: the loaded module. """ imp.acquire_lock() try: if not hasattr(cache, "timestamps"): cache.timestamps = {} mtime = os.path.getmtime(module) mtime = latest_submodule_time(module, mtime) if getattr(cache, "config", False): config_file = module[:-2] + "yaml" if os.path.exists(config_file): # Our timestamp is the latest time of the config file or the # module. mtime = max(mtime, os.path.getmtime(config_file)) # If we have a config file and the timestamp is more recent than # the recorded timestamp, remove the config file from the list of # loaded files so that it will get loaded again. if config_file in cache.config_files and mtime > cache.timestamps.get(module, 0): del cache.config_files[config_file] tangelo.log("WATCH", "Asking to reload config file %s" % config_file) # If the timestamp is more recent than the recorded value, remove the # the module from our records so that it will be loaded again. if module in cache.modules and mtime > cache.timestamps.get(module, 0): del cache.modules[module] tangelo.log("WATCH", "Asking to reload module %s" % module) if module not in cache.timestamps: tangelo.log_info("WATCH", "Monitoring module %s" % module) reload_recent_submodules(module, mtime) cache.timestamps[module] = mtime service = tangelo_module_cache_get(cache, module) # Update our time based on all the modules that we may have just # imported. The times can change from before because python files are # compiled, for instance. mtime = latest_submodule_time(module, mtime) cache.timestamps[module] = mtime finally: imp.release_lock() return service
[ "def", "watch_module_cache_get", "(", "cache", ",", "module", ")", ":", "imp", ".", "acquire_lock", "(", ")", "try", ":", "if", "not", "hasattr", "(", "cache", ",", "\"timestamps\"", ")", ":", "cache", ".", "timestamps", "=", "{", "}", "mtime", "=", "os", ".", "path", ".", "getmtime", "(", "module", ")", "mtime", "=", "latest_submodule_time", "(", "module", ",", "mtime", ")", "if", "getattr", "(", "cache", ",", "\"config\"", ",", "False", ")", ":", "config_file", "=", "module", "[", ":", "-", "2", "]", "+", "\"yaml\"", "if", "os", ".", "path", ".", "exists", "(", "config_file", ")", ":", "# Our timestamp is the latest time of the config file or the", "# module.", "mtime", "=", "max", "(", "mtime", ",", "os", ".", "path", ".", "getmtime", "(", "config_file", ")", ")", "# If we have a config file and the timestamp is more recent than", "# the recorded timestamp, remove the config file from the list of", "# loaded files so that it will get loaded again.", "if", "config_file", "in", "cache", ".", "config_files", "and", "mtime", ">", "cache", ".", "timestamps", ".", "get", "(", "module", ",", "0", ")", ":", "del", "cache", ".", "config_files", "[", "config_file", "]", "tangelo", ".", "log", "(", "\"WATCH\"", ",", "\"Asking to reload config file %s\"", "%", "config_file", ")", "# If the timestamp is more recent than the recorded value, remove the", "# the module from our records so that it will be loaded again.", "if", "module", "in", "cache", ".", "modules", "and", "mtime", ">", "cache", ".", "timestamps", ".", "get", "(", "module", ",", "0", ")", ":", "del", "cache", ".", "modules", "[", "module", "]", "tangelo", ".", "log", "(", "\"WATCH\"", ",", "\"Asking to reload module %s\"", "%", "module", ")", "if", "module", "not", "in", "cache", ".", "timestamps", ":", "tangelo", ".", "log_info", "(", "\"WATCH\"", ",", "\"Monitoring module %s\"", "%", "module", ")", "reload_recent_submodules", "(", "module", ",", "mtime", ")", "cache", ".", "timestamps", "[", "module", "]", "=", "mtime", "service", "=", "tangelo_module_cache_get", "(", "cache", ",", "module", ")", "# Update our time based on all the modules that we may have just", "# imported. The times can change from before because python files are", "# compiled, for instance.", "mtime", "=", "latest_submodule_time", "(", "module", ",", "mtime", ")", "cache", ".", "timestamps", "[", "module", "]", "=", "mtime", "finally", ":", "imp", ".", "release_lock", "(", ")", "return", "service" ]
When we ask to fetch a module with optional config file, check time stamps and dependencies to determine if it should be reloaded or not. :param cache: the cache object that stores whether to check for config files and which files have been loaded. :param module: the path of the module to load. :returns: the loaded module.
[ "When", "we", "ask", "to", "fetch", "a", "module", "with", "optional", "config", "file", "check", "time", "stamps", "and", "dependencies", "to", "determine", "if", "it", "should", "be", "reloaded", "or", "not", "." ]
python
train
gwastro/pycbc
pycbc/inference/models/gaussian_noise.py
https://github.com/gwastro/pycbc/blob/7a64cdd104d263f1b6ea0b01e6841837d05a4cb3/pycbc/inference/models/gaussian_noise.py#L557-L587
def low_frequency_cutoff_from_config(cp): """Gets the low frequency cutoff from the given config file. This looks for ``low-frequency-cutoff`` in the ``[model]`` section and casts it to float. If none is found, or the casting to float fails, an error is raised. Parameters ---------- cp : WorkflowConfigParser Config file parser to read. Returns ------- float : The low frequency cutoff. """ try: low_frequency_cutoff = float( cp.get('model', 'low-frequency-cutoff')) except (NoOptionError, NoSectionError) as e: logging.warning("Low frequency cutoff for calculation of inner " "product needs to be specified in config file " "under section 'model'") raise e except Exception as e: # everything the float() can throw logging.warning("Low frequency cutoff could not be " "converted to float ") raise e return low_frequency_cutoff
[ "def", "low_frequency_cutoff_from_config", "(", "cp", ")", ":", "try", ":", "low_frequency_cutoff", "=", "float", "(", "cp", ".", "get", "(", "'model'", ",", "'low-frequency-cutoff'", ")", ")", "except", "(", "NoOptionError", ",", "NoSectionError", ")", "as", "e", ":", "logging", ".", "warning", "(", "\"Low frequency cutoff for calculation of inner \"", "\"product needs to be specified in config file \"", "\"under section 'model'\"", ")", "raise", "e", "except", "Exception", "as", "e", ":", "# everything the float() can throw", "logging", ".", "warning", "(", "\"Low frequency cutoff could not be \"", "\"converted to float \"", ")", "raise", "e", "return", "low_frequency_cutoff" ]
Gets the low frequency cutoff from the given config file. This looks for ``low-frequency-cutoff`` in the ``[model]`` section and casts it to float. If none is found, or the casting to float fails, an error is raised. Parameters ---------- cp : WorkflowConfigParser Config file parser to read. Returns ------- float : The low frequency cutoff.
[ "Gets", "the", "low", "frequency", "cutoff", "from", "the", "given", "config", "file", "." ]
python
train
Erotemic/utool
utool/experimental/euler_tour_tree_avl.py
https://github.com/Erotemic/utool/blob/3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a/utool/experimental/euler_tour_tree_avl.py#L382-L393
def _traverse_nodes(self): """ Debugging function (exposes cython nodes as dummy nodes) """ node = self.root stack = [] while stack or node is not None: if node is not None: stack.append(node) node = node.left else: node = stack.pop() yield node node = node.right
[ "def", "_traverse_nodes", "(", "self", ")", ":", "node", "=", "self", ".", "root", "stack", "=", "[", "]", "while", "stack", "or", "node", "is", "not", "None", ":", "if", "node", "is", "not", "None", ":", "stack", ".", "append", "(", "node", ")", "node", "=", "node", ".", "left", "else", ":", "node", "=", "stack", ".", "pop", "(", ")", "yield", "node", "node", "=", "node", ".", "right" ]
Debugging function (exposes cython nodes as dummy nodes)
[ "Debugging", "function", "(", "exposes", "cython", "nodes", "as", "dummy", "nodes", ")" ]
python
train
HewlettPackard/python-hpOneView
hpOneView/resources/task_monitor.py
https://github.com/HewlettPackard/python-hpOneView/blob/3c6219723ef25e6e0c83d44a89007f89bc325b89/hpOneView/resources/task_monitor.py#L70-L90
def wait_for_task(self, task, timeout=-1): """ Wait for task execution and return associated resource. Args: task: task dict timeout: timeout in seconds Returns: Associated resource when creating or updating; True when deleting. """ self.__wait_task_completion(task, timeout) task = self.get(task) logger.debug("Waiting for task. Percentage complete: " + str(task.get('computedPercentComplete'))) logger.debug("Waiting for task. Task state: " + str(task.get('taskState'))) task_response = self.__get_task_response(task) logger.debug('Task completed') return task_response
[ "def", "wait_for_task", "(", "self", ",", "task", ",", "timeout", "=", "-", "1", ")", ":", "self", ".", "__wait_task_completion", "(", "task", ",", "timeout", ")", "task", "=", "self", ".", "get", "(", "task", ")", "logger", ".", "debug", "(", "\"Waiting for task. Percentage complete: \"", "+", "str", "(", "task", ".", "get", "(", "'computedPercentComplete'", ")", ")", ")", "logger", ".", "debug", "(", "\"Waiting for task. Task state: \"", "+", "str", "(", "task", ".", "get", "(", "'taskState'", ")", ")", ")", "task_response", "=", "self", ".", "__get_task_response", "(", "task", ")", "logger", ".", "debug", "(", "'Task completed'", ")", "return", "task_response" ]
Wait for task execution and return associated resource. Args: task: task dict timeout: timeout in seconds Returns: Associated resource when creating or updating; True when deleting.
[ "Wait", "for", "task", "execution", "and", "return", "associated", "resource", "." ]
python
train
ldo/dbussy
dbussy.py
https://github.com/ldo/dbussy/blob/59e4fbe8b8111ceead884e50d1973901a0a2d240/dbussy.py#L1481-L1488
def get_version() : "returns the libdbus library version as a tuple of integers (major, minor, micro)." major = ct.c_int() minor = ct.c_int() micro = ct.c_int() dbus.dbus_get_version(ct.byref(major), ct.byref(minor), ct.byref(micro)) return \ (major.value, minor.value, micro.value)
[ "def", "get_version", "(", ")", ":", "major", "=", "ct", ".", "c_int", "(", ")", "minor", "=", "ct", ".", "c_int", "(", ")", "micro", "=", "ct", ".", "c_int", "(", ")", "dbus", ".", "dbus_get_version", "(", "ct", ".", "byref", "(", "major", ")", ",", "ct", ".", "byref", "(", "minor", ")", ",", "ct", ".", "byref", "(", "micro", ")", ")", "return", "(", "major", ".", "value", ",", "minor", ".", "value", ",", "micro", ".", "value", ")" ]
returns the libdbus library version as a tuple of integers (major, minor, micro).
[ "returns", "the", "libdbus", "library", "version", "as", "a", "tuple", "of", "integers", "(", "major", "minor", "micro", ")", "." ]
python
train
westurner/pyrpo
pyrpo/pyrpo.py
https://github.com/westurner/pyrpo/blob/2a910af055dc405b761571a52ef87842397ddadf/pyrpo/pyrpo.py#L1905-L1933
def do_repo_report(repos, report='full', output=sys.stdout, *args, **kwargs): """ Do a repository report: call the report function for each Repository Args: repos (iterable): iterable of Repository instances report (string): report name output (writeable): output stream to print to Yields: Repository subclass """ for i, repo in enumerate(repos): log.debug(str((i, next(repo.origin_report())))) try: if repo is not None: reportfunc = REPORT_TYPES.get(report) if reportfunc is None: raise Exception("Unrecognized report type: %r (%s)" % (report, ', '.join(REPORT_TYPES.keys()))) for l in reportfunc(repo, *args, **kwargs): print(l, file=output) except Exception as e: log.error(repo) log.error(report) log.error(e) raise yield repo
[ "def", "do_repo_report", "(", "repos", ",", "report", "=", "'full'", ",", "output", "=", "sys", ".", "stdout", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "for", "i", ",", "repo", "in", "enumerate", "(", "repos", ")", ":", "log", ".", "debug", "(", "str", "(", "(", "i", ",", "next", "(", "repo", ".", "origin_report", "(", ")", ")", ")", ")", ")", "try", ":", "if", "repo", "is", "not", "None", ":", "reportfunc", "=", "REPORT_TYPES", ".", "get", "(", "report", ")", "if", "reportfunc", "is", "None", ":", "raise", "Exception", "(", "\"Unrecognized report type: %r (%s)\"", "%", "(", "report", ",", "', '", ".", "join", "(", "REPORT_TYPES", ".", "keys", "(", ")", ")", ")", ")", "for", "l", "in", "reportfunc", "(", "repo", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "print", "(", "l", ",", "file", "=", "output", ")", "except", "Exception", "as", "e", ":", "log", ".", "error", "(", "repo", ")", "log", ".", "error", "(", "report", ")", "log", ".", "error", "(", "e", ")", "raise", "yield", "repo" ]
Do a repository report: call the report function for each Repository Args: repos (iterable): iterable of Repository instances report (string): report name output (writeable): output stream to print to Yields: Repository subclass
[ "Do", "a", "repository", "report", ":", "call", "the", "report", "function", "for", "each", "Repository" ]
python
train
jbittel/django-mama-cas
mama_cas/models.py
https://github.com/jbittel/django-mama-cas/blob/03935d97442b46d8127ab9e1cd8deb96953fe156/mama_cas/models.py#L58-L66
def create_ticket_str(self, prefix=None): """ Generate a sufficiently opaque ticket string to ensure the ticket is not guessable. If a prefix is provided, prepend it to the string. """ if not prefix: prefix = self.model.TICKET_PREFIX return "%s-%d-%s" % (prefix, int(time.time()), get_random_string(length=self.model.TICKET_RAND_LEN))
[ "def", "create_ticket_str", "(", "self", ",", "prefix", "=", "None", ")", ":", "if", "not", "prefix", ":", "prefix", "=", "self", ".", "model", ".", "TICKET_PREFIX", "return", "\"%s-%d-%s\"", "%", "(", "prefix", ",", "int", "(", "time", ".", "time", "(", ")", ")", ",", "get_random_string", "(", "length", "=", "self", ".", "model", ".", "TICKET_RAND_LEN", ")", ")" ]
Generate a sufficiently opaque ticket string to ensure the ticket is not guessable. If a prefix is provided, prepend it to the string.
[ "Generate", "a", "sufficiently", "opaque", "ticket", "string", "to", "ensure", "the", "ticket", "is", "not", "guessable", ".", "If", "a", "prefix", "is", "provided", "prepend", "it", "to", "the", "string", "." ]
python
train
ascribe/pyspool
spool/spoolverb.py
https://github.com/ascribe/pyspool/blob/f8b10df1e7d2ea7950dde433c1cb6d5225112f4f/spool/spoolverb.py#L72-L117
def from_verb(cls, verb): """ Constructs a :class:`Spoolverb` instance from the string representation of the given verb. Args: verb (str): representation of the verb e.g.: ``'ASCRIBESPOOL01LOAN12/150526150528'``. Can also be in binary format (:obj:`bytes`): ``b'ASCRIBESPOOL01PIECE'``. Returns: :class:`Spoolverb` instance. """ pattern = r'^(?P<meta>[A-Z]+)(?P<version>\d+)(?P<action>[A-Z]+)(?P<arg1>\d+)?(\/(?P<arg2>\d+))?$' try: verb = verb.decode() except AttributeError: pass match = re.match(pattern, verb) if not match: raise SpoolverbError('Invalid spoolverb: {}'.format(verb)) data = match.groupdict() meta = data['meta'] version = data['version'] action = data['action'] if action == 'EDITIONS': num_editions = data['arg1'] return cls(meta=meta, version=version, action=action, num_editions=int(num_editions)) elif action == 'LOAN': # TODO Review. Workaround for piece loans try: edition_num = int(data['arg1']) except TypeError: edition_num = 0 loan_start = data['arg2'][:6] loan_end = data['arg2'][6:] return cls(meta=meta, version=version, action=action, edition_num=int(edition_num), loan_start=loan_start, loan_end=loan_end) elif action in ['FUEL', 'PIECE', 'CONSIGNEDREGISTRATION']: # no edition number for these verbs return cls(meta=meta, version=version, action=action) else: edition_num = data['arg1'] return cls(meta=meta, version=version, action=action, edition_num=int(edition_num))
[ "def", "from_verb", "(", "cls", ",", "verb", ")", ":", "pattern", "=", "r'^(?P<meta>[A-Z]+)(?P<version>\\d+)(?P<action>[A-Z]+)(?P<arg1>\\d+)?(\\/(?P<arg2>\\d+))?$'", "try", ":", "verb", "=", "verb", ".", "decode", "(", ")", "except", "AttributeError", ":", "pass", "match", "=", "re", ".", "match", "(", "pattern", ",", "verb", ")", "if", "not", "match", ":", "raise", "SpoolverbError", "(", "'Invalid spoolverb: {}'", ".", "format", "(", "verb", ")", ")", "data", "=", "match", ".", "groupdict", "(", ")", "meta", "=", "data", "[", "'meta'", "]", "version", "=", "data", "[", "'version'", "]", "action", "=", "data", "[", "'action'", "]", "if", "action", "==", "'EDITIONS'", ":", "num_editions", "=", "data", "[", "'arg1'", "]", "return", "cls", "(", "meta", "=", "meta", ",", "version", "=", "version", ",", "action", "=", "action", ",", "num_editions", "=", "int", "(", "num_editions", ")", ")", "elif", "action", "==", "'LOAN'", ":", "# TODO Review. Workaround for piece loans", "try", ":", "edition_num", "=", "int", "(", "data", "[", "'arg1'", "]", ")", "except", "TypeError", ":", "edition_num", "=", "0", "loan_start", "=", "data", "[", "'arg2'", "]", "[", ":", "6", "]", "loan_end", "=", "data", "[", "'arg2'", "]", "[", "6", ":", "]", "return", "cls", "(", "meta", "=", "meta", ",", "version", "=", "version", ",", "action", "=", "action", ",", "edition_num", "=", "int", "(", "edition_num", ")", ",", "loan_start", "=", "loan_start", ",", "loan_end", "=", "loan_end", ")", "elif", "action", "in", "[", "'FUEL'", ",", "'PIECE'", ",", "'CONSIGNEDREGISTRATION'", "]", ":", "# no edition number for these verbs", "return", "cls", "(", "meta", "=", "meta", ",", "version", "=", "version", ",", "action", "=", "action", ")", "else", ":", "edition_num", "=", "data", "[", "'arg1'", "]", "return", "cls", "(", "meta", "=", "meta", ",", "version", "=", "version", ",", "action", "=", "action", ",", "edition_num", "=", "int", "(", "edition_num", ")", ")" ]
Constructs a :class:`Spoolverb` instance from the string representation of the given verb. Args: verb (str): representation of the verb e.g.: ``'ASCRIBESPOOL01LOAN12/150526150528'``. Can also be in binary format (:obj:`bytes`): ``b'ASCRIBESPOOL01PIECE'``. Returns: :class:`Spoolverb` instance.
[ "Constructs", "a", ":", "class", ":", "Spoolverb", "instance", "from", "the", "string", "representation", "of", "the", "given", "verb", "." ]
python
train
python-xlib/python-xlib
Xlib/protocol/rq.py
https://github.com/python-xlib/python-xlib/blob/8901e831737e79fe5645f48089d70e1d1046d2f2/Xlib/protocol/rq.py#L1092-L1133
def parse_value(self, val, display, rawdict = 0): """This function is used by List and Object fields to convert Struct objects with no var_fields into Python values. """ ret = {} vno = 0 for f in self.static_fields: # Fields without names should be ignored, and there should # not be any length or format fields if this function # ever gets called. (If there were such fields, there should # be a matching field in var_fields and then parse_binary # would have been called instead. if not f.name: pass elif isinstance(f, LengthField): pass elif isinstance(f, FormatField): pass # Value fields else: # If this field has a parse_value method, call it, otherwise # use the unpacked value as is. if f.structvalues == 1: field_val = val[vno] else: field_val = val[vno:vno+f.structvalues] if f.parse_value is not None: field_val = f.parse_value(field_val, display, rawdict=rawdict) ret[f.name] = field_val vno = vno + f.structvalues if not rawdict: return DictWrapper(ret) return ret
[ "def", "parse_value", "(", "self", ",", "val", ",", "display", ",", "rawdict", "=", "0", ")", ":", "ret", "=", "{", "}", "vno", "=", "0", "for", "f", "in", "self", ".", "static_fields", ":", "# Fields without names should be ignored, and there should", "# not be any length or format fields if this function", "# ever gets called. (If there were such fields, there should", "# be a matching field in var_fields and then parse_binary", "# would have been called instead.", "if", "not", "f", ".", "name", ":", "pass", "elif", "isinstance", "(", "f", ",", "LengthField", ")", ":", "pass", "elif", "isinstance", "(", "f", ",", "FormatField", ")", ":", "pass", "# Value fields", "else", ":", "# If this field has a parse_value method, call it, otherwise", "# use the unpacked value as is.", "if", "f", ".", "structvalues", "==", "1", ":", "field_val", "=", "val", "[", "vno", "]", "else", ":", "field_val", "=", "val", "[", "vno", ":", "vno", "+", "f", ".", "structvalues", "]", "if", "f", ".", "parse_value", "is", "not", "None", ":", "field_val", "=", "f", ".", "parse_value", "(", "field_val", ",", "display", ",", "rawdict", "=", "rawdict", ")", "ret", "[", "f", ".", "name", "]", "=", "field_val", "vno", "=", "vno", "+", "f", ".", "structvalues", "if", "not", "rawdict", ":", "return", "DictWrapper", "(", "ret", ")", "return", "ret" ]
This function is used by List and Object fields to convert Struct objects with no var_fields into Python values.
[ "This", "function", "is", "used", "by", "List", "and", "Object", "fields", "to", "convert", "Struct", "objects", "with", "no", "var_fields", "into", "Python", "values", "." ]
python
train