The dataset has the following columns:

repo              string  (lengths 7 to 54)
path              string  (lengths 4 to 192)
url               string  (lengths 87 to 284)
code              string  (lengths 78 to 104k)
code_tokens       sequence
docstring         string  (lengths 1 to 46.9k)
docstring_tokens  sequence
language          string  (1 class)
partition         string  (3 classes)
DistrictDataLabs/yellowbrick
yellowbrick/classifier/rocauc.py
https://github.com/DistrictDataLabs/yellowbrick/blob/59b67236a3862c73363e8edad7cd86da5b69e3b2/yellowbrick/classifier/rocauc.py#L296-L316
def finalize(self, **kwargs):
    """
    Finalize executes any subclass-specific axes finalization steps.
    The user calls poof and poof calls finalize.

    Parameters
    ----------
    kwargs: generic keyword arguments.
    """
    # Set the title and add the legend
    self.set_title('ROC Curves for {}'.format(self.name))
    self.ax.legend(loc='lower right', frameon=True)

    # Set the limits for the ROC/AUC (always between 0 and 1)
    self.ax.set_xlim([0.0, 1.0])
    self.ax.set_ylim([0.0, 1.0])

    # Set x and y axis labels
    self.ax.set_ylabel('True Positive Rate')
    self.ax.set_xlabel('False Positive Rate')
Finalize executes any subclass-specific axes finalization steps.
The user calls poof and poof calls finalize.

Parameters
----------
kwargs: generic keyword arguments.
python
train
maxalbert/tohu
tohu/v2/custom_generator.py
https://github.com/maxalbert/tohu/blob/43380162fadec99cdd5c5c3152dd6b7d3a9d39a8/tohu/v2/custom_generator.py#L195-L225
def add_new_reset_method(obj):
    """
    Attach a new `reset()` method to `obj` which resets the internal
    seed generator of `obj` and then resets each of its constituent
    field generators found in `obj.field_gens`.
    """
    #
    # Create and assign automatically generated reset() method
    #
    def new_reset(self, seed=None):
        logger.debug(f'[EEE] Inside automatically generated reset() method for {self} (seed={seed})')

        if seed is not None:
            self.seed_generator.reset(seed)
            for name, gen in self.field_gens.items():
                next_seed = next(self.seed_generator)
                gen.reset(next_seed)

            # TODO: the following should be covered by the newly added
            # reset() method in IndependentGeneratorMeta. However, for
            # some reason we can't call this via the usual `orig_reset()`
            # pattern, so we have to duplicate this here. Not ideal...
            for c in self._dependent_generators:
                c.reset_dependent_generator(seed)

        return self

    obj.reset = new_reset
Attach a new `reset()` method to `obj` which resets the internal seed generator of `obj` and then resets each of its constituent field generators found in `obj.field_gens`.
python
train
bjodah/pycompilation
pycompilation/util.py
https://github.com/bjodah/pycompilation/blob/43eac8d82f8258d30d4df77fd2ad3f3e4f4dca18/pycompilation/util.py#L336-L353
def pyx_is_cplus(path):
    """
    Inspect a Cython source file (.pyx) and look for a comment line like:

    # distutils: language = c++

    Returns True if such a line is present in the file, else False.
    """
    for line in open(path, 'rt'):
        if line.startswith('#') and '=' in line:
            splitted = line.split('=')
            if len(splitted) != 2:
                continue
            lhs, rhs = splitted
            if lhs.strip().split()[-1].lower() == 'language' and \
               rhs.strip().split()[0].lower() == 'c++':
                return True
    return False
Inspect a Cython source file (.pyx) and look for a comment line like:

# distutils: language = c++

Returns True if such a line is present in the file, else False.
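A quick usage sketch of the function above (the temporary file and its contents are illustrative; the import path follows this record's module path):

import os
import tempfile

from pycompilation.util import pyx_is_cplus

# Write a throwaway .pyx file containing the distutils directive
with tempfile.NamedTemporaryFile('w', suffix='.pyx', delete=False) as f:
    f.write("# distutils: language = c++\n")
    path = f.name

print(pyx_is_cplus(path))  # True: the directive comment was found
os.remove(path)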
python
train
archman/beamline
beamline/element.py
https://github.com/archman/beamline/blob/417bc5dc13e754bc89d246427984590fced64d07/beamline/element.py#L1066-L1162
def setDraw(self, p0=(0, 0), angle=0, mode='plain'):
    """ set element visualization drawing

    :param p0: start drawing position, (x,y)
    :param angle: angle [deg] between x-axis,
        angle is rotating from x-axis to be '+' or '-',
        '+': clockwise, '-': anticlockwise
    :param mode: artist mode, 'plain' or 'fancy', 'plain' by default
    """
    sconf = self.getConfig(type='simu')
    if 'l' in sconf:
        self._style['length'] = float(sconf['l'])
    else:
        self._style['length'] = 0
    self._style['angle'] = angle
    _theta = angle / 180.0 * np.pi  # deg to rad
    _length = self._style['length']
    _lw = self._style['lw']
    _fancyc = self._style['color']
    _alpha = self._style['alpha']
    _plainc = MagBlock._MagBlock__styleconfig_dict['drift']['color']
    #
    #     |
    # --p0--p1--
    #     |
    #
    if mode == 'plain':
        x0, y0 = p0
        x1, y1 = x0 + _length, y0 + _length * np.tan(_theta)
        pc = x0 + 0.5 * _length, (y0 + y1) * 0.5
        vs = [(x0, y0), (x1, y1)]
        cs = [Path.MOVETO, Path.LINETO]
        pth = Path(vs, cs)
        ptch = patches.PathPatch(pth, lw=_lw, fc=_plainc, ec=_plainc, alpha=_alpha)
        self._patches = []
        self._patches.append(ptch)
        self.next_p0 = x1, y1
        self.next_inc_angle = 0
    else:  # fancy mode, same as plain, could be more fancy (Apr.08, 2016)
        x0, y0 = p0
        # x1, y1 = x0 + _length, y0 + _length * np.tan(_theta)
        # pc = x0 + 0.5*_length, (y0 + y1)*0.5
        x1, y1 = x0, y0 + 0.5 * _length
        x2, y2 = x1 + 1. / 3.0 * _length, y1
        x3, y3 = x2 + 1. / 3.0 * _length, y1
        x4, y4 = x3 + 1. / 3.0 * _length, y1
        x5, y5 = x4, y0
        x6, y6 = x5, y5 - 0.5 * _length
        x7, y7 = x3, y6
        x8, y8 = x3, y7 + 1. / 3.0 * _length
        x9, y9 = x2, y8
        x10, y10 = x9, y7
        x11, y11 = x0, y7
        pc = (x0 + x5) * 0.5, (y0 + y5) * 0.5
        verts1 = [
            (x0, y0), (x1, y1), (x2, y2), pc, (x3, y3), (x4, y4), (x5, y5),
            (x6, y6), (x7, y7), (x8, y8), (x9, y9), (x10, y10), (x11, y11),
            (x0, y0),
        ]
        codes1 = [
            Path.MOVETO,
            Path.LINETO, Path.LINETO, Path.LINETO, Path.LINETO, Path.LINETO,
            Path.LINETO, Path.LINETO, Path.LINETO, Path.LINETO, Path.LINETO,
            Path.LINETO, Path.LINETO,
            Path.CLOSEPOLY,
        ]
        pth = Path(verts1, codes1)
        ptch = patches.PathPatch(pth, lw=_lw, fc=_fancyc, ec=_fancyc, alpha=_alpha)
        self._patches = []
        self._patches.append(ptch)
        # self.next_p0 = x1, y1
        self.next_p0 = x5, y5
        self.next_inc_angle = 0
    self._anote = {'xypos': pc, 'textpos': pc,
                   'name': self.name.upper(), 'type': self.typename}
set element visualization drawing

:param p0: start drawing position, (x,y)
:param angle: angle [deg] between x-axis,
    angle is rotating from x-axis to be '+' or '-',
    '+': clockwise, '-': anticlockwise
:param mode: artist mode, 'plain' or 'fancy', 'plain' by default
python
train
glue-viz/glue-vispy-viewers
glue_vispy_viewers/extern/vispy/visuals/collections/util.py
https://github.com/glue-viz/glue-vispy-viewers/blob/54a4351d98c1f90dfb1a557d1b447c1f57470eea/glue_vispy_viewers/extern/vispy/visuals/collections/util.py#L13-L72
def dtype_reduce(dtype, level=0, depth=0):
    """
    Try to reduce dtype up to a given level when it is possible

    dtype = [ ('vertex', [('x', 'f4'), ('y', 'f4'), ('z', 'f4')]),
              ('normal', [('x', 'f4'), ('y', 'f4'), ('z', 'f4')]),
              ('color',  [('r', 'f4'), ('g', 'f4'), ('b', 'f4'), ('a', 'f4')])]

    level 0: ['color,vertex,normal,', 10, 'float32']
    level 1: [['color', 4, 'float32']
              ['normal', 3, 'float32']
              ['vertex', 3, 'float32']]
    """
    dtype = np.dtype(dtype)
    fields = dtype.fields

    # No fields
    if fields is None:
        if len(dtype.shape):
            count = reduce(mul, dtype.shape)
        else:
            count = 1
        # size = dtype.itemsize / count
        if dtype.subdtype:
            name = str(dtype.subdtype[0])
        else:
            name = str(dtype)
        return ['', count, name]
    else:
        items = []
        name = ''
        # Get reduced fields
        for key, value in fields.items():
            l = dtype_reduce(value[0], level, depth + 1)
            if type(l[0]) is str:
                items.append([key, l[1], l[2]])
            else:
                items.append(l)
            name += key + ','

        # Check if we can reduce item list
        ctype = None
        count = 0
        for i, item in enumerate(items):
            # One item is a list, we cannot reduce
            if type(item[0]) is not str:
                return items
            else:
                if i == 0:
                    ctype = item[2]
                    count += item[1]
                else:
                    if item[2] != ctype:
                        return items
                    count += item[1]

        if depth >= level:
            return [name, count, ctype]
        else:
            return items
Try to reduce dtype up to a given level when it is possible

dtype = [ ('vertex', [('x', 'f4'), ('y', 'f4'), ('z', 'f4')]),
          ('normal', [('x', 'f4'), ('y', 'f4'), ('z', 'f4')]),
          ('color',  [('r', 'f4'), ('g', 'f4'), ('b', 'f4'), ('a', 'f4')])]

level 0: ['color,vertex,normal,', 10, 'float32']
level 1: [['color', 4, 'float32']
          ['normal', 3, 'float32']
          ['vertex', 3, 'float32']]
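A runnable sketch of the two reduction levels from the docstring (assuming dtype_reduce from the module above is in scope; the field order inside the reduced name follows the dtype's own field ordering):

import numpy as np

dtype = np.dtype([('vertex', [('x', 'f4'), ('y', 'f4'), ('z', 'f4')]),
                  ('normal', [('x', 'f4'), ('y', 'f4'), ('z', 'f4')]),
                  ('color',  [('r', 'f4'), ('g', 'f4'), ('b', 'f4'), ('a', 'f4')])])

# Fully reduced: one homogeneous float32 block of 3 + 3 + 4 = 10 components
print(dtype_reduce(dtype))           # e.g. ['vertex,normal,color,', 10, 'float32']
# Stop one level down: each top-level field is reduced separately
print(dtype_reduce(dtype, level=1))  # e.g. [['vertex', 3, 'float32'], ...]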
python
train
ethereum/eth-account
eth_account/account.py
https://github.com/ethereum/eth-account/blob/335199b815ae34fea87f1523e2f29777fd52946e/eth_account/account.py#L137-L196
def encrypt(cls, private_key, password, kdf=None, iterations=None):
    '''
    Creates a dictionary with an encrypted version of your private key.
    To import this keyfile into Ethereum clients like geth and parity:
    encode this dictionary with :func:`json.dumps` and save it to disk where your
    client keeps key files.

    :param private_key: The raw private key
    :type private_key: hex str, bytes, int or :class:`eth_keys.datatypes.PrivateKey`
    :param str password: The password which you will need to unlock the account in your client
    :param str kdf: The key derivation function to use when encrypting your private key
    :param int iterations: The work factor for the key derivation function
    :returns: The data to use in your encrypted file
    :rtype: dict

    .. code-block:: python

        >>> import getpass
        >>> encrypted = Account.encrypt(
            0xb25c7db31feed9122727bf0939dc769a96564b2de4c4726d035b36ecf1e5b364,
            getpass.getpass()
        )

        {
            'address': '5ce9454909639d2d17a3f753ce7d93fa0b9ab12e',
            'crypto': {
                'cipher': 'aes-128-ctr',
                'cipherparams': {
                    'iv': '0b7845a5c3597d3d378bde9b7c7319b7'
                },
                'ciphertext': 'a494f1feb3c854e99c1ff01e6aaa17d43c0752009073503b908457dc8de5d2a5',  # noqa: E501
                'kdf': 'scrypt',
                'kdfparams': {
                    'dklen': 32,
                    'n': 262144,
                    'p': 8,
                    'r': 1,
                    'salt': '13c4a48123affaa29189e9097726c698'
                },
                'mac': 'f4cfb027eb0af9bd7a320b4374a3fa7bef02cfbafe0ec5d1fd7ad129401de0b1'
            },
            'id': 'a60e0578-0e5b-4a75-b991-d55ec6451a6f',
            'version': 3
        }

        >>> with open('my-keyfile', 'w') as f:
            f.write(json.dumps(encrypted))
    '''
    if isinstance(private_key, keys.PrivateKey):
        key_bytes = private_key.to_bytes()
    else:
        key_bytes = HexBytes(private_key)

    if kdf is None:
        kdf = cls.default_kdf

    password_bytes = text_if_str(to_bytes, password)
    assert len(key_bytes) == 32

    return create_keyfile_json(key_bytes, password_bytes, kdf=kdf, iterations=iterations)
Creates a dictionary with an encrypted version of your private key.
To import this keyfile into Ethereum clients like geth and parity:
encode this dictionary with :func:`json.dumps` and save it to disk where your
client keeps key files.

:param private_key: The raw private key
:type private_key: hex str, bytes, int or :class:`eth_keys.datatypes.PrivateKey`
:param str password: The password which you will need to unlock the account in your client
:param str kdf: The key derivation function to use when encrypting your private key
:param int iterations: The work factor for the key derivation function
:returns: The data to use in your encrypted file
:rtype: dict

.. code-block:: python

    >>> import getpass
    >>> encrypted = Account.encrypt(
        0xb25c7db31feed9122727bf0939dc769a96564b2de4c4726d035b36ecf1e5b364,
        getpass.getpass()
    )

    {
        'address': '5ce9454909639d2d17a3f753ce7d93fa0b9ab12e',
        'crypto': {
            'cipher': 'aes-128-ctr',
            'cipherparams': {
                'iv': '0b7845a5c3597d3d378bde9b7c7319b7'
            },
            'ciphertext': 'a494f1feb3c854e99c1ff01e6aaa17d43c0752009073503b908457dc8de5d2a5',  # noqa: E501
            'kdf': 'scrypt',
            'kdfparams': {
                'dklen': 32,
                'n': 262144,
                'p': 8,
                'r': 1,
                'salt': '13c4a48123affaa29189e9097726c698'
            },
            'mac': 'f4cfb027eb0af9bd7a320b4374a3fa7bef02cfbafe0ec5d1fd7ad129401de0b1'
        },
        'id': 'a60e0578-0e5b-4a75-b991-d55ec6451a6f',
        'version': 3
    }

    >>> with open('my-keyfile', 'w') as f:
        f.write(json.dumps(encrypted))
python
train
theycallmeswift/BreakfastSerial
BreakfastSerial/util.py
https://github.com/theycallmeswift/BreakfastSerial/blob/cb6072f8c200838fc580afe1a04b80bae9509ce8/BreakfastSerial/util.py#L40-L55
def debounce(wait):
    """ Decorator that will postpone a function's execution until after
    `wait` seconds have elapsed since the last time it was invoked. """
    def decorator(fn):
        def debounced(*args, **kwargs):
            def call_it():
                fn(*args, **kwargs)
            try:
                debounced.t.cancel()
            except AttributeError:
                pass
            debounced.t = threading.Timer(wait, call_it)
            debounced.t.start()
        return debounced
    return decorator
Decorator that will postpone a function's execution until after wait seconds have elapsed since the last time it was invoked.
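A small usage sketch, assuming the decorator above is in scope: rapid calls collapse into a single deferred execution.

import time

@debounce(0.5)
def save():
    print("saved")

for _ in range(5):
    save()          # each call cancels the timer armed by the previous one
    time.sleep(0.1)
time.sleep(1.0)     # "saved" prints once, ~0.5 s after the last call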
python
train
scanny/python-pptx
pptx/table.py
https://github.com/scanny/python-pptx/blob/d6ab8234f8b03953d2f831ff9394b1852db34130/pptx/table.py#L260-L289
def merge(self, other_cell):
    """Create merged cell from this cell to *other_cell*.

    This cell and *other_cell* specify opposite corners of the merged
    cell range. Either diagonal of the cell region may be specified in
    either order, e.g. self=bottom-right, other_cell=top-left, etc.

    Raises |ValueError| if the specified range already contains merged
    cells anywhere within its extents or if *other_cell* is not in the
    same table as *self*.
    """
    tc_range = TcRange(self._tc, other_cell._tc)

    if not tc_range.in_same_table:
        raise ValueError('other_cell from different table')
    if tc_range.contains_merged_cell:
        raise ValueError('range contains one or more merged cells')

    tc_range.move_content_to_origin()

    row_count, col_count = tc_range.dimensions

    for tc in tc_range.iter_top_row_tcs():
        tc.rowSpan = row_count
    for tc in tc_range.iter_left_col_tcs():
        tc.gridSpan = col_count
    for tc in tc_range.iter_except_left_col_tcs():
        tc.hMerge = True
    for tc in tc_range.iter_except_top_row_tcs():
        tc.vMerge = True
Create merged cell from this cell to *other_cell*.

This cell and *other_cell* specify opposite corners of the merged cell
range. Either diagonal of the cell region may be specified in either
order, e.g. self=bottom-right, other_cell=top-left, etc.

Raises |ValueError| if the specified range already contains merged cells
anywhere within its extents or if *other_cell* is not in the same table
as *self*.
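For context, here is a minimal end-to-end use of this method through the public python-pptx API (the output file name is arbitrary):

from pptx import Presentation
from pptx.util import Inches

prs = Presentation()
slide = prs.slides.add_slide(prs.slide_layouts[6])  # blank layout
frame = slide.shapes.add_table(3, 3, Inches(1), Inches(1), Inches(6), Inches(2))
table = frame.table

# Merge the 2x2 block whose opposite corners are (0, 0) and (1, 1)
table.cell(0, 0).merge(table.cell(1, 1))
prs.save('merged.pptx')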
python
train
dev-pipeline/dev-pipeline-configure
lib/devpipeline_configure/load.py
https://github.com/dev-pipeline/dev-pipeline-configure/blob/26de2dd1b39d9a77d5417244f09319e7dce47495/lib/devpipeline_configure/load.py#L117-L142
def update_cache(force=False, cache_file=None):
    """
    Load a build cache, updating it if necessary.

    A cache is considered outdated if any of its inputs have changed.

    Arguments
    force -- Consider a cache outdated regardless of whether its inputs have
             been modified.
    """
    if not cache_file:
        cache_file = find_config()
    cache_config = devpipeline_configure.parser.read_config(cache_file)
    cache = devpipeline_configure.cache._CachedConfig(cache_config, cache_file)
    if force or _is_outdated(cache_file, cache):
        cache = devpipeline_configure.config.process_config(
            cache_config.get("DEFAULT", "dp.build_config"),
            os.path.dirname(cache_file),
            "build.cache",
            profiles=cache_config.get("DEFAULT", "dp.profile_name", fallback=None),
            overrides=cache_config.get("DEFAULT", "dp.overrides", fallback=None),
        )
        devpipeline_core.sanitizer.sanitize(
            cache, lambda n, m: print("{} [{}]".format(m, n))
        )
    return cache
Load a build cache, updating it if necessary.

A cache is considered outdated if any of its inputs have changed.

Arguments
force -- Consider a cache outdated regardless of whether its inputs have
         been modified.
python
train
knorby/facterpy
facter/__init__.py
https://github.com/knorby/facterpy/blob/4799b020cc8c1bf69b2a828b90d6e20862771a33/facter/__init__.py#L104-L112
def has_cache(self):
    """Intended to be called before any call that might access the
    cache. If the cache is not enabled, then returns False; otherwise
    the cache is built if needed and returns True."""
    if not self.cache_enabled:
        return False
    if self._cache is None:
        self.build_cache()
    return True
Intended to be called before any call that might access the cache. If the cache is not enabled, then returns False; otherwise the cache is built if needed and returns True.
python
train
ihmeuw/vivarium
src/vivarium/framework/results_writer.py
https://github.com/ihmeuw/vivarium/blob/c5f5d50f775c8bf337d3aae1ff7c57c025a8e258/src/vivarium/framework/results_writer.py#L30-L48
def add_sub_directory(self, key, path):
    """Adds a sub-directory to the results directory.

    Parameters
    ----------
    key: str
        A look-up key for the directory path.
    path: str
        The relative path from the root of the results directory to the
        sub-directory.

    Returns
    -------
    str:
        The absolute path to the sub-directory.
    """
    sub_dir_path = os.path.join(self.results_root, path)
    os.makedirs(sub_dir_path, exist_ok=True)
    self._directories[key] = sub_dir_path
    return sub_dir_path
Adds a sub-directory to the results directory.

Parameters
----------
key: str
    A look-up key for the directory path.
path: str
    The relative path from the root of the results directory to the
    sub-directory.

Returns
-------
str:
    The absolute path to the sub-directory.
python
train
dereneaton/ipyrad
ipyrad/assemble/refmap.py
https://github.com/dereneaton/ipyrad/blob/5eeb8a178160f45faf71bf47cec4abe998a575d1/ipyrad/assemble/refmap.py#L819-L890
def check_insert_size(data, sample):
    """
    check mean insert size for this sample and update
    hackersonly.max_inner_mate_distance if need be. This value controls how
    far apart mate pairs can be to still be considered for bedtools merging
    downstream.
    """
    ## pipe stats output to grep
    cmd1 = [ipyrad.bins.samtools, "stats", sample.files.mapped_reads]
    cmd2 = ["grep", "SN"]
    proc1 = sps.Popen(cmd1, stderr=sps.STDOUT, stdout=sps.PIPE)
    proc2 = sps.Popen(cmd2, stderr=sps.STDOUT, stdout=sps.PIPE, stdin=proc1.stdout)

    ## get piped result
    res = proc2.communicate()[0]

    ## raise exception on failure and do cleanup
    if proc2.returncode:
        raise IPyradWarningExit("error in %s: %s", cmd2, res)

    ## starting vals
    avg_insert = 0
    stdv_insert = 0
    avg_len = 0

    ## iterate over results
    for line in res.split("\n"):
        if "insert size average" in line:
            avg_insert = float(line.split(":")[-1].strip())
        elif "insert size standard deviation" in line:
            ## hack to fix sim data when stdv is 0.0. Shouldn't
            ## impact real data bcz stdv gets rounded up below
            stdv_insert = float(line.split(":")[-1].strip()) + 0.1
        elif "average length" in line:
            avg_len = float(line.split(":")[-1].strip())

    LOGGER.debug("avg {} stdv {} avg_len {}"\
                 .format(avg_insert, stdv_insert, avg_len))

    ## If all values return successfully set the max inner mate distance.
    ## This is tricky. avg_insert is the average length of R1+R2+inner mate
    ## distance. avg_len is the average length of a read. If there are lots
    ## of reads that overlap then avg_insert will be close to but bigger than
    ## avg_len. We are looking for the right value for `bedtools merge -d`
    ## which wants to know the max distance between reads.
    if all([avg_insert, stdv_insert, avg_len]):
        ## If 2 * the average length of a read is less than the average
        ## insert size then most reads DO NOT overlap
        if stdv_insert < 5:
            stdv_insert = 5.
        if (2 * avg_len) < avg_insert:
            hack = avg_insert + (3 * np.math.ceil(stdv_insert)) - (2 * avg_len)
        ## If it is > than the average insert size then most reads DO
        ## overlap, so we have to calculate inner mate distance a little
        ## differently.
        else:
            hack = (avg_insert - avg_len) + (3 * np.math.ceil(stdv_insert))

        ## set the hackerdict value
        LOGGER.info("stdv: hacked insert size is %s", hack)
        data._hackersonly["max_inner_mate_distance"] = int(np.math.ceil(hack))
    else:
        ## If something fsck then set a relatively conservative distance
        data._hackersonly["max_inner_mate_distance"] = 300

    LOGGER.debug("inner mate distance for {} - {}".format(sample.name,\
                 data._hackersonly["max_inner_mate_distance"]))
check mean insert size for this sample and update hackersonly.max_inner_mate_distance if need be. This value controls how far apart mate pairs can be to still be considered for bedtools merging downstream.
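The distance heuristic itself reduces to a few lines of arithmetic; here is a standalone sketch with illustrative numbers (not ipyrad's API):

import math

def max_inner_mate_distance(avg_insert, stdv_insert, avg_len):
    """Sketch of the `bedtools merge -d` heuristic used above."""
    stdv_insert = max(stdv_insert, 5.0)
    if 2 * avg_len < avg_insert:
        # mates typically do not overlap: inner distance plus 3 stdv
        return math.ceil(avg_insert + 3 * math.ceil(stdv_insert) - 2 * avg_len)
    # mates typically overlap: measure from the end of a single read
    return math.ceil(avg_insert - avg_len + 3 * math.ceil(stdv_insert))

# avg_insert = R1 + R2 + inner distance; 150 bp reads with stdv 30
print(max_inner_mate_distance(450, 30, 150))  # -> 240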
python
valid
cdgriffith/Box
box.py
https://github.com/cdgriffith/Box/blob/5f09df824022127e7e335e3d993f7ddc1ed97fce/box.py#L1029-L1042
def int(self, item, default=None):
    """
    Return value of key as an int

    :param item: key of value to transform
    :param default: value to return if item does not exist
    :return: int of value
    """
    try:
        item = self.__getattr__(item)
    except AttributeError as err:
        if default is not None:
            return default
        raise err
    return int(item)
Return value of key as an int

:param item: key of value to transform
:param default: value to return if item does not exist
:return: int of value
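Typical use, as a sketch (assuming a Box release that ships these typed accessors):

from box import Box

b = Box({'port': '8080'})
print(b.int('port'))         # 8080 -- the string value is coerced to int
print(b.int('missing', 42))  # 42 -- the default is returned for an absent key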
python
train
openergy/oplus
oplus/epm/queryset.py
https://github.com/openergy/oplus/blob/f095868d1990c1d126e906ada6acbab26348b3d3/oplus/epm/queryset.py#L94-L108
def select(self, filter_by=None):
    """
    Parameters
    ----------
    filter_by: callable, default None
        Callable must take one argument (a record of queryset), and return
        True to keep record, or False to skip it.
        Example : .select(lambda x: x.name == "my_name").
        If None, records are not filtered.

    Returns
    -------
    Queryset instance, containing all selected records.
    """
    iterator = self._records if filter_by is None else filter(filter_by, self._records)
    return Queryset(self._table, iterator)
Parameters
----------
filter_by: callable, default None
    Callable must take one argument (a record of queryset), and return True
    to keep record, or False to skip it.
    Example : .select(lambda x: x.name == "my_name").
    If None, records are not filtered.

Returns
-------
Queryset instance, containing all selected records.
python
test
saltstack/salt
salt/roster/terraform.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/roster/terraform.py#L99-L116
def _add_ssh_key(ret):
    '''
    Sets up the salt-ssh minion to be accessed with the salt-ssh default key
    '''
    priv = None
    if __opts__.get('ssh_use_home_key') and os.path.isfile(os.path.expanduser('~/.ssh/id_rsa')):
        priv = os.path.expanduser('~/.ssh/id_rsa')
    else:
        priv = __opts__.get(
            'ssh_priv',
            os.path.abspath(os.path.join(
                __opts__['pki_dir'],
                'ssh',
                'salt-ssh.rsa'
            ))
        )
    if priv and os.path.isfile(priv):
        ret['priv'] = priv
Sets up the salt-ssh minion to be accessed with the salt-ssh default key
python
train
honeynet/beeswarm
beeswarm/drones/drone.py
https://github.com/honeynet/beeswarm/blob/db51ea0bc29f631c3e3b5312b479ac9d5e31079a/beeswarm/drones/drone.py#L85-L104
def start(self):
    """ Starts services. """
    cert_path = os.path.join(self.work_dir, 'certificates')
    public_keys_dir = os.path.join(cert_path, 'public_keys')
    private_keys_dir = os.path.join(cert_path, 'private_keys')

    client_secret_file = os.path.join(private_keys_dir, "client.key")
    client_public, client_secret = zmq.auth.load_certificate(client_secret_file)

    server_public_file = os.path.join(public_keys_dir, "server.key")
    server_public, _ = zmq.auth.load_certificate(server_public_file)

    self.outgoing_msg_greenlet = gevent.spawn(self.outgoing_server_comms,
                                              server_public,
                                              client_public,
                                              client_secret)
    self.outgoing_msg_greenlet.link_exception(self.on_exception)

    self.incoming_msg_greenlet = gevent.spawn(self.incoming_server_comms,
                                              server_public,
                                              client_public,
                                              client_secret)
    self.incoming_msg_greenlet.link_exception(self.on_exception)

    logger.info('Waiting for detailed configuration from Beeswarm server.')
    gevent.joinall([self.outgoing_msg_greenlet])
Starts services.
python
train
saltstack/salt
salt/minion.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/minion.py#L773-L798
def _return_retry_timer(self):
    '''
    Based on the minion configuration, either return a randomized timer or
    just return the value of the return_retry_timer.
    '''
    msg = 'Minion return retry timer set to %s seconds'
    if self.opts.get('return_retry_timer_max'):
        try:
            random_retry = randint(self.opts['return_retry_timer'],
                                   self.opts['return_retry_timer_max'])
            retry_msg = msg % random_retry
            log.debug('%s (randomized)', msg % random_retry)
            return random_retry
        except ValueError:
            # Catch wiseguys using negative integers here
            log.error(
                'Invalid value (return_retry_timer: %s or '
                'return_retry_timer_max: %s). Both must be positive '
                'integers.',
                self.opts['return_retry_timer'],
                self.opts['return_retry_timer_max'],
            )
            log.debug(msg, DEFAULT_MINION_OPTS['return_retry_timer'])
            return DEFAULT_MINION_OPTS['return_retry_timer']
    else:
        log.debug(msg, self.opts.get('return_retry_timer'))
        return self.opts.get('return_retry_timer')
Based on the minion configuration, either return a randomized timer or just return the value of the return_retry_timer.
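The randomization logic, extracted into a standalone sketch (a plain dict stands in for the minion's opts; this is not Salt's API):

from random import randint

def return_retry_timer(opts, default=5):
    if opts.get('return_retry_timer_max'):
        try:
            # randint raises ValueError when the range is inverted
            return randint(opts['return_retry_timer'],
                           opts['return_retry_timer_max'])
        except ValueError:
            return default
    return opts.get('return_retry_timer', default)

print(return_retry_timer({'return_retry_timer': 5,
                          'return_retry_timer_max': 10}))  # 5..10 inclusive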
python
train
SeleniumHQ/selenium
py/selenium/webdriver/remote/webelement.py
https://github.com/SeleniumHQ/selenium/blob/df40c28b41d4b3953f90eaff84838a9ac052b84a/py/selenium/webdriver/remote/webelement.py#L545-L554
def size(self):
    """The size of the element."""
    size = {}
    if self._w3c:
        size = self._execute(Command.GET_ELEMENT_RECT)['value']
    else:
        size = self._execute(Command.GET_ELEMENT_SIZE)['value']
    new_size = {"height": size["height"],
                "width": size["width"]}
    return new_size
The size of the element.
python
train
mitsei/dlkit
dlkit/records/assessment/clix/assessment_offered_records.py
https://github.com/mitsei/dlkit/blob/445f968a175d61c8d92c0f617a3c17dc1dc7c584/dlkit/records/assessment/clix/assessment_offered_records.py#L97-L103
def clear_n_of_m(self):
    """stub"""
    if (self.get_n_of_m_metadata().is_read_only() or
            self.get_n_of_m_metadata().is_required()):
        raise NoAccess()
    self.my_osid_object_form._my_map['nOfM'] = \
        int(self._n_of_m_metadata['default_object_values'][0])
stub
python
train
xtuml/pyxtuml
xtuml/meta.py
https://github.com/xtuml/pyxtuml/blob/7dd9343b9a0191d1db1887ab9288d0a026608d9a/xtuml/meta.py#L1210-L1243
def define_association(self, rel_id, source_kind, source_keys, source_many,
                       source_conditional, source_phrase, target_kind,
                       target_keys, target_many, target_conditional,
                       target_phrase):
    '''
    Define and return an association from one kind of class (the source
    kind) to some other kind of class (the target kind).
    '''
    if isinstance(rel_id, int):
        rel_id = 'R%d' % rel_id

    source_metaclass = self.find_metaclass(source_kind)
    target_metaclass = self.find_metaclass(target_kind)

    source_link = target_metaclass.add_link(source_metaclass, rel_id,
                                            many=source_many,
                                            phrase=target_phrase,
                                            conditional=source_conditional)

    target_link = source_metaclass.add_link(target_metaclass, rel_id,
                                            many=target_many,
                                            phrase=source_phrase,
                                            conditional=target_conditional)

    ass = Association(rel_id,
                      source_keys, source_link,
                      target_keys, target_link)

    source_link.key_map = dict(zip(source_keys, target_keys))
    target_link.key_map = dict(zip(target_keys, source_keys))

    self.associations.append(ass)

    return ass
Define and return an association from one kind of class (the source kind) to some other kind of class (the target kind).
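A hypothetical call spelling out the positional parameters (the kinds 'A' and 'B', the key names, and the phrases are made up for illustration):

# one unconditional A is associated with many optional B across R1
metamodel.define_association(
    1,                   # rel_id, stored as 'R1'
    'A', ['Id'],         # source kind and its key attributes
    False, False, '',    # source side: many, conditional, phrase
    'B', ['A_Id'],       # target kind and its key attributes
    True, True, 'owns')  # target side: many, conditional, phrase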
python
test
sashs/filebytes
filebytes/pe.py
https://github.com/sashs/filebytes/blob/41ee009832aba19603f33d1fd3483b84d6684ebf/filebytes/pe.py#L689-L699
def __parseThunkData(self, thunk, importSection):
    """Parses the data of a thunk and sets the data"""
    offset = to_offset(thunk.header.AddressOfData, importSection)
    if 0xf0000000 & thunk.header.AddressOfData == 0x80000000:
        thunk.ordinal = thunk.header.AddressOfData & 0x0fffffff
    else:
        ibn = IMAGE_IMPORT_BY_NAME.from_buffer(importSection.raw, offset)
        checkOffset(offset + 2, importSection)
        name = get_str(importSection.raw, offset + 2)
        thunk.importByName = ImportByNameData(header=ibn, hint=ibn.Hint, name=name)
Parses the data of a thunk and sets the data
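The branch condition encodes the PE convention that a set high bit marks an import by ordinal; the same masks in a standalone illustration:

# illustrative AddressOfData values from 32-bit thunks
for address_of_data in (0x80000010, 0x00003052):
    if 0xf0000000 & address_of_data == 0x80000000:
        # ordinal flag set: the low bits carry the ordinal number
        print('by ordinal:', address_of_data & 0x0fffffff)
    else:
        # otherwise the value is an RVA to IMAGE_IMPORT_BY_NAME (hint + name)
        print('by name at RVA', hex(address_of_data))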
python
train
cisco-sas/kitty
kitty/model/low_level/aliases.py
https://github.com/cisco-sas/kitty/blob/cb0760989dcdfe079e43ac574d872d0b18953a32/kitty/model/low_level/aliases.py#L214-L219
def Md5(depends_on, encoder=ENC_STR_DEFAULT, fuzzable=False, name=None):
    '''
    :rtype: :class:`~kitty.model.low_level.calculated.Hash`
    :return: MD5 hash field
    '''
    return Hash(depends_on=depends_on, algorithm='md5', encoder=encoder,
                fuzzable=fuzzable, name=name)
:rtype: :class:`~kitty.model.low_level.calculated.Hash`
:return: MD5 hash field
python
train
google/grr
grr/server/grr_response_server/notification.py
https://github.com/google/grr/blob/5cef4e8e2f0d5df43ea4877e9c798e0bf60bfe74/grr/server/grr_response_server/notification.py#L182-L196
def _NotifyLegacy(username, notification_type, message, object_reference):
    """Schedules a legacy AFF4 user notification."""
    try:
        with aff4.FACTORY.Open(
                aff4.ROOT_URN.Add("users").Add(username),
                aff4_type=aff4_users.GRRUser,
                mode="rw") as fd:
            args = _MapLegacyArgs(notification_type, message, object_reference)
            args[0] += ":%s" % notification_type
            fd.Notify(*args)
    except aff4.InstantiationError:
        logging.error("Trying to notify non-existent user: %s", username)
Schedules a legacy AFF4 user notification.
python
train
fjwCode/cerium
cerium/androiddriver.py
https://github.com/fjwCode/cerium/blob/f6e06e0dcf83a0bc924828e9d6cb81383ed2364f/cerium/androiddriver.py#L249-L253
def push(self, local: _PATH = 'LICENSE', remote: _PATH = '/sdcard/LICENSE') -> None:
    '''Copy local files/directories to device.'''
    if not os.path.exists(local):
        raise FileNotFoundError(f'Local {local!r} does not exist.')
    self._execute('-s', self.device_sn, 'push', local, remote)
Copy local files/directories to device.
[ "Copy", "local", "files", "/", "directories", "to", "device", "." ]
python
train
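A short usage sketch for push; this assumes cerium exposes AndroidDriver at package level (as its README shows) and that a device is attached over adb. File names are illustrative:

from cerium import AndroidDriver

driver = AndroidDriver()
# Copies the local file onto the attached device's sdcard.
driver.push(local='LICENSE', remote='/sdcard/LICENSE')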
thebjorn/pydeps
pydeps/tools/pydeps2requirements.py
https://github.com/thebjorn/pydeps/blob/1e6715b7bea47a40e8042821b57937deaaa0fdc3/pydeps/tools/pydeps2requirements.py#L58-L66
def main(): """Cli entrypoint. """ if len(sys.argv) == 2: fname = sys.argv[1] data = json.load(open(fname, 'rb')) else: data = json.loads(sys.stdin.read()) print(pydeps2reqs(data))
[ "def", "main", "(", ")", ":", "if", "len", "(", "sys", ".", "argv", ")", "==", "2", ":", "fname", "=", "sys", ".", "argv", "[", "1", "]", "data", "=", "json", ".", "load", "(", "open", "(", "fname", ",", "'rb'", ")", ")", "else", ":", "data", "=", "json", ".", "loads", "(", "sys", ".", "stdin", ".", "read", "(", ")", ")", "print", "(", "pydeps2reqs", "(", "data", ")", ")" ]
Cli entrypoint.
[ "Cli", "entrypoint", "." ]
python
train
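The same conversion can be driven programmatically instead of via stdin; a sketch assuming a deps.json previously dumped by pydeps (pydeps2reqs is the helper main() above delegates to, and the module path is taken from the record):

import json
from pydeps.tools.pydeps2requirements import pydeps2reqs

# deps.json is a hypothetical dependency-graph dump produced by pydeps.
with open('deps.json') as f:
    data = json.load(f)
print(pydeps2reqs(data))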
the01/python-paps
paps/si/app/sensor.py
https://github.com/the01/python-paps/blob/2dde5a71913e4c7b22901cf05c6ecedd890919c4/paps/si/app/sensor.py#L272-L291
def _receiving(self): """ Receiving loop :rtype: None """ while self._is_running: try: rlist, wlist, xlist = select.select( self._listening, [], [], self._select_timeout ) except: self.exception("Failed to select socket") continue for sock in rlist: try: self._get_packet(sock) except: self.exception("Failed to receive packet")
[ "def", "_receiving", "(", "self", ")", ":", "while", "self", ".", "_is_running", ":", "try", ":", "rlist", ",", "wlist", ",", "xlist", "=", "select", ".", "select", "(", "self", ".", "_listening", ",", "[", "]", ",", "[", "]", ",", "self", ".", "_select_timeout", ")", "except", ":", "self", ".", "exception", "(", "\"Failed to select socket\"", ")", "continue", "for", "sock", "in", "rlist", ":", "try", ":", "self", ".", "_get_packet", "(", "sock", ")", "except", ":", "self", ".", "exception", "(", "\"Failed to receive packet\"", ")" ]
Receiving loop :rtype: None
[ "Receiving", "loop" ]
python
train
saltstack/salt
salt/fileserver/__init__.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/fileserver/__init__.py#L893-L906
def send(self, load, tries=None, timeout=None, raw=False): # pylint: disable=unused-argument ''' Emulate the channel send method; the tries and timeout are not used ''' if 'cmd' not in load: log.error('Malformed request, no cmd: %s', load) return {} cmd = load['cmd'].lstrip('_') if cmd in self.cmd_stub: return self.cmd_stub[cmd] if not hasattr(self.fs, cmd): log.error('Malformed request, invalid cmd: %s', load) return {} return getattr(self.fs, cmd)(load)
[ "def", "send", "(", "self", ",", "load", ",", "tries", "=", "None", ",", "timeout", "=", "None", ",", "raw", "=", "False", ")", ":", "# pylint: disable=unused-argument", "if", "'cmd'", "not", "in", "load", ":", "log", ".", "error", "(", "'Malformed request, no cmd: %s'", ",", "load", ")", "return", "{", "}", "cmd", "=", "load", "[", "'cmd'", "]", ".", "lstrip", "(", "'_'", ")", "if", "cmd", "in", "self", ".", "cmd_stub", ":", "return", "self", ".", "cmd_stub", "[", "cmd", "]", "if", "not", "hasattr", "(", "self", ".", "fs", ",", "cmd", ")", ":", "log", ".", "error", "(", "'Malformed request, invalid cmd: %s'", ",", "load", ")", "return", "{", "}", "return", "getattr", "(", "self", ".", "fs", ",", "cmd", ")", "(", "load", ")" ]
Emulate the channel send method; the tries and timeout are not used
[ "Emulate", "the", "channel", "send", "method", "the", "tries", "and", "timeout", "are", "not", "used" ]
python
train
mitsei/dlkit
dlkit/json_/learning/objects.py
https://github.com/mitsei/dlkit/blob/445f968a175d61c8d92c0f617a3c17dc1dc7c584/dlkit/json_/learning/objects.py#L1290-L1302
def clear_completion(self): """Clears the completion. raise: NoAccess - ``Metadata.isRequired()`` or ``Metadata.isReadOnly()`` is ``true`` *compliance: mandatory -- This method must be implemented.* """ # Implemented from template for osid.grading.GradeSystemForm.clear_lowest_numeric_score if (self.get_completion_metadata().is_read_only() or self.get_completion_metadata().is_required()): raise errors.NoAccess() self._my_map['completion'] = self._completion_default
[ "def", "clear_completion", "(", "self", ")", ":", "# Implemented from template for osid.grading.GradeSystemForm.clear_lowest_numeric_score", "if", "(", "self", ".", "get_completion_metadata", "(", ")", ".", "is_read_only", "(", ")", "or", "self", ".", "get_completion_metadata", "(", ")", ".", "is_required", "(", ")", ")", ":", "raise", "errors", ".", "NoAccess", "(", ")", "self", ".", "_my_map", "[", "'completion'", "]", "=", "self", ".", "_completion_default" ]
Clears the completion. raise: NoAccess - ``Metadata.isRequired()`` or ``Metadata.isReadOnly()`` is ``true`` *compliance: mandatory -- This method must be implemented.*
[ "Clears", "the", "completion", "." ]
python
train
pantsbuild/pex
pex/util.py
https://github.com/pantsbuild/pex/blob/87b2129d860250d3b9edce75b9cb62f9789ee521/pex/util.py#L108-L112
def update_hash(cls, filelike, digest): """Update the digest of a single file in a memory-efficient manner.""" block_size = digest.block_size * 1024 for chunk in iter(lambda: filelike.read(block_size), b''): digest.update(chunk)
[ "def", "update_hash", "(", "cls", ",", "filelike", ",", "digest", ")", ":", "block_size", "=", "digest", ".", "block_size", "*", "1024", "for", "chunk", "in", "iter", "(", "lambda", ":", "filelike", ".", "read", "(", "block_size", ")", ",", "b''", ")", ":", "digest", ".", "update", "(", "chunk", ")" ]
Update the digest of a single file in a memory-efficient manner.
[ "Update", "the", "digest", "of", "a", "single", "file", "in", "a", "memory", "-", "efficient", "manner", "." ]
python
train
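The same streaming pattern works standalone with hashlib; a sketch of the technique (the file name is illustrative):

import hashlib

digest = hashlib.sha256()
with open('archive.whl', 'rb') as filelike:
    # Read in multiples of the hash's block size so the file is never
    # buffered whole in memory.
    block_size = digest.block_size * 1024
    for chunk in iter(lambda: filelike.read(block_size), b''):
        digest.update(chunk)
print(digest.hexdigest())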
iotile/coretools
iotilebuild/iotile/build/config/scons-local-3.0.1/SCons/Node/FS.py
https://github.com/iotile/coretools/blob/2d794f5f1346b841b0dcd16c9d284e9bf2f3c6ec/iotilebuild/iotile/build/config/scons-local-3.0.1/SCons/Node/FS.py#L1676-L1728
def rel_path(self, other): """Return a path to "other" relative to this directory. """ # This complicated and expensive method, which constructs relative # paths between arbitrary Node.FS objects, is no longer used # by SCons itself. It was introduced to store dependency paths # in .sconsign files relative to the target, but that ended up # being significantly inefficient. # # We're continuing to support the method because some SConstruct # files out there started using it when it was available, and # we're all about backwards compatibility.. try: memo_dict = self._memo['rel_path'] except KeyError: memo_dict = {} self._memo['rel_path'] = memo_dict else: try: return memo_dict[other] except KeyError: pass if self is other: result = '.' elif not other in self._path_elements: try: other_dir = other.get_dir() except AttributeError: result = str(other) else: if other_dir is None: result = other.name else: dir_rel_path = self.rel_path(other_dir) if dir_rel_path == '.': result = other.name else: result = dir_rel_path + OS_SEP + other.name else: i = self._path_elements.index(other) + 1 path_elems = ['..'] * (len(self._path_elements) - i) \ + [n.name for n in other._path_elements[i:]] result = OS_SEP.join(path_elems) memo_dict[other] = result return result
[ "def", "rel_path", "(", "self", ",", "other", ")", ":", "# This complicated and expensive method, which constructs relative", "# paths between arbitrary Node.FS objects, is no longer used", "# by SCons itself. It was introduced to store dependency paths", "# in .sconsign files relative to the target, but that ended up", "# being significantly inefficient.", "#", "# We're continuing to support the method because some SConstruct", "# files out there started using it when it was available, and", "# we're all about backwards compatibility..", "try", ":", "memo_dict", "=", "self", ".", "_memo", "[", "'rel_path'", "]", "except", "KeyError", ":", "memo_dict", "=", "{", "}", "self", ".", "_memo", "[", "'rel_path'", "]", "=", "memo_dict", "else", ":", "try", ":", "return", "memo_dict", "[", "other", "]", "except", "KeyError", ":", "pass", "if", "self", "is", "other", ":", "result", "=", "'.'", "elif", "not", "other", "in", "self", ".", "_path_elements", ":", "try", ":", "other_dir", "=", "other", ".", "get_dir", "(", ")", "except", "AttributeError", ":", "result", "=", "str", "(", "other", ")", "else", ":", "if", "other_dir", "is", "None", ":", "result", "=", "other", ".", "name", "else", ":", "dir_rel_path", "=", "self", ".", "rel_path", "(", "other_dir", ")", "if", "dir_rel_path", "==", "'.'", ":", "result", "=", "other", ".", "name", "else", ":", "result", "=", "dir_rel_path", "+", "OS_SEP", "+", "other", ".", "name", "else", ":", "i", "=", "self", ".", "_path_elements", ".", "index", "(", "other", ")", "+", "1", "path_elems", "=", "[", "'..'", "]", "*", "(", "len", "(", "self", ".", "_path_elements", ")", "-", "i", ")", "+", "[", "n", ".", "name", "for", "n", "in", "other", ".", "_path_elements", "[", "i", ":", "]", "]", "result", "=", "OS_SEP", ".", "join", "(", "path_elems", ")", "memo_dict", "[", "other", "]", "=", "result", "return", "result" ]
Return a path to "other" relative to this directory.
[ "Return", "a", "path", "to", "other", "relative", "to", "this", "directory", "." ]
python
train
pyviz/holoviews
holoviews/core/data/multipath.py
https://github.com/pyviz/holoviews/blob/ae0dd2f3de448b0ca5e9065aabd6ef8d84c7e655/holoviews/core/data/multipath.py#L172-L176
def select_paths(cls, dataset, selection): """ Allows selecting paths with usual NumPy slicing index. """ return [s[0] for s in np.array([{0: p} for p in dataset.data])[selection]]
[ "def", "select_paths", "(", "cls", ",", "dataset", ",", "selection", ")", ":", "return", "[", "s", "[", "0", "]", "for", "s", "in", "np", ".", "array", "(", "[", "{", "0", ":", "p", "}", "for", "p", "in", "dataset", ".", "data", "]", ")", "[", "selection", "]", "]" ]
Allows selecting paths with usual NumPy slicing index.
[ "Allows", "selecting", "paths", "with", "usual", "NumPy", "slicing", "index", "." ]
python
train
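The dict wrapper in select_paths is what makes the indexing work: it forces NumPy to build a 1-d object array instead of stacking the paths. A standalone sketch of the trick:

import numpy as np

paths = [np.zeros((3, 2)), np.ones((4, 2)), np.full((2, 2), 2.0)]
# Wrapping each path in a dict guarantees a 1-d object array, so any
# NumPy fancy/boolean index applies cleanly even when the paths would
# otherwise stack into one higher-dimensional array.
wrapped = np.array([{0: p} for p in paths])
selected = [s[0] for s in wrapped[[0, 2]]]
print([p.shape for p in selected])  # [(3, 2), (2, 2)]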
osrg/ryu
ryu/services/protocols/bgp/api/rtconf.py
https://github.com/osrg/ryu/blob/6f906e72c92e10bd0264c9b91a2f7bb85b97780c/ryu/services/protocols/bgp/api/rtconf.py#L139-L144
def set_neighbor_in_filter(neigh_ip_address, filters): """Sets the in_filter of the neighbor with the given ip address.""" core = CORE_MANAGER.get_core_service() peer = core.peer_manager.get_by_addr(neigh_ip_address) peer.in_filters = filters return True
[ "def", "set_neighbor_in_filter", "(", "neigh_ip_address", ",", "filters", ")", ":", "core", "=", "CORE_MANAGER", ".", "get_core_service", "(", ")", "peer", "=", "core", ".", "peer_manager", ".", "get_by_addr", "(", "neigh_ip_address", ")", "peer", ".", "in_filters", "=", "filters", "return", "True" ]
Sets the in_filter of the neighbor with the given ip address.
[ "Sets", "the", "in_filter", "of", "the", "neighbor", "with", "the", "given", "ip", "address", "." ]
python
train
Dallinger/Dallinger
dallinger/nodes.py
https://github.com/Dallinger/Dallinger/blob/76ca8217c709989c116d0ebd8fca37bd22f591af/dallinger/nodes.py#L63-L66
def create_information(self): """Create new infos on demand.""" info = self._info_type()(origin=self, contents=self._contents()) return info
[ "def", "create_information", "(", "self", ")", ":", "info", "=", "self", ".", "_info_type", "(", ")", "(", "origin", "=", "self", ",", "contents", "=", "self", ".", "_contents", "(", ")", ")", "return", "info" ]
Create new infos on demand.
[ "Create", "new", "infos", "on", "demand", "." ]
python
train
blockstack-packages/blockstack-gpg
blockstack_gpg/gpg.py
https://github.com/blockstack-packages/blockstack-gpg/blob/e4d51e4e51678d9b946596ca9dec53e2d78c8710/blockstack_gpg/gpg.py#L105-L113
def get_gpg_home( appname, config_dir=None ): """ Get the GPG keyring directory for a particular application. Return the path. """ assert is_valid_appname(appname) config_dir = get_config_dir( config_dir ) path = os.path.join( config_dir, "gpgkeys", appname ) return path
[ "def", "get_gpg_home", "(", "appname", ",", "config_dir", "=", "None", ")", ":", "assert", "is_valid_appname", "(", "appname", ")", "config_dir", "=", "get_config_dir", "(", "config_dir", ")", "path", "=", "os", ".", "path", ".", "join", "(", "config_dir", ",", "\"gpgkeys\"", ",", "appname", ")", "return", "path" ]
Get the GPG keyring directory for a particular application. Return the path.
[ "Get", "the", "GPG", "keyring", "directory", "for", "a", "particular", "application", ".", "Return", "the", "path", "." ]
python
train
brocade/pynos
pynos/versions/ver_6/ver_6_0_1/yang/brocade_policer.py
https://github.com/brocade/pynos/blob/bd8a34e98f322de3fc06750827d8bbc3a0c00380/pynos/versions/ver_6/ver_6_0_1/yang/brocade_policer.py#L121-L133
def police_priority_map_conform_map_pri7_conform(self, **kwargs): """Auto Generated Code """ config = ET.Element("config") police_priority_map = ET.SubElement(config, "police-priority-map", xmlns="urn:brocade.com:mgmt:brocade-policer") name_key = ET.SubElement(police_priority_map, "name") name_key.text = kwargs.pop('name') conform = ET.SubElement(police_priority_map, "conform") map_pri7_conform = ET.SubElement(conform, "map-pri7-conform") map_pri7_conform.text = kwargs.pop('map_pri7_conform') callback = kwargs.pop('callback', self._callback) return callback(config)
[ "def", "police_priority_map_conform_map_pri7_conform", "(", "self", ",", "*", "*", "kwargs", ")", ":", "config", "=", "ET", ".", "Element", "(", "\"config\"", ")", "police_priority_map", "=", "ET", ".", "SubElement", "(", "config", ",", "\"police-priority-map\"", ",", "xmlns", "=", "\"urn:brocade.com:mgmt:brocade-policer\"", ")", "name_key", "=", "ET", ".", "SubElement", "(", "police_priority_map", ",", "\"name\"", ")", "name_key", ".", "text", "=", "kwargs", ".", "pop", "(", "'name'", ")", "conform", "=", "ET", ".", "SubElement", "(", "police_priority_map", ",", "\"conform\"", ")", "map_pri7_conform", "=", "ET", ".", "SubElement", "(", "conform", ",", "\"map-pri7-conform\"", ")", "map_pri7_conform", ".", "text", "=", "kwargs", ".", "pop", "(", "'map_pri7_conform'", ")", "callback", "=", "kwargs", ".", "pop", "(", "'callback'", ",", "self", ".", "_callback", ")", "return", "callback", "(", "config", ")" ]
Auto Generated Code
[ "Auto", "Generated", "Code" ]
python
train
Unidata/MetPy
metpy/plots/_util.py
https://github.com/Unidata/MetPy/blob/16f68a94919b9a82dcf9cada2169cf039129e67b/metpy/plots/_util.py#L206-L282
def convert_gempak_color(c, style='psc'): """Convert GEMPAK color numbers into corresponding Matplotlib colors. Takes a sequence of GEMPAK color numbers and turns them into equivalent Matplotlib colors. Various GEMPAK quirks are respected, such as treating negative values as equivalent to 0. Parameters ---------- c : int or sequence of ints GEMPAK color number(s) style : str, optional The GEMPAK 'device' to use to interpret color numbers. May be 'psc' (the default; best for a white background) or 'xw' (best for a black background). Returns ------- List of strings of Matplotlib colors, or a single string if only one color requested. """ def normalize(x): """Transform input x to an int in range 0 to 31 consistent with GEMPAK color quirks.""" x = int(x) if x < 0 or x == 101: x = 0 else: x = x % 32 return x # Define GEMPAK colors (Matplotlib doesn't appear to like numbered variants) cols = ['white', # 0/32 'black', # 1 'red', # 2 'green', # 3 'blue', # 4 'yellow', # 5 'cyan', # 6 'magenta', # 7 '#CD6839', # 8 (sienna3) '#FF8247', # 9 (sienna1) '#FFA54F', # 10 (tan1) '#FFAEB9', # 11 (LightPink1) '#FF6A6A', # 12 (IndianRed1) '#EE2C2C', # 13 (firebrick2) '#8B0000', # 14 (red4) '#CD0000', # 15 (red3) '#EE4000', # 16 (OrangeRed2) '#FF7F00', # 17 (DarkOrange1) '#CD8500', # 18 (orange3) 'gold', # 19 '#EEEE00', # 20 (yellow2) 'chartreuse', # 21 '#00CD00', # 22 (green3) '#008B00', # 23 (green4) '#104E8B', # 24 (DodgerBlue4) 'DodgerBlue', # 25 '#00B2EE', # 26 (DeepSkyBlue2) '#00EEEE', # 27 (cyan2) '#8968CD', # 28 (MediumPurple3) '#912CEE', # 29 (purple2) '#8B008B', # 30 (magenta4) 'bisque'] # 31 if style != 'psc': if style == 'xw': cols[0] = 'black' cols[1] = 'bisque' cols[31] = 'white' else: raise ValueError('Unknown style parameter') try: c_list = list(c) res = [cols[normalize(x)] for x in c_list] except TypeError: res = cols[normalize(c)] return res
[ "def", "convert_gempak_color", "(", "c", ",", "style", "=", "'psc'", ")", ":", "def", "normalize", "(", "x", ")", ":", "\"\"\"Transform input x to an int in range 0 to 31 consistent with GEMPAK color quirks.\"\"\"", "x", "=", "int", "(", "x", ")", "if", "x", "<", "0", "or", "x", "==", "101", ":", "x", "=", "0", "else", ":", "x", "=", "x", "%", "32", "return", "x", "# Define GEMPAK colors (Matplotlib doesn't appear to like numbered variants)", "cols", "=", "[", "'white'", ",", "# 0/32", "'black'", ",", "# 1", "'red'", ",", "# 2", "'green'", ",", "# 3", "'blue'", ",", "# 4", "'yellow'", ",", "# 5", "'cyan'", ",", "# 6", "'magenta'", ",", "# 7", "'#CD6839'", ",", "# 8 (sienna3)", "'#FF8247'", ",", "# 9 (sienna1)", "'#FFA54F'", ",", "# 10 (tan1)", "'#FFAEB9'", ",", "# 11 (LightPink1)", "'#FF6A6A'", ",", "# 12 (IndianRed1)", "'#EE2C2C'", ",", "# 13 (firebrick2)", "'#8B0000'", ",", "# 14 (red4)", "'#CD0000'", ",", "# 15 (red3)", "'#EE4000'", ",", "# 16 (OrangeRed2)", "'#FF7F00'", ",", "# 17 (DarkOrange1)", "'#CD8500'", ",", "# 18 (orange3)", "'gold'", ",", "# 19", "'#EEEE00'", ",", "# 20 (yellow2)", "'chartreuse'", ",", "# 21", "'#00CD00'", ",", "# 22 (green3)", "'#008B00'", ",", "# 23 (green4)", "'#104E8B'", ",", "# 24 (DodgerBlue4)", "'DodgerBlue'", ",", "# 25", "'#00B2EE'", ",", "# 26 (DeepSkyBlue2)", "'#00EEEE'", ",", "# 27 (cyan2)", "'#8968CD'", ",", "# 28 (MediumPurple3)", "'#912CEE'", ",", "# 29 (purple2)", "'#8B008B'", ",", "# 30 (magenta4)", "'bisque'", "]", "# 31", "if", "style", "!=", "'psc'", ":", "if", "style", "==", "'xw'", ":", "cols", "[", "0", "]", "=", "'black'", "cols", "[", "1", "]", "=", "'bisque'", "cols", "[", "31", "]", "=", "'white'", "else", ":", "raise", "ValueError", "(", "'Unknown style parameter'", ")", "try", ":", "c_list", "=", "list", "(", "c", ")", "res", "=", "[", "cols", "[", "normalize", "(", "x", ")", "]", "for", "x", "in", "c_list", "]", "except", "TypeError", ":", "res", "=", "cols", "[", "normalize", "(", "c", ")", "]", "return", "res" ]
Convert GEMPAK color numbers into corresponding Matplotlib colors. Takes a sequence of GEMPAK color numbers and turns them into equivalent Matplotlib colors. Various GEMPAK quirks are respected, such as treating negative values as equivalent to 0. Parameters ---------- c : int or sequence of ints GEMPAK color number(s) style : str, optional The GEMPAK 'device' to use to interpret color numbers. May be 'psc' (the default; best for a white background) or 'xw' (best for a black background). Returns ------- List of strings of Matplotlib colors, or a single string if only one color requested.
[ "Convert", "GEMPAK", "color", "numbers", "into", "corresponding", "Matplotlib", "colors", "." ]
python
train
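A few worked calls, following the normalize rules above (negative numbers collapse to 0, everything else wraps modulo 32). The private module path is taken from the record:

from metpy.plots._util import convert_gempak_color

convert_gempak_color(2)              # 'red'
convert_gempak_color([5, 34, -3])    # ['yellow', 'red', 'white'] -- 34 wraps to 2, -3 maps to 0
convert_gempak_color(1, style='xw')  # 'bisque', the palette tuned for black backgrounds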
rossant/ipymd
ipymd/core/format_manager.py
https://github.com/rossant/ipymd/blob/d87c9ebc59d67fe78b0139ee00e0e5307682e303/ipymd/core/format_manager.py#L87-L103
def register_entrypoints(self): """Look through the `setup_tools` `entry_points` and load all of the formats. """ for spec in iter_entry_points(self.entry_point_group): format_properties = {"name": spec.name} try: format_properties.update(spec.load()) except (DistributionNotFound, ImportError) as err: self.log.info( "ipymd format {} could not be loaded: {}".format( spec.name, err)) continue self.register(**format_properties) return self
[ "def", "register_entrypoints", "(", "self", ")", ":", "for", "spec", "in", "iter_entry_points", "(", "self", ".", "entry_point_group", ")", ":", "format_properties", "=", "{", "\"name\"", ":", "spec", ".", "name", "}", "try", ":", "format_properties", ".", "update", "(", "spec", ".", "load", "(", ")", ")", "except", "(", "DistributionNotFound", ",", "ImportError", ")", "as", "err", ":", "self", ".", "log", ".", "info", "(", "\"ipymd format {} could not be loaded: {}\"", ".", "format", "(", "spec", ".", "name", ",", "err", ")", ")", "continue", "self", ".", "register", "(", "*", "*", "format_properties", ")", "return", "self" ]
Look through the `setup_tools` `entry_points` and load all of the formats.
[ "Look", "through", "the", "setup_tools", "entry_points", "and", "load", "all", "of", "the", "formats", "." ]
python
train
saltstack/salt
salt/modules/vboxmanage.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/vboxmanage.py#L139-L152
def start(name): ''' Start a VM CLI Example: .. code-block:: bash salt '*' vboxmanage.start my_vm ''' ret = {} cmd = '{0} startvm {1}'.format(vboxcmd(), name) ret = salt.modules.cmdmod.run(cmd).splitlines() return ret
[ "def", "start", "(", "name", ")", ":", "ret", "=", "{", "}", "cmd", "=", "'{0} startvm {1}'", ".", "format", "(", "vboxcmd", "(", ")", ",", "name", ")", "ret", "=", "salt", ".", "modules", ".", "cmdmod", ".", "run", "(", "cmd", ")", ".", "splitlines", "(", ")", "return", "ret" ]
Start a VM CLI Example: .. code-block:: bash salt '*' vboxmanage.start my_vm
[ "Start", "a", "VM" ]
python
train
svven/summary
summarize.py
https://github.com/svven/summary/blob/3a6c43d2da08a7452f6b76d1813aa70ba36b8a54/summarize.py#L6-L23
def render(template, **kwargs): """ Renders the HTML containing provided summaries. The summary has to be an instance of summary.Summary, or at least contain similar properties: title, image, url, description and collections: titles, images, descriptions. """ import jinja2 import os.path as path searchpath = path.join(path.dirname(__file__), "templates") loader = jinja2.FileSystemLoader(searchpath=searchpath) env = jinja2.Environment(loader=loader) temp = env.get_template(template) return temp.render(**kwargs)
[ "def", "render", "(", "template", ",", "*", "*", "kwargs", ")", ":", "import", "jinja2", "import", "os", ".", "path", "as", "path", "searchpath", "=", "path", ".", "join", "(", "path", ".", "dirname", "(", "__file__", ")", ",", "\"templates\"", ")", "loader", "=", "jinja2", ".", "FileSystemLoader", "(", "searchpath", "=", "searchpath", ")", "env", "=", "jinja2", ".", "Environment", "(", "loader", "=", "loader", ")", "temp", "=", "env", ".", "get_template", "(", "template", ")", "return", "temp", ".", "render", "(", "*", "*", "kwargs", ")" ]
Renders the HTML containing provided summaries. The summary has to be an instance of summary.Summary, or at least contain similar properties: title, image, url, description and collections: titles, images, descriptions.
[ "Renders", "the", "HTML", "containing", "provided", "summaries", ".", "The", "summary", "has", "to", "be", "an", "instance", "of", "summary", ".", "Summary", "or", "at", "least", "contain", "similar", "properties", ":", "title", "image", "url", "description", "and", "collections", ":", "titles", "images", "descriptions", "." ]
python
train
mdickinson/bigfloat
bigfloat/core.py
https://github.com/mdickinson/bigfloat/blob/e5fdd1048615191ed32a2b7460e14b3b3ff24662/bigfloat/core.py#L1156-L1163
def divmod(x, y, context=None): """ Return the pair (floordiv(x, y, context), mod(x, y, context)). Semantics for negative inputs match those of Python's divmod function. """ return floordiv(x, y, context=context), mod(x, y, context=context)
[ "def", "divmod", "(", "x", ",", "y", ",", "context", "=", "None", ")", ":", "return", "floordiv", "(", "x", ",", "y", ",", "context", "=", "context", ")", ",", "mod", "(", "x", ",", "y", ",", "context", "=", "context", ")" ]
Return the pair (floordiv(x, y, context), mod(x, y, context)). Semantics for negative inputs match those of Python's divmod function.
[ "Return", "the", "pair", "(", "floordiv", "(", "x", "y", "context", ")", "mod", "(", "x", "y", "context", "))", "." ]
python
train
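A quick check of the advertised Python semantics for negative operands; this assumes the package re-exports the core names at top level, as its public API does:

from bigfloat import divmod as bf_divmod

q, r = bf_divmod(-7, 2)
# Floor division rounds toward -inf and the remainder takes the divisor's
# sign, matching builtin divmod: q == -4 and r == 1 (as BigFloat values).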
SHDShim/pytheos
pytheos/eqn_jamieson.py
https://github.com/SHDShim/pytheos/blob/be079624405e92fbec60c5ead253eb5917e55237/pytheos/eqn_jamieson.py#L30-L59
def jamieson_pst(v, v0, c0, s, gamma0, q, theta0, n, z, mass, c_v, three_r=3. * constants.R, t_ref=300.): """ calculate static pressure at 300 K from Hugoniot data using the constq formulation :param v: unit-cell volume in A^3 :param v0: unit-cell volume in A^3 at 1 bar :param c0: velocity at 1 bar in km/s :param s: slope of the velocity change :param gamma0: Gruneisen parameter at 1 bar :param q: logarithmic derivative of Gruneisen parameter :param theta0: Debye temperature in K :param n: number of elements in a chemical formula :param z: number of formula unit in a unit cell :param mass: molar mass in gram :param c_v: heat capacity :param three_r: 3 times gas constant. Jamieson modified this value to compensate for mismatches :param t_ref: reference temperature, 300 K :return: static pressure in GPa :note: 2017/05/18 I am unsure if this is actually being used in pytheos """ rho = mass / vol_uc2mol(v, z) * 1.e-6 rho0 = mass / vol_uc2mol(v0, z) * 1.e-6 p_h = hugoniot_p(rho, rho0, c0, s) p_th_h = jamieson_pth(v, v0, c0, s, gamma0, q, theta0, n, z, mass, c_v, three_r=three_r, t_ref=t_ref) p_st = p_h - p_th_h return p_st
[ "def", "jamieson_pst", "(", "v", ",", "v0", ",", "c0", ",", "s", ",", "gamma0", ",", "q", ",", "theta0", ",", "n", ",", "z", ",", "mass", ",", "c_v", ",", "three_r", "=", "3.", "*", "constants", ".", "R", ",", "t_ref", "=", "300.", ")", ":", "rho", "=", "mass", "/", "vol_uc2mol", "(", "v", ",", "z", ")", "*", "1.e-6", "rho0", "=", "mass", "/", "vol_uc2mol", "(", "v0", ",", "z", ")", "*", "1.e-6", "p_h", "=", "hugoniot_p", "(", "rho", ",", "rho0", ",", "c0", ",", "s", ")", "p_th_h", "=", "jamieson_pth", "(", "v", ",", "v0", ",", "c0", ",", "s", ",", "gamma0", ",", "q", ",", "theta0", ",", "n", ",", "z", ",", "mass", ",", "c_v", ",", "three_r", "=", "three_r", ",", "t_ref", "=", "t_ref", ")", "p_st", "=", "p_h", "-", "p_th_h", "return", "p_st" ]
calculate static pressure at 300 K from Hugoniot data using the constq formulation :param v: unit-cell volume in A^3 :param v0: unit-cell volume in A^3 at 1 bar :param c0: velocity at 1 bar in km/s :param s: slope of the velocity change :param gamma0: Gruneisen parameter at 1 bar :param q: logarithmic derivative of Gruneisen parameter :param theta0: Debye temperature in K :param n: number of elements in a chemical formula :param z: number of formula unit in a unit cell :param mass: molar mass in gram :param c_v: heat capacity :param three_r: 3 times gas constant. Jamieson modified this value to compensate for mismatches :param t_ref: reference temperature, 300 K :return: static pressure in GPa :note: 2017/05/18 I am unsure if this is actually being used in pytheos
[ "calculate", "static", "pressure", "at", "300", "K", "from", "Hugoniot", "data", "using", "the", "constq", "formulation" ]
python
train
pytorch/ignite
ignite/engine/engine.py
https://github.com/pytorch/ignite/blob/a96bd07cb58822cfb39fd81765135712f1db41ca/ignite/engine/engine.py#L326-L361
def run(self, data, max_epochs=1): """Runs the process_function over the passed data. Args: data (Iterable): Collection of batches allowing repeated iteration (e.g., list or `DataLoader`). max_epochs (int, optional): max epochs to run for (default: 1). Returns: State: output state. """ self.state = State(dataloader=data, epoch=0, max_epochs=max_epochs, metrics={}) try: self._logger.info("Engine run starting with max_epochs={}.".format(max_epochs)) start_time = time.time() self._fire_event(Events.STARTED) while self.state.epoch < max_epochs and not self.should_terminate: self.state.epoch += 1 self._fire_event(Events.EPOCH_STARTED) hours, mins, secs = self._run_once_on_dataset() self._logger.info("Epoch[%s] Complete. Time taken: %02d:%02d:%02d", self.state.epoch, hours, mins, secs) if self.should_terminate: break self._fire_event(Events.EPOCH_COMPLETED) self._fire_event(Events.COMPLETED) time_taken = time.time() - start_time hours, mins, secs = _to_hours_mins_secs(time_taken) self._logger.info("Engine run complete. Time taken %02d:%02d:%02d" % (hours, mins, secs)) except BaseException as e: self._logger.error("Engine run is terminating due to exception: %s.", str(e)) self._handle_exception(e) return self.state
[ "def", "run", "(", "self", ",", "data", ",", "max_epochs", "=", "1", ")", ":", "self", ".", "state", "=", "State", "(", "dataloader", "=", "data", ",", "epoch", "=", "0", ",", "max_epochs", "=", "max_epochs", ",", "metrics", "=", "{", "}", ")", "try", ":", "self", ".", "_logger", ".", "info", "(", "\"Engine run starting with max_epochs={}.\"", ".", "format", "(", "max_epochs", ")", ")", "start_time", "=", "time", ".", "time", "(", ")", "self", ".", "_fire_event", "(", "Events", ".", "STARTED", ")", "while", "self", ".", "state", ".", "epoch", "<", "max_epochs", "and", "not", "self", ".", "should_terminate", ":", "self", ".", "state", ".", "epoch", "+=", "1", "self", ".", "_fire_event", "(", "Events", ".", "EPOCH_STARTED", ")", "hours", ",", "mins", ",", "secs", "=", "self", ".", "_run_once_on_dataset", "(", ")", "self", ".", "_logger", ".", "info", "(", "\"Epoch[%s] Complete. Time taken: %02d:%02d:%02d\"", ",", "self", ".", "state", ".", "epoch", ",", "hours", ",", "mins", ",", "secs", ")", "if", "self", ".", "should_terminate", ":", "break", "self", ".", "_fire_event", "(", "Events", ".", "EPOCH_COMPLETED", ")", "self", ".", "_fire_event", "(", "Events", ".", "COMPLETED", ")", "time_taken", "=", "time", ".", "time", "(", ")", "-", "start_time", "hours", ",", "mins", ",", "secs", "=", "_to_hours_mins_secs", "(", "time_taken", ")", "self", ".", "_logger", ".", "info", "(", "\"Engine run complete. Time taken %02d:%02d:%02d\"", "%", "(", "hours", ",", "mins", ",", "secs", ")", ")", "except", "BaseException", "as", "e", ":", "self", ".", "_logger", ".", "error", "(", "\"Engine run is terminating due to exception: %s.\"", ",", "str", "(", "e", ")", ")", "self", ".", "_handle_exception", "(", "e", ")", "return", "self", ".", "state" ]
Runs the process_function over the passed data. Args: data (Iterable): Collection of batches allowing repeated iteration (e.g., list or `DataLoader`). max_epochs (int, optional): max epochs to run for (default: 1). Returns: State: output state.
[ "Runs", "the", "process_function", "over", "the", "passed", "data", "." ]
python
train
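A minimal driver for run, using the Engine/Events API of the same ignite snapshot; the process function is illustrative:

from ignite.engine import Engine, Events

def process(engine, batch):
    # Whatever is returned here becomes engine.state.output for the iteration.
    return sum(batch)

trainer = Engine(process)

def log_epoch(engine):
    print(engine.state.epoch, engine.state.output)

trainer.add_event_handler(Events.EPOCH_COMPLETED, log_epoch)
state = trainer.run([[1, 2], [3, 4]], max_epochs=2)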
readbeyond/aeneas
aeneas/executejob.py
https://github.com/readbeyond/aeneas/blob/9d95535ad63eef4a98530cfdff033b8c35315ee1/aeneas/executejob.py#L298-L316
def clean(self, remove_working_directory=True): """ Remove the temporary directory. If ``remove_working_directory`` is ``True`` remove the working directory as well, otherwise just remove the temporary directory. :param bool remove_working_directory: if ``True``, remove the working directory as well """ if remove_working_directory is not None: self.log(u"Removing working directory... ") gf.delete_directory(self.working_directory) self.working_directory = None self.log(u"Removing working directory... done") self.log(u"Removing temporary directory... ") gf.delete_directory(self.tmp_directory) self.tmp_directory = None self.log(u"Removing temporary directory... done")
[ "def", "clean", "(", "self", ",", "remove_working_directory", "=", "True", ")", ":", "if", "remove_working_directory", "is", "not", "None", ":", "self", ".", "log", "(", "u\"Removing working directory... \"", ")", "gf", ".", "delete_directory", "(", "self", ".", "working_directory", ")", "self", ".", "working_directory", "=", "None", "self", ".", "log", "(", "u\"Removing working directory... done\"", ")", "self", ".", "log", "(", "u\"Removing temporary directory... \"", ")", "gf", ".", "delete_directory", "(", "self", ".", "tmp_directory", ")", "self", ".", "tmp_directory", "=", "None", "self", ".", "log", "(", "u\"Removing temporary directory... done\"", ")" ]
Remove the temporary directory. If ``remove_working_directory`` is ``True`` remove the working directory as well, otherwise just remove the temporary directory. :param bool remove_working_directory: if ``True``, remove the working directory as well
[ "Remove", "the", "temporary", "directory", ".", "If", "remove_working_directory", "is", "True", "remove", "the", "working", "directory", "as", "well", "otherwise", "just", "remove", "the", "temporary", "directory", "." ]
python
train
jaraco/jaraco.ui
jaraco/ui/progress.py
https://github.com/jaraco/jaraco.ui/blob/10e844c03f3afb3d37bd5d727ba9334af2547fcf/jaraco/ui/progress.py#L135-L153
def countdown(template, duration=datetime.timedelta(seconds=5)): """ Do a countdown for duration, printing the template (which may accept one positional argument). Template should be something like ``countdown complete in {} seconds.`` """ now = datetime.datetime.now() deadline = now + duration remaining = deadline - datetime.datetime.now() while remaining: remaining = deadline - datetime.datetime.now() remaining = max(datetime.timedelta(), remaining) msg = template.format(remaining.total_seconds()) print(msg, end=' ' * 10) sys.stdout.flush() time.sleep(.1) print('\b' * 80, end='') sys.stdout.flush() print()
[ "def", "countdown", "(", "template", ",", "duration", "=", "datetime", ".", "timedelta", "(", "seconds", "=", "5", ")", ")", ":", "now", "=", "datetime", ".", "datetime", ".", "now", "(", ")", "deadline", "=", "now", "+", "duration", "remaining", "=", "deadline", "-", "datetime", ".", "datetime", ".", "now", "(", ")", "while", "remaining", ":", "remaining", "=", "deadline", "-", "datetime", ".", "datetime", ".", "now", "(", ")", "remaining", "=", "max", "(", "datetime", ".", "timedelta", "(", ")", ",", "remaining", ")", "msg", "=", "template", ".", "format", "(", "remaining", ".", "total_seconds", "(", ")", ")", "print", "(", "msg", ",", "end", "=", "' '", "*", "10", ")", "sys", ".", "stdout", ".", "flush", "(", ")", "time", ".", "sleep", "(", ".1", ")", "print", "(", "'\\b'", "*", "80", ",", "end", "=", "''", ")", "sys", ".", "stdout", ".", "flush", "(", ")", "print", "(", ")" ]
Do a countdown for duration, printing the template (which may accept one positional argument). Template should be something like ``countdown complete in {} seconds.``
[ "Do", "a", "countdown", "for", "duration", "printing", "the", "template", "(", "which", "may", "accept", "one", "positional", "argument", ")", ".", "Template", "should", "be", "something", "like", "countdown", "complete", "in", "{}", "seconds", "." ]
python
train
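Calling it is straightforward; the template receives the remaining seconds (a float) as its one positional argument:

import datetime
from jaraco.ui.progress import countdown

countdown('Launch in {} seconds', duration=datetime.timedelta(seconds=3))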
ThreatConnect-Inc/tcex
tcex/tcex_ti/tcex_ti_tc_request.py
https://github.com/ThreatConnect-Inc/tcex/blob/dd4d7a1ef723af1561687120191886b9a2fd4b47/tcex/tcex_ti/tcex_ti_tc_request.py#L49-L63
def delete(self, main_type, sub_type, unique_id, owner=None): """ Deletes the Indicator/Group/Victim or Security Label Args: main_type: sub_type: unique_id: owner: """ params = {'owner': owner} if owner else {} if not sub_type: url = '/v2/{}/{}'.format(main_type, unique_id) else: url = '/v2/{}/{}/{}'.format(main_type, sub_type, unique_id) return self.tcex.session.delete(url, params=params)
[ "def", "delete", "(", "self", ",", "main_type", ",", "sub_type", ",", "unique_id", ",", "owner", "=", "None", ")", ":", "params", "=", "{", "'owner'", ":", "owner", "}", "if", "owner", "else", "{", "}", "if", "not", "sub_type", ":", "url", "=", "'/v2/{}/{}'", ".", "format", "(", "main_type", ",", "unique_id", ")", "else", ":", "url", "=", "'/v2/{}/{}/{}'", ".", "format", "(", "main_type", ",", "sub_type", ",", "unique_id", ")", "return", "self", ".", "tcex", ".", "session", ".", "delete", "(", "url", ",", "params", "=", "params", ")" ]
Deletes the Indicator/Group/Victim or Security Label Args: main_type: sub_type: unique_id: owner:
[ "Deletes", "the", "Indicator", "/", "Group", "/", "Victim", "or", "Security", "Label", "Args", ":", "main_type", ":", "sub_type", ":", "unique_id", ":", "owner", ":" ]
python
train
xtuml/pyxtuml
xtuml/persist.py
https://github.com/xtuml/pyxtuml/blob/7dd9343b9a0191d1db1887ab9288d0a026608d9a/xtuml/persist.py#L32-L57
def serialize_value(value, ty): ''' Serialize a value from an xtuml metamodel instance. ''' ty = ty.upper() null_value = { 'BOOLEAN' : False, 'INTEGER' : 0, 'REAL' : 0.0, 'STRING' : '', 'UNIQUE_ID' : 0 } transfer_fn = { 'BOOLEAN' : lambda v: '%d' % int(v), 'INTEGER' : lambda v: '%d' % v, 'REAL' : lambda v: '%f' % v, 'STRING' : lambda v: "'%s'" % v.replace("'", "''"), 'UNIQUE_ID' : lambda v: '"%s"' % uuid.UUID(int=v) } if value is None: value = null_value[ty] return transfer_fn[ty](value)
[ "def", "serialize_value", "(", "value", ",", "ty", ")", ":", "ty", "=", "ty", ".", "upper", "(", ")", "null_value", "=", "{", "'BOOLEAN'", ":", "False", ",", "'INTEGER'", ":", "0", ",", "'REAL'", ":", "0.0", ",", "'STRING'", ":", "''", ",", "'UNIQUE_ID'", ":", "0", "}", "transfer_fn", "=", "{", "'BOOLEAN'", ":", "lambda", "v", ":", "'%d'", "%", "int", "(", "v", ")", ",", "'INTEGER'", ":", "lambda", "v", ":", "'%d'", "%", "v", ",", "'REAL'", ":", "lambda", "v", ":", "'%f'", "%", "v", ",", "'STRING'", ":", "lambda", "v", ":", "\"'%s'\"", "%", "v", ".", "replace", "(", "\"'\"", ",", "\"''\"", ")", ",", "'UNIQUE_ID'", ":", "lambda", "v", ":", "'\"%s\"'", "%", "uuid", ".", "UUID", "(", "int", "=", "v", ")", "}", "if", "value", "is", "None", ":", "value", "=", "null_value", "[", "ty", "]", "return", "transfer_fn", "[", "ty", "]", "(", "value", ")" ]
Serialize a value from an xtuml metamodel instance.
[ "Serialize", "a", "value", "from", "an", "xtuml", "metamodel", "instance", "." ]
python
test
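Some worked calls, showing the None fallback and the SQL-style quote doubling (the type name is case-insensitive because of the upper() call):

from xtuml.persist import serialize_value

serialize_value(None, 'integer')     # '0' -- None falls back to the type's null value
serialize_value("O'Hara", 'STRING')  # "'O''Hara'" -- embedded quotes are doubled
serialize_value(True, 'boolean')     # '1'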
CityOfZion/neo-python-core
neocore/Cryptography/MerkleTree.py
https://github.com/CityOfZion/neo-python-core/blob/786c02cc2f41712d70b1f064ae3d67f86167107f/neocore/Cryptography/MerkleTree.py#L70-L101
def __Build(leaves): """ Build the merkle tree. Args: leaves (list): items are of type MerkleTreeNode. Returns: MerkleTreeNode: the root node. """ if len(leaves) < 1: raise Exception('Leaves must have length') if len(leaves) == 1: return leaves[0] num_parents = int((len(leaves) + 1) / 2) parents = [MerkleTreeNode() for i in range(0, num_parents)] for i in range(0, num_parents): node = parents[i] node.LeftChild = leaves[i * 2] leaves[i * 2].Parent = node if (i * 2 + 1 == len(leaves)): node.RightChild = node.LeftChild else: node.RightChild = leaves[i * 2 + 1] leaves[i * 2 + 1].Parent = node hasharray = bytearray(node.LeftChild.Hash.ToArray() + node.RightChild.Hash.ToArray()) node.Hash = UInt256(data=Crypto.Hash256(hasharray)) return MerkleTree.__Build(parents)
[ "def", "__Build", "(", "leaves", ")", ":", "if", "len", "(", "leaves", ")", "<", "1", ":", "raise", "Exception", "(", "'Leaves must have length'", ")", "if", "len", "(", "leaves", ")", "==", "1", ":", "return", "leaves", "[", "0", "]", "num_parents", "=", "int", "(", "(", "len", "(", "leaves", ")", "+", "1", ")", "/", "2", ")", "parents", "=", "[", "MerkleTreeNode", "(", ")", "for", "i", "in", "range", "(", "0", ",", "num_parents", ")", "]", "for", "i", "in", "range", "(", "0", ",", "num_parents", ")", ":", "node", "=", "parents", "[", "i", "]", "node", ".", "LeftChild", "=", "leaves", "[", "i", "*", "2", "]", "leaves", "[", "i", "*", "2", "]", ".", "Parent", "=", "node", "if", "(", "i", "*", "2", "+", "1", "==", "len", "(", "leaves", ")", ")", ":", "node", ".", "RightChild", "=", "node", ".", "LeftChild", "else", ":", "node", ".", "RightChild", "=", "leaves", "[", "i", "*", "2", "+", "1", "]", "leaves", "[", "i", "*", "2", "+", "1", "]", ".", "Parent", "=", "node", "hasharray", "=", "bytearray", "(", "node", ".", "LeftChild", ".", "Hash", ".", "ToArray", "(", ")", "+", "node", ".", "RightChild", ".", "Hash", ".", "ToArray", "(", ")", ")", "node", ".", "Hash", "=", "UInt256", "(", "data", "=", "Crypto", ".", "Hash256", "(", "hasharray", ")", ")", "return", "MerkleTree", ".", "__Build", "(", "parents", ")" ]
Build the merkle tree. Args: leaves (list): items are of type MerkleTreeNode. Returns: MerkleTreeNode: the root node.
[ "Build", "the", "merkle", "tree", "." ]
python
train
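The recursion reduces to a pairwise double-SHA256 over the leaf hashes, with an odd trailing node hashed against itself (the RightChild = LeftChild branch). A standalone sketch of that reduction, not the class itself:

import hashlib

def hash256(data):
    return hashlib.sha256(hashlib.sha256(data).digest()).digest()

def merkle_root(hashes):
    # hashes: non-empty list of 32-byte digests.
    if len(hashes) == 1:
        return hashes[0]
    if len(hashes) % 2 == 1:
        hashes = hashes + [hashes[-1]]  # odd node pairs with itself
    parents = [hash256(hashes[i] + hashes[i + 1])
               for i in range(0, len(hashes), 2)]
    return merkle_root(parents)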
mithro/python-datetime-tz
datetime_tz/__init__.py
https://github.com/mithro/python-datetime-tz/blob/3c682d003f8b28e39f0c096773e471aeb68e6bbb/datetime_tz/__init__.py#L714-L718
def utcnow(cls): """Return a new datetime representing UTC day and time.""" obj = datetime.datetime.utcnow() obj = cls(obj, tzinfo=pytz.utc) return obj
[ "def", "utcnow", "(", "cls", ")", ":", "obj", "=", "datetime", ".", "datetime", ".", "utcnow", "(", ")", "obj", "=", "cls", "(", "obj", ",", "tzinfo", "=", "pytz", ".", "utc", ")", "return", "obj" ]
Return a new datetime representing UTC day and time.
[ "Return", "a", "new", "datetime", "representing", "UTC", "day", "and", "time", "." ]
python
train
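Unlike datetime.datetime.utcnow(), the result carries pytz.utc, so it can be compared and converted safely; usage assuming the documented datetime_tz class in the datetime_tz package:

from datetime_tz import datetime_tz

now = datetime_tz.utcnow()
print(now.tzinfo)  # UTC -- the instance is timezone-aware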
wbond/asn1crypto
asn1crypto/_iri.py
https://github.com/wbond/asn1crypto/blob/ecda20176f55d37021cbca1f6da9083a8e491197/asn1crypto/_iri.py#L37-L117
def iri_to_uri(value, normalize=False): """ Encodes a unicode IRI into an ASCII byte string URI :param value: A unicode string of an IRI :param normalize: A bool that controls URI normalization :return: A byte string of the ASCII-encoded URI """ if not isinstance(value, str_cls): raise TypeError(unwrap( ''' value must be a unicode string, not %s ''', type_name(value) )) scheme = None # Python 2.6 doesn't split properly is the URL doesn't start with http:// or https:// if sys.version_info < (2, 7) and not value.startswith('http://') and not value.startswith('https://'): real_prefix = None prefix_match = re.match('^[^:]*://', value) if prefix_match: real_prefix = prefix_match.group(0) value = 'http://' + value[len(real_prefix):] parsed = urlsplit(value) if real_prefix: value = real_prefix + value[7:] scheme = _urlquote(real_prefix[:-3]) else: parsed = urlsplit(value) if scheme is None: scheme = _urlquote(parsed.scheme) hostname = parsed.hostname if hostname is not None: hostname = hostname.encode('idna') # RFC 3986 allows userinfo to contain sub-delims username = _urlquote(parsed.username, safe='!$&\'()*+,;=') password = _urlquote(parsed.password, safe='!$&\'()*+,;=') port = parsed.port if port is not None: port = str_cls(port).encode('ascii') netloc = b'' if username is not None: netloc += username if password: netloc += b':' + password netloc += b'@' if hostname is not None: netloc += hostname if port is not None: default_http = scheme == b'http' and port == b'80' default_https = scheme == b'https' and port == b'443' if not normalize or (not default_http and not default_https): netloc += b':' + port # RFC 3986 allows a path to contain sub-delims, plus "@" and ":" path = _urlquote(parsed.path, safe='/!$&\'()*+,;=@:') # RFC 3986 allows the query to contain sub-delims, plus "@", ":" , "/" and "?" query = _urlquote(parsed.query, safe='/?!$&\'()*+,;=@:') # RFC 3986 allows the fragment to contain sub-delims, plus "@", ":" , "/" and "?" fragment = _urlquote(parsed.fragment, safe='/?!$&\'()*+,;=@:') if normalize and query is None and fragment is None and path == b'/': path = None # Python 2.7 compat if path is None: path = '' output = urlunsplit((scheme, netloc, path, query, fragment)) if isinstance(output, str_cls): output = output.encode('latin1') return output
[ "def", "iri_to_uri", "(", "value", ",", "normalize", "=", "False", ")", ":", "if", "not", "isinstance", "(", "value", ",", "str_cls", ")", ":", "raise", "TypeError", "(", "unwrap", "(", "'''\n value must be a unicode string, not %s\n '''", ",", "type_name", "(", "value", ")", ")", ")", "scheme", "=", "None", "# Python 2.6 doesn't split properly is the URL doesn't start with http:// or https://", "if", "sys", ".", "version_info", "<", "(", "2", ",", "7", ")", "and", "not", "value", ".", "startswith", "(", "'http://'", ")", "and", "not", "value", ".", "startswith", "(", "'https://'", ")", ":", "real_prefix", "=", "None", "prefix_match", "=", "re", ".", "match", "(", "'^[^:]*://'", ",", "value", ")", "if", "prefix_match", ":", "real_prefix", "=", "prefix_match", ".", "group", "(", "0", ")", "value", "=", "'http://'", "+", "value", "[", "len", "(", "real_prefix", ")", ":", "]", "parsed", "=", "urlsplit", "(", "value", ")", "if", "real_prefix", ":", "value", "=", "real_prefix", "+", "value", "[", "7", ":", "]", "scheme", "=", "_urlquote", "(", "real_prefix", "[", ":", "-", "3", "]", ")", "else", ":", "parsed", "=", "urlsplit", "(", "value", ")", "if", "scheme", "is", "None", ":", "scheme", "=", "_urlquote", "(", "parsed", ".", "scheme", ")", "hostname", "=", "parsed", ".", "hostname", "if", "hostname", "is", "not", "None", ":", "hostname", "=", "hostname", ".", "encode", "(", "'idna'", ")", "# RFC 3986 allows userinfo to contain sub-delims", "username", "=", "_urlquote", "(", "parsed", ".", "username", ",", "safe", "=", "'!$&\\'()*+,;='", ")", "password", "=", "_urlquote", "(", "parsed", ".", "password", ",", "safe", "=", "'!$&\\'()*+,;='", ")", "port", "=", "parsed", ".", "port", "if", "port", "is", "not", "None", ":", "port", "=", "str_cls", "(", "port", ")", ".", "encode", "(", "'ascii'", ")", "netloc", "=", "b''", "if", "username", "is", "not", "None", ":", "netloc", "+=", "username", "if", "password", ":", "netloc", "+=", "b':'", "+", "password", "netloc", "+=", "b'@'", "if", "hostname", "is", "not", "None", ":", "netloc", "+=", "hostname", "if", "port", "is", "not", "None", ":", "default_http", "=", "scheme", "==", "b'http'", "and", "port", "==", "b'80'", "default_https", "=", "scheme", "==", "b'https'", "and", "port", "==", "b'443'", "if", "not", "normalize", "or", "(", "not", "default_http", "and", "not", "default_https", ")", ":", "netloc", "+=", "b':'", "+", "port", "# RFC 3986 allows a path to contain sub-delims, plus \"@\" and \":\"", "path", "=", "_urlquote", "(", "parsed", ".", "path", ",", "safe", "=", "'/!$&\\'()*+,;=@:'", ")", "# RFC 3986 allows the query to contain sub-delims, plus \"@\", \":\" , \"/\" and \"?\"", "query", "=", "_urlquote", "(", "parsed", ".", "query", ",", "safe", "=", "'/?!$&\\'()*+,;=@:'", ")", "# RFC 3986 allows the fragment to contain sub-delims, plus \"@\", \":\" , \"/\" and \"?\"", "fragment", "=", "_urlquote", "(", "parsed", ".", "fragment", ",", "safe", "=", "'/?!$&\\'()*+,;=@:'", ")", "if", "normalize", "and", "query", "is", "None", "and", "fragment", "is", "None", "and", "path", "==", "b'/'", ":", "path", "=", "None", "# Python 2.7 compat", "if", "path", "is", "None", ":", "path", "=", "''", "output", "=", "urlunsplit", "(", "(", "scheme", ",", "netloc", ",", "path", ",", "query", ",", "fragment", ")", ")", "if", "isinstance", "(", "output", ",", "str_cls", ")", ":", "output", "=", "output", ".", "encode", "(", "'latin1'", ")", "return", "output" ]
Encodes a unicode IRI into an ASCII byte string URI :param value: A unicode string of an IRI :param normalize: A bool that controls URI normalization :return: A byte string of the ASCII-encoded URI
[ "Encodes", "a", "unicode", "IRI", "into", "an", "ASCII", "byte", "string", "URI" ]
python
train
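A worked call, showing the IDNA host encoding and the percent-escaping of path and query (the private module path is taken from the record above):

from asn1crypto._iri import iri_to_uri

iri_to_uri('http://müller.de/café?q=süß')
# b'http://xn--mller-kva.de/caf%C3%A9?q=s%C3%BC%C3%9F'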
llllllllll/codetransformer
codetransformer/utils/pretty.py
https://github.com/llllllllll/codetransformer/blob/c5f551e915df45adc7da7e0b1b635f0cc6a1bb27/codetransformer/utils/pretty.py#L275-L304
def display(text, mode='exec', file=None): """ Show `text`, rendered as AST and as Bytecode. Parameters ---------- text : str Text of Python code to render. mode : {'exec', 'eval'}, optional Mode for `ast.parse` and `compile`. Default is 'exec'. file : None or file-like object, optional File to use to print output. If the default of `None` is passed, we use sys.stdout. """ if file is None: file = sys.stdout ast_section = StringIO() a(text, mode=mode, file=ast_section) code_section = StringIO() d(text, mode=mode, file=code_section) rendered = _DISPLAY_TEMPLATE.format( text=text, ast=ast_section.getvalue(), code=code_section.getvalue(), ) print(rendered, file=file)
[ "def", "display", "(", "text", ",", "mode", "=", "'exec'", ",", "file", "=", "None", ")", ":", "if", "file", "is", "None", ":", "file", "=", "sys", ".", "stdout", "ast_section", "=", "StringIO", "(", ")", "a", "(", "text", ",", "mode", "=", "mode", ",", "file", "=", "ast_section", ")", "code_section", "=", "StringIO", "(", ")", "d", "(", "text", ",", "mode", "=", "mode", ",", "file", "=", "code_section", ")", "rendered", "=", "_DISPLAY_TEMPLATE", ".", "format", "(", "text", "=", "text", ",", "ast", "=", "ast_section", ".", "getvalue", "(", ")", ",", "code", "=", "code_section", ".", "getvalue", "(", ")", ",", ")", "print", "(", "rendered", ",", "file", "=", "file", ")" ]
Show `text`, rendered as AST and as Bytecode. Parameters ---------- text : str Text of Python code to render. mode : {'exec', 'eval'}, optional Mode for `ast.parse` and `compile`. Default is 'exec'. file : None or file-like object, optional File to use to print output. If the default of `None` is passed, we use sys.stdout.
[ "Show", "text", "rendered", "as", "AST", "and", "as", "Bytecode", "." ]
python
train
PlaidWeb/Pushl
pushl/entries.py
https://github.com/PlaidWeb/Pushl/blob/5ea92275c37a6c1989e3d5f53e26c6e0ebfb9a8c/pushl/entries.py#L99-L105
def get_targets(self, config): """ Given an Entry object, return all of the outgoing links. """ return {urllib.parse.urljoin(self.url, attrs['href']) for attrs in self._targets if self._check_rel(attrs, config.rel_whitelist, config.rel_blacklist) and self._domain_differs(attrs['href'])}
[ "def", "get_targets", "(", "self", ",", "config", ")", ":", "return", "{", "urllib", ".", "parse", ".", "urljoin", "(", "self", ".", "url", ",", "attrs", "[", "'href'", "]", ")", "for", "attrs", "in", "self", ".", "_targets", "if", "self", ".", "_check_rel", "(", "attrs", ",", "config", ".", "rel_whitelist", ",", "config", ".", "rel_blacklist", ")", "and", "self", ".", "_domain_differs", "(", "attrs", "[", "'href'", "]", ")", "}" ]
Given an Entry object, return all of the outgoing links.
[ "Given", "an", "Entry", "object", "return", "all", "of", "the", "outgoing", "links", "." ]
python
train
saltstack/salt
salt/cloud/clouds/gce.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/cloud/clouds/gce.py#L877-L909
def show_subnetwork(kwargs=None, call=None): ''' .. versionadded:: 2017.7.0 Show details of an existing GCE Subnetwork. Must specify name and region. CLI Example: .. code-block:: bash salt-cloud -f show_subnetwork gce name=mysubnet region=us-west1 ''' if call != 'function': raise SaltCloudSystemExit( 'The show_subnetwork function must be called with -f or --function.' ) if not kwargs or 'name' not in kwargs: log.error( 'Must specify name of subnet.' ) return False if 'region' not in kwargs: log.error( 'Must specify region of subnet.' ) return False name = kwargs['name'] region = kwargs['region'] conn = get_conn() return _expand_item(conn.ex_get_subnetwork(name, region))
[ "def", "show_subnetwork", "(", "kwargs", "=", "None", ",", "call", "=", "None", ")", ":", "if", "call", "!=", "'function'", ":", "raise", "SaltCloudSystemExit", "(", "'The show_subnetwork function must be called with -f or --function.'", ")", "if", "not", "kwargs", "or", "'name'", "not", "in", "kwargs", ":", "log", ".", "error", "(", "'Must specify name of subnet.'", ")", "return", "False", "if", "'region'", "not", "in", "kwargs", ":", "log", ".", "error", "(", "'Must specify region of subnet.'", ")", "return", "False", "name", "=", "kwargs", "[", "'name'", "]", "region", "=", "kwargs", "[", "'region'", "]", "conn", "=", "get_conn", "(", ")", "return", "_expand_item", "(", "conn", ".", "ex_get_subnetwork", "(", "name", ",", "region", ")", ")" ]
.. versionadded:: 2017.7.0 Show details of an existing GCE Subnetwork. Must specify name and region. CLI Example: .. code-block:: bash salt-cloud -f show_subnetwork gce name=mysubnet region=us-west1
[ "..", "versionadded", "::", "2017", ".", "7", ".", "0", "Show", "details", "of", "an", "existing", "GCE", "Subnetwork", ".", "Must", "specify", "name", "and", "region", "." ]
python
train
chimera0/accel-brain-code
Reinforcement-Learning/pyqlearning/annealingmodel/costfunctionable/greedy_q_learning_cost.py
https://github.com/chimera0/accel-brain-code/blob/03661f6f544bed656269fcd4b3c23c9061629daa/Reinforcement-Learning/pyqlearning/annealingmodel/costfunctionable/greedy_q_learning_cost.py#L35-L59
def compute(self, x): ''' Compute cost. Args: x: `np.ndarray` of explanatory variables. Returns: cost ''' q_learning = copy(self.__greedy_q_learning) q_learning.epsilon_greedy_rate = x[0] q_learning.alpha_value = x[1] q_learning.gamma_value = x[2] if self.__init_state_key is not None: q_learning.learn(state_key=self.__init_state_key, limit=int(x[3])) else: q_learning.learn(limit=x[3]) q_sum = q_learning.q_df.q_value.sum() if q_sum != 0: cost = q_learning.q_df.shape[0] / q_sum else: cost = q_learning.q_df.shape[0] / 1e-4 return cost
[ "def", "compute", "(", "self", ",", "x", ")", ":", "q_learning", "=", "copy", "(", "self", ".", "__greedy_q_learning", ")", "q_learning", ".", "epsilon_greedy_rate", "=", "x", "[", "0", "]", "q_learning", ".", "alpha_value", "=", "x", "[", "1", "]", "q_learning", ".", "gamma_value", "=", "x", "[", "2", "]", "if", "self", ".", "__init_state_key", "is", "not", "None", ":", "q_learning", ".", "learn", "(", "state_key", "=", "self", ".", "__init_state_key", ",", "limit", "=", "int", "(", "x", "[", "3", "]", ")", ")", "else", ":", "q_learning", ".", "learn", "(", "limit", "=", "x", "[", "3", "]", ")", "q_sum", "=", "q_learning", ".", "q_df", ".", "q_value", ".", "sum", "(", ")", "if", "q_sum", "!=", "0", ":", "cost", "=", "q_learning", ".", "q_df", ".", "shape", "[", "0", "]", "/", "q_sum", "else", ":", "cost", "=", "q_learning", ".", "q_df", ".", "shape", "[", "0", "]", "/", "1e-4", "return", "cost" ]
Compute cost. Args: x: `np.ndarray` of explanatory variables. Returns: cost
[ "Compute", "cost", ".", "Args", ":", "x", ":", "np", ".", "ndarray", "of", "explanatory", "variables", ".", "Returns", ":", "cost" ]
python
train
YosaiProject/yosai
yosai/core/mgt/mgt.py
https://github.com/YosaiProject/yosai/blob/7f96aa6b837ceae9bf3d7387cd7e35f5ab032575/yosai/core/mgt/mgt.py#L635-L684
def login(self, subject, authc_token): """ Login authenticates a user using an AuthenticationToken. If authentication is successful AND the Authenticator has determined that authentication is complete for the account, login constructs a Subject instance representing the authenticated account's identity. Once a subject instance is constructed, it is bound to the application for subsequent access before being returned to the caller. If login successfully authenticates a token but the Authenticator has determined that subject's account isn't considered authenticated, the account is configured for multi-factor authentication. Sessionless environments must pass all authentication tokens to login at once. :param authc_token: the authenticationToken to process for the login attempt :type authc_token: authc_abcs.authenticationToken :returns: a Subject representing the authenticated user :raises AuthenticationException: if there is a problem authenticating the specified authc_token :raises AdditionalAuthenticationRequired: during multi-factor authentication when additional tokens are required """ try: # account_id is a SimpleIdentifierCollection account_id = self.authenticator.authenticate_account(subject.identifiers, authc_token) # implies multi-factor authc not complete: except AdditionalAuthenticationRequired as exc: # identity needs to be accessible for subsequent authentication: self.update_subject_identity(exc.account_id, subject) # no need to propagate account further: raise AdditionalAuthenticationRequired except AuthenticationException as authc_ex: try: self.on_failed_login(authc_token, authc_ex, subject) except Exception: msg = ("on_failed_login method raised an exception. Logging " "and propagating original AuthenticationException.") logger.info(msg, exc_info=True) raise logged_in = self.create_subject(authc_token=authc_token, account_id=account_id, existing_subject=subject) self.on_successful_login(authc_token, account_id, logged_in) return logged_in
[ "def", "login", "(", "self", ",", "subject", ",", "authc_token", ")", ":", "try", ":", "# account_id is a SimpleIdentifierCollection", "account_id", "=", "self", ".", "authenticator", ".", "authenticate_account", "(", "subject", ".", "identifiers", ",", "authc_token", ")", "# implies multi-factor authc not complete:", "except", "AdditionalAuthenticationRequired", "as", "exc", ":", "# identity needs to be accessible for subsequent authentication:", "self", ".", "update_subject_identity", "(", "exc", ".", "account_id", ",", "subject", ")", "# no need to propagate account further:", "raise", "AdditionalAuthenticationRequired", "except", "AuthenticationException", "as", "authc_ex", ":", "try", ":", "self", ".", "on_failed_login", "(", "authc_token", ",", "authc_ex", ",", "subject", ")", "except", "Exception", ":", "msg", "=", "(", "\"on_failed_login method raised an exception. Logging \"", "\"and propagating original AuthenticationException.\"", ")", "logger", ".", "info", "(", "msg", ",", "exc_info", "=", "True", ")", "raise", "logged_in", "=", "self", ".", "create_subject", "(", "authc_token", "=", "authc_token", ",", "account_id", "=", "account_id", ",", "existing_subject", "=", "subject", ")", "self", ".", "on_successful_login", "(", "authc_token", ",", "account_id", ",", "logged_in", ")", "return", "logged_in" ]
Login authenticates a user using an AuthenticationToken. If authentication is successful AND the Authenticator has determined that authentication is complete for the account, login constructs a Subject instance representing the authenticated account's identity. Once a subject instance is constructed, it is bound to the application for subsequent access before being returned to the caller. If login successfully authenticates a token but the Authenticator has determined that subject's account isn't considered authenticated, the account is configured for multi-factor authentication. Sessionless environments must pass all authentication tokens to login at once. :param authc_token: the authenticationToken to process for the login attempt :type authc_token: authc_abcs.authenticationToken :returns: a Subject representing the authenticated user :raises AuthenticationException: if there is a problem authenticating the specified authc_token :raises AdditionalAuthenticationRequired: during multi-factor authentication when additional tokens are required
[ "Login", "authenticates", "a", "user", "using", "an", "AuthenticationToken", ".", "If", "authentication", "is", "successful", "AND", "the", "Authenticator", "has", "determined", "that", "authentication", "is", "complete", "for", "the", "account", "login", "constructs", "a", "Subject", "instance", "representing", "the", "authenticated", "account", "s", "identity", ".", "Once", "a", "subject", "instance", "is", "constructed", "it", "is", "bound", "to", "the", "application", "for", "subsequent", "access", "before", "being", "returned", "to", "the", "caller", "." ]
python
train
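A minimal usage sketch for the login method above, not a verbatim Yosai recipe: security_manager is assumed to be an already-configured NativeSecurityManager, subject its current Subject, and the credentials are hypothetical.

from yosai.core import UsernamePasswordToken

# hypothetical credentials, for illustration only
token = UsernamePasswordToken(username='thedude', password='letsgobowling')

# security_manager and subject are assumed to exist (see above)
logged_in = security_manager.login(subject, token)
print(logged_in.identifiers)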
user-cont/colin
colin/core/checks/check_utils.py
https://github.com/user-cont/colin/blob/00bb80e6e91522e15361935f813e8cf13d7e76dc/colin/core/checks/check_utils.py#L7-L34
def check_label(labels, required, value_regex, target_labels):
    """
    Check if the label is required and matches the regex

    :param labels: [str]
    :param required: bool (if the presence means pass or not)
    :param value_regex: str (using search method)
    :param target_labels: [str]
    :return: bool (required==True: True if the label is present and matches the regex if specified)
                  (required==False: True if the label is not present)
    """
    present = target_labels is not None and not set(labels).isdisjoint(set(target_labels))

    if present:
        if required and not value_regex:
            return True
        elif value_regex:
            pattern = re.compile(value_regex)
            present_labels = set(labels) & set(target_labels)
            for l in present_labels:
                if not bool(pattern.search(target_labels[l])):
                    return False
            return True
        else:
            return False
    else:
        return not required
[ "def", "check_label", "(", "labels", ",", "required", ",", "value_regex", ",", "target_labels", ")", ":", "present", "=", "target_labels", "is", "not", "None", "and", "not", "set", "(", "labels", ")", ".", "isdisjoint", "(", "set", "(", "target_labels", ")", ")", "if", "present", ":", "if", "required", "and", "not", "value_regex", ":", "return", "True", "elif", "value_regex", ":", "pattern", "=", "re", ".", "compile", "(", "value_regex", ")", "present_labels", "=", "set", "(", "labels", ")", "&", "set", "(", "target_labels", ")", "for", "l", "in", "present_labels", ":", "if", "not", "bool", "(", "pattern", ".", "search", "(", "target_labels", "[", "l", "]", ")", ")", ":", "return", "False", "return", "True", "else", ":", "return", "False", "else", ":", "return", "not", "required" ]
Check if the label is required and matches the regex

:param labels: [str]
:param required: bool (if the presence means pass or not)
:param value_regex: str (using search method)
:param target_labels: [str]
:return: bool (required==True: True if the label is present and matches the regex if specified)
              (required==False: True if the label is not present)
[ "Check", "if", "the", "label", "is", "required", "and", "match", "the", "regex" ]
python
train
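A quick behavior sketch for check_label; it needs only the function above and the re module it already uses. Label names and values here are made up.

image_labels = {'maintainer': 'jane@example.com', 'version': '1.2.0'}

# required label present and value matching the regex -> True
print(check_label(['version'], True, r'^\d+\.\d+\.\d+$', image_labels))

# label that must be absent is indeed absent -> True
print(check_label(['deprecated'], False, None, image_labels))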
twaddington/django-gravatar
django_gravatar/helpers.py
https://github.com/twaddington/django-gravatar/blob/c4849d93ed43b419eceff0ff2de83d4265597629/django_gravatar/helpers.py#L74-L87
def has_gravatar(email):
    """
    Returns True if the user has a gravatar, False otherwise
    """
    # Request a 404 response if the gravatar does not exist
    url = get_gravatar_url(email, default=GRAVATAR_DEFAULT_IMAGE_404)

    # Verify an OK response was received
    try:
        request = Request(url)
        request.get_method = lambda: 'HEAD'
        return 200 == urlopen(request).code
    except (HTTPError, URLError):
        return False
[ "def", "has_gravatar", "(", "email", ")", ":", "# Request a 404 response if the gravatar does not exist", "url", "=", "get_gravatar_url", "(", "email", ",", "default", "=", "GRAVATAR_DEFAULT_IMAGE_404", ")", "# Verify an OK response was received", "try", ":", "request", "=", "Request", "(", "url", ")", "request", ".", "get_method", "=", "lambda", ":", "'HEAD'", "return", "200", "==", "urlopen", "(", "request", ")", ".", "code", "except", "(", "HTTPError", ",", "URLError", ")", ":", "return", "False" ]
Returns True if the user has a gravatar, False otherwise
[ "Returns", "True", "if", "the", "user", "has", "a", "gravatar", "False", "if", "otherwise" ]
python
test
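Usage is a single call; note that has_gravatar issues a real HTTP HEAD request to Gravatar, and the address below is hypothetical.

from django_gravatar.helpers import has_gravatar

if has_gravatar('someone@example.com'):
    print('show the gravatar')
else:
    print('fall back to a default avatar')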
ModisWorks/modis
modis/discord_modis/modules/tableflip/api_flipcheck.py
https://github.com/ModisWorks/modis/blob/1f1225c9841835ec1d1831fc196306527567db8b/modis/discord_modis/modules/tableflip/api_flipcheck.py#L1-L59
def flipcheck(content): """Checks a string for anger and soothes said anger Args: content (str): The message to be flipchecked Returns: putitback (str): The righted table or text """ # Prevent tampering with flip punct = """!"#$%&'*+,-./:;<=>?@[\]^_`{|}~ ━─""" tamperdict = str.maketrans('', '', punct) tamperproof = content.translate(tamperdict) # Unflip if "(╯°□°)╯︵" in tamperproof: # For tables if "┻┻" in tamperproof: # Calculate table length length = 0 for letter in content: if letter == "━": length += 1.36 elif letter == "─": length += 1 elif letter == "-": length += 0.50 # Construct table putitback = "┬" for i in range(int(length)): putitback += "─" putitback += "┬ ノ( ゜-゜ノ)" return putitback # For text else: # Create dictionary for flipping text flipdict = str.maketrans( 'abcdefghijklmnopqrstuvwxyzɐqɔpǝɟbɥıظʞןɯuodbɹsʇnʌʍxʎz😅🙃😞😟😠😡☹🙁😱😨😰😦😧😢😓😥😭', 'ɐqɔpǝɟbɥıظʞןɯuodbɹsʇnʌʍxʎzabcdefghijklmnopqrstuvwxyz😄🙂🙂🙂🙂🙂🙂😀😀🙂😄🙂🙂😄😄😄😁' ) # Construct flipped text flipstart = content.index('︵') flipped = content[flipstart+1:] flipped = str.lower(flipped).translate(flipdict) putitback = ''.join(list(reversed(list(flipped)))) putitback += "ノ( ゜-゜ノ)" return putitback else: return False
[ "def", "flipcheck", "(", "content", ")", ":", "# Prevent tampering with flip", "punct", "=", "\"\"\"!\"#$%&'*+,-./:;<=>?@[\\]^_`{|}~ ━─\"\"\"", "tamperdict", "=", "str", ".", "maketrans", "(", "''", ",", "''", ",", "punct", ")", "tamperproof", "=", "content", ".", "translate", "(", "tamperdict", ")", "# Unflip", "if", "\"(╯°□°)╯︵\" in tamperpr", "of", "", "", "# For tables", "if", "\"┻┻\" in ", "am", "erproof:", "", "# Calculate table length", "length", "=", "0", "for", "letter", "in", "content", ":", "if", "letter", "==", "\"━\":", "", "length", "+=", "1.36", "elif", "letter", "==", "\"─\":", "", "length", "+=", "1", "elif", "letter", "==", "\"-\"", ":", "length", "+=", "0.50", "# Construct table", "putitback", "=", "\"┬\"", "for", "i", "in", "range", "(", "int", "(", "length", ")", ")", ":", "putitback", "+=", "\"─\"", "putitback", "+=", "\"┬ ノ( ゜-゜ノ)\"", "return", "putitback", "# For text", "else", ":", "# Create dictionary for flipping text", "flipdict", "=", "str", ".", "maketrans", "(", "'abcdefghijklmnopqrstuvwxyzɐqɔpǝɟbɥıظʞןɯuodbɹsʇnʌʍxʎz😅🙃😞😟😠😡☹🙁😱😨😰😦😧😢😓😥😭',", "", "'ɐqɔpǝɟbɥıظʞןɯuodbɹsʇnʌʍxʎzabcdefghijklmnopqrstuvwxyz😄🙂🙂🙂🙂🙂🙂😀😀🙂😄🙂🙂😄😄😄😁'", ")", "# Construct flipped text", "flipstart", "=", "content", ".", "index", "(", "'︵')", "", "flipped", "=", "content", "[", "flipstart", "+", "1", ":", "]", "flipped", "=", "str", ".", "lower", "(", "flipped", ")", ".", "translate", "(", "flipdict", ")", "putitback", "=", "''", ".", "join", "(", "list", "(", "reversed", "(", "list", "(", "flipped", ")", ")", ")", ")", "putitback", "+=", "\"ノ( ゜-゜ノ)\"", "return", "putitback", "else", ":", "return", "False" ]
Checks a string for anger and soothes said anger Args: content (str): The message to be flipchecked Returns: putitback (str): The righted table or text
[ "Checks", "a", "string", "for", "anger", "and", "soothes", "said", "anger" ]
python
train
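A behavior sketch for flipcheck, assuming the function is in scope; it rights a flipped table and returns False when no flip is present.

angry = '(╯°□°)╯︵ ┻━┻'
print(flipcheck(angry))       # a righted table, e.g. '┬─┬ ノ( ゜-゜ノ)'
print(flipcheck('all calm'))  # False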
GPflow/GPflow
gpflow/training/hmc.py
https://github.com/GPflow/GPflow/blob/549394f0b1b0696c7b521a065e49bdae6e7acf27/gpflow/training/hmc.py#L24-L124
def sample(self, model, num_samples, epsilon, lmin=1, lmax=1, thin=1, burn=0, session=None, initialize=True, anchor=True, logprobs=True): """ A straight-forward HMC implementation. The mass matrix is assumed to be the identity. The gpflow model must implement `build_objective` method to build `f` function (tensor) which in turn is based on the model's internal trainable parameters `x`. f(x) = E(x) we then generate samples from the distribution pi(x) = exp(-E(x))/Z The total number of iterations is given by: burn + thin * num_samples The leapfrog (Verlet) integrator works by picking a random number of steps uniformly between lmin and lmax, and taking steps of length epsilon. :param model: gpflow model with `build_objective` method implementation. :param num_samples: number of samples to generate. :param epsilon: HMC tuning parameter - stepsize. :param lmin: HMC tuning parameter - lowest integer `a` of uniform `[a, b]` distribution used for drawing number of leapfrog iterations. :param lmax: HMC tuning parameter - largest integer `b` from uniform `[a, b]` distribution used for drawing number of leapfrog iterations. :param thin: an integer which specifies the thinning interval. :param burn: an integer which specifies how many initial samples to discard. :param session: TensorFlow session. The default session or cached GPflow session will be used if it is none. :param initialize: indicates whether TensorFlow initialization is required or not. :param anchor: dump live trainable values computed within specified TensorFlow session to actual parameters (in python scope). :param logprobs: indicates whether logprob values shall be included in the output or not. :return: data frame with `num_samples` traces, where columns are full names of trainable parameters except last column, which is `logprobs`. Trainable parameters are represented as constrained values in output. :raises: ValueError exception when wrong parameter ranges were passed.
""" if lmax <= 0 or lmin <= 0: raise ValueError('The lmin and lmax parameters must be greater zero.') if thin <= 0: raise ValueError('The thin parameter must be greater zero.') if burn < 0: raise ValueError('The burn parameter must be equal or greater zero.') lmax += 1 session = model.enquire_session(session) model.initialize(session=session, force=initialize) with tf.name_scope('hmc'): params = list(model.trainable_parameters) xs = list(model.trainable_tensors) def logprob_grads(): logprob = tf.negative(model.build_objective()) grads = tf.gradients(logprob, xs) return logprob, grads thin_args = [logprob_grads, xs, thin, epsilon, lmin, lmax] if burn > 0: burn_op = _burning(burn, *thin_args) session.run(burn_op, feed_dict=model.feeds) xs_dtypes = _map(lambda x: x.dtype, xs) logprob_dtype = model.objective.dtype dtypes = _flat(xs_dtypes, [logprob_dtype]) indices = np.arange(num_samples) def map_body(_): xs_sample, logprob_sample = _thinning(*thin_args) return _flat(xs_sample, [logprob_sample]) hmc_output = tf.map_fn(map_body, indices, dtype=dtypes, back_prop=False, parallel_iterations=1) with tf.control_dependencies(hmc_output): unconstrained_trace, logprob_trace = hmc_output[:-1], hmc_output[-1] constrained_trace = _map(lambda x, param: param.transform.forward_tensor(x), unconstrained_trace, params) hmc_output = constrained_trace + [logprob_trace] names = [param.pathname for param in params] raw_traces = session.run(hmc_output, feed_dict=model.feeds) if anchor: model.anchor(session) traces = dict(zip(names, map(list, raw_traces[:-1]))) if logprobs: traces.update({'logprobs': raw_traces[-1]}) return pd.DataFrame(traces)
[ "def", "sample", "(", "self", ",", "model", ",", "num_samples", ",", "epsilon", ",", "lmin", "=", "1", ",", "lmax", "=", "1", ",", "thin", "=", "1", ",", "burn", "=", "0", ",", "session", "=", "None", ",", "initialize", "=", "True", ",", "anchor", "=", "True", ",", "logprobs", "=", "True", ")", ":", "if", "lmax", "<=", "0", "or", "lmin", "<=", "0", ":", "raise", "ValueError", "(", "'The lmin and lmax parameters must be greater zero.'", ")", "if", "thin", "<=", "0", ":", "raise", "ValueError", "(", "'The thin parameter must be greater zero.'", ")", "if", "burn", "<", "0", ":", "raise", "ValueError", "(", "'The burn parameter must be equal or greater zero.'", ")", "lmax", "+=", "1", "session", "=", "model", ".", "enquire_session", "(", "session", ")", "model", ".", "initialize", "(", "session", "=", "session", ",", "force", "=", "initialize", ")", "with", "tf", ".", "name_scope", "(", "'hmc'", ")", ":", "params", "=", "list", "(", "model", ".", "trainable_parameters", ")", "xs", "=", "list", "(", "model", ".", "trainable_tensors", ")", "def", "logprob_grads", "(", ")", ":", "logprob", "=", "tf", ".", "negative", "(", "model", ".", "build_objective", "(", ")", ")", "grads", "=", "tf", ".", "gradients", "(", "logprob", ",", "xs", ")", "return", "logprob", ",", "grads", "thin_args", "=", "[", "logprob_grads", ",", "xs", ",", "thin", ",", "epsilon", ",", "lmin", ",", "lmax", "]", "if", "burn", ">", "0", ":", "burn_op", "=", "_burning", "(", "burn", ",", "*", "thin_args", ")", "session", ".", "run", "(", "burn_op", ",", "feed_dict", "=", "model", ".", "feeds", ")", "xs_dtypes", "=", "_map", "(", "lambda", "x", ":", "x", ".", "dtype", ",", "xs", ")", "logprob_dtype", "=", "model", ".", "objective", ".", "dtype", "dtypes", "=", "_flat", "(", "xs_dtypes", ",", "[", "logprob_dtype", "]", ")", "indices", "=", "np", ".", "arange", "(", "num_samples", ")", "def", "map_body", "(", "_", ")", ":", "xs_sample", ",", "logprob_sample", "=", "_thinning", "(", "*", "thin_args", ")", "return", "_flat", "(", "xs_sample", ",", "[", "logprob_sample", "]", ")", "hmc_output", "=", "tf", ".", "map_fn", "(", "map_body", ",", "indices", ",", "dtype", "=", "dtypes", ",", "back_prop", "=", "False", ",", "parallel_iterations", "=", "1", ")", "with", "tf", ".", "control_dependencies", "(", "hmc_output", ")", ":", "unconstrained_trace", ",", "logprob_trace", "=", "hmc_output", "[", ":", "-", "1", "]", ",", "hmc_output", "[", "-", "1", "]", "constrained_trace", "=", "_map", "(", "lambda", "x", ",", "param", ":", "param", ".", "transform", ".", "forward_tensor", "(", "x", ")", ",", "unconstrained_trace", ",", "params", ")", "hmc_output", "=", "constrained_trace", "+", "[", "logprob_trace", "]", "names", "=", "[", "param", ".", "pathname", "for", "param", "in", "params", "]", "raw_traces", "=", "session", ".", "run", "(", "hmc_output", ",", "feed_dict", "=", "model", ".", "feeds", ")", "if", "anchor", ":", "model", ".", "anchor", "(", "session", ")", "traces", "=", "dict", "(", "zip", "(", "names", ",", "map", "(", "list", ",", "raw_traces", "[", ":", "-", "1", "]", ")", ")", ")", "if", "logprobs", ":", "traces", ".", "update", "(", "{", "'logprobs'", ":", "raw_traces", "[", "-", "1", "]", "}", ")", "return", "pd", ".", "DataFrame", "(", "traces", ")" ]
A straight-forward HMC implementation. The mass matrix is assumed to be the identity. The gpflow model must implement `build_objective` method to build `f` function (tensor) which in turn is based on the model's internal trainable parameters `x`. f(x) = E(x) we then generate samples from the distribution pi(x) = exp(-E(x))/Z The total number of iterations is given by: burn + thin * num_samples The leapfrog (Verlet) integrator works by picking a random number of steps uniformly between lmin and lmax, and taking steps of length epsilon. :param model: gpflow model with `build_objective` method implementation. :param num_samples: number of samples to generate. :param epsilon: HMC tuning parameter - stepsize. :param lmin: HMC tuning parameter - lowest integer `a` of uniform `[a, b]` distribution used for drawing number of leapfrog iterations. :param lmax: HMC tuning parameter - largest integer `b` from uniform `[a, b]` distribution used for drawing number of leapfrog iterations. :param thin: an integer which specifies the thinning interval. :param burn: an integer which specifies how many initial samples to discard. :param session: TensorFlow session. The default session or cached GPflow session will be used if it is none. :param initialize: indicates whether TensorFlow initialization is required or not. :param anchor: dump live trainable values computed within specified TensorFlow session to actual parameters (in python scope). :param logprobs: indicates whether logprob values shall be included in the output or not. :return: data frame with `num_samples` traces, where columns are full names of trainable parameters except last column, which is `logprobs`. Trainable parameters are represented as constrained values in output. :raises: ValueError exception when wrong parameter ranges were passed.
[ "A", "straight", "-", "forward", "HMC", "implementation", ".", "The", "mass", "matrix", "is", "assumed", "to", "be", "the", "identity", "." ]
python
train
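A usage sketch under assumptions: the class owning sample() is taken to be HMC from this module's path, and m is an already-built GPflow model (e.g. a compiled gpflow.models.GPR); neither is shown here.

from gpflow.training.hmc import HMC  # class name assumed from this module's path

# m = ...  # an already-built GPflow model (assumed)
sampler = HMC()
traces = sampler.sample(m, num_samples=500, epsilon=0.05,
                        lmin=10, lmax=20, thin=2, burn=100)
print(traces.columns)  # one column per trainable parameter, plus 'logprobs'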
kubernetes-client/python
kubernetes/client/apis/certificates_v1beta1_api.py
https://github.com/kubernetes-client/python/blob/5e512ff564c244c50cab780d821542ed56aa965a/kubernetes/client/apis/certificates_v1beta1_api.py#L38-L60
def create_certificate_signing_request(self, body, **kwargs): """ create a CertificateSigningRequest This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.create_certificate_signing_request(body, async_req=True) >>> result = thread.get() :param async_req bool :param V1beta1CertificateSigningRequest body: (required) :param str pretty: If 'true', then the output is pretty printed. :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed :param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint. :return: V1beta1CertificateSigningRequest If the method is called asynchronously, returns the request thread. """ kwargs['_return_http_data_only'] = True if kwargs.get('async_req'): return self.create_certificate_signing_request_with_http_info(body, **kwargs) else: (data) = self.create_certificate_signing_request_with_http_info(body, **kwargs) return data
[ "def", "create_certificate_signing_request", "(", "self", ",", "body", ",", "*", "*", "kwargs", ")", ":", "kwargs", "[", "'_return_http_data_only'", "]", "=", "True", "if", "kwargs", ".", "get", "(", "'async_req'", ")", ":", "return", "self", ".", "create_certificate_signing_request_with_http_info", "(", "body", ",", "*", "*", "kwargs", ")", "else", ":", "(", "data", ")", "=", "self", ".", "create_certificate_signing_request_with_http_info", "(", "body", ",", "*", "*", "kwargs", ")", "return", "data" ]
create a CertificateSigningRequest This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.create_certificate_signing_request(body, async_req=True) >>> result = thread.get() :param async_req bool :param V1beta1CertificateSigningRequest body: (required) :param str pretty: If 'true', then the output is pretty printed. :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed :param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint. :return: V1beta1CertificateSigningRequest If the method is called asynchronously, returns the request thread.
[ "create", "a", "CertificateSigningRequest", "This", "method", "makes", "a", "synchronous", "HTTP", "request", "by", "default", ".", "To", "make", "an", "asynchronous", "HTTP", "request", "please", "pass", "async_req", "=", "True", ">>>", "thread", "=", "api", ".", "create_certificate_signing_request", "(", "body", "async_req", "=", "True", ")", ">>>", "result", "=", "thread", ".", "get", "()" ]
python
train
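A usage sketch against the certificates.k8s.io/v1beta1 API; it assumes a cluster reachable via kubeconfig and a csr_body of type V1beta1CertificateSigningRequest built elsewhere.

from kubernetes import client, config

config.load_kube_config()              # assumes a valid kubeconfig
api = client.CertificatesV1beta1Api()
# csr_body: a V1beta1CertificateSigningRequest, prepared elsewhere (assumed)
created = api.create_certificate_signing_request(body=csr_body)
print(created.metadata.name)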
odlgroup/odl
odl/solvers/functional/default_functionals.py
https://github.com/odlgroup/odl/blob/b8443f6aca90e191ba36c91d32253c5a36249a6c/odl/solvers/functional/default_functionals.py#L2676-L2743
def gradient(self): r"""Gradient operator of the functional. The gradient of the Huber functional is given by .. math:: \nabla f_{\gamma}(x) = \begin{cases} \frac{1}{\gamma} x & \text{if } \|x\|_2 \leq \gamma \\ \frac{1}{\|x\|_2} x & \text{else} \end{cases}. Examples -------- Check that the gradient norm is less than the norm of the one element: >>> space = odl.uniform_discr(0, 1, 14) >>> norm_one = space.one().norm() >>> x = odl.phantom.white_noise(space) >>> huber_norm = odl.solvers.Huber(space, gamma=0.1) >>> grad = huber_norm.gradient(x) >>> tol = 1e-5 >>> grad.norm() <= norm_one + tol True Redo previous example for a product space in two dimensions: >>> domain = odl.uniform_discr([0, 0], [1, 1], [5, 5]) >>> space = odl.ProductSpace(domain, 2) >>> norm_one = space.one().norm() >>> x = odl.phantom.white_noise(space) >>> huber_norm = odl.solvers.Huber(space, gamma=0.2) >>> grad = huber_norm.gradient(x) >>> tol = 1e-5 >>> grad.norm() <= norm_one + tol True """ functional = self class HuberGradient(Operator): """The gradient operator of this functional.""" def __init__(self): """Initialize a new instance.""" super(HuberGradient, self).__init__( functional.domain, functional.domain, linear=False) def _call(self, x): """Apply the gradient operator to the given point.""" if isinstance(self.domain, ProductSpace): norm = PointwiseNorm(self.domain, 2)(x) else: norm = x.ufuncs.absolute() grad = x / functional.gamma index = norm.ufuncs.greater_equal(functional.gamma) if isinstance(self.domain, ProductSpace): for xi, gi in zip(x, grad): gi[index] = xi[index] / norm[index] else: grad[index] = x[index] / norm[index] return grad return HuberGradient()
[ "def", "gradient", "(", "self", ")", ":", "functional", "=", "self", "class", "HuberGradient", "(", "Operator", ")", ":", "\"\"\"The gradient operator of this functional.\"\"\"", "def", "__init__", "(", "self", ")", ":", "\"\"\"Initialize a new instance.\"\"\"", "super", "(", "HuberGradient", ",", "self", ")", ".", "__init__", "(", "functional", ".", "domain", ",", "functional", ".", "domain", ",", "linear", "=", "False", ")", "def", "_call", "(", "self", ",", "x", ")", ":", "\"\"\"Apply the gradient operator to the given point.\"\"\"", "if", "isinstance", "(", "self", ".", "domain", ",", "ProductSpace", ")", ":", "norm", "=", "PointwiseNorm", "(", "self", ".", "domain", ",", "2", ")", "(", "x", ")", "else", ":", "norm", "=", "x", ".", "ufuncs", ".", "absolute", "(", ")", "grad", "=", "x", "/", "functional", ".", "gamma", "index", "=", "norm", ".", "ufuncs", ".", "greater_equal", "(", "functional", ".", "gamma", ")", "if", "isinstance", "(", "self", ".", "domain", ",", "ProductSpace", ")", ":", "for", "xi", ",", "gi", "in", "zip", "(", "x", ",", "grad", ")", ":", "gi", "[", "index", "]", "=", "xi", "[", "index", "]", "/", "norm", "[", "index", "]", "else", ":", "grad", "[", "index", "]", "=", "x", "[", "index", "]", "/", "norm", "[", "index", "]", "return", "grad", "return", "HuberGradient", "(", ")" ]
r"""Gradient operator of the functional. The gradient of the Huber functional is given by .. math:: \nabla f_{\gamma}(x) = \begin{cases} \frac{1}{\gamma} x & \text{if } \|x\|_2 \leq \gamma \\ \frac{1}{\|x\|_2} x & \text{else} \end{cases}. Examples -------- Check that the gradient norm is less than the norm of the one element: >>> space = odl.uniform_discr(0, 1, 14) >>> norm_one = space.one().norm() >>> x = odl.phantom.white_noise(space) >>> huber_norm = odl.solvers.Huber(space, gamma=0.1) >>> grad = huber_norm.gradient(x) >>> tol = 1e-5 >>> grad.norm() <= norm_one + tol True Redo previous example for a product space in two dimensions: >>> domain = odl.uniform_discr([0, 0], [1, 1], [5, 5]) >>> space = odl.ProductSpace(domain, 2) >>> norm_one = space.one().norm() >>> x = odl.phantom.white_noise(space) >>> huber_norm = odl.solvers.Huber(space, gamma=0.2) >>> grad = huber_norm.gradient(x) >>> tol = 1e-5 >>> grad.norm() <= norm_one + tol True
[ "r", "Gradient", "operator", "of", "the", "functional", "." ]
python
train
pymoca/pymoca
src/pymoca/backends/xml/model.py
https://github.com/pymoca/pymoca/blob/14b5eb7425e96689de6cc5c10f400895d586a978/src/pymoca/backends/xml/model.py#L174-L186
def split_dae_alg(eqs: SYM, dx: SYM) -> Dict[str, SYM]: """Split equations into differential algebraic and algebraic only""" dae = [] alg = [] for eq in ca.vertsplit(eqs): if ca.depends_on(eq, dx): dae.append(eq) else: alg.append(eq) return { 'dae': ca.vertcat(*dae), 'alg': ca.vertcat(*alg) }
[ "def", "split_dae_alg", "(", "eqs", ":", "SYM", ",", "dx", ":", "SYM", ")", "->", "Dict", "[", "str", ",", "SYM", "]", ":", "dae", "=", "[", "]", "alg", "=", "[", "]", "for", "eq", "in", "ca", ".", "vertsplit", "(", "eqs", ")", ":", "if", "ca", ".", "depends_on", "(", "eq", ",", "dx", ")", ":", "dae", ".", "append", "(", "eq", ")", "else", ":", "alg", ".", "append", "(", "eq", ")", "return", "{", "'dae'", ":", "ca", ".", "vertcat", "(", "*", "dae", ")", ",", "'alg'", ":", "ca", ".", "vertcat", "(", "*", "alg", ")", "}" ]
Split equations into differential algebraic and algebraic only
[ "Split", "equations", "into", "differential", "algebraic", "and", "algebraic", "only" ]
python
train
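A small sketch of split_dae_alg with CasADi symbols; only the function above and casadi are needed.

import casadi as ca

x = ca.SX.sym('x')
dx = ca.SX.sym('dx')
y = ca.SX.sym('y')

eqs = ca.vertcat(dx - y,       # depends on dx -> sorted into 'dae'
                 x + y - 1.0)  # no dx         -> sorted into 'alg'

parts = split_dae_alg(eqs, dx)
print(parts['dae'], parts['alg'])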
chardet/chardet
convert_language_model.py
https://github.com/chardet/chardet/blob/b5194bf8250b7d180ac4edff51e09cab9d99febe/convert_language_model.py#L57-L73
def convert_sbcs_model(old_model, alphabet): """Create a SingleByteCharSetModel object representing the charset.""" # Setup tables necessary for computing transition frequencies for model char_to_order = {i: order for i, order in enumerate(old_model['char_to_order_map'])} pos_ratio = old_model['typical_positive_ratio'] keep_ascii_letters = old_model['keep_english_letter'] curr_model = SingleByteCharSetModel(charset_name=old_model['charset_name'], language=old_model['language'], char_to_order_map=char_to_order, # language_model is filled in later language_model=None, typical_positive_ratio=pos_ratio, keep_ascii_letters=keep_ascii_letters, alphabet=alphabet) return curr_model
[ "def", "convert_sbcs_model", "(", "old_model", ",", "alphabet", ")", ":", "# Setup tables necessary for computing transition frequencies for model", "char_to_order", "=", "{", "i", ":", "order", "for", "i", ",", "order", "in", "enumerate", "(", "old_model", "[", "'char_to_order_map'", "]", ")", "}", "pos_ratio", "=", "old_model", "[", "'typical_positive_ratio'", "]", "keep_ascii_letters", "=", "old_model", "[", "'keep_english_letter'", "]", "curr_model", "=", "SingleByteCharSetModel", "(", "charset_name", "=", "old_model", "[", "'charset_name'", "]", ",", "language", "=", "old_model", "[", "'language'", "]", ",", "char_to_order_map", "=", "char_to_order", ",", "# language_model is filled in later", "language_model", "=", "None", ",", "typical_positive_ratio", "=", "pos_ratio", ",", "keep_ascii_letters", "=", "keep_ascii_letters", ",", "alphabet", "=", "alphabet", ")", "return", "curr_model" ]
Create a SingleByteCharSetModel object representing the charset.
[ "Create", "a", "SingleByteCharSetModel", "object", "representing", "the", "charset", "." ]
python
train
cloudera/cm_api
python/examples/aws.py
https://github.com/cloudera/cm_api/blob/5d2512375bd94684b4da36df9e0d9177865ffcbb/python/examples/aws.py#L150-L159
def get_login_credentials(args): """ Gets the login credentials from the user, if not specified while invoking the script. @param args: arguments provided to the script. """ if not args.username: args.username = raw_input("Enter Username: ") if not args.password: args.password = getpass.getpass("Enter Password: ")
[ "def", "get_login_credentials", "(", "args", ")", ":", "if", "not", "args", ".", "username", ":", "args", ".", "username", "=", "raw_input", "(", "\"Enter Username: \"", ")", "if", "not", "args", ".", "password", ":", "args", ".", "password", "=", "getpass", ".", "getpass", "(", "\"Enter Password: \"", ")" ]
Gets the login credentials from the user, if not specified while invoking the script. @param args: arguments provided to the script.
[ "Gets", "the", "login", "credentials", "from", "the", "user", "if", "not", "specified", "while", "invoking", "the", "script", "." ]
python
train
InspectorMustache/base16-builder-python
pybase16_builder/cli.py
https://github.com/InspectorMustache/base16-builder-python/blob/586f1f87ee9f70696ab19c542af6ef55c6548a2e/pybase16_builder/cli.py#L6-L20
def build_mode(arg_namespace): """Check command line arguments and run build function.""" custom_temps = arg_namespace.template or [] temp_paths = [rel_to_cwd('templates', temp) for temp in custom_temps] try: builder.build(templates=temp_paths, schemes=arg_namespace.scheme, base_output_dir=arg_namespace.output) except (LookupError, PermissionError) as exception: if isinstance(exception, LookupError): print('Necessary resources for building not found in current ' 'working directory.') if isinstance(exception, PermissionError): print("No write permission for output directory.")
[ "def", "build_mode", "(", "arg_namespace", ")", ":", "custom_temps", "=", "arg_namespace", ".", "template", "or", "[", "]", "temp_paths", "=", "[", "rel_to_cwd", "(", "'templates'", ",", "temp", ")", "for", "temp", "in", "custom_temps", "]", "try", ":", "builder", ".", "build", "(", "templates", "=", "temp_paths", ",", "schemes", "=", "arg_namespace", ".", "scheme", ",", "base_output_dir", "=", "arg_namespace", ".", "output", ")", "except", "(", "LookupError", ",", "PermissionError", ")", "as", "exception", ":", "if", "isinstance", "(", "exception", ",", "LookupError", ")", ":", "print", "(", "'Necessary resources for building not found in current '", "'working directory.'", ")", "if", "isinstance", "(", "exception", ",", "PermissionError", ")", ":", "print", "(", "\"No write permission for output directory.\"", ")" ]
Check command line arguments and run build function.
[ "Check", "command", "line", "arguments", "and", "run", "build", "function", "." ]
python
train
fronzbot/blinkpy
blinkpy/api.py
https://github.com/fronzbot/blinkpy/blob/bfdc1e47bdd84903f1aca653605846f3c99bcfac/blinkpy/api.py#L68-L76
def request_system_arm(blink, network): """ Arm system. :param blink: Blink instance. :param network: Sync module network id. """ url = "{}/network/{}/arm".format(blink.urls.base_url, network) return http_post(blink, url)
[ "def", "request_system_arm", "(", "blink", ",", "network", ")", ":", "url", "=", "\"{}/network/{}/arm\"", ".", "format", "(", "blink", ".", "urls", ".", "base_url", ",", "network", ")", "return", "http_post", "(", "blink", ",", "url", ")" ]
Arm system. :param blink: Blink instance. :param network: Sync module network id.
[ "Arm", "system", "." ]
python
train
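A usage sketch; blink is assumed to be an already-authenticated blinkpy Blink instance, and the network id is hypothetical.

from blinkpy import api

NETWORK_ID = '12345'  # hypothetical sync module network id
response = api.request_system_arm(blink, NETWORK_ID)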
CI-WATER/mapkit
mapkit/ColorRampGenerator.py
https://github.com/CI-WATER/mapkit/blob/ce5fbded6af7adabdf1eec85631c6811ef8ecc34/mapkit/ColorRampGenerator.py#L197-L228
def generateDefaultColorRamp(cls, colorRampEnum): """ Returns the color ramp as a list of RGB tuples :param colorRampEnum: One of the """ hue = [(255, 0, 255), (231, 0, 255), (208, 0, 255), (185, 0, 255), (162, 0, 255), (139, 0, 255), (115, 0, 255), (92, 0, 255), (69, 0, 255), (46, 0, 255), (23, 0, 255), # magenta to blue (0, 0, 255), (0, 23, 255), (0, 46, 255), (0, 69, 255), (0, 92, 255), (0, 115, 255), (0, 139, 255), (0, 162, 255), (0, 185, 255), (0, 208, 255), (0, 231, 255), # blue to cyan (0, 255, 255), (0, 255, 231), (0, 255, 208), (0, 255, 185), (0, 255, 162), (0, 255, 139), (0, 255, 115), (0, 255, 92), (0, 255, 69), (0, 255, 46), (0, 255, 23), # cyan to green (0, 255, 0), (23, 255, 0), (46, 255, 0), (69, 255, 0), (92, 255, 0), (115, 255, 0), (139, 255, 0), (162, 255, 0), (185, 255, 0), (208, 255, 0), (231, 255, 0), # green to yellow (255, 255, 0), (255, 243, 0), (255, 231, 0), (255, 220, 0), (255, 208, 0), (255, 197, 0), (255, 185, 0), (255, 174, 0), (255, 162, 0), (255, 151, 0), (255, 139, 0), # yellow to orange (255, 128, 0), (255, 116, 0), (255, 104, 0), (255, 93, 0), (255, 81, 0), (255, 69, 0), (255, 58, 0), (255, 46, 0), (255, 34, 0), (255, 23, 0), (255, 11, 0), # orange to red (255, 0, 0)] # red terrain = [(0, 100, 0), (19, 107, 0), (38, 114, 0), (57, 121, 0), (76, 129, 0), (95, 136, 0), (114, 143, 0), (133, 150, 0), (152, 158, 0), (171, 165, 0), (190, 172, 0), # dark green to golden rod yellow (210, 180, 0), (210, 167, 5), (210, 155, 10), (210, 142, 15), (210, 130, 20), (210, 117, 25), # golden rod yellow to orange brown (210, 105, 30), (188, 94, 25), (166, 83, 21), (145, 72, 17), (123, 61, 13), (101, 50, 9), # orange brown to dark brown (80, 40, 5), (95, 59, 27), (111, 79, 50), (127, 98, 73), (143, 118, 95), (159, 137, 118),(175, 157, 141), (191, 176, 164), (207, 196, 186), (223, 215, 209), (239, 235, 232), # dark brown to white (255, 255, 255)] # white aqua = [(150, 255, 255), (136, 240, 250), (122, 226, 245), (109, 212, 240), (95, 198, 235), (81, 184, 230), (68, 170, 225), (54, 156, 220), (40, 142, 215), (27, 128, 210), (13, 114, 205), # aqua to blue (0, 100, 200), (0, 94, 195), (0, 89, 191), (0, 83, 187), (0, 78, 182), (0, 72, 178), (0, 67, 174), (0, 61, 170), (0, 56, 165), (0, 50, 161), (0, 45, 157), (0, 40, 153), # blue to navy blue (0, 36, 143), (0, 32, 134), (0, 29, 125), (0, 25, 115), (0, 21, 106), (0, 18, 97), (0, 14, 88), (0, 10, 78), (0, 7, 69), (0, 3, 60), (0, 0, 51)] # navy blue to dark navy blue if (colorRampEnum == ColorRampEnum.COLOR_RAMP_HUE): return hue elif (colorRampEnum == ColorRampEnum.COLOR_RAMP_TERRAIN): return terrain elif (colorRampEnum == ColorRampEnum.COLOR_RAMP_AQUA): return aqua
[ "def", "generateDefaultColorRamp", "(", "cls", ",", "colorRampEnum", ")", ":", "hue", "=", "[", "(", "255", ",", "0", ",", "255", ")", ",", "(", "231", ",", "0", ",", "255", ")", ",", "(", "208", ",", "0", ",", "255", ")", ",", "(", "185", ",", "0", ",", "255", ")", ",", "(", "162", ",", "0", ",", "255", ")", ",", "(", "139", ",", "0", ",", "255", ")", ",", "(", "115", ",", "0", ",", "255", ")", ",", "(", "92", ",", "0", ",", "255", ")", ",", "(", "69", ",", "0", ",", "255", ")", ",", "(", "46", ",", "0", ",", "255", ")", ",", "(", "23", ",", "0", ",", "255", ")", ",", "# magenta to blue", "(", "0", ",", "0", ",", "255", ")", ",", "(", "0", ",", "23", ",", "255", ")", ",", "(", "0", ",", "46", ",", "255", ")", ",", "(", "0", ",", "69", ",", "255", ")", ",", "(", "0", ",", "92", ",", "255", ")", ",", "(", "0", ",", "115", ",", "255", ")", ",", "(", "0", ",", "139", ",", "255", ")", ",", "(", "0", ",", "162", ",", "255", ")", ",", "(", "0", ",", "185", ",", "255", ")", ",", "(", "0", ",", "208", ",", "255", ")", ",", "(", "0", ",", "231", ",", "255", ")", ",", "# blue to cyan", "(", "0", ",", "255", ",", "255", ")", ",", "(", "0", ",", "255", ",", "231", ")", ",", "(", "0", ",", "255", ",", "208", ")", ",", "(", "0", ",", "255", ",", "185", ")", ",", "(", "0", ",", "255", ",", "162", ")", ",", "(", "0", ",", "255", ",", "139", ")", ",", "(", "0", ",", "255", ",", "115", ")", ",", "(", "0", ",", "255", ",", "92", ")", ",", "(", "0", ",", "255", ",", "69", ")", ",", "(", "0", ",", "255", ",", "46", ")", ",", "(", "0", ",", "255", ",", "23", ")", ",", "# cyan to green", "(", "0", ",", "255", ",", "0", ")", ",", "(", "23", ",", "255", ",", "0", ")", ",", "(", "46", ",", "255", ",", "0", ")", ",", "(", "69", ",", "255", ",", "0", ")", ",", "(", "92", ",", "255", ",", "0", ")", ",", "(", "115", ",", "255", ",", "0", ")", ",", "(", "139", ",", "255", ",", "0", ")", ",", "(", "162", ",", "255", ",", "0", ")", ",", "(", "185", ",", "255", ",", "0", ")", ",", "(", "208", ",", "255", ",", "0", ")", ",", "(", "231", ",", "255", ",", "0", ")", ",", "# green to yellow", "(", "255", ",", "255", ",", "0", ")", ",", "(", "255", ",", "243", ",", "0", ")", ",", "(", "255", ",", "231", ",", "0", ")", ",", "(", "255", ",", "220", ",", "0", ")", ",", "(", "255", ",", "208", ",", "0", ")", ",", "(", "255", ",", "197", ",", "0", ")", ",", "(", "255", ",", "185", ",", "0", ")", ",", "(", "255", ",", "174", ",", "0", ")", ",", "(", "255", ",", "162", ",", "0", ")", ",", "(", "255", ",", "151", ",", "0", ")", ",", "(", "255", ",", "139", ",", "0", ")", ",", "# yellow to orange", "(", "255", ",", "128", ",", "0", ")", ",", "(", "255", ",", "116", ",", "0", ")", ",", "(", "255", ",", "104", ",", "0", ")", ",", "(", "255", ",", "93", ",", "0", ")", ",", "(", "255", ",", "81", ",", "0", ")", ",", "(", "255", ",", "69", ",", "0", ")", ",", "(", "255", ",", "58", ",", "0", ")", ",", "(", "255", ",", "46", ",", "0", ")", ",", "(", "255", ",", "34", ",", "0", ")", ",", "(", "255", ",", "23", ",", "0", ")", ",", "(", "255", ",", "11", ",", "0", ")", ",", "# orange to red", "(", "255", ",", "0", ",", "0", ")", "]", "# red", "terrain", "=", "[", "(", "0", ",", "100", ",", "0", ")", ",", "(", "19", ",", "107", ",", "0", ")", ",", "(", "38", ",", "114", ",", "0", ")", ",", "(", "57", ",", "121", ",", "0", ")", ",", "(", "76", ",", "129", ",", "0", ")", ",", "(", "95", ",", "136", ",", "0", ")", ",", "(", "114", ",", "143", ",", "0", ")", ",", "(", "133", ",", "150", ",", "0", ")", ",", "(", "152", ",", "158", ",", "0", ")", ",", 
"(", "171", ",", "165", ",", "0", ")", ",", "(", "190", ",", "172", ",", "0", ")", ",", "# dark green to golden rod yellow", "(", "210", ",", "180", ",", "0", ")", ",", "(", "210", ",", "167", ",", "5", ")", ",", "(", "210", ",", "155", ",", "10", ")", ",", "(", "210", ",", "142", ",", "15", ")", ",", "(", "210", ",", "130", ",", "20", ")", ",", "(", "210", ",", "117", ",", "25", ")", ",", "# golden rod yellow to orange brown", "(", "210", ",", "105", ",", "30", ")", ",", "(", "188", ",", "94", ",", "25", ")", ",", "(", "166", ",", "83", ",", "21", ")", ",", "(", "145", ",", "72", ",", "17", ")", ",", "(", "123", ",", "61", ",", "13", ")", ",", "(", "101", ",", "50", ",", "9", ")", ",", "# orange brown to dark brown", "(", "80", ",", "40", ",", "5", ")", ",", "(", "95", ",", "59", ",", "27", ")", ",", "(", "111", ",", "79", ",", "50", ")", ",", "(", "127", ",", "98", ",", "73", ")", ",", "(", "143", ",", "118", ",", "95", ")", ",", "(", "159", ",", "137", ",", "118", ")", ",", "(", "175", ",", "157", ",", "141", ")", ",", "(", "191", ",", "176", ",", "164", ")", ",", "(", "207", ",", "196", ",", "186", ")", ",", "(", "223", ",", "215", ",", "209", ")", ",", "(", "239", ",", "235", ",", "232", ")", ",", "# dark brown to white", "(", "255", ",", "255", ",", "255", ")", "]", "# white", "aqua", "=", "[", "(", "150", ",", "255", ",", "255", ")", ",", "(", "136", ",", "240", ",", "250", ")", ",", "(", "122", ",", "226", ",", "245", ")", ",", "(", "109", ",", "212", ",", "240", ")", ",", "(", "95", ",", "198", ",", "235", ")", ",", "(", "81", ",", "184", ",", "230", ")", ",", "(", "68", ",", "170", ",", "225", ")", ",", "(", "54", ",", "156", ",", "220", ")", ",", "(", "40", ",", "142", ",", "215", ")", ",", "(", "27", ",", "128", ",", "210", ")", ",", "(", "13", ",", "114", ",", "205", ")", ",", "# aqua to blue", "(", "0", ",", "100", ",", "200", ")", ",", "(", "0", ",", "94", ",", "195", ")", ",", "(", "0", ",", "89", ",", "191", ")", ",", "(", "0", ",", "83", ",", "187", ")", ",", "(", "0", ",", "78", ",", "182", ")", ",", "(", "0", ",", "72", ",", "178", ")", ",", "(", "0", ",", "67", ",", "174", ")", ",", "(", "0", ",", "61", ",", "170", ")", ",", "(", "0", ",", "56", ",", "165", ")", ",", "(", "0", ",", "50", ",", "161", ")", ",", "(", "0", ",", "45", ",", "157", ")", ",", "(", "0", ",", "40", ",", "153", ")", ",", "# blue to navy blue", "(", "0", ",", "36", ",", "143", ")", ",", "(", "0", ",", "32", ",", "134", ")", ",", "(", "0", ",", "29", ",", "125", ")", ",", "(", "0", ",", "25", ",", "115", ")", ",", "(", "0", ",", "21", ",", "106", ")", ",", "(", "0", ",", "18", ",", "97", ")", ",", "(", "0", ",", "14", ",", "88", ")", ",", "(", "0", ",", "10", ",", "78", ")", ",", "(", "0", ",", "7", ",", "69", ")", ",", "(", "0", ",", "3", ",", "60", ")", ",", "(", "0", ",", "0", ",", "51", ")", "]", "# navy blue to dark navy blue", "if", "(", "colorRampEnum", "==", "ColorRampEnum", ".", "COLOR_RAMP_HUE", ")", ":", "return", "hue", "elif", "(", "colorRampEnum", "==", "ColorRampEnum", ".", "COLOR_RAMP_TERRAIN", ")", ":", "return", "terrain", "elif", "(", "colorRampEnum", "==", "ColorRampEnum", ".", "COLOR_RAMP_AQUA", ")", ":", "return", "aqua" ]
Returns the color ramp as a list of RGB tuples :param colorRampEnum: One of the
[ "Returns", "the", "color", "ramp", "as", "a", "list", "of", "RGB", "tuples", ":", "param", "colorRampEnum", ":", "One", "of", "the" ]
python
train
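A usage sketch; the import path mirrors this module, and ColorRampEnum is the enum the method already references.

from mapkit.ColorRampGenerator import ColorRampGenerator, ColorRampEnum

ramp = ColorRampGenerator.generateDefaultColorRamp(ColorRampEnum.COLOR_RAMP_AQUA)
print(len(ramp), ramp[0])  # a list of (R, G, B) tuples, starting at (150, 255, 255)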
pantsbuild/pants
src/python/pants/goal/products.py
https://github.com/pantsbuild/pants/blob/b72e650da0df685824ffdcc71988b8c282d0962d/src/python/pants/goal/products.py#L438-L450
def get_data(self, typename, init_func=None): """Returns a data product. :API: public If the product isn't found, returns None, unless init_func is set, in which case the product's value is set to the return value of init_func(), and returned. """ if typename not in self.data_products: if not init_func: return None self.data_products[typename] = init_func() return self.data_products.get(typename)
[ "def", "get_data", "(", "self", ",", "typename", ",", "init_func", "=", "None", ")", ":", "if", "typename", "not", "in", "self", ".", "data_products", ":", "if", "not", "init_func", ":", "return", "None", "self", ".", "data_products", "[", "typename", "]", "=", "init_func", "(", ")", "return", "self", ".", "data_products", ".", "get", "(", "typename", ")" ]
Returns a data product. :API: public If the product isn't found, returns None, unless init_func is set, in which case the product's value is set to the return value of init_func(), and returned.
[ "Returns", "a", "data", "product", "." ]
python
train
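A sketch of the init_func idiom; products is assumed to be a pants Products instance, and the product name and entry are hypothetical.

# The first call creates the product via init_func; later calls return the same object.
classpath = products.get_data('compile_classpath', init_func=list)
classpath.append(('default', '/tmp/example.jar'))  # hypothetical entry
assert products.get_data('compile_classpath') is classpath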
dahlia/sqlalchemy-imageattach
sqlalchemy_imageattach/store.py
https://github.com/dahlia/sqlalchemy-imageattach/blob/b4bafa73f3bb576ecf67ed7b40b702704a0fbdc8/sqlalchemy_imageattach/store.py#L167-L179
def delete(self, image): """Delete the file of the given ``image``. :param image: the image to delete :type image: :class:`sqlalchemy_imageattach.entity.Image` """ from .entity import Image if not isinstance(image, Image): raise TypeError('image must be a sqlalchemy_imageattach.entity.' 'Image instance, not ' + repr(image)) self.delete_file(image.object_type, image.object_id, image.width, image.height, image.mimetype)
[ "def", "delete", "(", "self", ",", "image", ")", ":", "from", ".", "entity", "import", "Image", "if", "not", "isinstance", "(", "image", ",", "Image", ")", ":", "raise", "TypeError", "(", "'image must be a sqlalchemy_imageattach.entity.'", "'Image instance, not '", "+", "repr", "(", "image", ")", ")", "self", ".", "delete_file", "(", "image", ".", "object_type", ",", "image", ".", "object_id", ",", "image", ".", "width", ",", "image", ".", "height", ",", "image", ".", "mimetype", ")" ]
Delete the file of the given ``image``. :param image: the image to delete :type image: :class:`sqlalchemy_imageattach.entity.Image`
[ "Delete", "the", "file", "of", "the", "given", "image", "." ]
python
train
offu/WeRoBot
werobot/parser.py
https://github.com/offu/WeRoBot/blob/fd42109105b03f9acf45ebd9dcabb9d5cff98f3c/werobot/parser.py#L20-L34
def process_message(message): """ Process a message dict and return a Message Object :param message: Message dict returned by `parse_xml` function :return: Message Object """ message["type"] = message.pop("MsgType").lower() if message["type"] == 'event': message["type"] = str(message.pop("Event")).lower() + '_event' message_type = EventMetaClass.TYPES.get(message["type"], UnknownEvent) else: message_type = MessageMetaClass.TYPES.get( message["type"], UnknownMessage ) return message_type(message)
[ "def", "process_message", "(", "message", ")", ":", "message", "[", "\"type\"", "]", "=", "message", ".", "pop", "(", "\"MsgType\"", ")", ".", "lower", "(", ")", "if", "message", "[", "\"type\"", "]", "==", "'event'", ":", "message", "[", "\"type\"", "]", "=", "str", "(", "message", ".", "pop", "(", "\"Event\"", ")", ")", ".", "lower", "(", ")", "+", "'_event'", "message_type", "=", "EventMetaClass", ".", "TYPES", ".", "get", "(", "message", "[", "\"type\"", "]", ",", "UnknownEvent", ")", "else", ":", "message_type", "=", "MessageMetaClass", ".", "TYPES", ".", "get", "(", "message", "[", "\"type\"", "]", ",", "UnknownMessage", ")", "return", "message_type", "(", "message", ")" ]
Process a message dict and return a Message Object :param message: Message dict returned by `parse_xml` function :return: Message Object
[ "Process", "a", "message", "dict", "and", "return", "a", "Message", "Object", ":", "param", "message", ":", "Message", "dict", "returned", "by", "parse_xml", "function", ":", "return", ":", "Message", "Object" ]
python
train
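A behavior sketch, assuming process_message is imported from werobot.parser; the dict mimics what parse_xml returns, with hypothetical values.

raw = {
    'MsgType': 'text',
    'Content': 'hello',
    'FromUserName': 'user_openid',
    'ToUserName': 'bot_account',
    'CreateTime': '1500000000',
}
msg = process_message(raw)
print(msg.type)  # 'text', dispatched via MessageMetaClass.TYPES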
mayfield/shellish
shellish/command/command.py
https://github.com/mayfield/shellish/blob/df0f0e4612d138c34d8cb99b66ab5b8e47f1414a/shellish/command/command.py#L333-L340
def attach_session(self): """ Create a session and inject it as context for this command and any subcommands. """ assert self.session is None root = self.find_root() session = self.Session(root) root.inject_context(session=session) return session
[ "def", "attach_session", "(", "self", ")", ":", "assert", "self", ".", "session", "is", "None", "root", "=", "self", ".", "find_root", "(", ")", "session", "=", "self", ".", "Session", "(", "root", ")", "root", ".", "inject_context", "(", "session", "=", "session", ")", "return", "session" ]
Create a session and inject it as context for this command and any subcommands.
[ "Create", "a", "session", "and", "inject", "it", "as", "context", "for", "this", "command", "and", "any", "subcommands", "." ]
python
train
briancappello/flask-unchained
flask_unchained/click.py
https://github.com/briancappello/flask-unchained/blob/4d536cb90e2cc4829c1c05f2c74d3e22901a1399/flask_unchained/click.py#L430-L464
def option(*param_decls, cls=None, **attrs): """ Options are usually optional values on the command line and have some extra features that arguments don't have. :param param_decls: the parameter declarations for this option or argument. This is a list of flags or argument names. :param show_default: controls if the default value should be shown on the help page. Normally, defaults are not shown. :param prompt: if set to `True` or a non empty string then the user will be prompted for input if not set. If set to `True` the prompt will be the option name capitalized. :param confirmation_prompt: if set then the value will need to be confirmed if it was prompted for. :param hide_input: if this is `True` then the input on the prompt will be hidden from the user. This is useful for password input. :param is_flag: forces this option to act as a flag. The default is auto detection. :param flag_value: which value should be used for this flag if it's enabled. This is set to a boolean automatically if the option string contains a slash to mark two options. :param multiple: if this is set to `True` then the argument is accepted multiple times and recorded. This is similar to ``nargs`` in how it works but supports arbitrary number of arguments. :param count: this flag makes an option increment an integer. :param allow_from_autoenv: if this is enabled then the value of this parameter will be pulled from an environment variable in case a prefix is defined on the context. :param help: the help string. """ return click.option(*param_decls, cls=cls or Option, **attrs)
[ "def", "option", "(", "*", "param_decls", ",", "cls", "=", "None", ",", "*", "*", "attrs", ")", ":", "return", "click", ".", "option", "(", "*", "param_decls", ",", "cls", "=", "cls", "or", "Option", ",", "*", "*", "attrs", ")" ]
Options are usually optional values on the command line and have some extra features that arguments don't have. :param param_decls: the parameter declarations for this option or argument. This is a list of flags or argument names. :param show_default: controls if the default value should be shown on the help page. Normally, defaults are not shown. :param prompt: if set to `True` or a non empty string then the user will be prompted for input if not set. If set to `True` the prompt will be the option name capitalized. :param confirmation_prompt: if set then the value will need to be confirmed if it was prompted for. :param hide_input: if this is `True` then the input on the prompt will be hidden from the user. This is useful for password input. :param is_flag: forces this option to act as a flag. The default is auto detection. :param flag_value: which value should be used for this flag if it's enabled. This is set to a boolean automatically if the option string contains a slash to mark two options. :param multiple: if this is set to `True` then the argument is accepted multiple times and recorded. This is similar to ``nargs`` in how it works but supports arbitrary number of arguments. :param count: this flag makes an option increment an integer. :param allow_from_autoenv: if this is enabled then the value of this parameter will be pulled from an environment variable in case a prefix is defined on the context. :param help: the help string.
[ "Options", "are", "usually", "optional", "values", "on", "the", "command", "line", "and", "have", "some", "extra", "features", "that", "arguments", "don", "t", "have", "." ]
python
train
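Since the wrapper just forwards to click.option with a custom Option class, usage mirrors standard click; a minimal sketch assuming option is importable from flask_unchained.click.

import click
from flask_unchained.click import option

@click.command()
@option('--count', default=1, help='Number of greetings.')
def hello(count):
    for _ in range(count):
        click.echo('Hello!')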
vladsaveliev/TargQC
targqc/utilz/gtf.py
https://github.com/vladsaveliev/TargQC/blob/e887c36b2194dbd73c6ea32989b6cb84c6c0e58d/targqc/utilz/gtf.py#L55-L81
def gtf_to_bed(gtf, alt_out_dir=None): """ create a BED file of transcript-level features with attached gene name or gene ids """ out_file = os.path.splitext(gtf)[0] + '.bed' if file_exists(out_file): return out_file if not os.access(os.path.dirname(out_file), os.W_OK | os.X_OK): if not alt_out_dir: raise IOError('Cannot write transcript BED output file %s' % out_file) else: out_file = os.path.join(alt_out_dir, os.path.basename(out_file)) with open(out_file, "w") as out_handle: db = get_gtf_db(gtf) for feature in db.features_of_type('transcript', order_by=("seqid", "start", "end")): chrom = feature.chrom start = feature.start end = feature.end attributes = feature.attributes.keys() strand = feature.strand name = (feature['gene_name'][0] if 'gene_name' in attributes else feature['gene_id'][0]) line = "\t".join([str(x) for x in [chrom, start, end, name, ".", strand]]) out_handle.write(line + "\n") return out_file
[ "def", "gtf_to_bed", "(", "gtf", ",", "alt_out_dir", "=", "None", ")", ":", "out_file", "=", "os", ".", "path", ".", "splitext", "(", "gtf", ")", "[", "0", "]", "+", "'.bed'", "if", "file_exists", "(", "out_file", ")", ":", "return", "out_file", "if", "not", "os", ".", "access", "(", "os", ".", "path", ".", "dirname", "(", "out_file", ")", ",", "os", ".", "W_OK", "|", "os", ".", "X_OK", ")", ":", "if", "not", "alt_out_dir", ":", "raise", "IOError", "(", "'Cannot write transcript BED output file %s'", "%", "out_file", ")", "else", ":", "out_file", "=", "os", ".", "path", ".", "join", "(", "alt_out_dir", ",", "os", ".", "path", ".", "basename", "(", "out_file", ")", ")", "with", "open", "(", "out_file", ",", "\"w\"", ")", "as", "out_handle", ":", "db", "=", "get_gtf_db", "(", "gtf", ")", "for", "feature", "in", "db", ".", "features_of_type", "(", "'transcript'", ",", "order_by", "=", "(", "\"seqid\"", ",", "\"start\"", ",", "\"end\"", ")", ")", ":", "chrom", "=", "feature", ".", "chrom", "start", "=", "feature", ".", "start", "end", "=", "feature", ".", "end", "attributes", "=", "feature", ".", "attributes", ".", "keys", "(", ")", "strand", "=", "feature", ".", "strand", "name", "=", "(", "feature", "[", "'gene_name'", "]", "[", "0", "]", "if", "'gene_name'", "in", "attributes", "else", "feature", "[", "'gene_id'", "]", "[", "0", "]", ")", "line", "=", "\"\\t\"", ".", "join", "(", "[", "str", "(", "x", ")", "for", "x", "in", "[", "chrom", ",", "start", ",", "end", ",", "name", ",", "\".\"", ",", "strand", "]", "]", ")", "out_handle", ".", "write", "(", "line", "+", "\"\\n\"", ")", "return", "out_file" ]
create a BED file of transcript-level features with attached gene name or gene ids
[ "create", "a", "BED", "file", "of", "transcript", "-", "level", "features", "with", "attached", "gene", "name", "or", "gene", "ids" ]
python
train
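A usage sketch with hypothetical paths; the function writes annotation.bed next to the GTF, falling back to alt_out_dir when the original directory is not writable.

bed_path = gtf_to_bed('/data/annotation.gtf', alt_out_dir='/tmp')
print(bed_path)  # '/data/annotation.bed', or '/tmp/annotation.bed' on fallback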
fboender/ansible-cmdb
lib/mako/codegen.py
https://github.com/fboender/ansible-cmdb/blob/ebd960ac10684e8c9ec2b12751bba2c4c9504ab7/lib/mako/codegen.py#L556-L571
def write_def_decl(self, node, identifiers): """write a locally-available callable referencing a top-level def""" funcname = node.funcname namedecls = node.get_argument_expressions() nameargs = node.get_argument_expressions(as_call=True) if not self.in_def and ( len(self.identifiers.locally_assigned) > 0 or len(self.identifiers.argument_declared) > 0): nameargs.insert(0, 'context._locals(__M_locals)') else: nameargs.insert(0, 'context') self.printer.writeline("def %s(%s):" % (funcname, ",".join(namedecls))) self.printer.writeline( "return render_%s(%s)" % (funcname, ",".join(nameargs))) self.printer.writeline(None)
[ "def", "write_def_decl", "(", "self", ",", "node", ",", "identifiers", ")", ":", "funcname", "=", "node", ".", "funcname", "namedecls", "=", "node", ".", "get_argument_expressions", "(", ")", "nameargs", "=", "node", ".", "get_argument_expressions", "(", "as_call", "=", "True", ")", "if", "not", "self", ".", "in_def", "and", "(", "len", "(", "self", ".", "identifiers", ".", "locally_assigned", ")", ">", "0", "or", "len", "(", "self", ".", "identifiers", ".", "argument_declared", ")", ">", "0", ")", ":", "nameargs", ".", "insert", "(", "0", ",", "'context._locals(__M_locals)'", ")", "else", ":", "nameargs", ".", "insert", "(", "0", ",", "'context'", ")", "self", ".", "printer", ".", "writeline", "(", "\"def %s(%s):\"", "%", "(", "funcname", ",", "\",\"", ".", "join", "(", "namedecls", ")", ")", ")", "self", ".", "printer", ".", "writeline", "(", "\"return render_%s(%s)\"", "%", "(", "funcname", ",", "\",\"", ".", "join", "(", "nameargs", ")", ")", ")", "self", ".", "printer", ".", "writeline", "(", "None", ")" ]
write a locally-available callable referencing a top-level def
[ "write", "a", "locally", "-", "available", "callable", "referencing", "a", "top", "-", "level", "def" ]
python
train
tanghaibao/jcvi
jcvi/apps/phylo.py
https://github.com/tanghaibao/jcvi/blob/d2e31a77b6ade7f41f3b321febc2b4744d1cdeca/jcvi/apps/phylo.py#L177-L190
def run_ffitch(distfile, outtreefile, intreefile=None, **kwargs): """ Infer tree branch lengths using ffitch in EMBOSS PHYLIP """ cl = FfitchCommandline(datafile=distfile, outtreefile=outtreefile, \ intreefile=intreefile, **kwargs) r, e = cl.run() if e: print("***ffitch could not run", file=sys.stderr) return None else: print("ffitch:", cl, file=sys.stderr) return outtreefile
[ "def", "run_ffitch", "(", "distfile", ",", "outtreefile", ",", "intreefile", "=", "None", ",", "*", "*", "kwargs", ")", ":", "cl", "=", "FfitchCommandline", "(", "datafile", "=", "distfile", ",", "outtreefile", "=", "outtreefile", ",", "intreefile", "=", "intreefile", ",", "*", "*", "kwargs", ")", "r", ",", "e", "=", "cl", ".", "run", "(", ")", "if", "e", ":", "print", "(", "\"***ffitch could not run\"", ",", "file", "=", "sys", ".", "stderr", ")", "return", "None", "else", ":", "print", "(", "\"ffitch:\"", ",", "cl", ",", "file", "=", "sys", ".", "stderr", ")", "return", "outtreefile" ]
Infer tree branch lengths using ffitch in EMBOSS PHYLIP
[ "Infer", "tree", "branch", "lengths", "using", "ffitch", "in", "EMBOSS", "PHYLIP" ]
python
train
Neurita/boyle
boyle/utils/strings.py
https://github.com/Neurita/boyle/blob/2dae7199849395a209c887d5f30506e1de8a9ad9/boyle/utils/strings.py#L99-L111
def append_to_keys(adict, preffix):
    """
    Prepend `preffix` to every key of `adict`.

    Parameters
    ----------
    adict: dict

    preffix: str
        String prepended to each key of `adict`.

    Returns
    -------
    dict
        A copy of `adict` whose keys carry the prefix; values are kept
        unchanged (note both branches of the conditional return `value`,
        so nested dicts pass through as-is).
    """
    return {preffix + str(key): (value if isinstance(value, dict) else value)
            for key, value in list(adict.items())}
[ "def", "append_to_keys", "(", "adict", ",", "preffix", ")", ":", "return", "{", "preffix", "+", "str", "(", "key", ")", ":", "(", "value", "if", "isinstance", "(", "value", ",", "dict", ")", "else", "value", ")", "for", "key", ",", "value", "in", "list", "(", "adict", ".", "items", "(", ")", ")", "}" ]
Parameters ---------- adict: preffix: Returns -------
[ "Parameters", "----------", "adict", ":", "preffix", ":" ]
python
valid
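A quick usage example, following directly from the comprehension above:

settings = {'host': 'localhost', 'port': 5432}
print(append_to_keys(settings, 'db_'))
# {'db_host': 'localhost', 'db_port': 5432}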
baguette-io/baguette-messaging
farine/stream/sse.py
https://github.com/baguette-io/baguette-messaging/blob/8d1c4707ea7eace8617fed2d97df2fcc9d0cdee1/farine/stream/sse.py#L40-L63
def run(self, limit=None, timeout=None): """ Consume the event stream. :param timeout: Duration of the connection timeout. :type timeout: int :param limit: Number of events to consume. :type limit: int :rtype: None """ counter = 0 self.stream = sseclient.SSEClient(self.endpoint) while True: with utils.Timeout(timeout): try: event = next(self.stream) except StopIteration: continue else: if not event.data: continue self.main_callback(event.data) counter += 1 if limit and counter >= limit: return
[ "def", "run", "(", "self", ",", "limit", "=", "None", ",", "timeout", "=", "None", ")", ":", "counter", "=", "0", "self", ".", "stream", "=", "sseclient", ".", "SSEClient", "(", "self", ".", "endpoint", ")", "while", "True", ":", "with", "utils", ".", "Timeout", "(", "timeout", ")", ":", "try", ":", "event", "=", "next", "(", "self", ".", "stream", ")", "except", "StopIteration", ":", "continue", "else", ":", "if", "not", "event", ".", "data", ":", "continue", "self", ".", "main_callback", "(", "event", ".", "data", ")", "counter", "+=", "1", "if", "limit", "and", "counter", ">=", "limit", ":", "return" ]
Consume the event stream. :param timeout: Duration of the connection timeout. :type timeout: int :param limit: Number of events to consume. :type limit: int :rtype: None
[ "Consume", "the", "event", "stream", ".", ":", "param", "timeout", ":", "Duration", "of", "the", "connection", "timeout", ".", ":", "type", "timeout", ":", "int", ":", "param", "limit", ":", "Number", "of", "events", "to", "consume", ".", ":", "type", "limit", ":", "int", ":", "rtype", ":", "None" ]
python
train
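The loop above builds on the sseclient package; one iteration of its core, outside the class, looks roughly like this (the endpoint URL is hypothetical):

import sseclient

stream = sseclient.SSEClient("https://example.com/events")
for event in stream:
    if not event.data:
        continue          # skip keep-alive events, as run() does
    print("got:", event.data)
    break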
inveniosoftware/invenio-pidstore
invenio_pidstore/ext.py
https://github.com/inveniosoftware/invenio-pidstore/blob/8bf35f4e62d5dcaf1a2cfe5803245ba5220a9b78/invenio_pidstore/ext.py#L49-L56
def register_minter(self, name, minter): """Register a minter. :param name: Minter name. :param minter: The new minter. """ assert name not in self.minters self.minters[name] = minter
[ "def", "register_minter", "(", "self", ",", "name", ",", "minter", ")", ":", "assert", "name", "not", "in", "self", ".", "minters", "self", ".", "minters", "[", "name", "]", "=", "minter" ]
Register a minter. :param name: Minter name. :param minter: The new minter.
[ "Register", "a", "minter", "." ]
python
train
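A hedged usage sketch (the minter body is a toy; real Invenio minters create a PersistentIdentifier for the record, and current_pidstore assumes the extension is initialized on the Flask app):

from invenio_pidstore.proxies import current_pidstore

def recid_minter(record_uuid, data):
    data.setdefault('recid', str(record_uuid))  # illustrative only

current_pidstore.register_minter('recid', recid_minter)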
mayfield/cellulario
cellulario/tier.py
https://github.com/mayfield/cellulario/blob/e9dc10532a0357bc90ebaa2655b36822f9249673/cellulario/tier.py#L144-L148
def coord_wrap(self, *args): """ Wrap the coroutine with coordination throttles. """ yield from self.cell.coord.start(self) yield from self.coro(*args) yield from self.cell.coord.finish(self)
[ "def", "coord_wrap", "(", "self", ",", "*", "args", ")", ":", "yield", "from", "self", ".", "cell", ".", "coord", ".", "start", "(", "self", ")", "yield", "from", "self", ".", "coro", "(", "*", "args", ")", "yield", "from", "self", ".", "cell", ".", "coord", ".", "finish", "(", "self", ")" ]
Wrap the coroutine with coordination throttles.
[ "Wrap", "the", "coroutine", "with", "coordination", "throttles", "." ]
python
train
psd-tools/psd-tools
src/psd_tools/psd/descriptor.py
https://github.com/psd-tools/psd-tools/blob/4952b57bcf1cf2c1f16fd9d6d51d4fa0b53bce4e/src/psd_tools/psd/descriptor.py#L60-L66
def write_length_and_key(fp, value): """ Helper to write descriptor key. """ written = write_fmt(fp, 'I', 0 if value in _TERMS else len(value)) written += write_bytes(fp, value) return written
[ "def", "write_length_and_key", "(", "fp", ",", "value", ")", ":", "written", "=", "write_fmt", "(", "fp", ",", "'I'", ",", "0", "if", "value", "in", "_TERMS", "else", "len", "(", "value", ")", ")", "written", "+=", "write_bytes", "(", "fp", ",", "value", ")", "return", "written" ]
Helper to write descriptor key.
[ "Helper", "to", "write", "descriptor", "key", "." ]
python
train
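The convention implemented here: a well-known 4-byte key listed in _TERMS is written with a length field of 0. A self-contained sketch of the same idea (the _TERMS subset and the big-endian 'I' packing are assumptions about write_fmt, not verbatim psd-tools internals):

import io
import struct

_TERMS = {b'Objc', b'TEXT'}  # illustrative subset of the real table

def write_length_and_key_sketch(fp, value):
    # Length 0 signals a well-known key; others carry their real length.
    length = 0 if value in _TERMS else len(value)
    fp.write(struct.pack('>I', length))
    fp.write(value)
    return 4 + len(value)

buf = io.BytesIO()
write_length_and_key_sketch(buf, b'Objc')        # -> b'\x00\x00\x00\x00Objc'
write_length_and_key_sketch(buf, b'myCustomKey')
print(buf.getvalue())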
cpburnz/python-path-specification
pathspec/patterns/gitwildmatch.py
https://github.com/cpburnz/python-path-specification/blob/6fc7567a58cb68ec7d72cc287e7fb97dbe22c017/pathspec/patterns/gitwildmatch.py#L177-L280
def _translate_segment_glob(pattern):
	"""
	Translates the glob pattern to a regular expression. This is used in
	the constructor to translate a path segment glob pattern to its
	corresponding regular expression.

	*pattern* (:class:`str`) is the glob pattern.

	Returns the regular expression (:class:`str`).
	"""
	# NOTE: This is derived from `fnmatch.translate()` and is similar to
	# the POSIX function `fnmatch()` with the `FNM_PATHNAME` flag set.

	escape = False
	regex = ''
	i, end = 0, len(pattern)
	while i < end:
		# Get next character.
		char = pattern[i]
		i += 1

		if escape:
			# Escape the character.
			escape = False
			regex += re.escape(char)

		elif char == '\\':
			# Escape character, escape next character.
			escape = True

		elif char == '*':
			# Multi-character wildcard. Match any string (except slashes),
			# including an empty string.
			regex += '[^/]*'

		elif char == '?':
			# Single-character wildcard. Match any single character (except
			# a slash).
			regex += '[^/]'

		elif char == '[':
			# Bracket expression wildcard. Except for the beginning
			# exclamation mark, the whole bracket expression can be used
			# directly as regex, but we have to find where the expression
			# ends.
			# - "[][!]" matches ']', '[' and '!'.
			# - "[]-]" matches ']' and '-'.
			# - "[!]a-]" matches any character except ']', 'a' and '-'.
			j = i
			# Pass bracket expression negation.
			if j < end and pattern[j] == '!':
				j += 1
			# Pass first closing bracket if it is at the beginning of the
			# expression.
			if j < end and pattern[j] == ']':
				j += 1
			# Find closing bracket. Stop once we reach the end or find it.
			while j < end and pattern[j] != ']':
				j += 1

			if j < end:
				# Found end of bracket expression. Increment j to be one past
				# the closing bracket:
				#
				#  [...]
				#   ^   ^
				#   i   j
				#
				j += 1
				expr = '['

				if pattern[i] == '!':
					# Bracket expression needs to be negated.
					expr += '^'
					i += 1
				elif pattern[i] == '^':
					# POSIX declares that the regex bracket expression negation
					# "[^...]" is undefined in a glob pattern. Python's
					# `fnmatch.translate()` escapes the caret ('^') as a
					# literal. To maintain consistency with undefined behavior,
					# I am escaping the '^' as well.
					expr += '\\^'
					i += 1

				# Build regex bracket expression. Escape slashes so they are
				# treated as literal slashes by regex as defined by POSIX.
				expr += pattern[i:j].replace('\\', '\\\\')

				# Add regex bracket expression to regex result.
				regex += expr

				# Set i to one past the closing bracket.
				i = j

			else:
				# Failed to find closing bracket, treat opening bracket as a
				# bracket literal instead of as an expression.
				regex += '\\['

		else:
			# Regular character, escape it for regex.
			regex += re.escape(char)

	return regex
[ "def", "_translate_segment_glob", "(", "pattern", ")", ":", "# NOTE: This is derived from `fnmatch.translate()` and is similar to", "# the POSIX function `fnmatch()` with the `FNM_PATHNAME` flag set.", "escape", "=", "False", "regex", "=", "''", "i", ",", "end", "=", "0", ",", "len", "(", "pattern", ")", "while", "i", "<", "end", ":", "# Get next character.", "char", "=", "pattern", "[", "i", "]", "i", "+=", "1", "if", "escape", ":", "# Escape the character.", "escape", "=", "False", "regex", "+=", "re", ".", "escape", "(", "char", ")", "elif", "char", "==", "'\\\\'", ":", "# Escape character, escape next character.", "escape", "=", "True", "elif", "char", "==", "'*'", ":", "# Multi-character wildcard. Match any string (except slashes),", "# including an empty string.", "regex", "+=", "'[^/]*'", "elif", "char", "==", "'?'", ":", "# Single-character wildcard. Match any single character (except", "# a slash).", "regex", "+=", "'[^/]'", "elif", "char", "==", "'['", ":", "# Braket expression wildcard. Except for the beginning", "# exclamation mark, the whole braket expression can be used", "# directly as regex but we have to find where the expression", "# ends.", "# - \"[][!]\" matchs ']', '[' and '!'.", "# - \"[]-]\" matchs ']' and '-'.", "# - \"[!]a-]\" matchs any character except ']', 'a' and '-'.", "j", "=", "i", "# Pass brack expression negation.", "if", "j", "<", "end", "and", "pattern", "[", "j", "]", "==", "'!'", ":", "j", "+=", "1", "# Pass first closing braket if it is at the beginning of the", "# expression.", "if", "j", "<", "end", "and", "pattern", "[", "j", "]", "==", "']'", ":", "j", "+=", "1", "# Find closing braket. Stop once we reach the end or find it.", "while", "j", "<", "end", "and", "pattern", "[", "j", "]", "!=", "']'", ":", "j", "+=", "1", "if", "j", "<", "end", ":", "# Found end of braket expression. Increment j to be one past", "# the closing braket:", "#", "# [...]", "# ^ ^", "# i j", "#", "j", "+=", "1", "expr", "=", "'['", "if", "pattern", "[", "i", "]", "==", "'!'", ":", "# Braket expression needs to be negated.", "expr", "+=", "'^'", "i", "+=", "1", "elif", "pattern", "[", "i", "]", "==", "'^'", ":", "# POSIX declares that the regex braket expression negation", "# \"[^...]\" is undefined in a glob pattern. Python's", "# `fnmatch.translate()` escapes the caret ('^') as a", "# literal. To maintain consistency with undefined behavior,", "# I am escaping the '^' as well.", "expr", "+=", "'\\\\^'", "i", "+=", "1", "# Build regex braket expression. Escape slashes so they are", "# treated as literal slashes by regex as defined by POSIX.", "expr", "+=", "pattern", "[", "i", ":", "j", "]", ".", "replace", "(", "'\\\\'", ",", "'\\\\\\\\'", ")", "# Add regex braket expression to regex result.", "regex", "+=", "expr", "# Set i to one past the closing braket.", "i", "=", "j", "else", ":", "# Failed to find closing braket, treat opening braket as a", "# braket literal instead of as an expression.", "regex", "+=", "'\\\\['", "else", ":", "# Regular character, escape it for regex.", "regex", "+=", "re", ".", "escape", "(", "char", ")", "return", "regex" ]
Translates the glob pattern to a regular expression. This is used in the constructor to translate a path segment glob pattern to its corresponding regular expression. *pattern* (:class:`str`) is the glob pattern. Returns the regular expression (:class:`str`).
[ "Translates", "the", "glob", "pattern", "to", "a", "regular", "expression", ".", "This", "is", "used", "in", "the", "constructor", "to", "translate", "a", "path", "segment", "glob", "pattern", "to", "its", "corresponding", "regular", "expression", "." ]
python
train
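For example, translating a segment glob and matching with it ('*' stays within one path segment, and the bracket expression passes through):

import re

regex = _translate_segment_glob('*.py[cod]')
print(regex)                                     # [^/]*\.py[cod]
print(bool(re.fullmatch(regex, 'module.pyc')))   # True
print(bool(re.fullmatch(regex, 'pkg/mod.pyc')))  # False: '*' cannot cross '/'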
MoseleyBioinformaticsLab/mwtab
mwtab/fileio.py
https://github.com/MoseleyBioinformaticsLab/mwtab/blob/8c0ae8ab2aa621662f99589ed41e481cf8b7152b/mwtab/fileio.py#L128-L183
def open(self): """Generator that opens and yields filehandles using appropriate facilities: test if path represents a local file or file over URL, if file is compressed or not. :return: Filehandle to be processed into an instance. """ is_url = self.is_url(self.path) compression_type = self.is_compressed(self.path) if not compression_type: if is_url: filehandle = urlopen(self.path) else: filehandle = open(self.path, "r") source = self.path yield filehandle, source filehandle.close() elif compression_type: if is_url: response = urlopen(self.path) path = response.read() response.close() else: path = self.path if compression_type == "zip": ziparchive = zipfile.ZipFile(io.BytesIO(path), "r") if is_url else zipfile.ZipFile(path) for name in ziparchive.infolist(): if not name.filename.endswith("/"): filehandle = ziparchive.open(name) source = self.path + "/" + name.filename yield filehandle, source filehandle.close() elif compression_type in ("tar", "tar.bz2", "tar.gz"): tararchive = tarfile.open(fileobj=io.BytesIO(path)) if is_url else tarfile.open(path) for name in tararchive: if name.isfile(): filehandle = tararchive.extractfile(name) source = self.path + "/" + name.name yield filehandle, source filehandle.close() elif compression_type == "bz2": filehandle = bz2.BZ2File(io.BytesIO(path)) if is_url else bz2.BZ2File(path) source = self.path yield filehandle, source filehandle.close() elif compression_type == "gz": filehandle = gzip.open(io.BytesIO(path)) if is_url else gzip.open(path) source = self.path yield filehandle, source filehandle.close()
[ "def", "open", "(", "self", ")", ":", "is_url", "=", "self", ".", "is_url", "(", "self", ".", "path", ")", "compression_type", "=", "self", ".", "is_compressed", "(", "self", ".", "path", ")", "if", "not", "compression_type", ":", "if", "is_url", ":", "filehandle", "=", "urlopen", "(", "self", ".", "path", ")", "else", ":", "filehandle", "=", "open", "(", "self", ".", "path", ",", "\"r\"", ")", "source", "=", "self", ".", "path", "yield", "filehandle", ",", "source", "filehandle", ".", "close", "(", ")", "elif", "compression_type", ":", "if", "is_url", ":", "response", "=", "urlopen", "(", "self", ".", "path", ")", "path", "=", "response", ".", "read", "(", ")", "response", ".", "close", "(", ")", "else", ":", "path", "=", "self", ".", "path", "if", "compression_type", "==", "\"zip\"", ":", "ziparchive", "=", "zipfile", ".", "ZipFile", "(", "io", ".", "BytesIO", "(", "path", ")", ",", "\"r\"", ")", "if", "is_url", "else", "zipfile", ".", "ZipFile", "(", "path", ")", "for", "name", "in", "ziparchive", ".", "infolist", "(", ")", ":", "if", "not", "name", ".", "filename", ".", "endswith", "(", "\"/\"", ")", ":", "filehandle", "=", "ziparchive", ".", "open", "(", "name", ")", "source", "=", "self", ".", "path", "+", "\"/\"", "+", "name", ".", "filename", "yield", "filehandle", ",", "source", "filehandle", ".", "close", "(", ")", "elif", "compression_type", "in", "(", "\"tar\"", ",", "\"tar.bz2\"", ",", "\"tar.gz\"", ")", ":", "tararchive", "=", "tarfile", ".", "open", "(", "fileobj", "=", "io", ".", "BytesIO", "(", "path", ")", ")", "if", "is_url", "else", "tarfile", ".", "open", "(", "path", ")", "for", "name", "in", "tararchive", ":", "if", "name", ".", "isfile", "(", ")", ":", "filehandle", "=", "tararchive", ".", "extractfile", "(", "name", ")", "source", "=", "self", ".", "path", "+", "\"/\"", "+", "name", ".", "name", "yield", "filehandle", ",", "source", "filehandle", ".", "close", "(", ")", "elif", "compression_type", "==", "\"bz2\"", ":", "filehandle", "=", "bz2", ".", "BZ2File", "(", "io", ".", "BytesIO", "(", "path", ")", ")", "if", "is_url", "else", "bz2", ".", "BZ2File", "(", "path", ")", "source", "=", "self", ".", "path", "yield", "filehandle", ",", "source", "filehandle", ".", "close", "(", ")", "elif", "compression_type", "==", "\"gz\"", ":", "filehandle", "=", "gzip", ".", "open", "(", "io", ".", "BytesIO", "(", "path", ")", ")", "if", "is_url", "else", "gzip", ".", "open", "(", "path", ")", "source", "=", "self", ".", "path", "yield", "filehandle", ",", "source", "filehandle", ".", "close", "(", ")" ]
Generator that opens and yields filehandles using appropriate facilities: test if path represents a local file or file over URL, if file is compressed or not. :return: Filehandle to be processed into an instance.
[ "Generator", "that", "opens", "and", "yields", "filehandles", "using", "appropriate", "facilities", ":", "test", "if", "path", "represents", "a", "local", "file", "or", "file", "over", "URL", "if", "file", "is", "compressed", "or", "not", "." ]
python
train
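The tar branch of open() in miniature, self-contained with a fabricated in-memory archive (the file name and contents are made up for illustration):

import io
import tarfile

buf = io.BytesIO()
with tarfile.open(fileobj=buf, mode='w:gz') as tar:
    data = b'#METABOLOMICS WORKBENCH\n'
    info = tarfile.TarInfo('study1.txt')
    info.size = len(data)
    tar.addfile(info, io.BytesIO(data))

archive = tarfile.open(fileobj=io.BytesIO(buf.getvalue()))
for member in archive:
    if member.isfile():                     # mirrors `if name.isfile():`
        filehandle = archive.extractfile(member)
        print(member.name, filehandle.read())
        filehandle.close()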
manns/pyspread
pyspread/src/interfaces/xls.py
https://github.com/manns/pyspread/blob/0e2fd44c2e0f06605efc3058c20a43a8c1f9e7e0/pyspread/src/interfaces/xls.py#L820-L835
def _col_widths2xls(self, worksheets): """Writes col_widths to xls file Format: <col>\t<tab>\t<value>\n """ xls_max_cols, xls_max_tabs = self.xls_max_cols, self.xls_max_tabs dict_grid = self.code_array.dict_grid for col, tab in dict_grid.col_widths: if col < xls_max_cols and tab < xls_max_tabs: pys_width = dict_grid.col_widths[(col, tab)] xls_width = self.pys_width2xls_width(pys_width) worksheets[tab].col(col).width = xls_width
[ "def", "_col_widths2xls", "(", "self", ",", "worksheets", ")", ":", "xls_max_cols", ",", "xls_max_tabs", "=", "self", ".", "xls_max_cols", ",", "self", ".", "xls_max_tabs", "dict_grid", "=", "self", ".", "code_array", ".", "dict_grid", "for", "col", ",", "tab", "in", "dict_grid", ".", "col_widths", ":", "if", "col", "<", "xls_max_cols", "and", "tab", "<", "xls_max_tabs", ":", "pys_width", "=", "dict_grid", ".", "col_widths", "[", "(", "col", ",", "tab", ")", "]", "xls_width", "=", "self", ".", "pys_width2xls_width", "(", "pys_width", ")", "worksheets", "[", "tab", "]", ".", "col", "(", "col", ")", ".", "width", "=", "xls_width" ]
Writes col_widths to xls file Format: <col>\t<tab>\t<value>\n
[ "Writes", "col_widths", "to", "xls", "file" ]
python
train
CalebBell/fluids
fluids/fittings.py
https://github.com/CalebBell/fluids/blob/57f556752e039f1d3e5a822f408c184783db2828/fluids/fittings.py#L4408-L4515
def K_branch_diverging_Crane(D_run, D_branch, Q_run, Q_branch, angle=90): r'''Returns the loss coefficient for the branch of a diverging tee or wye according to the Crane method [1]_. .. math:: K_{branch} = G\left[1 + H\left(\frac{Q_{branch}}{Q_{comb} \beta_{branch}^2}\right)^2 - J\left(\frac{Q_{branch}}{Q_{comb} \beta_{branch}^2}\right)\cos\theta\right] .. math:: \beta_{branch} = \frac{D_{branch}}{D_{comb}} See the notes for definitions of H, J, and G. Parameters ---------- D_run : float Diameter of the straight-through inlet portion of the tee or wye [m] D_branch : float Diameter of the pipe attached at an angle to the straight-through, [m] Q_run : float Volumetric flow rate in the straight-through outlet of the tee or wye, [m^3/s] Q_branch : float Volumetric flow rate in the pipe attached at an angle to the straight- through, [m^3/s] angle : float, optional Angle the branch makes with the straight-through (tee=90, wye<90) [degrees] Returns ------- K : float Loss coefficient of branch with respect to the velocity and inside diameter of the combined flow inlet [-] Notes ----- If :math:`\beta_{branch} = 1, \theta = 90^\circ`, H = 0.3 and J = 0. Otherwise H = 1 and J = 2. G is determined according to the following pseudocode: .. code-block:: python if angle < 75: if beta2 <= 0.35: if Q_ratio <= 0.4: G = 1.1 - 0.7*Q_ratio else: G = 0.85 else: if Q_ratio <= 0.6: G = 1.0 - 0.6*Q_ratio else: G = 0.6 else: if beta2 <= 2/3.: G = 1 else: G = 1 + 0.3*Q_ratio*Q_ratio Note that there are several errors in the text of [1]_; the errata can be obtained here: http://www.flowoffluids.com/publications/tp-410-errata.aspx Examples -------- Example 7-36 of [1]_. A DN150 schedule 80 wye has 1515 liters/minute of water exiting the straight leg, and 950 liters/minute of water exiting it through a 45° branch. Calculate the loss coefficient in the branch. The calculated value there is 0.4640. >>> K_branch_diverging_Crane(0.146, 0.146, 0.02525, 0.01583, angle=45) 0.4639895627496694 References ---------- .. [1] Crane Co. Flow of Fluids Through Valves, Fittings, and Pipe. Crane, 2009. ''' beta = (D_branch/D_run) beta2 = beta*beta Q_comb = Q_run + Q_branch Q_ratio = Q_branch/Q_comb if angle < 60 or beta <= 2/3.: H, J = 1., 2. else: H, J = 0.3, 0 if angle < 75: if beta2 <= 0.35: if Q_ratio <= 0.4: G = 1.1 - 0.7*Q_ratio else: G = 0.85 else: if Q_ratio <= 0.6: G = 1.0 - 0.6*Q_ratio else: G = 0.6 else: if beta2 <= 2/3.: G = 1 else: G = 1 + 0.3*Q_ratio*Q_ratio angle_rad = radians(angle) K_branch = G*(1 + H*(Q_ratio/beta2)**2 - J*(Q_ratio/beta2)*cos(angle_rad)) return K_branch
[ "def", "K_branch_diverging_Crane", "(", "D_run", ",", "D_branch", ",", "Q_run", ",", "Q_branch", ",", "angle", "=", "90", ")", ":", "beta", "=", "(", "D_branch", "/", "D_run", ")", "beta2", "=", "beta", "*", "beta", "Q_comb", "=", "Q_run", "+", "Q_branch", "Q_ratio", "=", "Q_branch", "/", "Q_comb", "if", "angle", "<", "60", "or", "beta", "<=", "2", "/", "3.", ":", "H", ",", "J", "=", "1.", ",", "2.", "else", ":", "H", ",", "J", "=", "0.3", ",", "0", "if", "angle", "<", "75", ":", "if", "beta2", "<=", "0.35", ":", "if", "Q_ratio", "<=", "0.4", ":", "G", "=", "1.1", "-", "0.7", "*", "Q_ratio", "else", ":", "G", "=", "0.85", "else", ":", "if", "Q_ratio", "<=", "0.6", ":", "G", "=", "1.0", "-", "0.6", "*", "Q_ratio", "else", ":", "G", "=", "0.6", "else", ":", "if", "beta2", "<=", "2", "/", "3.", ":", "G", "=", "1", "else", ":", "G", "=", "1", "+", "0.3", "*", "Q_ratio", "*", "Q_ratio", "angle_rad", "=", "radians", "(", "angle", ")", "K_branch", "=", "G", "*", "(", "1", "+", "H", "*", "(", "Q_ratio", "/", "beta2", ")", "**", "2", "-", "J", "*", "(", "Q_ratio", "/", "beta2", ")", "*", "cos", "(", "angle_rad", ")", ")", "return", "K_branch" ]
r'''Returns the loss coefficient for the branch of a diverging tee or wye according to the Crane method [1]_. .. math:: K_{branch} = G\left[1 + H\left(\frac{Q_{branch}}{Q_{comb} \beta_{branch}^2}\right)^2 - J\left(\frac{Q_{branch}}{Q_{comb} \beta_{branch}^2}\right)\cos\theta\right] .. math:: \beta_{branch} = \frac{D_{branch}}{D_{comb}} See the notes for definitions of H, J, and G. Parameters ---------- D_run : float Diameter of the straight-through inlet portion of the tee or wye [m] D_branch : float Diameter of the pipe attached at an angle to the straight-through, [m] Q_run : float Volumetric flow rate in the straight-through outlet of the tee or wye, [m^3/s] Q_branch : float Volumetric flow rate in the pipe attached at an angle to the straight- through, [m^3/s] angle : float, optional Angle the branch makes with the straight-through (tee=90, wye<90) [degrees] Returns ------- K : float Loss coefficient of branch with respect to the velocity and inside diameter of the combined flow inlet [-] Notes ----- If :math:`\beta_{branch} = 1, \theta = 90^\circ`, H = 0.3 and J = 0. Otherwise H = 1 and J = 2. G is determined according to the following pseudocode: .. code-block:: python if angle < 75: if beta2 <= 0.35: if Q_ratio <= 0.4: G = 1.1 - 0.7*Q_ratio else: G = 0.85 else: if Q_ratio <= 0.6: G = 1.0 - 0.6*Q_ratio else: G = 0.6 else: if beta2 <= 2/3.: G = 1 else: G = 1 + 0.3*Q_ratio*Q_ratio Note that there are several errors in the text of [1]_; the errata can be obtained here: http://www.flowoffluids.com/publications/tp-410-errata.aspx Examples -------- Example 7-36 of [1]_. A DN150 schedule 80 wye has 1515 liters/minute of water exiting the straight leg, and 950 liters/minute of water exiting it through a 45° branch. Calculate the loss coefficient in the branch. The calculated value there is 0.4640. >>> K_branch_diverging_Crane(0.146, 0.146, 0.02525, 0.01583, angle=45) 0.4639895627496694 References ---------- .. [1] Crane Co. Flow of Fluids Through Valves, Fittings, and Pipe. Crane, 2009.
[ "r", "Returns", "the", "loss", "coefficient", "for", "the", "branch", "of", "a", "diverging", "tee", "or", "wye", "according", "to", "the", "Crane", "method", "[", "1", "]", "_", ".", "..", "math", "::", "K_", "{", "branch", "}", "=", "G", "\\", "left", "[", "1", "+", "H", "\\", "left", "(", "\\", "frac", "{", "Q_", "{", "branch", "}}", "{", "Q_", "{", "comb", "}", "\\", "beta_", "{", "branch", "}", "^2", "}", "\\", "right", ")", "^2", "-", "J", "\\", "left", "(", "\\", "frac", "{", "Q_", "{", "branch", "}}", "{", "Q_", "{", "comb", "}", "\\", "beta_", "{", "branch", "}", "^2", "}", "\\", "right", ")", "\\", "cos", "\\", "theta", "\\", "right", "]", "..", "math", "::", "\\", "beta_", "{", "branch", "}", "=", "\\", "frac", "{", "D_", "{", "branch", "}}", "{", "D_", "{", "comb", "}}", "See", "the", "notes", "for", "definitions", "of", "H", "J", "and", "G", "." ]
python
train
jmbeach/KEP.py
src/keppy/simulator_device.py
https://github.com/jmbeach/KEP.py/blob/68cda64ab649640a486534867c81274c41e39446/src/keppy/simulator_device.py#L48-L58
def process_tag(self, tag): """Processes tag and detects which function to use""" try: if not self._is_function(tag): self._tag_type_processor[tag.data_type](tag) except KeyError as ex: raise Exception('Tag type {0} not recognized for tag {1}' .format( tag.data_type, tag.name), ex)
[ "def", "process_tag", "(", "self", ",", "tag", ")", ":", "try", ":", "if", "not", "self", ".", "_is_function", "(", "tag", ")", ":", "self", ".", "_tag_type_processor", "[", "tag", ".", "data_type", "]", "(", "tag", ")", "except", "KeyError", "as", "ex", ":", "raise", "Exception", "(", "'Tag type {0} not recognized for tag {1}'", ".", "format", "(", "tag", ".", "data_type", ",", "tag", ".", "name", ")", ",", "ex", ")" ]
Processes tag and detects which function to use
[ "Processes", "tag", "and", "detects", "which", "function", "to", "use" ]
python
train
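The dispatch-table pattern in miniature (Tag and the handlers are stand-ins for keppy's real simulator objects):

class Tag(object):
    def __init__(self, name, data_type):
        self.name = name
        self.data_type = data_type

tag_type_processor = {
    'Boolean': lambda tag: print('simulating bool for', tag.name),
    'Word': lambda tag: print('simulating word for', tag.name),
}

tag = Tag('Pump1.Running', 'Boolean')
tag_type_processor[tag.data_type](tag)  # a missing key -> unrecognized type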
CalebBell/fluids
fluids/two_phase.py
https://github.com/CalebBell/fluids/blob/57f556752e039f1d3e5a822f408c184783db2828/fluids/two_phase.py#L1539-L1634
def Wang_Chiang_Lu(m, x, rhol, rhog, mul, mug, D, roughness=0, L=1): r'''Calculates two-phase pressure drop with the Wang, Chiang, and Lu (1997) correlation given in [1]_ and reviewed in [2]_ and [3]_. .. math:: \Delta P = \Delta P_{g} \phi_g^2 .. math:: \phi_g^2 = 1 + 9.397X^{0.62} + 0.564X^{2.45} \text{ for } G >= 200 kg/m^2/s .. math:: \phi_g^2 = 1 + CX + X^2 \text{ for lower mass fluxes} .. math:: C = 0.000004566X^{0.128}Re_{lo}^{0.938}\left(\frac{\rho_l}{\rho_g} \right)^{-2.15}\left(\frac{\mu_l}{\mu_g}\right)^{5.1} .. math:: X^2 = \frac{\Delta P_l}{\Delta P_g} Parameters ---------- m : float Mass flow rate of fluid, [kg/s] x : float Quality of fluid, [-] rhol : float Liquid density, [kg/m^3] rhog : float Gas density, [kg/m^3] mul : float Viscosity of liquid, [Pa*s] mug : float Viscosity of gas, [Pa*s] D : float Diameter of pipe, [m] roughness : float, optional Roughness of pipe for use in calculating friction factor, [m] L : float, optional Length of pipe, [m] Returns ------- dP : float Pressure drop of the two-phase flow, [Pa] Notes ----- Examples -------- >>> Wang_Chiang_Lu(m=0.6, x=0.1, rhol=915., rhog=2.67, mul=180E-6, ... mug=14E-6, D=0.05, roughness=0, L=1) 448.29981978639154 References ---------- .. [1] Wang, Chi-Chuan, Ching-Shan Chiang, and Ding-Chong Lu. "Visual Observation of Two-Phase Flow Pattern of R-22, R-134a, and R-407C in a 6.5-Mm Smooth Tube." Experimental Thermal and Fluid Science 15, no. 4 (November 1, 1997): 395-405. doi:10.1016/S0894-1777(97)00007-1. .. [2] Kim, Sung-Min, and Issam Mudawar. "Universal Approach to Predicting Two-Phase Frictional Pressure Drop for Adiabatic and Condensing Mini/ Micro-Channel Flows." International Journal of Heat and Mass Transfer 55, no. 11-12 (May 2012): 3246-61. doi:10.1016/j.ijheatmasstransfer.2012.02.047. .. [3] Xu, Yu, Xiande Fang, Xianghui Su, Zhanru Zhou, and Weiwei Chen. "Evaluation of Frictional Pressure Drop Correlations for Two-Phase Flow in Pipes." Nuclear Engineering and Design, SI : CFD4NRS-3, 253 (December 2012): 86-97. doi:10.1016/j.nucengdes.2012.08.007. ''' G_tp = m/(pi/4*D**2) # Actual Liquid flow v_l = m*(1-x)/rhol/(pi/4*D**2) Re_l = Reynolds(V=v_l, rho=rhol, mu=mul, D=D) fd_l = friction_factor(Re=Re_l, eD=roughness/D) dP_l = fd_l*L/D*(0.5*rhol*v_l**2) # Actual gas flow v_g = m*x/rhog/(pi/4*D**2) Re_g = Reynolds(V=v_g, rho=rhog, mu=mug, D=D) fd_g = friction_factor(Re=Re_g, eD=roughness/D) dP_g = fd_g*L/D*(0.5*rhog*v_g**2) X = (dP_l/dP_g)**0.5 if G_tp >= 200: phi_g2 = 1 + 9.397*X**0.62 + 0.564*X**2.45 else: # Liquid-only flow; Re_lo is oddly needed v_lo = m/rhol/(pi/4*D**2) Re_lo = Reynolds(V=v_lo, rho=rhol, mu=mul, D=D) C = 0.000004566*X**0.128*Re_lo**0.938*(rhol/rhog)**-2.15*(mul/mug)**5.1 phi_g2 = 1 + C*X + X**2 return dP_g*phi_g2
[ "def", "Wang_Chiang_Lu", "(", "m", ",", "x", ",", "rhol", ",", "rhog", ",", "mul", ",", "mug", ",", "D", ",", "roughness", "=", "0", ",", "L", "=", "1", ")", ":", "G_tp", "=", "m", "/", "(", "pi", "/", "4", "*", "D", "**", "2", ")", "# Actual Liquid flow", "v_l", "=", "m", "*", "(", "1", "-", "x", ")", "/", "rhol", "/", "(", "pi", "/", "4", "*", "D", "**", "2", ")", "Re_l", "=", "Reynolds", "(", "V", "=", "v_l", ",", "rho", "=", "rhol", ",", "mu", "=", "mul", ",", "D", "=", "D", ")", "fd_l", "=", "friction_factor", "(", "Re", "=", "Re_l", ",", "eD", "=", "roughness", "/", "D", ")", "dP_l", "=", "fd_l", "*", "L", "/", "D", "*", "(", "0.5", "*", "rhol", "*", "v_l", "**", "2", ")", "# Actual gas flow", "v_g", "=", "m", "*", "x", "/", "rhog", "/", "(", "pi", "/", "4", "*", "D", "**", "2", ")", "Re_g", "=", "Reynolds", "(", "V", "=", "v_g", ",", "rho", "=", "rhog", ",", "mu", "=", "mug", ",", "D", "=", "D", ")", "fd_g", "=", "friction_factor", "(", "Re", "=", "Re_g", ",", "eD", "=", "roughness", "/", "D", ")", "dP_g", "=", "fd_g", "*", "L", "/", "D", "*", "(", "0.5", "*", "rhog", "*", "v_g", "**", "2", ")", "X", "=", "(", "dP_l", "/", "dP_g", ")", "**", "0.5", "if", "G_tp", ">=", "200", ":", "phi_g2", "=", "1", "+", "9.397", "*", "X", "**", "0.62", "+", "0.564", "*", "X", "**", "2.45", "else", ":", "# Liquid-only flow; Re_lo is oddly needed", "v_lo", "=", "m", "/", "rhol", "/", "(", "pi", "/", "4", "*", "D", "**", "2", ")", "Re_lo", "=", "Reynolds", "(", "V", "=", "v_lo", ",", "rho", "=", "rhol", ",", "mu", "=", "mul", ",", "D", "=", "D", ")", "C", "=", "0.000004566", "*", "X", "**", "0.128", "*", "Re_lo", "**", "0.938", "*", "(", "rhol", "/", "rhog", ")", "**", "-", "2.15", "*", "(", "mul", "/", "mug", ")", "**", "5.1", "phi_g2", "=", "1", "+", "C", "*", "X", "+", "X", "**", "2", "return", "dP_g", "*", "phi_g2" ]
r'''Calculates two-phase pressure drop with the Wang, Chiang, and Lu (1997) correlation given in [1]_ and reviewed in [2]_ and [3]_. .. math:: \Delta P = \Delta P_{g} \phi_g^2 .. math:: \phi_g^2 = 1 + 9.397X^{0.62} + 0.564X^{2.45} \text{ for } G >= 200 kg/m^2/s .. math:: \phi_g^2 = 1 + CX + X^2 \text{ for lower mass fluxes} .. math:: C = 0.000004566X^{0.128}Re_{lo}^{0.938}\left(\frac{\rho_l}{\rho_g} \right)^{-2.15}\left(\frac{\mu_l}{\mu_g}\right)^{5.1} .. math:: X^2 = \frac{\Delta P_l}{\Delta P_g} Parameters ---------- m : float Mass flow rate of fluid, [kg/s] x : float Quality of fluid, [-] rhol : float Liquid density, [kg/m^3] rhog : float Gas density, [kg/m^3] mul : float Viscosity of liquid, [Pa*s] mug : float Viscosity of gas, [Pa*s] D : float Diameter of pipe, [m] roughness : float, optional Roughness of pipe for use in calculating friction factor, [m] L : float, optional Length of pipe, [m] Returns ------- dP : float Pressure drop of the two-phase flow, [Pa] Notes ----- Examples -------- >>> Wang_Chiang_Lu(m=0.6, x=0.1, rhol=915., rhog=2.67, mul=180E-6, ... mug=14E-6, D=0.05, roughness=0, L=1) 448.29981978639154 References ---------- .. [1] Wang, Chi-Chuan, Ching-Shan Chiang, and Ding-Chong Lu. "Visual Observation of Two-Phase Flow Pattern of R-22, R-134a, and R-407C in a 6.5-Mm Smooth Tube." Experimental Thermal and Fluid Science 15, no. 4 (November 1, 1997): 395-405. doi:10.1016/S0894-1777(97)00007-1. .. [2] Kim, Sung-Min, and Issam Mudawar. "Universal Approach to Predicting Two-Phase Frictional Pressure Drop for Adiabatic and Condensing Mini/ Micro-Channel Flows." International Journal of Heat and Mass Transfer 55, no. 11-12 (May 2012): 3246-61. doi:10.1016/j.ijheatmasstransfer.2012.02.047. .. [3] Xu, Yu, Xiande Fang, Xianghui Su, Zhanru Zhou, and Weiwei Chen. "Evaluation of Frictional Pressure Drop Correlations for Two-Phase Flow in Pipes." Nuclear Engineering and Design, SI : CFD4NRS-3, 253 (December 2012): 86-97. doi:10.1016/j.nucengdes.2012.08.007.
[ "r", "Calculates", "two", "-", "phase", "pressure", "drop", "with", "the", "Wang", "Chiang", "and", "Lu", "(", "1997", ")", "correlation", "given", "in", "[", "1", "]", "_", "and", "reviewed", "in", "[", "2", "]", "_", "and", "[", "3", "]", "_", "." ]
python
train
hyperledger/indy-plenum
plenum/common/ledger.py
https://github.com/hyperledger/indy-plenum/blob/dcd144e238af7f17a869ffc9412f13dc488b7020/plenum/common/ledger.py#L129-L143
def treeWithAppliedTxns(self, txns: List, currentTree=None):
        """
        Return a copy of merkle tree after applying the txns

        :param txns:
        :return:
        """
        currentTree = currentTree or self.tree

        # Copying the tree is not a problem since it's a Compact Merkle Tree
        # so the size of the tree would be 32*(lg n) bytes where n is the
        # number of leaves (no. of txns)
        tempTree = copy(currentTree)
        for txn in txns:
            s = self.serialize_for_tree(txn)
            tempTree.append(s)
        return tempTree
[ "def", "treeWithAppliedTxns", "(", "self", ",", "txns", ":", "List", ",", "currentTree", "=", "None", ")", ":", "currentTree", "=", "currentTree", "or", "self", ".", "tree", "# Copying the tree is not a problem since it's a Compact Merkle Tree", "# so the size of the tree would be 32*(lg n) bytes where n is the", "# number of leaves (no. of txns)", "tempTree", "=", "copy", "(", "currentTree", ")", "for", "txn", "in", "txns", ":", "s", "=", "self", ".", "serialize_for_tree", "(", "txn", ")", "tempTree", ".", "append", "(", "s", ")", "return", "tempTree" ]
Return a copy of merkle tree after applying the txns :param txns: :return:
[ "Return", "a", "copy", "of", "merkle", "tree", "after", "applying", "the", "txns", ":", "param", "txns", ":", ":", "return", ":" ]
python
train
apache/spark
python/pyspark/rdd.py
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/rdd.py#L223-L229
def cache(self): """ Persist this RDD with the default storage level (C{MEMORY_ONLY}). """ self.is_cached = True self.persist(StorageLevel.MEMORY_ONLY) return self
[ "def", "cache", "(", "self", ")", ":", "self", ".", "is_cached", "=", "True", "self", ".", "persist", "(", "StorageLevel", ".", "MEMORY_ONLY", ")", "return", "self" ]
Persist this RDD with the default storage level (C{MEMORY_ONLY}).
[ "Persist", "this", "RDD", "with", "the", "default", "storage", "level", "(", "C", "{", "MEMORY_ONLY", "}", ")", "." ]
python
train
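Typical use, assuming an active SparkContext sc:

rdd = sc.parallelize(range(1000)).map(lambda x: x * x).cache()
rdd.count()  # first action computes and caches the partitions
rdd.sum()    # later actions reuse the in-memory copy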
dereneaton/ipyrad
ipyrad/analysis/tetrad2.py
https://github.com/dereneaton/ipyrad/blob/5eeb8a178160f45faf71bf47cec4abe998a575d1/ipyrad/analysis/tetrad2.py#L290-L326
def _refresh(self): """ Remove all existing results files and reinit the h5 arrays so that the tetrad object is just like fresh from a CLI start. """ ## clear any existing results files oldfiles = [self.files.qdump] + \ self.database.__dict__.values() + \ self.trees.__dict__.values() for oldfile in oldfiles: if oldfile: if os.path.exists(oldfile): os.remove(oldfile) ## store old ipcluster info oldcluster = copy.deepcopy(self._ipcluster) ## reinit the tetrad object data. self.__init__( name=self.name, data=self.files.data, mapfile=self.files.mapfile, workdir=self.dirs, method=self.params.method, guidetree=self.files.tree, resolve_ambigs=self.params.resolve_ambigs, save_invariants=self.params.save_invariants, nboots=self.params.nboots, nquartets=self.params.nquartets, initarr=True, quiet=True, cli=self.kwargs.get("cli") ) ## retain the same ipcluster info self._ipcluster = oldcluster
[ "def", "_refresh", "(", "self", ")", ":", "## clear any existing results files", "oldfiles", "=", "[", "self", ".", "files", ".", "qdump", "]", "+", "self", ".", "database", ".", "__dict__", ".", "values", "(", ")", "+", "self", ".", "trees", ".", "__dict__", ".", "values", "(", ")", "for", "oldfile", "in", "oldfiles", ":", "if", "oldfile", ":", "if", "os", ".", "path", ".", "exists", "(", "oldfile", ")", ":", "os", ".", "remove", "(", "oldfile", ")", "## store old ipcluster info", "oldcluster", "=", "copy", ".", "deepcopy", "(", "self", ".", "_ipcluster", ")", "## reinit the tetrad object data.", "self", ".", "__init__", "(", "name", "=", "self", ".", "name", ",", "data", "=", "self", ".", "files", ".", "data", ",", "mapfile", "=", "self", ".", "files", ".", "mapfile", ",", "workdir", "=", "self", ".", "dirs", ",", "method", "=", "self", ".", "params", ".", "method", ",", "guidetree", "=", "self", ".", "files", ".", "tree", ",", "resolve_ambigs", "=", "self", ".", "params", ".", "resolve_ambigs", ",", "save_invariants", "=", "self", ".", "params", ".", "save_invariants", ",", "nboots", "=", "self", ".", "params", ".", "nboots", ",", "nquartets", "=", "self", ".", "params", ".", "nquartets", ",", "initarr", "=", "True", ",", "quiet", "=", "True", ",", "cli", "=", "self", ".", "kwargs", ".", "get", "(", "\"cli\"", ")", ")", "## retain the same ipcluster info", "self", ".", "_ipcluster", "=", "oldcluster" ]
Remove all existing results files and reinit the h5 arrays so that the tetrad object is just like fresh from a CLI start.
[ "Remove", "all", "existing", "results", "files", "and", "reinit", "the", "h5", "arrays", "so", "that", "the", "tetrad", "object", "is", "just", "like", "fresh", "from", "a", "CLI", "start", "." ]
python
valid
poldracklab/niworkflows
niworkflows/utils/misc.py
https://github.com/poldracklab/niworkflows/blob/254f4b4fcc5e6ecb29d2f4602a30786b913ecce5/niworkflows/utils/misc.py#L116-L132
def splitext(fname):
    """Splits filename and extension (.gz safe)

    >>> splitext('some/file.nii.gz')
    ('file', '.nii.gz')

    >>> splitext('some/other/file.nii')
    ('file', '.nii')

    >>> splitext('otherext.tar.gz')
    ('otherext', '.tar.gz')

    >>> splitext('text.txt')
    ('text', '.txt')

    """
    from pathlib import Path
    basename = str(Path(fname).name)
    # rstrip('.gz') strips any trailing '.', 'g' or 'z' characters
    # ('thing.gz' -> 'thin'), so remove the suffix explicitly instead.
    trimmed = basename[:-3] if basename.endswith('.gz') else basename
    stem = Path(trimmed).stem
    return stem, basename[len(stem):]
[ "def", "splitext", "(", "fname", ")", ":", "from", "pathlib", "import", "Path", "basename", "=", "str", "(", "Path", "(", "fname", ")", ".", "name", ")", "# rstrip('.gz') strips any trailing '.', 'g' or 'z' characters", "# ('thing.gz' -> 'thin'), so remove the suffix explicitly instead.", "trimmed", "=", "basename", "[", ":", "-", "3", "]", "if", "basename", ".", "endswith", "(", "'.gz'", ")", "else", "basename", "stem", "=", "Path", "(", "trimmed", ")", ".", "stem", "return", "stem", ",", "basename", "[", "len", "(", "stem", ")", ":", "]" ]
Splits filename and extension (.gz safe) >>> splitext('some/file.nii.gz') ('file', '.nii.gz') >>> splitext('some/other/file.nii') ('file', '.nii') >>> splitext('otherext.tar.gz') ('otherext', '.tar.gz') >>> splitext('text.txt') ('text', '.txt')
[ "Splits", "filename", "and", "extension", "(", ".", "gz", "safe", ")" ]
python
train
brmscheiner/ideogram
ideogram/writer.py
https://github.com/brmscheiner/ideogram/blob/422bf566c51fd56f7bbb6e75b16d18d52b4c7568/ideogram/writer.py#L179-L196
def tagAttributes_while(fdef_master_list,root):
    '''Tag each node under root with the appropriate depth. '''
    depth = 0
    current = root
    untagged_nodes = [root]
    while untagged_nodes:
        current = untagged_nodes.pop()
        for x in fdef_master_list:
            if jsName(x.path,x.name) == current['name']:
                current['path'] = x.path
        # The original tested the bare names `children` and `depth` (a
        # NameError and a wrong membership test); string keys are intended.
        if 'children' in current:
            for child in current['children']:
                child["depth"] = depth
                untagged_nodes.append(child)
        if "depth" not in current:
            current["depth"] = depth
        depth += 1
    return root
[ "def", "tagAttributes_while", "(", "fdef_master_list", ",", "root", ")", ":", "depth", "=", "0", "current", "=", "root", "untagged_nodes", "=", "[", "root", "]", "while", "untagged_nodes", ":", "current", "=", "untagged_nodes", ".", "pop", "(", ")", "for", "x", "in", "fdef_master_list", ":", "if", "jsName", "(", "x", ".", "path", ",", "x", ".", "name", ")", "==", "current", "[", "'name'", "]", ":", "current", "[", "'path'", "]", "=", "x", ".", "path", "# The original tested the bare names `children` and `depth` (a", "# NameError and a wrong membership test); string keys are intended.", "if", "'children'", "in", "current", ":", "for", "child", "in", "current", "[", "'children'", "]", ":", "child", "[", "\"depth\"", "]", "=", "depth", "untagged_nodes", ".", "append", "(", "child", ")", "if", "\"depth\"", "not", "in", "current", ":", "current", "[", "\"depth\"", "]", "=", "depth", "depth", "+=", "1", "return", "root" ]
Tag each node under root with the appropriate depth.
[ "Tag", "each", "node", "under", "root", "with", "the", "appropriate", "depth", "." ]
python
train
Iotic-Labs/py-IoticAgent
src/IoticAgent/IOT/ResourceMeta.py
https://github.com/Iotic-Labs/py-IoticAgent/blob/893e8582ad1dacfe32dfc0ee89452bbd6f57d28d/src/IoticAgent/IOT/ResourceMeta.py#L196-L213
def set_description(self, description, lang=None): """Sets the `description` metadata property on your Thing/Point. Only one description is allowed per language, so any other descriptions in this language are removed before adding this one Raises `ValueError` containing an error message if the parameters fail validation `description` (mandatory) (string) the new text of the description `lang` (optional) (string) The two-character ISO 639-1 language code to use for your label. None means use the default language for your agent. See [Config](./Config.m.html#IoticAgent.IOT.Config.Config.__init__) """ description = Validation.description_check_convert(description) lang = Validation.lang_check_convert(lang, default=self._default_lang) # remove any other descriptions with this language before adding self.delete_description(lang) subj = self._get_uuid_uriref() self._graph.add((subj, self._commentPredicate, Literal(description, lang)))
[ "def", "set_description", "(", "self", ",", "description", ",", "lang", "=", "None", ")", ":", "description", "=", "Validation", ".", "description_check_convert", "(", "description", ")", "lang", "=", "Validation", ".", "lang_check_convert", "(", "lang", ",", "default", "=", "self", ".", "_default_lang", ")", "# remove any other descriptions with this language before adding", "self", ".", "delete_description", "(", "lang", ")", "subj", "=", "self", ".", "_get_uuid_uriref", "(", ")", "self", ".", "_graph", ".", "add", "(", "(", "subj", ",", "self", ".", "_commentPredicate", ",", "Literal", "(", "description", ",", "lang", ")", ")", ")" ]
Sets the `description` metadata property on your Thing/Point. Only one description is allowed per language, so any other descriptions in this language are removed before adding this one Raises `ValueError` containing an error message if the parameters fail validation `description` (mandatory) (string) the new text of the description `lang` (optional) (string) The two-character ISO 639-1 language code to use for your label. None means use the default language for your agent. See [Config](./Config.m.html#IoticAgent.IOT.Config.Config.__init__)
[ "Sets", "the", "description", "metadata", "property", "on", "your", "Thing", "/", "Point", ".", "Only", "one", "description", "is", "allowed", "per", "language", "so", "any", "other", "descriptions", "in", "this", "language", "are", "removed", "before", "adding", "this", "one" ]
python
train
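A hedged usage sketch (assumes meta is a ResourceMeta obtained from a Thing or Point via its metadata accessor; the acquisition step is elided):

meta.set_description("Roof-mounted temperature sensor")                # agent default language
meta.set_description("Capteur de température sur le toit", lang="fr")  # replaces any prior 'fr' description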
dturanski/springcloudstream
springcloudstream/stdio/stream.py
https://github.com/dturanski/springcloudstream/blob/208b542f9eba82e97882d52703af8e965a62a980/springcloudstream/stdio/stream.py#L62-L69
def validate(self, options): """ Validate the options or exit() """ try: codecs.getencoder(options.char_encoding) except LookupError: self.parser.error("invalid 'char-encoding' %s" % options.char_encoding)
[ "def", "validate", "(", "self", ",", "options", ")", ":", "try", ":", "codecs", ".", "getencoder", "(", "options", ".", "char_encoding", ")", "except", "LookupError", ":", "self", ".", "parser", ".", "error", "(", "\"invalid 'char-encoding' %s\"", "%", "options", ".", "char_encoding", ")" ]
Validate the options or exit()
[ "Validate", "the", "options", "or", "exit", "()" ]
python
train
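The check validate() relies on is codecs.getencoder, which raises LookupError for unknown encodings:

import codecs

for enc in ('utf-8', 'latin-1', 'not-a-codec'):
    try:
        codecs.getencoder(enc)
        print(enc, '-> ok')
    except LookupError:
        print(enc, "-> invalid 'char-encoding'")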
edx/pa11ycrawler
pa11ycrawler/spiders/edx.py
https://github.com/edx/pa11ycrawler/blob/fc672d4524463bc050ade4c7c97801c0d5bf8c9e/pa11ycrawler/spiders/edx.py#L230-L258
def after_initial_csrf(self, response): """ This method is called *only* if the crawler is started with an email and password combination. In order to log in, we need a CSRF token from a GET request. This method takes the result of a GET request, extracts the CSRF token, and uses it to make a login request. The response to this login request will be handled by the `after_initial_login` method. """ login_url = ( URLObject("http://") .with_hostname(self.domain) .with_port(self.port) .with_path(LOGIN_API_PATH) ) credentials = { "email": self.login_email, "password": self.login_password, } headers = { b"X-CSRFToken": get_csrf_token(response), } yield scrapy.FormRequest( login_url, formdata=credentials, headers=headers, callback=self.after_initial_login, errback=self.handle_error )
[ "def", "after_initial_csrf", "(", "self", ",", "response", ")", ":", "login_url", "=", "(", "URLObject", "(", "\"http://\"", ")", ".", "with_hostname", "(", "self", ".", "domain", ")", ".", "with_port", "(", "self", ".", "port", ")", ".", "with_path", "(", "LOGIN_API_PATH", ")", ")", "credentials", "=", "{", "\"email\"", ":", "self", ".", "login_email", ",", "\"password\"", ":", "self", ".", "login_password", ",", "}", "headers", "=", "{", "b\"X-CSRFToken\"", ":", "get_csrf_token", "(", "response", ")", ",", "}", "yield", "scrapy", ".", "FormRequest", "(", "login_url", ",", "formdata", "=", "credentials", ",", "headers", "=", "headers", ",", "callback", "=", "self", ".", "after_initial_login", ",", "errback", "=", "self", ".", "handle_error", ")" ]
This method is called *only* if the crawler is started with an email and password combination. In order to log in, we need a CSRF token from a GET request. This method takes the result of a GET request, extracts the CSRF token, and uses it to make a login request. The response to this login request will be handled by the `after_initial_login` method.
[ "This", "method", "is", "called", "*", "only", "*", "if", "the", "crawler", "is", "started", "with", "an", "email", "and", "password", "combination", ".", "In", "order", "to", "log", "in", "we", "need", "a", "CSRF", "token", "from", "a", "GET", "request", ".", "This", "method", "takes", "the", "result", "of", "a", "GET", "request", "extracts", "the", "CSRF", "token", "and", "uses", "it", "to", "make", "a", "login", "request", ".", "The", "response", "to", "this", "login", "request", "will", "be", "handled", "by", "the", "after_initial_login", "method", "." ]
python
train
hendrix/hendrix
hendrix/utils/__init__.py
https://github.com/hendrix/hendrix/blob/175af011a7e5822b772bfec0e11a46466bb8688d/hendrix/utils/__init__.py#L27-L44
def responseInColor(request, status, headers, prefix='Response', opts=None):
    "Prints the response info in color"
    code, message = status.split(None, 1)
    message = '%s [%s] => Request %s %s %s on pid %d' % (
        prefix,
        code,
        str(request.host),
        request.method,
        request.path,
        os.getpid()
    )
    # Floor division: on Python 3, `int(code) / 100` yields a float, so
    # e.g. 201 would map to 2.01 and match no branch.
    signal = int(code) // 100
    if signal == 2:
        chalk.green(message, opts=opts)
    elif signal == 3:
        chalk.blue(message, opts=opts)
    else:
        chalk.red(message, opts=opts)
[ "def", "responseInColor", "(", "request", ",", "status", ",", "headers", ",", "prefix", "=", "'Response'", ",", "opts", "=", "None", ")", ":", "code", ",", "message", "=", "status", ".", "split", "(", "None", ",", "1", ")", "message", "=", "'%s [%s] => Request %s %s %s on pid %d'", "%", "(", "prefix", ",", "code", ",", "str", "(", "request", ".", "host", ")", ",", "request", ".", "method", ",", "request", ".", "path", ",", "os", ".", "getpid", "(", ")", ")", "# Floor division: on Python 3, `int(code) / 100` yields a float, so", "# e.g. 201 would map to 2.01 and match no branch.", "signal", "=", "int", "(", "code", ")", "//", "100", "if", "signal", "==", "2", ":", "chalk", ".", "green", "(", "message", ",", "opts", "=", "opts", ")", "elif", "signal", "==", "3", ":", "chalk", ".", "blue", "(", "message", ",", "opts", "=", "opts", ")", "else", ":", "chalk", ".", "red", "(", "message", ",", "opts", "=", "opts", ")" ]
Prints the response info in color
[ "Prints", "the", "response", "info", "in", "color" ]
python
train
CalebBell/thermo
thermo/thermal_conductivity.py
https://github.com/CalebBell/thermo/blob/3857ed023a3e64fd3039a32d53576c24990ef1c3/thermo/thermal_conductivity.py#L369-L423
def Bahadori_liquid(T, M):
    r'''Estimates the thermal conductivity of paraffin liquid hydrocarbons.
    Fits their data well, and is useful as only MW is required.
    X is the Molecular weight, and Y the temperature.

    .. math::
        K = a + bY + CY^2 + dY^3

        a = A_1 + B_1 X + C_1 X^2 + D_1 X^3

        b = A_2 + B_2 X + C_2 X^2 + D_2 X^3

        c = A_3 + B_3 X + C_3 X^2 + D_3 X^3

        d = A_4 + B_4 X + C_4 X^2 + D_4 X^3

    Parameters
    ----------
    T : float
        Temperature of the fluid [K]
    M : float
        Molecular weight of the fluid [g/mol]

    Returns
    -------
    kl : float
        Estimated liquid thermal conductivity [W/m/K]

    Notes
    -----
    The accuracy of this equation has not been reviewed.

    Examples
    --------
    Data point from [1]_.

    >>> Bahadori_liquid(273.15, 170)
    0.14274278108272603

    References
    ----------
    .. [1] Bahadori, Alireza, and Saeid Mokhatab. "Estimating Thermal
       Conductivity of Hydrocarbons." Chemical Engineering 115, no. 13
       (December 2008): 52-54
    '''
    A = [-6.48326E-2, 2.715015E-3, -1.08580E-5, 9.853917E-9]
    B = [1.565612E-2, -1.55833E-4, 5.051114E-7, -4.68030E-10]
    C = [-1.80304E-4, 1.758693E-6, -5.55224E-9, 5.201365E-12]
    D = [5.880443E-7, -5.65898E-9, 1.764384E-11, -1.65944E-14]
    X, Y = M, T
    a = A[0] + B[0]*X + C[0]*X**2 + D[0]*X**3
    b = A[1] + B[1]*X + C[1]*X**2 + D[1]*X**3
    c = A[2] + B[2]*X + C[2]*X**2 + D[2]*X**3
    d = A[3] + B[3]*X + C[3]*X**2 + D[3]*X**3
    return a + b*Y + c*Y**2 + d*Y**3
[ "def", "Bahadori_liquid", "(", "T", ",", "M", ")", ":", "A", "=", "[", "-", "6.48326E-2", ",", "2.715015E-3", ",", "-", "1.08580E-5", ",", "9.853917E-9", "]", "B", "=", "[", "1.565612E-2", ",", "-", "1.55833E-4", ",", "5.051114E-7", ",", "-", "4.68030E-10", "]", "C", "=", "[", "-", "1.80304E-4", ",", "1.758693E-6", ",", "-", "5.55224E-9", ",", "5.201365E-12", "]", "D", "=", "[", "5.880443E-7", ",", "-", "5.65898E-9", ",", "1.764384E-11", ",", "-", "1.65944E-14", "]", "X", ",", "Y", "=", "M", ",", "T", "a", "=", "A", "[", "0", "]", "+", "B", "[", "0", "]", "*", "X", "+", "C", "[", "0", "]", "*", "X", "**", "2", "+", "D", "[", "0", "]", "*", "X", "**", "3", "b", "=", "A", "[", "1", "]", "+", "B", "[", "1", "]", "*", "X", "+", "C", "[", "1", "]", "*", "X", "**", "2", "+", "D", "[", "1", "]", "*", "X", "**", "3", "c", "=", "A", "[", "2", "]", "+", "B", "[", "2", "]", "*", "X", "+", "C", "[", "2", "]", "*", "X", "**", "2", "+", "D", "[", "2", "]", "*", "X", "**", "3", "d", "=", "A", "[", "3", "]", "+", "B", "[", "3", "]", "*", "X", "+", "C", "[", "3", "]", "*", "X", "**", "2", "+", "D", "[", "3", "]", "*", "X", "**", "3", "return", "a", "+", "b", "*", "Y", "+", "c", "*", "Y", "**", "2", "+", "d", "*", "Y", "**", "3" ]
r'''Estimates the thermal conductivity of paraffin liquid hydrocarbons.
Fits their data well, and is useful as only MW is required.
X is the Molecular weight, and Y the temperature.

.. math::
    K = a + bY + CY^2 + dY^3

    a = A_1 + B_1 X + C_1 X^2 + D_1 X^3

    b = A_2 + B_2 X + C_2 X^2 + D_2 X^3

    c = A_3 + B_3 X + C_3 X^2 + D_3 X^3

    d = A_4 + B_4 X + C_4 X^2 + D_4 X^3

Parameters
----------
T : float
    Temperature of the fluid [K]
M : float
    Molecular weight of the fluid [g/mol]

Returns
-------
kl : float
    Estimated liquid thermal conductivity [W/m/K]

Notes
-----
The accuracy of this equation has not been reviewed.

Examples
--------
Data point from [1]_.

>>> Bahadori_liquid(273.15, 170)
0.14274278108272603

References
----------
.. [1] Bahadori, Alireza, and Saeid Mokhatab. "Estimating Thermal
   Conductivity of Hydrocarbons." Chemical Engineering 115, no. 13
   (December 2008): 52-54
[ "r", "Estimates", "the", "thermal", "conductivity", "of", "paraffin", "liquid", "hydrocarbons", ".", "Fits", "their", "data", "well", "and", "is", "useful", "as", "only", "MW", "is", "required", ".", "X", "is", "the", "Molecular", "weight", "and", "Y", "the", "temperature", "." ]
python
valid
kodexlab/reliure
reliure/offline.py
https://github.com/kodexlab/reliure/blob/0450c7a9254c5c003162738458bbe0c49e777ba5/reliure/offline.py#L119-L142
def main():
    """ Small run usage example """
    #TODO: need to be mv in .rst doc
    from reliure.pipeline import Composable

    @Composable
    def doc_analyse(docs):
        for doc in docs:
            yield {
                "title": doc,
                "url": "http://lost.com/%s" % doc,
            }

    @Composable
    def print_ulrs(docs):
        for doc in docs:
            print(doc["url"])
            yield doc

    pipeline = doc_analyse | print_ulrs
    documents = ("doc_%s" % d for d in xrange(20))
    res = run_parallel(pipeline, documents, ncpu=2, chunksize=5)
    print(res)
[ "def", "main", "(", ")", ":", "#TODO: need to be mv in .rst doc", "from", "reliure", ".", "pipeline", "import", "Composable", "@", "Composable", "def", "doc_analyse", "(", "docs", ")", ":", "for", "doc", "in", "docs", ":", "yield", "{", "\"title\"", ":", "doc", ",", "\"url\"", ":", "\"http://lost.com/%s\"", "%", "doc", ",", "}", "@", "Composable", "def", "print_ulrs", "(", "docs", ")", ":", "for", "doc", "in", "docs", ":", "print", "(", "doc", "[", "\"url\"", "]", ")", "yield", "doc", "pipeline", "=", "doc_analyse", "|", "print_ulrs", "documents", "=", "(", "\"doc_%s\"", "%", "d", "for", "d", "in", "xrange", "(", "20", ")", ")", "res", "=", "run_parallel", "(", "pipeline", ",", "documents", ",", "ncpu", "=", "2", ",", "chunksize", "=", "5", ")", "print", "(", "res", ")" ]
Small run usage example
[ "Small", "run", "usage", "example" ]
python
train
alefnula/tea
tea/utils/crypto.py
https://github.com/alefnula/tea/blob/f5a0a724a425ec4f9dd2c7fe966ef06faf3a15a3/tea/utils/crypto.py#L437-L448
def decrypt(data, digest=True): """Decrypt provided data.""" alg, _, data = data.rpartition("$") if not alg: return data data = _from_hex_digest(data) if digest else data try: return implementations["decryption"][alg]( data, implementations["get_key"]() ) except KeyError: raise CryptError("Can not decrypt key for algorithm: %s" % alg)
[ "def", "decrypt", "(", "data", ",", "digest", "=", "True", ")", ":", "alg", ",", "_", ",", "data", "=", "data", ".", "rpartition", "(", "\"$\"", ")", "if", "not", "alg", ":", "return", "data", "data", "=", "_from_hex_digest", "(", "data", ")", "if", "digest", "else", "data", "try", ":", "return", "implementations", "[", "\"decryption\"", "]", "[", "alg", "]", "(", "data", ",", "implementations", "[", "\"get_key\"", "]", "(", ")", ")", "except", "KeyError", ":", "raise", "CryptError", "(", "\"Can not decrypt key for algorithm: %s\"", "%", "alg", ")" ]
Decrypt provided data.
[ "Decrypt", "provided", "data", "." ]
python
train
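One property that follows directly from the rpartition parsing above: a value with no 'alg$' prefix is returned unchanged (the sample string is fabricated):

data = 'plain-text-passes-through'  # no '$' separator -> no algorithm
assert decrypt(data) == data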
debrouwere/google-analytics
googleanalytics/utils/functional.py
https://github.com/debrouwere/google-analytics/blob/7d585c2f6f5ca191e975e6e3eaf7d5e2424fa11c/googleanalytics/utils/functional.py#L20-L45
def vectorize(fn): """ Allows a method to accept one or more values, but internally deal only with a single item, and returning a list or a single item depending on what is desired. """ @functools.wraps(fn) def vectorized_method(self, values, *vargs, **kwargs): wrap = not isinstance(values, (list, tuple)) should_unwrap = not kwargs.setdefault('wrap', False) unwrap = wrap and should_unwrap del kwargs['wrap'] if wrap: values = [values] results = [fn(self, value, *vargs, **kwargs) for value in values] if unwrap: results = results[0] return results return vectorized_method
[ "def", "vectorize", "(", "fn", ")", ":", "@", "functools", ".", "wraps", "(", "fn", ")", "def", "vectorized_method", "(", "self", ",", "values", ",", "*", "vargs", ",", "*", "*", "kwargs", ")", ":", "wrap", "=", "not", "isinstance", "(", "values", ",", "(", "list", ",", "tuple", ")", ")", "should_unwrap", "=", "not", "kwargs", ".", "setdefault", "(", "'wrap'", ",", "False", ")", "unwrap", "=", "wrap", "and", "should_unwrap", "del", "kwargs", "[", "'wrap'", "]", "if", "wrap", ":", "values", "=", "[", "values", "]", "results", "=", "[", "fn", "(", "self", ",", "value", ",", "*", "vargs", ",", "*", "*", "kwargs", ")", "for", "value", "in", "values", "]", "if", "unwrap", ":", "results", "=", "results", "[", "0", "]", "return", "results", "return", "vectorized_method" ]
Allows a method to accept one or more values, but internally deal only with a single item, and returning a list or a single item depending on what is desired.
[ "Allows", "a", "method", "to", "accept", "one", "or", "more", "values", "but", "internally", "deal", "only", "with", "a", "single", "item", "and", "returning", "a", "list", "or", "a", "single", "item", "depending", "on", "what", "is", "desired", "." ]
python
train
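For example (Columns and normalize are made-up names; the wrap keyword behaves as the decorator defines):

class Columns(object):
    @vectorize
    def normalize(self, name):
        return name.strip().lower()

c = Columns()
print(c.normalize(' City '))              # 'city'   (scalar in, scalar out)
print(c.normalize([' City ', 'AGE']))     # ['city', 'age']
print(c.normalize(' City ', wrap=True))   # ['city'] (wrap=True keeps the list)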
ambitioninc/django-query-builder
querybuilder/tables.py
https://github.com/ambitioninc/django-query-builder/blob/113a7d845d3ddc6a45621b9880308e756f87c5bf/querybuilder/tables.py#L311-L328
def find_field(self, field=None, alias=None): """ Finds a field by name or alias. :param field: string of the field name or alias, dict of {'alias': field}, or a Field instance :type field: str or dict or Field :returns: The field if it is found, otherwise None :rtype: :class:`Field <querybuilder.fields.Field>` or None """ if alias: field = alias field = FieldFactory(field, table=self, alias=alias) identifier = field.get_identifier() for field in self.fields: if field.get_identifier() == identifier: return field return None
[ "def", "find_field", "(", "self", ",", "field", "=", "None", ",", "alias", "=", "None", ")", ":", "if", "alias", ":", "field", "=", "alias", "field", "=", "FieldFactory", "(", "field", ",", "table", "=", "self", ",", "alias", "=", "alias", ")", "identifier", "=", "field", ".", "get_identifier", "(", ")", "for", "field", "in", "self", ".", "fields", ":", "if", "field", ".", "get_identifier", "(", ")", "==", "identifier", ":", "return", "field", "return", "None" ]
Finds a field by name or alias. :param field: string of the field name or alias, dict of {'alias': field}, or a Field instance :type field: str or dict or Field :returns: The field if it is found, otherwise None :rtype: :class:`Field <querybuilder.fields.Field>` or None
[ "Finds", "a", "field", "by", "name", "or", "alias", "." ]
python
train
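A hedged usage sketch (assumes users is a querybuilder Table built elsewhere with an aliased field; construction details are elided):

field = users.find_field('username')              # look up by name
field = users.find_field(alias='username_alias')  # or by alias
if field is not None:
    print(field.get_identifier())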
aiidateam/aiida-codtools
aiida_codtools/parsers/cif_cod_deposit.py
https://github.com/aiidateam/aiida-codtools/blob/da5e4259b7a2e86cf0cc3f997e11dd36d445fa94/aiida_codtools/parsers/cif_cod_deposit.py#L35-L63
def _get_output_nodes(self, output_path, error_path): """ Extracts output nodes from the standard output and standard error files """ status = cod_deposition_states.UNKNOWN messages = [] if output_path is not None: content = None with open(output_path) as f: content = f.read() status, message = CifCodDepositParser._deposit_result(content) messages.extend(message.split('\n')) if error_path is not None: with open(error_path) as f: content = f.readlines() lines = [x.strip('\n') for x in content] messages.extend(lines) parameters = {'output_messages': messages, 'status': status} output_nodes = [] output_nodes.append(('messages', Dict(dict=parameters))) if status == cod_deposition_states.SUCCESS: return True, output_nodes return False, output_nodes
[ "def", "_get_output_nodes", "(", "self", ",", "output_path", ",", "error_path", ")", ":", "status", "=", "cod_deposition_states", ".", "UNKNOWN", "messages", "=", "[", "]", "if", "output_path", "is", "not", "None", ":", "content", "=", "None", "with", "open", "(", "output_path", ")", "as", "f", ":", "content", "=", "f", ".", "read", "(", ")", "status", ",", "message", "=", "CifCodDepositParser", ".", "_deposit_result", "(", "content", ")", "messages", ".", "extend", "(", "message", ".", "split", "(", "'\\n'", ")", ")", "if", "error_path", "is", "not", "None", ":", "with", "open", "(", "error_path", ")", "as", "f", ":", "content", "=", "f", ".", "readlines", "(", ")", "lines", "=", "[", "x", ".", "strip", "(", "'\\n'", ")", "for", "x", "in", "content", "]", "messages", ".", "extend", "(", "lines", ")", "parameters", "=", "{", "'output_messages'", ":", "messages", ",", "'status'", ":", "status", "}", "output_nodes", "=", "[", "]", "output_nodes", ".", "append", "(", "(", "'messages'", ",", "Dict", "(", "dict", "=", "parameters", ")", ")", ")", "if", "status", "==", "cod_deposition_states", ".", "SUCCESS", ":", "return", "True", ",", "output_nodes", "return", "False", ",", "output_nodes" ]
Extracts output nodes from the standard output and standard error files
[ "Extracts", "output", "nodes", "from", "the", "standard", "output", "and", "standard", "error", "files" ]
python
train