Dataset schema (one record per example; string lengths are the min/max observed):

repo               string         (7 to 54 chars)
path               string         (4 to 192 chars)
url                string         (87 to 284 chars)
code               string         (78 to 104k chars)
code_tokens        sequence
docstring          string         (1 to 46.9k chars)
docstring_tokens   sequence
language           stringclasses  (1 value)
partition          stringclasses  (3 values)
repo: iotile/coretools
path: iotilebuild/iotile/build/config/scons-local-3.0.1/SCons/SConf.py
url: https://github.com/iotile/coretools/blob/2d794f5f1346b841b0dcd16c9d284e9bf2f3c6ec/iotilebuild/iotile/build/config/scons-local-3.0.1/SCons/SConf.py#L453-L476
code:

def Define(self, name, value=None, comment=None):
    """
    Define a pre processor symbol name, with the optional given value in the
    current config header.

    If value is None (default), then #define name is written. If value is
    not None, then #define name value is written. comment is a string which
    will be put as a C comment in the header, to explain the meaning of the
    value (appropriate C comments will be added automatically).
    """
    lines = []

    if comment:
        comment_str = "/* %s */" % comment
        lines.append(comment_str)

    if value is not None:
        define_str = "#define %s %s" % (name, value)
    else:
        define_str = "#define %s" % name

    lines.append(define_str)
    lines.append('')

    self.config_h_text = self.config_h_text + '\n'.join(lines)
docstring: Define a pre processor symbol name, with the optional given value in the current config header. If value is None (default), then #define name is written. If value is not none, then #define name value is written. comment is a string which will be put as a C comment in the header, to explain the meaning of the value (appropriate C comments will be added automatically).
language: python
partition: train
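The define-line construction above is plain string formatting. A minimal standalone sketch (the module-level `config_h_text` accumulator stands in for the SConf context's attribute, and the symbol name is invented) shows the text the method appends:

config_h_text = ""

def define(name, value=None, comment=None):
    # Mirrors SConf.Define: optional C comment, then the #define line,
    # then a blank separator line.
    global config_h_text
    lines = []
    if comment:
        lines.append("/* %s */" % comment)
    if value is not None:
        lines.append("#define %s %s" % (name, value))
    else:
        lines.append("#define %s" % name)
    lines.append('')
    config_h_text = config_h_text + '\n'.join(lines)

define("HAVE_FOO", 1, comment="Set to 1 if foo is available")
print(config_h_text)
# /* Set to 1 if foo is available */
# #define HAVE_FOO 1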
repo: kodexlab/reliure
path: reliure/types.py
url: https://github.com/kodexlab/reliure/blob/0450c7a9254c5c003162738458bbe0c49e777ba5/reliure/types.py#L139-L154
code:

def as_dict(self):
    """ returns a dictionary view of the option

    :returns: the option converted in a dict
    :rtype: dict
    """
    info = {}
    info["type"] = self.__class__.__name__
    info["help"] = self.help
    info["default"] = self.default
    info["multi"] = self.multi
    info["uniq"] = self.uniq
    info["choices"] = self.choices
    # TODO recursive call on the attrs
    #info["attrs"] = self.attrs
    return info
docstring: returns a dictionary view of the option :returns: the option converted in a dict :rtype: dict
language: python
partition: train
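For illustration, the dict view returned by as_dict() has this shape for a hypothetical option (the values below are invented; only the keys come from the method above):

info = {
    "type": "Numeric",            # class name of the option type
    "help": "number of results",
    "default": 10,
    "multi": False,
    "uniq": False,
    "choices": None,
}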
repo: iotaledger/iota.lib.py
path: iota/crypto/kerl/pykerl.py
url: https://github.com/iotaledger/iota.lib.py/blob/97cdd1e241498446b46157b79b2a1ea2ec6d387a/iota/crypto/kerl/pykerl.py#L27-L80
code:

def absorb(self, trits, offset=0, length=None):
    # type: (MutableSequence[int], int, Optional[int]) -> None
    """
    Absorb trits into the sponge from a buffer.

    :param trits:
        Buffer that contains the trits to absorb.

    :param offset:
        Starting offset in ``trits``.

    :param length:
        Number of trits to absorb. Defaults to ``len(trits)``.
    """
    # Pad input if necessary, so that it can be divided evenly into
    # hashes.
    # Note that this operation creates a COPY of ``trits``; the
    # incoming buffer is not modified!
    pad = ((len(trits) % TRIT_HASH_LENGTH) or TRIT_HASH_LENGTH)
    trits += [0] * (TRIT_HASH_LENGTH - pad)

    if length is None:
        length = len(trits)

    if length < 1:
        raise with_context(
            exc=ValueError('Invalid length passed to ``absorb``.'),
            context={
                'trits': trits,
                'offset': offset,
                'length': length,
            },
        )

    while offset < length:
        stop = min(offset + TRIT_HASH_LENGTH, length)

        # If we're copying over a full chunk, zero last trit.
        if stop - offset == TRIT_HASH_LENGTH:
            trits[stop - 1] = 0

        signed_nums = conv.convertToBytes(trits[offset:stop])

        # Convert signed bytes into their equivalent unsigned
        # representation, in order to use Python's built-in bytes
        # type.
        unsigned_bytes = bytearray(
            conv.convert_sign(b) for b in signed_nums
        )

        self.k.update(unsigned_bytes)

        offset += TRIT_HASH_LENGTH
docstring: Absorb trits into the sponge from a buffer. :param trits: Buffer that contains the trits to absorb. :param offset: Starting offset in ``trits``. :param length: Number of trits to absorb. Defaults to ``len(trits)``.
language: python
partition: test
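The padding arithmetic at the top of absorb() rounds the buffer up to a whole number of chunks. A small sketch (Kerl's TRIT_HASH_LENGTH is 243; the buffer contents are invented) shows the effect, including how the `or` avoids appending a full extra chunk when the length already divides evenly:

TRIT_HASH_LENGTH = 243

trits = [1, 0, -1] * 100  # 300 trits, not a multiple of 243
pad = ((len(trits) % TRIT_HASH_LENGTH) or TRIT_HASH_LENGTH)  # 300 % 243 == 57
trits += [0] * (TRIT_HASH_LENGTH - pad)                      # append 186 zeros
print(len(trits))  # 486, i.e. exactly 2 chunks of 243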
repo: steven-lang/bottr
path: bottr/bot.py
url: https://github.com/steven-lang/bottr/blob/c1b92becc31adfbd5a7b77179b852a51da70b193/bottr/bot.py#L79-L115
code:

def _listen_comments(self):
    """Start listening to comments, using a separate thread."""
    # Collect comments in a queue
    comments_queue = Queue(maxsize=self._n_jobs * 4)

    threads = []  # type: List[BotQueueWorker]

    try:
        # Create n_jobs CommentsThreads
        for i in range(self._n_jobs):
            t = BotQueueWorker(name='CommentThread-t-{}'.format(i),
                               jobs=comments_queue,
                               target=self._process_comment)
            t.start()
            threads.append(t)

        # Iterate over all comments in the comment stream
        for comment in self._reddit.subreddit('+'.join(self._subs)).stream.comments():
            # Check for stopping
            if self._stop:
                self._do_stop(comments_queue, threads)
                break

            comments_queue.put(comment)

        self.log.debug('Listen comments stopped')
    except Exception as e:
        self._do_stop(comments_queue, threads)
        self.log.error('Exception while listening to comments:')
        self.log.error(str(e))
        self.log.error('Waiting for 10 minutes and trying again.')
        time.sleep(10 * 60)

        # Retry
        self._listen_comments()
docstring: Start listening to comments, using a separate thread.
language: python
partition: train
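The method above is a standard producer/consumer setup: worker threads drain a bounded queue while the main loop feeds it. A self-contained sketch of the same pattern (BotQueueWorker and the Reddit stream are replaced by stand-ins here):

import threading
from queue import Queue

def worker(jobs):
    # Consume items until a None sentinel arrives.
    while True:
        item = jobs.get()
        if item is None:
            jobs.task_done()
            break
        print('processing', item)
        jobs.task_done()

jobs = Queue(maxsize=8)
threads = [threading.Thread(target=worker, args=(jobs,)) for _ in range(2)]
for t in threads:
    t.start()
for comment in ['a', 'b', 'c']:  # stands in for the comment stream
    jobs.put(comment)
for _ in threads:
    jobs.put(None)  # one sentinel per worker
for t in threads:
    t.join()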
repo: ska-sa/purr
path: Purr/Editors.py
url: https://github.com/ska-sa/purr/blob/4c848768d0485d0f88b30850d0d5372221b21b66/Purr/Editors.py#L819-L851
code:

def updateEntry(self):
    """Updates entry object with current content of dialog.
    In new entry mode (setEntry() not called, so self.entry=None), creates new entry object.
    In old entry mode (setEntry() called), updates and saves old entry object.
    """
    # form up new entry
    title = str(self.wtitle.text())
    comment = str(self.comment_doc.toPlainText())
    # process comment string -- eliminate single newlines, make double-newlines
    # into separate paragraphs; exception are paragraphs that start with "#LOG:",
    # these get special treatment and the single newlines are left intact
    pars = []
    for paragraph in comment.split("\n\n"):
        if paragraph.startswith("LOG:"):
            pars.append(paragraph.replace("\n", "<BR>"))
        else:
            pars.append(paragraph.replace("\n", " "))
    comment = "\n".join(pars)
    # go through data products and decide what to do with each one
    busy = Purr.BusyIndicator()
    # get list of DPs
    dps, updated = self.wdplv.buildDPList()
    # emit signal for all newly-created DPs
    for dp in dps:
        if not dp.archived:
            self.emit(SIGNAL("creatingDataProduct"), dp.sourcepath)
    # update or return new entry
    if self.entry:
        self.entry.update(title=title, comment=comment, dps=dps)
        self.entry.save(refresh_index=updated)
        return self.entry
    else:
        return Purr.LogEntry(time.time(), title, comment, dps)
docstring: Updates entry object with current content of dialog. In new entry mode (setEntry() not called, so self.entry=None), creates new entry object. In old entry mode (setEntry() called), updates and saves old entry object.
language: python
partition: train
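The comment normalization in updateEntry() can be seen in isolation: double newlines delimit paragraphs, single newlines inside an ordinary paragraph collapse to spaces, and paragraphs starting with "LOG:" keep their line breaks as <BR>. (The sample text is invented.)

comment = "first line\nsecond line\n\nLOG: step 1\nstep 2"
pars = []
for paragraph in comment.split("\n\n"):
    if paragraph.startswith("LOG:"):
        pars.append(paragraph.replace("\n", "<BR>"))
    else:
        pars.append(paragraph.replace("\n", " "))
print("\n".join(pars))
# first line second line
# LOG: step 1<BR>step 2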
repo: bitesofcode/projex
path: projex/rest.py
url: https://github.com/bitesofcode/projex/blob/d31743ec456a41428709968ab11a2cf6c6c76247/projex/rest.py#L92-L98
code:

def jsonify(py_data, default=None, indent=4, sort_keys=True):
    """ Converts the inputted Python data to JSON format.

    :param py_data | <variant>
    """
    return json.dumps(py_data, default=py2json, indent=indent, sort_keys=sort_keys)
docstring: Converts the inputted Python data to JSON format. :param py_data | <variant>
language: python
partition: train
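Note that jsonify() accepts a `default` argument but never forwards it; the encoder is hard-wired to py2json. The call reduces to standard json.dumps usage, sketched here with a trivial stand-in for py2json so the snippet runs on its own:

import json

def py2json(obj):
    # Stand-in for projex.rest.py2json, which handles non-JSON types.
    return str(obj)

data = {'b': 1, 'a': {1, 2}}  # the set is not JSON-serializable by default
print(json.dumps(data, default=py2json, indent=4, sort_keys=True))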
repo: Chilipp/model-organization
path: model_organization/__init__.py
url: https://github.com/Chilipp/model-organization/blob/694d1219c7ed7e1b2b17153afa11bdc21169bca2/model_organization/__init__.py#L1595-L1616
code:

def is_archived(self, experiment, ignore_missing=True):
    """
    Convenience function to determine whether the given experiment has been
    archived already

    Parameters
    ----------
    experiment: str
        The experiment to check

    Returns
    -------
    str or None
        The path to the archive if it has been archived, otherwise None
    """
    if ignore_missing:
        if isinstance(self.config.experiments.get(experiment, True), Archive):
            return self.config.experiments.get(experiment, True)
    else:
        if isinstance(self.config.experiments[experiment], Archive):
            return self.config.experiments[experiment]
docstring: Convenience function to determine whether the given experiment has been archived already Parameters ---------- experiment: str The experiment to check Returns ------- str or None The path to the archive if it has been archived, otherwise None
language: python
partition: train
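The control flow returns the stored value only when it is an Archive instance, and falls through to an implicit None otherwise. A runnable sketch of the same logic on a plain dict (the Archive stand-in and the experiment entries are invented):

class Archive(str):
    """Stand-in: archived experiments store a path-like Archive value."""

experiments = {'exp1': Archive('/data/archives/exp1.tar'), 'exp2': 'running'}

def is_archived(experiment, ignore_missing=True):
    # Same shape as the method above, with a dict in place of self.config.
    if ignore_missing:
        if isinstance(experiments.get(experiment, True), Archive):
            return experiments.get(experiment, True)
    else:
        if isinstance(experiments[experiment], Archive):
            return experiments[experiment]

print(is_archived('exp1'))  # /data/archives/exp1.tar
print(is_archived('exp2'))  # None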
repo: newville/wxmplot
path: examples/tifffile.py
url: https://github.com/newville/wxmplot/blob/8e0dc037453e5cdf18c968dc5a3d29efd761edee/examples/tifffile.py#L497-L561
code:

def series(self):
    """Return series of TIFFpage with compatible shape and properties."""
    if self.is_ome:
        series = self._omeseries()
    elif self.is_fluoview:
        dims = {b'X': 'X', b'Y': 'Y', b'Z': 'Z', b'T': 'T',
                b'WAVELENGTH': 'C', b'TIME': 'T', b'XY': 'R',
                b'EVENT': 'V', b'EXPOSURE': 'L'}
        mmhd = list(reversed(self.pages[0].mm_header.dimensions))
        series = [Record(
            axes=''.join(dims.get(i[0].strip().upper(), 'O')
                         for i in mmhd if i[1] > 1),
            shape=tuple(int(i[1]) for i in mmhd if i[1] > 1),
            pages=self.pages,
            dtype=numpy.dtype(self.pages[0].dtype))]
    elif self.is_lsm:
        lsmi = self.pages[0].cz_lsm_info
        axes = CZ_SCAN_TYPES[lsmi.scan_type]
        if self.pages[0].is_rgb:
            axes = axes.replace('C', '').replace('XY', 'XYC')
        axes = axes[::-1]
        shape = [getattr(lsmi, CZ_DIMENSIONS[i]) for i in axes]
        pages = [p for p in self.pages if not p.is_reduced]
        series = [Record(axes=axes, shape=shape, pages=pages,
                         dtype=numpy.dtype(pages[0].dtype))]
        if len(pages) != len(self.pages):
            # reduced RGB pages
            pages = [p for p in self.pages if p.is_reduced]
            cp = 1
            i = 0
            while cp < len(pages) and i < len(shape) - 2:
                cp *= shape[i]
                i += 1
            shape = shape[:i] + list(pages[0].shape)
            axes = axes[:i] + 'CYX'
            series.append(Record(axes=axes, shape=shape, pages=pages,
                                 dtype=numpy.dtype(pages[0].dtype)))
    elif self.is_nih:
        series = [Record(pages=self.pages,
                         shape=(len(self.pages),) + self.pages[0].shape,
                         axes='I' + self.pages[0].axes,
                         dtype=numpy.dtype(self.pages[0].dtype))]
    elif self.pages[0].is_shaped:
        shape = self.pages[0].tags['image_description'].value[7:-1]
        shape = tuple(int(i) for i in shape.split(b','))
        series = [Record(pages=self.pages, shape=shape,
                         axes='O' * len(shape),
                         dtype=numpy.dtype(self.pages[0].dtype))]
    else:
        shapes = []
        pages = {}
        for page in self.pages:
            shape = page.shape + (page.axes,
                                  page.compression in TIFF_DECOMPESSORS)
            if not shape in pages:
                shapes.append(shape)
                pages[shape] = [page]
            else:
                pages[shape].append(page)
        series = [Record(pages=pages[s],
                         axes=(('I' + s[-2]) if len(pages[s]) > 1 else s[-2]),
                         dtype=numpy.dtype(pages[s][0].dtype),
                         shape=((len(pages[s]),) + s[:-2] if len(pages[s]) > 1
                                else s[:-2]))
                  for s in shapes]
    return series
docstring: Return series of TIFFpage with compatible shape and properties.
language: python
partition: train
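The final else-branch above groups pages by a compound (shape, axes, decompressible) key while preserving first-seen order. The same idiom in isolation, with invented page tuples:

pages = {}
shapes = []
for page in [(64, 64, 'YX', True), (32, 32, 'YX', True), (64, 64, 'YX', True)]:
    if page not in pages:
        shapes.append(page)   # remember first-seen order of keys
        pages[page] = [page]
    else:
        pages[page].append(page)
print([len(pages[s]) for s in shapes])  # [2, 1]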
repo: NuGrid/NuGridPy
path: nugridpy/mesa.py
url: https://github.com/NuGrid/NuGridPy/blob/eee8047446e398be77362d82c1d8b3310054fab0/nugridpy/mesa.py#L2070-L2600
code:

def kip_cont(self, ifig=110, modstart=0, modstop=-1, t0_model=0,
             outfile='out.png', xlims=None, ylims=None, xres=1000, yres=1000,
             ixaxis='model_number', mix_zones=20, burn_zones=20,
             plot_radius=False, engenPlus=True, engenMinus=False,
             landscape_plot=False, rad_lines=False, profiles=[],
             showfig=True, outlines=True, boundaries=True,
             c12_boundary=False, rasterise=False, yscale='1.',
             engenlevels=None, CBM=False, fsize=14):
    """
    This function creates a Kippenhahn plot with energy flux using
    contours.

    This plot uses mixing_regions and burning_regions written to your
    history.data or star.log. Set both variables in the
    log_columns.list file to 20 as a start.

    The output log file should then contain columns called
    "mix_type_n", "mix_qtop_n", "burn_type_n" and "burn_qtop_n". The
    number of columns (i.e. the biggest value of n) is what goes in
    the arguments as mix_zones and burn_zones. DO NOT WORRY! if you do
    not have these columns, just leave the default values alone and
    the script should recognise that you do not have these columns and
    make the most detailed plot that is available to you.

    Defaults are set to get some plot, that may not look great if you
    zoom in interactively. Play with xres and yres as well as setting
    the xlims and ylims to the region you are interested in.

    Parameters
    ----------
    ifig : integer, optional
        Figure frame number. The default is 110.
    modstart : integer, optional
        Model from which you want to plot (be careful if your
        history.data or star.log output is sparse...). If it is 0 then
        it starts from the beginning, works even if log_cnt > 1. The
        default is 0.
    modstop : integer, optional
        Model to which you wish to plot, -1 corresponds to end [if
        log_cnt>1, divide modstart and modstop by log_cnt, this needs
        to be improved! SJ: this should be fixed now]. The default is
        -1.
    t0_model : integer, optional
        Model number from which to reset the time to 0. Typically, if
        modstart!=0, t0_model=modstart is a good choice, but we leave
        the choice to the user in case the time is wished to start
        from 0 at a different key point of the evolution. The default
        value is 0.
    outfile : string, optional
        'filename + extension' where you want to save the figure. The
        default is "out.png".
    xlims, ylims : list, optional
        Plot limits, however these are somewhat obsolete now that we
        have modstart and modstop. Leaving them as 0. is probably no
        slower, and you can always zoom in afterwards in mpl. ylims is
        important for well resolved thermal pulse etc plots; it's best
        to get the upper and lower limits of he-intershell using
        s.kippenhahn_CO(1,'model') first. The default is [0., 0.].
    xres, yres : integer, optional
        Plot resolution. Needless to say that increasing these values
        will yield a nicer plot with some slow-down in plotting time.
        You will most commonly change xres. For a prelim plot, try
        xres~200, then bump it up to anywhere from 1000-10000 for real
        nicely resolved, publication quality plots. The default is
        1000.
    ixaxis : string, optional
        Either 'log_time_left', 'age', or 'model_number'. The default
        is "model_number".
    mix_zones, burn_zones : integer, optional
        As described above, if you have more detailed output about
        your convection and energy generation boundaries in columns
        mix_type_n, mix_qtop_n, burn_type_n and burn_qtop_n, you need
        to specify the total number of columns for mixing zones and
        burning zones that you have. Can't work this out from your
        history.data or star.log file? Check the history_columns.list
        that you used, it'll be the number after "mixing regions" and
        "burning regions". Can't see these columns? Leave it and 2
        conv zones and 2 burn zones will be drawn using other data
        that you certainly should have in your history.data or
        star.log file. The default for both is 20.
    plot_radius : boolean, optional
        Whether on a second y-axis you want to plot the radius of the
        surface and the he-free core. The default is False.
    engenPlus : boolean
        Plot energy generation contours for eps_nuc>0. The default is
        True.
    engenMinus : boolean, optional
        Plot energy generation contours for eps_nuc<0. The default is
        False.
    landscape_plot : boolean, optional
        The default is False.
    rad_lines : boolean, optional
        The default is False.
    profiles : list, optional
        The default is [].
    showfig : boolean, optional
        The default is True.
    outlines : boolean, optional
        Whether or not to plot outlines of conv zones in darker
        colour.
    boundaries : boolean, optional
        Whether or not to plot H-, He- and C-free boundaries.
    c12_boundary : boolean, optional
        The default is False.
    rasterise : boolean, optional
        Whether or not to rasterise the contour regions to make
        smaller vector graphics figures. The default is False.
    yscale : string, optional
        Re-scale the y-axis by this amount.
    engenlevels : list
        Give custom levels to the engenPlus contour. If None, the
        levels are chosen automatically. The default is None.
    CBM : boolean, optional
        Plot contours for where CBM is active?
    fsize : integer
        Font size for labels.

    Notes
    -----
    The parameter xlims is deprecated.

    """
    if ylims is None:
        ylims = [0., 0.]
    if xlims is None:
        xlims = [0., 0.]

    # Find correct modstart and modstop:
    mod = np.array([int(i) for i in self.get('model_number')])
    mod1 = np.abs(mod - modstart).argmin()
    mod2 = np.abs(mod - modstop).argmin()
    if modstart != 0:
        modstart = mod1
    if modstop != -1:
        modstop = mod2

    xxyy = [self.get('star_age')[modstart:modstop],
            self.get('star_age')[modstart:modstop]]
    mup = max(float(self.get('star_mass')[0]) * 1.02, 1.0)
    nmodels = len(self.get('model_number')[modstart:modstop])

    if ylims == [0., 0.]:
        mup = max(float(self.get('star_mass')[0]) * 1.02, 1.0)
        mDOWN = 0.
    else:
        mup = ylims[1]
        mDOWN = ylims[0]

    # y-axis resolution
    ny = yres
    dy = old_div((mup - mDOWN), float(ny))

    # x-axis resolution
    maxpoints = xres
    dx = int(max(1, old_div(nmodels, maxpoints)))

    y = np.arange(mDOWN, mup, dy)
    x = np.arange(0, nmodels, dx)

    Msol = 1.98892E+33

    engenstyle = 'full'
    B1 = np.zeros([len(y), len(x)], float)
    B2 = np.zeros([len(y), len(x)], float)
    try:
        self.get('burn_qtop_1')
    except:
        engenstyle = 'twozone'
    if engenstyle == 'full' and (engenPlus == True or engenMinus == True):
        ulimit_array = np.array(
            [self.get('burn_qtop_' + str(j))[modstart:modstop:dx] *
             self.get('star_mass')[modstart:modstop:dx]
             for j in range(1, burn_zones + 1)])
        llimit_array = np.delete(ulimit_array, -1, 0)
        llimit_array = np.insert(ulimit_array, 0, 0., 0)
        btype_array = np.array(
            [self.get('burn_type_' + str(j))[modstart:modstop:dx]
             for j in range(1, burn_zones + 1)])

        old_percent = 0
        for i in range(len(x)):
            # writing status
            percent = int(i * 100 / (len(x) - 1))
            if percent >= old_percent + 5:
                sys.stdout.flush()
                sys.stdout.write("\r creating color map burn " + "...%d%%" % percent)
                old_percent = percent
            for j in range(burn_zones):
                if btype_array[j, i] > 0. and abs(btype_array[j, i]) < 99.:
                    B1[(np.abs(y - llimit_array[j][i])).argmin():(np.abs(y - ulimit_array[j][i])).argmin() + 1, i] = 10.0 ** (btype_array[j, i])
                elif btype_array[j, i] < 0. and abs(btype_array[j, i]) < 99.:
                    B2[(np.abs(y - llimit_array[j][i])).argmin():(np.abs(y - ulimit_array[j][i])).argmin() + 1, i] = 10.0 ** (abs(btype_array[j, i]))
        print(' \n')

    if engenstyle == 'twozone' and (engenPlus == True or engenMinus == True):
        V = np.zeros([len(y), len(x)], float)
        old_percent = 0
        for i in range(len(x)):
            # writing status
            percent = int(i * 100 / (len(x) - 1))
            if percent >= old_percent + 5:
                sys.stdout.flush()
                sys.stdout.write("\r creating color map1 " + "...%d%%" % percent)
                old_percent = percent
            llimitl1 = old_div(self.get('epsnuc_M_1')[modstart:modstop][i * dx], Msol)
            ulimitl1 = old_div(self.get('epsnuc_M_4')[modstart:modstop][i * dx], Msol)
            llimitl2 = old_div(self.get('epsnuc_M_5')[modstart:modstop][i * dx], Msol)
            ulimitl2 = old_div(self.get('epsnuc_M_8')[modstart:modstop][i * dx], Msol)
            llimith1 = old_div(self.get('epsnuc_M_2')[modstart:modstop][i * dx], Msol)
            ulimith1 = old_div(self.get('epsnuc_M_3')[modstart:modstop][i * dx], Msol)
            llimith2 = old_div(self.get('epsnuc_M_6')[modstart:modstop][i * dx], Msol)
            ulimith2 = old_div(self.get('epsnuc_M_7')[modstart:modstop][i * dx], Msol)
            # lower thresh first, then upper thresh:
            if llimitl1 != ulimitl1:
                for k in range(ny):
                    if llimitl1 <= y[k] and ulimitl1 > y[k]:
                        V[k, i] = 10.
            if llimitl2 != ulimitl2:
                for k in range(ny):
                    if llimitl2 <= y[k] and ulimitl2 > y[k]:
                        V[k, i] = 10.
            if llimith1 != ulimith1:
                for k in range(ny):
                    if llimith1 <= y[k] and ulimith1 > y[k]:
                        V[k, i] = 30.
            if llimith2 != ulimith2:
                for k in range(ny):
                    if llimith2 <= y[k] and ulimith2 > y[k]:
                        V[k, i] = 30.
        print(' \n')

    mixstyle = 'full'
    try:
        self.get('mix_qtop_1')
    except:
        mixstyle = 'twozone'
    if mixstyle == 'full':
        old_percent = 0
        Z = np.zeros([len(y), len(x)], float)
        if CBM:
            Zcbm = np.zeros([len(y), len(x)], float)
        ulimit_array = np.array(
            [self.get('mix_qtop_' + str(j))[modstart:modstop:dx] *
             self.get('star_mass')[modstart:modstop:dx]
             for j in range(1, mix_zones + 1)])
        llimit_array = np.delete(ulimit_array, -1, 0)
        llimit_array = np.insert(ulimit_array, 0, 0., 0)
        mtype_array = np.array(
            [self.get('mix_type_' + str(j))[modstart:modstop:dx]
             for j in range(1, mix_zones + 1)])
        for i in range(len(x)):
            # writing status
            percent = int(i * 100 / (len(x) - 1))
            if percent >= old_percent + 5:
                sys.stdout.flush()
                sys.stdout.write("\r creating color map mix " + "...%d%%" % percent)
                old_percent = percent
            for j in range(mix_zones):
                if mtype_array[j, i] == 1.:
                    Z[(np.abs(y - llimit_array[j][i])).argmin():(np.abs(y - ulimit_array[j][i])).argmin() + 1, i] = 1.
                if CBM:
                    if mtype_array[j, i] == 2.:
                        Zcbm[(np.abs(y - llimit_array[j][i])).argmin():(np.abs(y - ulimit_array[j][i])).argmin() + 1, i] = 1.
        print(' \n')
    if mixstyle == 'twozone':
        Z = np.zeros([len(y), len(x)], float)
        old_percent = 0
        for i in range(len(x)):
            # writing status
            percent = int(i * 100 / (len(x) - 1))
            if percent >= old_percent + 5:
                sys.stdout.flush()
                sys.stdout.write("\r creating color map mix " + "...%d%%" % percent)
                old_percent = percent
            ulimit = self.get('conv_mx1_top')[modstart:modstop][i * dx] * self.get('star_mass')[modstart:modstop][i * dx]
            llimit = self.get('conv_mx1_bot')[modstart:modstop][i * dx] * self.get('star_mass')[modstart:modstop][i * dx]
            if llimit != ulimit:
                for k in range(ny):
                    if llimit <= y[k] and ulimit > y[k]:
                        Z[k, i] = 1.
            ulimit = self.get('conv_mx2_top')[modstart:modstop][i * dx] * self.get('star_mass')[modstart:modstop][i * dx]
            llimit = self.get('conv_mx2_bot')[modstart:modstop][i * dx] * self.get('star_mass')[modstart:modstop][i * dx]
            if llimit != ulimit:
                for k in range(ny):
                    if llimit <= y[k] and ulimit > y[k]:
                        Z[k, i] = 1.
        print(' \n')

    if rad_lines == True:
        masses = np.arange(0.1, 1.5, 0.1)
        rads = [[], [], [], [], [], [], [], [], [], [], [], [], [], []]
        modno = []
        for i in range(len(profiles)):
            p = mesa_profile('./LOGS', profiles[i])
            modno.append(p.header_attr['model_number'])
            for j in range(len(masses)):
                idx = np.abs(p.get('mass') - masses[j]).argmin()
                rads[j].append(p.get('radius')[idx])

    print('engenstyle was ', engenstyle)
    print('mixstyle was ', mixstyle)
    print('\n finished preparing color map')

    ########################################################################
    # ---------------------------------plot--------------------------------#
    fig = pyl.figure(ifig)
    if landscape_plot == True:
        fig.set_size_inches(9, 4)
        pl.gcf().subplots_adjust(bottom=0.2)
        pl.gcf().subplots_adjust(right=0.85)

    params = {'axes.labelsize': fsize,
              'font.size': fsize,
              'legend.fontsize': fsize,
              'xtick.labelsize': fsize,
              'ytick.labelsize': fsize,
              'text.usetex': False}
    pyl.rcParams.update(params)

    ax = pl.axes()

    if ixaxis == 'log_time_left':
        # log of time left until core collapse
        gage = self.get('star_age')
        lage = np.zeros(len(gage))
        agemin = max(old_div(abs(gage[-1] - gage[-2]), 5.), 1.e-10)
        for i in np.arange(len(gage)):
            if gage[-1] - gage[i] > agemin:
                lage[i] = np.log10(gage[-1] - gage[i] + agemin)
            else:
                lage[i] = np.log10(agemin)
        xxx = lage[modstart:modstop]
        print('plot versus time left')
        ax.set_xlabel('$ \\log_{10}(t-t_\mathrm{end})\ /\ \mathrm{[yr]}$', fontsize=fsize)
        if xlims[1] == 0.:
            xlims = [xxx[0], xxx[-1]]
    elif ixaxis == 'model_number':
        xxx = self.get('model_number')[modstart:modstop]
        print('plot versus model number')
        ax.set_xlabel('Model number', fontsize=fsize)
        if xlims[1] == 0.:
            xlims = [self.get('model_number')[modstart],
                     self.get('model_number')[modstop]]
    elif ixaxis == 'age':
        if t0_model != 0:
            t0_mod = np.abs(mod - t0_model).argmin()
            xxx = self.get('star_age')[modstart:modstop] - self.get('star_age')[t0_mod]
            print('plot versus age')
            ax.set_xlabel('t - %.5e / [yr]' % self.get('star_age')[modstart], fontsize=fsize)
        else:
            xxx = old_div(self.get('star_age')[modstart:modstop], 1.e6)
            ax.set_xlabel('t [Myr]', fontsize=fsize)
        if xlims[1] == 0.:
            xlims = [xxx[0], xxx[-1]]

    ax.set_ylabel('$\mathrm{enclosed\ mass\ /\ [M_\odot]}$', fontsize=fsize)

    # some stuff for rasterizing only the contour part of the plot, for nice, but light, eps:
    class ListCollection(Collection):
        def __init__(self, collections, **kwargs):
            Collection.__init__(self, **kwargs)
            self.set_collections(collections)

        def set_collections(self, collections):
            self._collections = collections

        def get_collections(self):
            return self._collections

        @allow_rasterization
        def draw(self, renderer):
            for _c in self._collections:
                _c.draw(renderer)

    def insert_rasterized_contour_plot(c):
        collections = c.collections
        for _c in collections:
            _c.remove()
        cc = ListCollection(collections, rasterized=True)
        ax = pl.gca()
        ax.add_artist(cc)
        return cc

    cmapMIX = matplotlib.colors.ListedColormap(['w', '#8B8386'])  # rose grey
    if CBM:
        cmapCBM = matplotlib.colors.ListedColormap(['w', 'g'])  # green
    cmapB1 = pyl.cm.get_cmap('Blues')
    cmapB2 = pl.cm.get_cmap('Reds')

    ylims1 = [0., 0.]
    ylims1[0] = ylims[0]
    ylims1[1] = ylims[1]
    if ylims == [0., 0.]:
        ylims[0] = 0.
        ylims[1] = mup
    if ylims[0] != 0.:
        ylab = '$(\mathrm{Mass }$ - ' + str(ylims[0])
        if yscale != '1.':
            ylab += ') / ' + yscale + ' $M_\odot$'
        else:
            ylab += ') / $M_\odot$'
        ax.set_ylabel(ylab)
        y = y - ylims[0]
    y = y * float(yscale)  # SJONES tweak
    ylims[0] = y[0]
    ylims[1] = y[-1]

    print('plotting contours')
    CMIX = ax.contourf(xxx[::dx], y, Z, cmap=cmapMIX, alpha=0.6, levels=[0.5, 1.5])
    if rasterise == True:
        insert_rasterized_contour_plot(CMIX)
    if outlines == True:
        CMIX_outlines = ax.contour(xxx[::dx], y, Z, cmap=cmapMIX)
        if rasterise == True:
            insert_rasterized_contour_plot(CMIX_outlines)

    if CBM:
        CCBM = ax.contourf(xxx[::dx], y, Zcbm, cmap=cmapCBM, alpha=0.6, levels=[0.5, 1.5])
        if rasterise == True:
            insert_rasterized_contour_plot(CCBM)
        if outlines == True:
            CCBM_outlines = ax.contour(xxx[::dx], y, Zcbm, cmap=cmapCBM)
            if rasterise == True:
                insert_rasterized_contour_plot(CCBM_outlines)

    if engenstyle == 'full' and engenPlus == True:
        if engenlevels != None:
            CBURN1 = ax.contourf(xxx[::dx], y, B1, cmap=cmapB1, alpha=0.5,
                                 locator=matplotlib.ticker.LogLocator(),
                                 levels=engenlevels)
            if outlines:
                CB1_outlines = ax.contour(xxx[::dx], y, B1, cmap=cmapB1, alpha=0.7,
                                          locator=matplotlib.ticker.LogLocator(),
                                          levels=engenlevels)
        else:
            CBURN1 = ax.contourf(xxx[::dx], y, B1, cmap=cmapB1, alpha=0.5,
                                 locator=matplotlib.ticker.LogLocator())
            if outlines:
                CB1_outlines = ax.contour(xxx[::dx], y, B1, cmap=cmapB1, alpha=0.7,
                                          locator=matplotlib.ticker.LogLocator())
        CBARBURN1 = pyl.colorbar(CBURN1)
        CBARBURN1.set_label('$|\epsilon_\mathrm{nuc}-\epsilon_{\\nu}| \; (\mathrm{erg\,g}^{-1}\mathrm{\,s}^{-1})$',
                            fontsize=fsize)
        if rasterise == True:
            insert_rasterized_contour_plot(CBURN1)
            if outlines:
                insert_rasterized_contour_plot(CB1_outlines)

    if engenstyle == 'full' and engenMinus == True:
        CBURN2 = ax.contourf(xxx[::dx], y, B2, cmap=cmapB2, alpha=0.5,
                             locator=matplotlib.ticker.LogLocator())
        if outlines:
            CBURN2_outlines = ax.contour(xxx[::dx], y, B2, cmap=cmapB2, alpha=0.7,
                                         locator=matplotlib.ticker.LogLocator())
        CBARBURN2 = pl.colorbar(CBURN2)
        if engenPlus == False:
            CBARBURN2.set_label('$|\epsilon_\mathrm{nuc}-\epsilon_{\\nu}| \; (\mathrm{erg\,g}^{-1}\mathrm{\,s}^{-1})$',
                                fontsize=fsize)
        if rasterise == True:
            insert_rasterized_contour_plot(CBURN2)
            if outlines:
                insert_rasterized_contour_plot(CB2_outlines)

    if engenstyle == 'twozone' and (engenPlus == True or engenMinus == True):
        ax.contourf(xxx[::dx], y, V, cmap=cmapB1, alpha=0.5)

    print('plotting patches')
    mtot = self.get('star_mass')[modstart:modstop][::dx]
    mtot1 = (mtot - ylims1[0]) * float(yscale)
    ax.plot(xxx[::dx], mtot1, 'k-')

    if boundaries == True:
        print('plotting abund boundaries')
        try:
            bound = self.get('h1_boundary_mass')[modstart:modstop]
            bound1 = (bound - ylims1[0]) * float(yscale)
            ax.plot(xxx, bound1, label='H boundary', linestyle='-')
            bound = self.get('he4_boundary_mass')[modstart:modstop]
            bound1 = (bound - ylims1[0]) * float(yscale)
            ax.plot(xxx, bound1, label='He boundary', linestyle='--')
            bound = self.get('c12_boundary_mass')[modstart:modstop]
            bound1 = (bound - ylims1[0]) * float(yscale)
            ax.plot(xxx, bound1, label='C boundary', linestyle='-.')
        except:
            try:
                bound = self.get('he_core_mass')[modstart:modstop]
                bound1 = (bound - ylims1[0]) * float(yscale)
                ax.plot(xxx, bound1, label='H boundary', linestyle='-')
                bound = self.get('c_core_mass')[modstart:modstop] - ylims[0]
                bound1 = (bound - ylims1[0]) * float(yscale)
                ax.plot(xxx, bound1, label='He boundary', linestyle='--')
                bound = self.get('o_core_mass')[modstart:modstop] - ylims[0]
                bound1 = (bound - ylims1[0]) * float(yscale)
                ax.plot(xxx, bound1, label='C boundary', linestyle='-.')
                bound = self.get('si_core_mass')[modstart:modstop] - ylims[0]
                bound1 = (bound - ylims1[0]) * float(yscale)
                ax.plot(xxx, bound1, label='C boundary', linestyle='-.')
                bound = self.get('fe_core_mass')[modstart:modstop] - ylims[0]
                bound1 = (bound - ylims1[0]) * float(yscale)
                ax.plot(xxx, bound1, label='C boundary', linestyle='-.')
            except:
                pass

    ax.axis([xlims[0], xlims[1], ylims[0], ylims[1]])

    if plot_radius == True:
        ax2 = pyl.twinx()
        ax2.plot(xxx, np.log10(self.get('he4_boundary_radius')[modstart:modstop]),
                 label='He boundary radius', color='k', linewidth=1., linestyle='-.')
        ax2.plot(xxx, self.get('log_R')[modstart:modstop],
                 label='radius', color='k', linewidth=1., linestyle='-.')
        ax2.set_ylabel('log(radius)')
    if rad_lines == True:
        ax2 = pyl.twinx()
        for i in range(len(masses)):
            ax2.plot(modno, np.log10(rads[i]), color='k')

    if outfile[-3:] == 'png':
        fig.savefig(outfile, dpi=300)
    elif outfile[-3:] == 'eps':
        fig.savefig(outfile, format='eps')
    elif outfile[-3:] == 'pdf':
        fig.savefig(outfile, format='pdf')
    if showfig == True:
        pyl.show()
"]", "bound1", "=", "(", "bound", "-", "ylims1", "[", "0", "]", ")", "*", "float", "(", "yscale", ")", "ax", ".", "plot", "(", "xxx", ",", "bound1", ",", "label", "=", "'H boundary'", ",", "linestyle", "=", "'-'", ")", "bound", "=", "self", ".", "get", "(", "'he4_boundary_mass'", ")", "[", "modstart", ":", "modstop", "]", "bound1", "=", "(", "bound", "-", "ylims1", "[", "0", "]", ")", "*", "float", "(", "yscale", ")", "ax", ".", "plot", "(", "xxx", ",", "bound1", ",", "label", "=", "'He boundary'", ",", "linestyle", "=", "'--'", ")", "bound", "=", "self", ".", "get", "(", "'c12_boundary_mass'", ")", "[", "modstart", ":", "modstop", "]", "bound1", "=", "(", "bound", "-", "ylims1", "[", "0", "]", ")", "*", "float", "(", "yscale", ")", "ax", ".", "plot", "(", "xxx", ",", "bound1", ",", "label", "=", "'C boundary'", ",", "linestyle", "=", "'-.'", ")", "except", ":", "try", ":", "bound", "=", "self", ".", "get", "(", "'he_core_mass'", ")", "[", "modstart", ":", "modstop", "]", "bound1", "=", "(", "bound", "-", "ylims1", "[", "0", "]", ")", "*", "float", "(", "yscale", ")", "ax", ".", "plot", "(", "xxx", ",", "bound1", ",", "label", "=", "'H boundary'", ",", "linestyle", "=", "'-'", ")", "bound", "=", "self", ".", "get", "(", "'c_core_mass'", ")", "[", "modstart", ":", "modstop", "]", "-", "ylims", "[", "0", "]", "bound1", "=", "(", "bound", "-", "ylims1", "[", "0", "]", ")", "*", "float", "(", "yscale", ")", "ax", ".", "plot", "(", "xxx", ",", "bound1", ",", "label", "=", "'He boundary'", ",", "linestyle", "=", "'--'", ")", "bound", "=", "self", ".", "get", "(", "'o_core_mass'", ")", "[", "modstart", ":", "modstop", "]", "-", "ylims", "[", "0", "]", "bound1", "=", "(", "bound", "-", "ylims1", "[", "0", "]", ")", "*", "float", "(", "yscale", ")", "ax", ".", "plot", "(", "xxx", ",", "bound1", ",", "label", "=", "'C boundary'", ",", "linestyle", "=", "'-.'", ")", "bound", "=", "self", ".", "get", "(", "'si_core_mass'", ")", "[", "modstart", ":", "modstop", "]", "-", "ylims", "[", "0", "]", "bound1", "=", "(", "bound", "-", "ylims1", "[", "0", "]", ")", "*", "float", "(", "yscale", ")", "ax", ".", "plot", "(", "xxx", ",", "bound1", ",", "label", "=", "'C boundary'", ",", "linestyle", "=", "'-.'", ")", "bound", "=", "self", ".", "get", "(", "'fe_core_mass'", ")", "[", "modstart", ":", "modstop", "]", "-", "ylims", "[", "0", "]", "bound1", "=", "(", "bound", "-", "ylims1", "[", "0", "]", ")", "*", "float", "(", "yscale", ")", "ax", ".", "plot", "(", "xxx", ",", "bound1", ",", "label", "=", "'C boundary'", ",", "linestyle", "=", "'-.'", ")", "except", ":", "# print 'problem to plot boundaries for this plot'", "pass", "ax", ".", "axis", "(", "[", "xlims", "[", "0", "]", ",", "xlims", "[", "1", "]", ",", "ylims", "[", "0", "]", ",", "ylims", "[", "1", "]", "]", ")", "if", "plot_radius", "==", "True", ":", "ax2", "=", "pyl", ".", "twinx", "(", ")", "ax2", ".", "plot", "(", "xxx", ",", "np", ".", "log10", "(", "self", ".", "get", "(", "'he4_boundary_radius'", ")", "[", "modstart", ":", "modstop", "]", ")", ",", "label", "=", "'He boundary radius'", ",", "color", "=", "'k'", ",", "linewidth", "=", "1.", ",", "linestyle", "=", "'-.'", ")", "ax2", ".", "plot", "(", "xxx", ",", "self", ".", "get", "(", "'log_R'", ")", "[", "modstart", ":", "modstop", "]", ",", "label", "=", "'radius'", ",", "color", "=", "'k'", ",", "linewidth", "=", "1.", ",", "linestyle", "=", "'-.'", ")", "ax2", ".", "set_ylabel", "(", "'log(radius)'", ")", "if", "rad_lines", "==", "True", ":", "ax2", "=", "pyl", ".", "twinx", "(", ")", 
"for", "i", "in", "range", "(", "len", "(", "masses", ")", ")", ":", "ax2", ".", "plot", "(", "modno", ",", "np", ".", "log10", "(", "rads", "[", "i", "]", ")", ",", "color", "=", "'k'", ")", "if", "outfile", "[", "-", "3", ":", "]", "==", "'png'", ":", "fig", ".", "savefig", "(", "outfile", ",", "dpi", "=", "300", ")", "elif", "outfile", "[", "-", "3", ":", "]", "==", "'eps'", ":", "fig", ".", "savefig", "(", "outfile", ",", "format", "=", "'eps'", ")", "elif", "outfile", "[", "-", "3", ":", "]", "==", "'pdf'", ":", "fig", ".", "savefig", "(", "outfile", ",", "format", "=", "'pdf'", ")", "if", "showfig", "==", "True", ":", "pyl", ".", "show", "(", ")" ]
This function creates a Kippenhahn plot with energy flux using
contours.

This plot uses mixing_regions and burning_regions written to your
history.data or star.log. Set both variables in the log_columns.list
file to 20 as a start. The output log file should then contain columns
called "mix_type_n", "mix_qtop_n", "burn_type_n" and "burn_qtop_n". The
number of columns (i.e. the biggest value of n) is what goes in the
arguments as mix_zones and burn_zones. DO NOT WORRY! if you do not have
these columns, just leave the default values alone and the script
should recognise that you do not have these columns and make the most
detailed plot that is available to you.

Defaults are set to get some plot, which may not look great if you
zoom in interactively. Play with xres and yres as well as setting the
xlims and ylims to the region you are interested in.

Parameters
----------
ifig : integer, optional
    Figure frame number. The default is 110.
modstart : integer, optional
    Model from which you want to plot (be careful if your history.data
    or star.log output is sparse...). If it is 0 then it starts from
    the beginning, works even if log_cnt > 1. The default is 0.
modstop : integer, optional
    Model to which you wish to plot; -1 corresponds to the end [if
    log_cnt > 1, divide modstart and modstop by log_cnt; this should
    be fixed now]. The default is -1.
t0_model : integer, optional
    Model number from which to reset the time to 0. Typically, if
    modstart != 0, t0_model = modstart is a good choice, but we leave
    the choice to the user in case the time is wished to start from 0
    at a different key point of the evolution. The default value is 0.
outfile : string, optional
    'filename + extension' where you want to save the figure. The
    default is "out.png".
xlims, ylims : list, optional
    Plot limits, however these are somewhat obsolete now that we have
    modstart and modstop. Leaving them as 0. is probably no slower,
    and you can always zoom in afterwards in mpl. ylims is important
    for well resolved thermal pulse etc plots; it's best to get the
    upper and lower limits of the he-intershell using
    s.kippenhahn_CO(1,'model') first. The default is [0., 0.].
xres, yres : integer, optional
    Plot resolution. Needless to say, increasing these values will
    yield a nicer plot with some slow-down in plotting time. You will
    most commonly change xres. For a prelim plot, try xres~200, then
    bump it up to anywhere from 1000-10000 for really nicely resolved,
    publication quality plots. The default is 1000.
ixaxis : string, optional
    Either 'log_time_left', 'age', or 'model_number'. The default is
    "model_number".
mix_zones, burn_zones : integer, optional
    As described above, if you have more detailed output about your
    convection and energy generation boundaries in columns mix_type_n,
    mix_qtop_n, burn_type_n and burn_qtop_n, you need to specify the
    total number of columns for mixing zones and burning zones that
    you have. Can't work this out from your history.data or star.log
    file? Check the history_columns.list that you used, it'll be the
    number after "mixing regions" and "burning regions". Can't see
    these columns? Leave it and 2 conv zones and 2 burn zones will be
    drawn using other data that you certainly should have in your
    history.data or star.log file. The default for both is 20.
plot_radius : boolean, optional
    Whether on a second y-axis you want to plot the radius of the
    surface and the he-free core. The default is False.
engenPlus : boolean
    Plot energy generation contours for eps_nuc > 0. The default is
    True.
engenMinus : boolean, optional
    Plot energy generation contours for eps_nuc < 0. The default is
    True.
landscape_plot : boolean, optional
    The default is False.
rad_lines : boolean, optional
    The default is False.
profiles : list, optional
    The default is [].
showfig : boolean, optional
    The default is True.
outlines : boolean, optional
    Whether or not to plot outlines of conv zones in darker colour.
boundaries : boolean, optional
    Whether or not to plot H-, He- and C-free boundaries.
c12_boundary : boolean, optional
    The default is False.
rasterise : boolean, optional
    Whether or not to rasterise the contour regions to make smaller
    vector graphics figures. The default is False.
yscale : string, optional
    Re-scale the y-axis by this amount.
engenlevels : list
    Give custom levels to the engenPlus contour. If None, the levels
    are chosen automatically. The default is None.
CBM : boolean, optional
    Plot contours for where CBM is active?
fsize : integer
    Font size for labels.

Notes
-----
The parameter xlims is deprecated.
[ "This", "function", "creates", "a", "Kippenhahn", "plot", "with", "energy", "flux", "using", "contours", "." ]
python
train
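A short usage sketch for the Kippenhahn routine documented above. The reader class and method names (mesa.star_log, kip_cont) follow the NuGrid tools this record comes from, but are assumptions since the record does not show the function's own name; the LOGS path and output file are placeholders.

from nugridpy import mesa

s = mesa.star_log('./LOGS')          # read history.data / star.log
s.kip_cont(ifig=110,
           modstart=0, modstop=-1,   # whole run
           xres=200, yres=200,       # coarse first pass; raise for publication quality
           ixaxis='model_number',
           outfile='kippenhahn.png')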
ArduPilot/MAVProxy
MAVProxy/modules/lib/wxhorizon_ui.py
https://github.com/ArduPilot/MAVProxy/blob/f50bdeff33064876f7dc8dc4683d278ff47f75d5/MAVProxy/modules/lib/wxhorizon_ui.py#L569-L675
def on_timer(self, event):
    '''Main Loop.'''
    state = self.state
    self.loopStartTime = time.time()
    if state.close_event.wait(0.001):
        self.timer.Stop()
        self.Destroy()
        return

    # Check for resizing
    self.checkReszie()
    if self.resized:
        self.on_idle(0)

    # Get attitude information
    while state.child_pipe_recv.poll():
        objList = state.child_pipe_recv.recv()
        for obj in objList:
            self.calcFontScaling()
            if isinstance(obj, Attitude):
                self.oldRoll = self.roll
                self.pitch = obj.pitch * 180 / math.pi
                self.roll = obj.roll * 180 / math.pi
                self.yaw = obj.yaw * 180 / math.pi

                # Update Roll, Pitch, Yaw Text
                self.updateRPYText()

                # Recalculate Horizon Polygons
                self.calcHorizonPoints()

                # Update Pitch Markers
                self.adjustPitchmarkers()

            elif isinstance(obj, VFR_HUD):
                self.heading = obj.heading
                self.airspeed = obj.airspeed
                self.climbRate = obj.climbRate

                # Update Airspeed, Altitude, Climb Rate Locations
                self.updateAARText()

                # Update Heading North Pointer
                self.adjustHeadingPointer()
                self.adjustNorthPointer()

            elif isinstance(obj, Global_Position_INT):
                self.relAlt = obj.relAlt
                self.relAltTime = obj.curTime

                # Update Airspeed, Altitude, Climb Rate Locations
                self.updateAARText()

                # Update Altitude History
                self.updateAltHistory()

            elif isinstance(obj, BatteryInfo):
                self.voltage = obj.voltage
                self.current = obj.current
                self.batRemain = obj.batRemain

                # Update Battery Bar
                self.updateBatteryBar()

            elif isinstance(obj, FlightState):
                self.mode = obj.mode
                self.armed = obj.armState

                # Update Mode and Arm State Text
                self.updateStateText()

            elif isinstance(obj, WaypointInfo):
                self.currentWP = obj.current
                self.finalWP = obj.final
                self.wpDist = obj.currentDist
                self.nextWPTime = obj.nextWPTime
                if obj.wpBearing < 0.0:
                    self.wpBearing = obj.wpBearing + 360
                else:
                    self.wpBearing = obj.wpBearing

                # Update waypoint text
                self.updateWPText()

                # Adjust Waypoint Pointer
                self.adjustWPPointer()

            elif isinstance(obj, FPS):
                # Update fps target
                self.fps = obj.fps

    # Skip drawing if it is too early for the target frame rate
    if (time.time() > self.nextTime):
        # Update Matplotlib Plot
        self.canvas.draw()
        self.canvas.Refresh()
        self.Refresh()
        self.Update()

        # Calculate next frame time
        if (self.fps > 0):
            fpsTime = 1 / self.fps
            self.nextTime = fpsTime + self.loopStartTime
        else:
            self.nextTime = time.time()
[ "def", "on_timer", "(", "self", ",", "event", ")", ":", "state", "=", "self", ".", "state", "self", ".", "loopStartTime", "=", "time", ".", "time", "(", ")", "if", "state", ".", "close_event", ".", "wait", "(", "0.001", ")", ":", "self", ".", "timer", ".", "Stop", "(", ")", "self", ".", "Destroy", "(", ")", "return", "# Check for resizing", "self", ".", "checkReszie", "(", ")", "if", "self", ".", "resized", ":", "self", ".", "on_idle", "(", "0", ")", "# Get attitude information", "while", "state", ".", "child_pipe_recv", ".", "poll", "(", ")", ":", "objList", "=", "state", ".", "child_pipe_recv", ".", "recv", "(", ")", "for", "obj", "in", "objList", ":", "self", ".", "calcFontScaling", "(", ")", "if", "isinstance", "(", "obj", ",", "Attitude", ")", ":", "self", ".", "oldRoll", "=", "self", ".", "roll", "self", ".", "pitch", "=", "obj", ".", "pitch", "*", "180", "/", "math", ".", "pi", "self", ".", "roll", "=", "obj", ".", "roll", "*", "180", "/", "math", ".", "pi", "self", ".", "yaw", "=", "obj", ".", "yaw", "*", "180", "/", "math", ".", "pi", "# Update Roll, Pitch, Yaw Text Text", "self", ".", "updateRPYText", "(", ")", "# Recalculate Horizon Polygons", "self", ".", "calcHorizonPoints", "(", ")", "# Update Pitch Markers", "self", ".", "adjustPitchmarkers", "(", ")", "elif", "isinstance", "(", "obj", ",", "VFR_HUD", ")", ":", "self", ".", "heading", "=", "obj", ".", "heading", "self", ".", "airspeed", "=", "obj", ".", "airspeed", "self", ".", "climbRate", "=", "obj", ".", "climbRate", "# Update Airpseed, Altitude, Climb Rate Locations", "self", ".", "updateAARText", "(", ")", "# Update Heading North Pointer", "self", ".", "adjustHeadingPointer", "(", ")", "self", ".", "adjustNorthPointer", "(", ")", "elif", "isinstance", "(", "obj", ",", "Global_Position_INT", ")", ":", "self", ".", "relAlt", "=", "obj", ".", "relAlt", "self", ".", "relAltTime", "=", "obj", ".", "curTime", "# Update Airpseed, Altitude, Climb Rate Locations", "self", ".", "updateAARText", "(", ")", "# Update Altitude History", "self", ".", "updateAltHistory", "(", ")", "elif", "isinstance", "(", "obj", ",", "BatteryInfo", ")", ":", "self", ".", "voltage", "=", "obj", ".", "voltage", "self", ".", "current", "=", "obj", ".", "current", "self", ".", "batRemain", "=", "obj", ".", "batRemain", "# Update Battery Bar", "self", ".", "updateBatteryBar", "(", ")", "elif", "isinstance", "(", "obj", ",", "FlightState", ")", ":", "self", ".", "mode", "=", "obj", ".", "mode", "self", ".", "armed", "=", "obj", ".", "armState", "# Update Mode and Arm State Text", "self", ".", "updateStateText", "(", ")", "elif", "isinstance", "(", "obj", ",", "WaypointInfo", ")", ":", "self", ".", "currentWP", "=", "obj", ".", "current", "self", ".", "finalWP", "=", "obj", ".", "final", "self", ".", "wpDist", "=", "obj", ".", "currentDist", "self", ".", "nextWPTime", "=", "obj", ".", "nextWPTime", "if", "obj", ".", "wpBearing", "<", "0.0", ":", "self", ".", "wpBearing", "=", "obj", ".", "wpBearing", "+", "360", "else", ":", "self", ".", "wpBearing", "=", "obj", ".", "wpBearing", "# Update waypoint text", "self", ".", "updateWPText", "(", ")", "# Adjust Waypoint Pointer", "self", ".", "adjustWPPointer", "(", ")", "elif", "isinstance", "(", "obj", ",", "FPS", ")", ":", "# Update fps target", "self", ".", "fps", "=", "obj", ".", "fps", "# Quit Drawing if too early", "if", "(", "time", ".", "time", "(", ")", ">", "self", ".", "nextTime", ")", ":", "# Update Matplotlib Plot", "self", ".", "canvas", ".", "draw", "(", ")", "self", ".", "canvas", ".", "Refresh", 
"(", ")", "self", ".", "Refresh", "(", ")", "self", ".", "Update", "(", ")", "# Calculate next frame time", "if", "(", "self", ".", "fps", ">", "0", ")", ":", "fpsTime", "=", "1", "/", "self", ".", "fps", "self", ".", "nextTime", "=", "fpsTime", "+", "self", ".", "loopStartTime", "else", ":", "self", ".", "nextTime", "=", "time", ".", "time", "(", ")" ]
Main Loop.
[ "Main", "Loop", "." ]
python
train
DataONEorg/d1_python
lib_common/src/d1_common/checksum.py
https://github.com/DataONEorg/d1_python/blob/3ac4d4f3ca052d3e8641a6a329cab526c8ddcb0d/lib_common/src/d1_common/checksum.py#L65-L84
def create_checksum_object_from_iterator(
        itr, algorithm=d1_common.const.DEFAULT_CHECKSUM_ALGORITHM):
    """Calculate the checksum of an iterator.

    Args:
      itr: iterable
        Object which supports the iterator protocol.

      algorithm: str
        Checksum algorithm, ``MD5`` or ``SHA1`` / ``SHA-1``.

    Returns:
      Populated Checksum PyXB object.
    """
    checksum_str = calculate_checksum_on_iterator(itr, algorithm)
    checksum_pyxb = d1_common.types.dataoneTypes.checksum(checksum_str)
    checksum_pyxb.algorithm = algorithm
    return checksum_pyxb
[ "def", "create_checksum_object_from_iterator", "(", "itr", ",", "algorithm", "=", "d1_common", ".", "const", ".", "DEFAULT_CHECKSUM_ALGORITHM", ")", ":", "checksum_str", "=", "calculate_checksum_on_iterator", "(", "itr", ",", "algorithm", ")", "checksum_pyxb", "=", "d1_common", ".", "types", ".", "dataoneTypes", ".", "checksum", "(", "checksum_str", ")", "checksum_pyxb", ".", "algorithm", "=", "algorithm", "return", "checksum_pyxb" ]
Calculate the checksum of an iterator.

Args:
  itr: iterable
    Object which supports the iterator protocol.

  algorithm: str
    Checksum algorithm, ``MD5`` or ``SHA1`` / ``SHA-1``.

Returns:
  Populated Checksum PyXB object.
[ "Calculate", "the", "checksum", "of", "an", "iterator", "." ]
python
train
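A minimal sketch of streaming a file through the function above, assuming the d1_common package layout shown in this record; the file name is made up, and serializing the result via toxml() is standard PyXB behaviour.

import d1_common.checksum

def file_chunks(path, chunk_size=1024 * 1024):
    # Yield the file in fixed-size chunks so large objects never sit in memory.
    with open(path, 'rb') as f:
        while True:
            chunk = f.read(chunk_size)
            if not chunk:
                break
            yield chunk

checksum_pyxb = d1_common.checksum.create_checksum_object_from_iterator(
    file_chunks('science_object.bin'), algorithm='MD5')
print(checksum_pyxb.toxml('utf-8'))  # PyXB objects serialize back to XML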
AkihikoITOH/capybara
capybara/virtualenv/lib/python2.7/site-packages/setuptools/__init__.py
https://github.com/AkihikoITOH/capybara/blob/e86c2173ea386654f4ae061148e8fbe3f25e715c/capybara/virtualenv/lib/python2.7/site-packages/setuptools/__init__.py#L136-L147
def findall(dir=os.curdir):
    """Find all files under 'dir' and return the list of full
    filenames (relative to 'dir').
    """
    all_files = []
    for base, dirs, files in os.walk(dir, followlinks=True):
        if base == os.curdir or base.startswith(os.curdir + os.sep):
            base = base[2:]
        if base:
            files = [os.path.join(base, f) for f in files]
        all_files.extend(filter(os.path.isfile, files))
    return all_files
[ "def", "findall", "(", "dir", "=", "os", ".", "curdir", ")", ":", "all_files", "=", "[", "]", "for", "base", ",", "dirs", ",", "files", "in", "os", ".", "walk", "(", "dir", ",", "followlinks", "=", "True", ")", ":", "if", "base", "==", "os", ".", "curdir", "or", "base", ".", "startswith", "(", "os", ".", "curdir", "+", "os", ".", "sep", ")", ":", "base", "=", "base", "[", "2", ":", "]", "if", "base", ":", "files", "=", "[", "os", ".", "path", ".", "join", "(", "base", ",", "f", ")", "for", "f", "in", "files", "]", "all_files", ".", "extend", "(", "filter", "(", "os", ".", "path", ".", "isfile", ",", "files", ")", ")", "return", "all_files" ]
Find all files under 'dir' and return the list of full filenames (relative to 'dir').
[ "Find", "all", "files", "under", "dir", "and", "return", "the", "list", "of", "full", "filenames", "(", "relative", "to", "dir", ")", "." ]
python
test
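A quick illustration of the behaviour: when called with the default os.curdir the leading "./" is stripped, while an explicit directory argument stays as a prefix on every returned path. (Run inside a checkout; the directory name 'src' is just an example.)

from setuptools import findall

for fname in findall('src'):
    print(fname)   # e.g. src/package/module.py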
zero-os/0-core
client/py-client/zeroos/core0/client/client.py
https://github.com/zero-os/0-core/blob/69f6ce845ab8b8ad805a79a415227e7ac566c218/client/py-client/zeroos/core0/client/client.py#L1143-L1174
def nic_add(self, container, nic):
    """
    Hot plug a nic into a container

    :param container: container ID
    :param nic: {
        'type': nic_type  # one of default, bridge, zerotier, macvlan,
                          # passthrough, vlan, or vxlan (note, vlan and
                          # vxlan only supported by ovs)
        'id': id  # depends on the type:
                  # bridge: bridge name, zerotier: network id,
                  # macvlan: the parent link name,
                  # passthrough: the link name,
                  # vlan: the vlan tag, vxlan: the vxlan id
        'name': name of the nic inside the container (ignored in zerotier type)
        'hwaddr': Mac address of nic.
        'config': {  # config is only honored for bridge, vlan, and vxlan types
            'dhcp': bool,
            'cidr': static_ip  # ip/mask
            'gateway': gateway
            'dns': [dns]
        }
    }
    :return:
    """
    args = {
        'container': container,
        'nic': nic
    }
    self._nic_add.check(args)

    return self._client.json('corex.nic-add', args)
[ "def", "nic_add", "(", "self", ",", "container", ",", "nic", ")", ":", "args", "=", "{", "'container'", ":", "container", ",", "'nic'", ":", "nic", "}", "self", ".", "_nic_add", ".", "check", "(", "args", ")", "return", "self", ".", "_client", ".", "json", "(", "'corex.nic-add'", ",", "args", ")" ]
Hot plug a nic into a container

:param container: container ID
:param nic: {
    'type': nic_type  # one of default, bridge, zerotier, macvlan,
                      # passthrough, vlan, or vxlan (note, vlan and
                      # vxlan only supported by ovs)
    'id': id  # depends on the type:
              # bridge: bridge name, zerotier: network id,
              # macvlan: the parent link name, passthrough: the link name,
              # vlan: the vlan tag, vxlan: the vxlan id
    'name': name of the nic inside the container (ignored in zerotier type)
    'hwaddr': Mac address of nic.
    'config': {  # config is only honored for bridge, vlan, and vxlan types
        'dhcp': bool,
        'cidr': static_ip  # ip/mask
        'gateway': gateway
        'dns': [dns]
    }
}
:return:
[ "Hot", "plug", "a", "nic", "into", "a", "container" ]
python
train
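An example nic specification for the call above, a sketch only: the bridge name, MAC address, addressing, and the client/container handle names are placeholders, not values from this record.

nic = {
    'type': 'bridge',
    'id': 'br0',                    # bridge name, since type is 'bridge'
    'name': 'eth1',                 # name of the nic inside the container
    'hwaddr': '52:54:00:12:34:56',
    'config': {                     # honored because type is 'bridge'
        'dhcp': False,
        'cidr': '10.0.0.2/24',
        'gateway': '10.0.0.1',
        'dns': ['8.8.8.8'],
    },
}
client.container.nic_add(container_id, nic)  # handle names are assumptions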
hyperledger/sawtooth-core
validator/sawtooth_validator/execution/scheduler_parallel.py
https://github.com/hyperledger/sawtooth-core/blob/8cf473bc2207e51f02bd182d825158a57d72b098/validator/sawtooth_validator/execution/scheduler_parallel.py#L631-L665
def _get_contexts_for_squash(self, batch_signature):
    """Starting with the batch referenced by batch_signature, iterate
    back through the batches and for each valid batch collect the
    context_id. At the end remove contexts for txns that are
    predecessors of other txns.

    Args:
        batch_signature (str): The batch to start from, moving back
            through the batches in the scheduler.

    Returns:
        (list): Context ids that haven't previously been base contexts.
    """
    batch = self._batches_by_id[batch_signature].batch
    index = self._batches.index(batch)
    contexts = []
    txns_added_predecessors = []
    for b in self._batches[index::-1]:
        batch_is_valid = True
        contexts_from_batch = []
        for txn in b.transactions[::-1]:
            result = self._txn_results[txn.header_signature]
            if not result.is_valid:
                batch_is_valid = False
                break
            else:
                txn_id = txn.header_signature
                if txn_id not in txns_added_predecessors:
                    txns_added_predecessors.append(
                        self._txn_predecessors[txn_id])
                    contexts_from_batch.append(result.context_id)
        if batch_is_valid:
            contexts.extend(contexts_from_batch)

    return contexts
[ "def", "_get_contexts_for_squash", "(", "self", ",", "batch_signature", ")", ":", "batch", "=", "self", ".", "_batches_by_id", "[", "batch_signature", "]", ".", "batch", "index", "=", "self", ".", "_batches", ".", "index", "(", "batch", ")", "contexts", "=", "[", "]", "txns_added_predecessors", "=", "[", "]", "for", "b", "in", "self", ".", "_batches", "[", "index", ":", ":", "-", "1", "]", ":", "batch_is_valid", "=", "True", "contexts_from_batch", "=", "[", "]", "for", "txn", "in", "b", ".", "transactions", "[", ":", ":", "-", "1", "]", ":", "result", "=", "self", ".", "_txn_results", "[", "txn", ".", "header_signature", "]", "if", "not", "result", ".", "is_valid", ":", "batch_is_valid", "=", "False", "break", "else", ":", "txn_id", "=", "txn", ".", "header_signature", "if", "txn_id", "not", "in", "txns_added_predecessors", ":", "txns_added_predecessors", ".", "append", "(", "self", ".", "_txn_predecessors", "[", "txn_id", "]", ")", "contexts_from_batch", ".", "append", "(", "result", ".", "context_id", ")", "if", "batch_is_valid", ":", "contexts", ".", "extend", "(", "contexts_from_batch", ")", "return", "contexts" ]
Starting with the batch referenced by batch_signature, iterate back
through the batches and for each valid batch collect the context_id.
At the end remove contexts for txns that are predecessors of other
txns.

Args:
    batch_signature (str): The batch to start from, moving back
        through the batches in the scheduler.

Returns:
    (list): Context ids that haven't previously been base contexts.
[ "Starting", "with", "the", "batch", "referenced", "by", "batch_signature", "iterate", "back", "through", "the", "batches", "and", "for", "each", "valid", "batch", "collect", "the", "context_id", ".", "At", "the", "end", "remove", "contexts", "for", "txns", "that", "are", "other", "txn", "s", "predecessors", "." ]
python
train
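A toy, self-contained version of the same backward walk, to make the control flow concrete: batches at and before the target are scanned newest to oldest, a batch contributes its contexts only if every txn in it is valid, and a context is skipped once its txn was already recorded as a predecessor. This is a simplified reading of the docstring's rule; every name here is invented for the sketch.

def contexts_for_squash(batches, results, predecessors, target_index):
    contexts, seen_predecessors = [], set()
    for batch in batches[target_index::-1]:          # newest first
        if not all(results[t]['valid'] for t in batch):
            continue                                 # an invalid txn poisons the batch
        batch_contexts = []
        for t in reversed(batch):
            if t not in seen_predecessors:
                seen_predecessors.update(predecessors[t])
                batch_contexts.append(results[t]['context'])
        contexts.extend(batch_contexts)
    return contexts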
apple/turicreate
src/external/coremltools_wrap/coremltools/deps/protobuf/python/google/protobuf/service_reflection.py
https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/external/coremltools_wrap/coremltools/deps/protobuf/python/google/protobuf/service_reflection.py#L251-L263
def BuildServiceStub(self, cls):
    """Constructs the stub class.

    Args:
      cls: The class that will be constructed.
    """

    def _ServiceStubInit(stub, rpc_channel):
        stub.rpc_channel = rpc_channel

    self.cls = cls
    cls.__init__ = _ServiceStubInit
    for method in self.descriptor.methods:
        setattr(cls, method.name, self._GenerateStubMethod(method))
[ "def", "BuildServiceStub", "(", "self", ",", "cls", ")", ":", "def", "_ServiceStubInit", "(", "stub", ",", "rpc_channel", ")", ":", "stub", ".", "rpc_channel", "=", "rpc_channel", "self", ".", "cls", "=", "cls", "cls", ".", "__init__", "=", "_ServiceStubInit", "for", "method", "in", "self", ".", "descriptor", ".", "methods", ":", "setattr", "(", "cls", ",", "method", ".", "name", ",", "self", ".", "_GenerateStubMethod", "(", "method", ")", ")" ]
Constructs the stub class.

Args:
  cls: The class that will be constructed.
[ "Constructs", "the", "stub", "class", "." ]
python
train
pyviz/holoviews
holoviews/ipython/display_hooks.py
https://github.com/pyviz/holoviews/blob/ae0dd2f3de448b0ca5e9065aabd6ef8d84c7e655/holoviews/ipython/display_hooks.py#L230-L266
def display(obj, raw_output=False, **kwargs):
    """
    Renders any HoloViews object to HTML and displays it using the
    IPython display function. If raw is enabled the raw HTML is
    returned instead of displaying it directly.
    """
    if not Store.loaded_backends() and isinstance(obj, Dimensioned):
        raise RuntimeError('To use display on a HoloViews object ensure '
                           'a backend is loaded using the holoviews '
                           'extension.')

    raw = True
    if isinstance(obj, GridSpace):
        with option_state(obj):
            output = grid_display(obj)
    elif isinstance(obj, (CompositeOverlay, ViewableElement)):
        with option_state(obj):
            output = element_display(obj)
    elif isinstance(obj, (Layout, NdLayout, AdjointLayout)):
        with option_state(obj):
            output = layout_display(obj)
    elif isinstance(obj, (HoloMap, DynamicMap)):
        with option_state(obj):
            output = map_display(obj)
    elif isinstance(obj, Plot):
        output = render(obj)
    else:
        output = obj
        raw = kwargs.pop('raw', False)

    if raw_output:
        return output
    elif isinstance(output, tuple):
        data, metadata = output
    else:
        data, metadata = output, {}
    return IPython.display.display(data, raw=raw, metadata=metadata, **kwargs)
[ "def", "display", "(", "obj", ",", "raw_output", "=", "False", ",", "*", "*", "kwargs", ")", ":", "if", "not", "Store", ".", "loaded_backends", "(", ")", "and", "isinstance", "(", "obj", ",", "Dimensioned", ")", ":", "raise", "RuntimeError", "(", "'To use display on a HoloViews object ensure '", "'a backend is loaded using the holoviews '", "'extension.'", ")", "raw", "=", "True", "if", "isinstance", "(", "obj", ",", "GridSpace", ")", ":", "with", "option_state", "(", "obj", ")", ":", "output", "=", "grid_display", "(", "obj", ")", "elif", "isinstance", "(", "obj", ",", "(", "CompositeOverlay", ",", "ViewableElement", ")", ")", ":", "with", "option_state", "(", "obj", ")", ":", "output", "=", "element_display", "(", "obj", ")", "elif", "isinstance", "(", "obj", ",", "(", "Layout", ",", "NdLayout", ",", "AdjointLayout", ")", ")", ":", "with", "option_state", "(", "obj", ")", ":", "output", "=", "layout_display", "(", "obj", ")", "elif", "isinstance", "(", "obj", ",", "(", "HoloMap", ",", "DynamicMap", ")", ")", ":", "with", "option_state", "(", "obj", ")", ":", "output", "=", "map_display", "(", "obj", ")", "elif", "isinstance", "(", "obj", ",", "Plot", ")", ":", "output", "=", "render", "(", "obj", ")", "else", ":", "output", "=", "obj", "raw", "=", "kwargs", ".", "pop", "(", "'raw'", ",", "False", ")", "if", "raw_output", ":", "return", "output", "elif", "isinstance", "(", "output", ",", "tuple", ")", ":", "data", ",", "metadata", "=", "output", "else", ":", "data", ",", "metadata", "=", "output", ",", "{", "}", "return", "IPython", ".", "display", ".", "display", "(", "data", ",", "raw", "=", "raw", ",", "metadata", "=", "metadata", ",", "*", "*", "kwargs", ")" ]
Renders any HoloViews object to HTML and displays it using the IPython display function. If raw is enabled the raw HTML is returned instead of displaying it directly.
[ "Renders", "any", "HoloViews", "object", "to", "HTML", "and", "displays", "it", "using", "the", "IPython", "display", "function", ".", "If", "raw", "is", "enabled", "the", "raw", "HTML", "is", "returned", "instead", "of", "displaying", "it", "directly", "." ]
python
train
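A minimal notebook-style usage sketch; it assumes a plotting backend has been loaded first, exactly as the RuntimeError above demands, and that the function is imported from the module path in this record.

import holoviews as hv
from holoviews.ipython.display_hooks import display

hv.extension('bokeh')                       # load a backend first
curve = hv.Curve([(0, 0), (1, 1), (2, 4)])
rendered = display(curve, raw_output=True)  # get the rendered output instead of displaying
display(curve)                              # or render straight into the notebook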
bunq/sdk_python
bunq/sdk/exception_factory.py
https://github.com/bunq/sdk_python/blob/da6c9b83e6d83ee8062617f53c6eb7293c0d863d/bunq/sdk/exception_factory.py#L30-L98
def create_exception_for_response(cls, response_code, messages, response_id):
    """
    :type response_code: int
    :type messages: list[str]
    :type response_id: str

    :return: The exception according to the status code.
    :rtype: ApiException
    """
    error_message = cls._generate_message_error(
        response_code,
        messages,
        response_id
    )

    if response_code == cls._HTTP_RESPONSE_CODE_BAD_REQUEST:
        return BadRequestException(error_message, response_code, response_id)

    if response_code == cls._HTTP_RESPONSE_CODE_UNAUTHORIZED:
        return UnauthorizedException(error_message, response_code, response_id)

    if response_code == cls._HTTP_RESPONSE_CODE_FORBIDDEN:
        return ForbiddenException(error_message, response_code, response_id)

    if response_code == cls._HTTP_RESPONSE_CODE_NOT_FOUND:
        return NotFoundException(error_message, response_code, response_id)

    if response_code == cls._HTTP_RESPONSE_CODE_METHOD_NOT_ALLOWED:
        return MethodNotAllowedException(error_message, response_code,
                                         response_id)

    if response_code == cls._HTTP_RESPONSE_CODE_TOO_MANY_REQUESTS:
        return TooManyRequestsException(error_message, response_code,
                                        response_id)

    if response_code == cls._HTTP_RESPONSE_CODE_INTERNAL_SERVER_ERROR:
        return PleaseContactBunqException(error_message, response_code,
                                          response_id)

    return UnknownApiErrorException(error_message, response_code, response_id)
[ "def", "create_exception_for_response", "(", "cls", ",", "response_code", ",", "messages", ",", "response_id", ")", ":", "error_message", "=", "cls", ".", "_generate_message_error", "(", "response_code", ",", "messages", ",", "response_id", ")", "if", "response_code", "==", "cls", ".", "_HTTP_RESPONSE_CODE_BAD_REQUEST", ":", "return", "BadRequestException", "(", "error_message", ",", "response_code", ",", "response_id", ")", "if", "response_code", "==", "cls", ".", "_HTTP_RESPONSE_CODE_UNAUTHORIZED", ":", "return", "UnauthorizedException", "(", "error_message", ",", "response_code", ",", "response_id", ")", "if", "response_code", "==", "cls", ".", "_HTTP_RESPONSE_CODE_FORBIDDEN", ":", "return", "ForbiddenException", "(", "error_message", ",", "response_code", ",", "response_id", ")", "if", "response_code", "==", "cls", ".", "_HTTP_RESPONSE_CODE_NOT_FOUND", ":", "return", "NotFoundException", "(", "error_message", ",", "response_code", ",", "response_id", ")", "if", "response_code", "==", "cls", ".", "_HTTP_RESPONSE_CODE_METHOD_NOT_ALLOWED", ":", "return", "MethodNotAllowedException", "(", "error_message", ",", "response_code", ",", "response_id", ")", "if", "response_code", "==", "cls", ".", "_HTTP_RESPONSE_CODE_TOO_MANY_REQUESTS", ":", "return", "TooManyRequestsException", "(", "error_message", ",", "response_code", ",", "response_id", ")", "if", "response_code", "==", "cls", ".", "_HTTP_RESPONSE_CODE_INTERNAL_SERVER_ERROR", ":", "return", "PleaseContactBunqException", "(", "error_message", ",", "response_code", ",", "response_id", ")", "return", "UnknownApiErrorException", "(", "error_message", ",", "response_code", ",", "response_id", ")" ]
:type response_code: int
:type messages: list[str]
:type response_id: str

:return: The exception according to the status code.
:rtype: ApiException
[ ":", "type", "response_code", ":", "int", ":", "type", "messages", ":", "list", "[", "str", "]", ":", "type", "response_id", ":", "str" ]
python
train
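A sketch of the dispatch: a 429 from the API should come back as the rate-limit exception. The class name ExceptionFactory and the exception import path follow the bunq SDK layout suggested by this record's file path, but are assumptions.

from bunq.sdk.exception_factory import ExceptionFactory
from bunq.sdk.exception import TooManyRequestsException

exc = ExceptionFactory.create_exception_for_response(
    429, ['Too many requests.'], 'response-id-123')
assert isinstance(exc, TooManyRequestsException)
raise exc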
OCR-D/core
ocrd_validators/ocrd_validators/json_validator.py
https://github.com/OCR-D/core/blob/57e68c578526cb955fd2e368207f5386c459d91d/ocrd_validators/ocrd_validators/json_validator.py#L65-L78
def _validate(self, obj):
    """
    Do the actual validation

    Arguments:
        obj (dict): object to validate

    Returns: ValidationReport
    """
    report = ValidationReport()
    if not self.validator.is_valid(obj):
        for v in self.validator.iter_errors(obj):
            report.add_error("[%s] %s" % (
                '.'.join(str(vv) for vv in v.path), v.message))
    return report
[ "def", "_validate", "(", "self", ",", "obj", ")", ":", "report", "=", "ValidationReport", "(", ")", "if", "not", "self", ".", "validator", ".", "is_valid", "(", "obj", ")", ":", "for", "v", "in", "self", ".", "validator", ".", "iter_errors", "(", "obj", ")", ":", "report", ".", "add_error", "(", "\"[%s] %s\"", "%", "(", "'.'", ".", "join", "(", "str", "(", "vv", ")", "for", "vv", "in", "v", ".", "path", ")", ",", "v", ".", "message", ")", ")", "return", "report" ]
Do the actual validation

Arguments:
    obj (dict): object to validate

Returns: ValidationReport
[ "Do", "the", "actual", "validation" ]
python
train
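The same error-collection idiom, shown standalone with jsonschema so it can be run without the ocrd_validators package; the schema here is a made-up example.

from jsonschema import Draft4Validator

schema = {'type': 'object', 'properties': {'version': {'type': 'string'}}}
validator = Draft4Validator(schema)
for v in validator.iter_errors({'version': 2}):
    # v.path is a deque of keys/indices leading to the failing element
    print("[%s] %s" % ('.'.join(str(vv) for vv in v.path), v.message))
# -> [version] 2 is not of type 'string'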
tensorflow/tensor2tensor
tensor2tensor/layers/discretization.py
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/layers/discretization.py#L1429-L1437
def isemhash_unbottleneck(x, hidden_size, isemhash_filter_size_multiplier=1.0):
    """Improved semantic hashing un-bottleneck."""
    filter_size = int(hidden_size * isemhash_filter_size_multiplier)
    x = 0.5 * (x - 1.0)  # Move from [-1, 1] to [0, 1].
    with tf.variable_scope("isemhash_unbottleneck"):
        h1a = tf.layers.dense(x, filter_size, name="hidden1a")
        h1b = tf.layers.dense(1.0 - x, filter_size, name="hidden1b")
        h2 = tf.layers.dense(tf.nn.relu(h1a + h1b), filter_size, name="hidden2")
        return tf.layers.dense(tf.nn.relu(h2), hidden_size, name="final")
[ "def", "isemhash_unbottleneck", "(", "x", ",", "hidden_size", ",", "isemhash_filter_size_multiplier", "=", "1.0", ")", ":", "filter_size", "=", "int", "(", "hidden_size", "*", "isemhash_filter_size_multiplier", ")", "x", "=", "0.5", "*", "(", "x", "-", "1.0", ")", "# Move from [-1, 1] to [0, 1].", "with", "tf", ".", "variable_scope", "(", "\"isemhash_unbottleneck\"", ")", ":", "h1a", "=", "tf", ".", "layers", ".", "dense", "(", "x", ",", "filter_size", ",", "name", "=", "\"hidden1a\"", ")", "h1b", "=", "tf", ".", "layers", ".", "dense", "(", "1.0", "-", "x", ",", "filter_size", ",", "name", "=", "\"hidden1b\"", ")", "h2", "=", "tf", ".", "layers", ".", "dense", "(", "tf", ".", "nn", ".", "relu", "(", "h1a", "+", "h1b", ")", ",", "filter_size", ",", "name", "=", "\"hidden2\"", ")", "return", "tf", ".", "layers", ".", "dense", "(", "tf", ".", "nn", ".", "relu", "(", "h2", ")", ",", "hidden_size", ",", "name", "=", "\"final\"", ")" ]
Improved semantic hashing un-bottleneck.
[ "Improved", "semantic", "hashing", "un", "-", "bottleneck", "." ]
python
train
uw-it-aca/uw-restclients-nws
uw_nws/__init__.py
https://github.com/uw-it-aca/uw-restclients-nws/blob/ec6fd14342ffc883d14bcb53b2fe9bc288696027/uw_nws/__init__.py#L274-L291
def search_subscriptions(self, **kwargs):
    """
    Search for all subscriptions by parameters
    """
    params = [(key, kwargs[key]) for key in sorted(kwargs.keys())]
    url = "/notification/v1/subscription?{}".format(
        urlencode(params, doseq=True))

    response = NWS_DAO().getURL(url, self._read_headers)
    if response.status != 200:
        raise DataFailureException(url, response.status, response.data)

    data = json.loads(response.data)
    subscriptions = []
    for datum in data.get("Subscriptions", []):
        subscriptions.append(self._subscription_from_json(datum))
    return subscriptions
[ "def", "search_subscriptions", "(", "self", ",", "*", "*", "kwargs", ")", ":", "params", "=", "[", "(", "key", ",", "kwargs", "[", "key", "]", ")", "for", "key", "in", "sorted", "(", "kwargs", ".", "keys", "(", ")", ")", "]", "url", "=", "\"/notification/v1/subscription?{}\"", ".", "format", "(", "urlencode", "(", "params", ",", "doseq", "=", "True", ")", ")", "response", "=", "NWS_DAO", "(", ")", ".", "getURL", "(", "url", ",", "self", ".", "_read_headers", ")", "if", "response", ".", "status", "!=", "200", ":", "raise", "DataFailureException", "(", "url", ",", "response", ".", "status", ",", "response", ".", "data", ")", "data", "=", "json", ".", "loads", "(", "response", ".", "data", ")", "subscriptions", "=", "[", "]", "for", "datum", "in", "data", ".", "get", "(", "\"Subscriptions\"", ",", "[", "]", ")", ":", "subscriptions", ".", "append", "(", "self", ".", "_subscription_from_json", "(", "datum", ")", ")", "return", "subscriptions" ]
Search for all subscriptions by parameters
[ "Search", "for", "all", "subscriptions", "by", "parameters" ]
python
train
swharden/SWHLab
swhlab/core.py
https://github.com/swharden/SWHLab/blob/a86c3c65323cec809a4bd4f81919644927094bf5/swhlab/core.py#L175-L179
def setsweeps(self):
    """iterate over every sweep"""
    for sweep in range(self.sweeps):
        self.setsweep(sweep)
        yield self.sweep
[ "def", "setsweeps", "(", "self", ")", ":", "for", "sweep", "in", "range", "(", "self", ".", "sweeps", ")", ":", "self", ".", "setsweep", "(", "sweep", ")", "yield", "self", ".", "sweep" ]
iterate over every sweep
[ "iterate", "over", "every", "sweep" ]
python
valid
google/grr
grr/core/grr_response_core/lib/rdfvalues/structs.py
https://github.com/google/grr/blob/5cef4e8e2f0d5df43ea4877e9c798e0bf60bfe74/grr/core/grr_response_core/lib/rdfvalues/structs.py#L947-L950
def ConvertToWireFormat(self, value):
    """Encode the nested protobuf into wire format."""
    output = _SerializeEntries(_GetOrderedEntries(value.GetRawData()))
    return (self.encoded_tag, VarintEncode(len(output)), output)
[ "def", "ConvertToWireFormat", "(", "self", ",", "value", ")", ":", "output", "=", "_SerializeEntries", "(", "_GetOrderedEntries", "(", "value", ".", "GetRawData", "(", ")", ")", ")", "return", "(", "self", ".", "encoded_tag", ",", "VarintEncode", "(", "len", "(", "output", ")", ")", ",", "output", ")" ]
Encode the nested protobuf into wire format.
[ "Encode", "the", "nested", "protobuf", "into", "wire", "format", "." ]
python
train
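A worked sketch of the length prefix used above: protobuf varints emit seven bits per byte, least-significant group first, with the high bit marking continuation. This is a standalone re-implementation for illustration, not grr's own VarintEncode.

def varint_encode(n):
    out = bytearray()
    while True:
        byte = n & 0x7F            # low seven bits
        n >>= 7
        if n:
            out.append(byte | 0x80)  # more bytes follow
        else:
            out.append(byte)
            return bytes(out)

assert varint_encode(1) == b'\x01'
assert varint_encode(300) == b'\xac\x02'   # 300 = 0b10_0101100 -> 0xAC, 0x02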
Becksteinlab/GromacsWrapper
gromacs/cbook.py
https://github.com/Becksteinlab/GromacsWrapper/blob/d4f9a8cb6f48292732cf7c7e4ef4a6d2ccbc51b9/gromacs/cbook.py#L1772-L1847
def fit(self, xy=False, **kwargs):
    """Write xtc that is fitted to the tpr reference structure.

    Runs :class:`gromacs.tools.trjconv` with appropriate arguments for
    fitting. The most important *kwargs* are listed here but in most
    cases the defaults should work.

    Note that the default settings do *not* include centering or
    periodic boundary treatment as this often does not work well with
    fitting. It is better to do this as a separate step (see
    :meth:`center_fit` or :func:`gromacs.cbook.trj_fitandcenter`)

    :Keywords:
      *s*
         Input structure (typically the default tpr file but can be
         set to some other file with a different conformation for
         fitting)
      *n*
         Alternative index file.
      *o*
         Name of the output trajectory. A default name is created.
         If e.g. *dt* = 100 is one of the *kwargs* then the default
         name includes "_dt100ps".
      *xy* : boolean
         If ``True`` then only do a rot+trans fit in the xy plane
         (good for membrane simulations); default is ``False``.
      *force*
         ``True``: overwrite existing trajectories
         ``False``: throw an IOError exception
         ``None``: skip existing and log a warning [default]
      *fitgroup*
         index group to fit on ["backbone"]

         .. Note:: If keyword *input* is supplied then it will override
                   *fitgroup*; *input* = ``[fitgroup, outgroup]``
      *kwargs*
         kwargs are passed to :func:`~gromacs.cbook.trj_xyfitted`

    :Returns:
          dictionary with keys *tpr*, *xtc*, which are the names of
          the new files
    """
    kwargs.setdefault('s', self.tpr)
    kwargs.setdefault('n', self.ndx)
    kwargs['f'] = self.xtc
    force = kwargs.pop('force', self.force)

    if xy:
        fitmode = 'rotxy+transxy'
        kwargs.pop('fit', None)
        infix_default = '_fitxy'
    else:
        fitmode = kwargs.pop('fit', 'rot+trans')  # user can use 'progressive', too
        infix_default = '_fit'

    dt = kwargs.get('dt')
    if dt:
        infix_default += '_dt{0:d}ps'.format(int(dt))  # dt in ps

    kwargs.setdefault('o', self.outfile(self.infix_filename(
        None, self.xtc, infix_default, 'xtc')))
    fitgroup = kwargs.pop('fitgroup', 'backbone')
    kwargs.setdefault('input', [fitgroup, "system"])

    if kwargs.get('center', False):
        logger.warn("Transformer.fit(): center=%(center)r used: centering should not be combined with fitting.", kwargs)
        if len(kwargs['input']) != 3:
            logger.error("If you insist on centering you must provide three groups in the 'input' kwarg: (center, fit, output)")
            raise ValueError("Insufficient index groups for centering, fitting, output")

    logger.info("Fitting trajectory %r with xy=%r...", kwargs['f'], xy)
    logger.info("Fitting on index group %(fitgroup)r", vars())
    with utilities.in_dir(self.dirname):
        if self.check_file_exists(kwargs['o'], resolve="indicate", force=force):
            logger.warn("File %r exists; force regenerating it with force=True.", kwargs['o'])
        else:
            gromacs.trjconv(fit=fitmode, **kwargs)
            logger.info("Fitted trajectory (fitmode=%s): %r.", fitmode, kwargs['o'])
    return {'tpr': self.rp(kwargs['s']), 'xtc': self.rp(kwargs['o'])}
[ "def", "fit", "(", "self", ",", "xy", "=", "False", ",", "*", "*", "kwargs", ")", ":", "kwargs", ".", "setdefault", "(", "'s'", ",", "self", ".", "tpr", ")", "kwargs", ".", "setdefault", "(", "'n'", ",", "self", ".", "ndx", ")", "kwargs", "[", "'f'", "]", "=", "self", ".", "xtc", "force", "=", "kwargs", ".", "pop", "(", "'force'", ",", "self", ".", "force", ")", "if", "xy", ":", "fitmode", "=", "'rotxy+transxy'", "kwargs", ".", "pop", "(", "'fit'", ",", "None", ")", "infix_default", "=", "'_fitxy'", "else", ":", "fitmode", "=", "kwargs", ".", "pop", "(", "'fit'", ",", "'rot+trans'", ")", "# user can use 'progressive', too", "infix_default", "=", "'_fit'", "dt", "=", "kwargs", ".", "get", "(", "'dt'", ")", "if", "dt", ":", "infix_default", "+=", "'_dt{0:d}ps'", ".", "format", "(", "int", "(", "dt", ")", ")", "# dt in ps", "kwargs", ".", "setdefault", "(", "'o'", ",", "self", ".", "outfile", "(", "self", ".", "infix_filename", "(", "None", ",", "self", ".", "xtc", ",", "infix_default", ",", "'xtc'", ")", ")", ")", "fitgroup", "=", "kwargs", ".", "pop", "(", "'fitgroup'", ",", "'backbone'", ")", "kwargs", ".", "setdefault", "(", "'input'", ",", "[", "fitgroup", ",", "\"system\"", "]", ")", "if", "kwargs", ".", "get", "(", "'center'", ",", "False", ")", ":", "logger", ".", "warn", "(", "\"Transformer.fit(): center=%(center)r used: centering should not be combined with fitting.\"", ",", "kwargs", ")", "if", "len", "(", "kwargs", "[", "'inputs'", "]", ")", "!=", "3", ":", "logger", ".", "error", "(", "\"If you insist on centering you must provide three groups in the 'input' kwarg: (center, fit, output)\"", ")", "raise", "ValuError", "(", "\"Insufficient index groups for centering,fitting,output\"", ")", "logger", ".", "info", "(", "\"Fitting trajectory %r to with xy=%r...\"", ",", "kwargs", "[", "'f'", "]", ",", "xy", ")", "logger", ".", "info", "(", "\"Fitting on index group %(fitgroup)r\"", ",", "vars", "(", ")", ")", "with", "utilities", ".", "in_dir", "(", "self", ".", "dirname", ")", ":", "if", "self", ".", "check_file_exists", "(", "kwargs", "[", "'o'", "]", ",", "resolve", "=", "\"indicate\"", ",", "force", "=", "force", ")", ":", "logger", ".", "warn", "(", "\"File %r exists; force regenerating it with force=True.\"", ",", "kwargs", "[", "'o'", "]", ")", "else", ":", "gromacs", ".", "trjconv", "(", "fit", "=", "fitmode", ",", "*", "*", "kwargs", ")", "logger", ".", "info", "(", "\"Fitted trajectory (fitmode=%s): %r.\"", ",", "fitmode", ",", "kwargs", "[", "'o'", "]", ")", "return", "{", "'tpr'", ":", "self", ".", "rp", "(", "kwargs", "[", "'s'", "]", ")", ",", "'xtc'", ":", "self", ".", "rp", "(", "kwargs", "[", "'o'", "]", ")", "}" ]
Write xtc that is fitted to the tpr reference structure.

Runs :class:`gromacs.tools.trjconv` with appropriate arguments for
fitting. The most important *kwargs* are listed here but in most cases
the defaults should work.

Note that the default settings do *not* include centering or periodic
boundary treatment as this often does not work well with fitting. It
is better to do this as a separate step (see :meth:`center_fit` or
:func:`gromacs.cbook.trj_fitandcenter`)

:Keywords:
  *s*
     Input structure (typically the default tpr file but can be set to
     some other file with a different conformation for fitting)
  *n*
     Alternative index file.
  *o*
     Name of the output trajectory. A default name is created. If e.g.
     *dt* = 100 is one of the *kwargs* then the default name includes
     "_dt100ps".
  *xy* : boolean
     If ``True`` then only do a rot+trans fit in the xy plane (good
     for membrane simulations); default is ``False``.
  *force*
     ``True``: overwrite existing trajectories
     ``False``: throw an IOError exception
     ``None``: skip existing and log a warning [default]
  *fitgroup*
     index group to fit on ["backbone"]

     .. Note:: If keyword *input* is supplied then it will override
               *fitgroup*; *input* = ``[fitgroup, outgroup]``
  *kwargs*
     kwargs are passed to :func:`~gromacs.cbook.trj_xyfitted`

:Returns:
      dictionary with keys *tpr*, *xtc*, which are the names of the
      new files
[ "Write", "xtc", "that", "is", "fitted", "to", "the", "tpr", "reference", "structure", "." ]
python
valid
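A hypothetical driver for the method above. The Transformer constructor arguments mirror the attributes fit() uses (tpr, xtc, ndx), but the exact parameter names and file paths here are assumptions.

from gromacs.cbook import Transformer

trans = Transformer(dirname='.', s='md.tpr', f='md.xtc', n='main.ndx', force=True)
result = trans.fit(xy=True, fitgroup='backbone', dt=100)  # xy fit, frames every 100 ps
print(result['xtc'])   # e.g. ./md_fitxy_dt100ps.xtc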
wummel/linkchecker
linkcheck/director/__init__.py
https://github.com/wummel/linkchecker/blob/c2ce810c3fb00b895a841a7be6b2e78c64e7b042/linkcheck/director/__init__.py#L70-L80
def check_url(aggregate):
    """Helper function waiting for URL queue."""
    while True:
        try:
            aggregate.urlqueue.join(timeout=30)
            break
        except urlqueue.Timeout:
            # Cleanup threads every 30 seconds
            aggregate.remove_stopped_threads()
            if not any(aggregate.get_check_threads()):
                break
[ "def", "check_url", "(", "aggregate", ")", ":", "while", "True", ":", "try", ":", "aggregate", ".", "urlqueue", ".", "join", "(", "timeout", "=", "30", ")", "break", "except", "urlqueue", ".", "Timeout", ":", "# Cleanup threads every 30 seconds", "aggregate", ".", "remove_stopped_threads", "(", ")", "if", "not", "any", "(", "aggregate", ".", "get_check_threads", "(", ")", ")", ":", "break" ]
Helper function waiting for URL queue.
[ "Helper", "function", "waiting", "for", "URL", "queue", "." ]
python
train
fabioz/PyDev.Debugger
_pydevd_bundle/pydevd_process_net_command_json.py
https://github.com/fabioz/PyDev.Debugger/blob/ed9c4307662a5593b8a7f1f3389ecd0e79b8c503/_pydevd_bundle/pydevd_process_net_command_json.py#L213-L244
def on_completions_request(self, py_db, request):
    '''
    :param CompletionsRequest request:
    '''
    arguments = request.arguments  # : :type arguments: CompletionsArguments
    seq = request.seq
    text = arguments.text
    frame_id = arguments.frameId
    thread_id = py_db.suspended_frames_manager.get_thread_id_for_variable_reference(
        frame_id)

    if thread_id is None:
        body = CompletionsResponseBody([])
        variables_response = pydevd_base_schema.build_response(
            request,
            kwargs={
                'body': body,
                'success': False,
                'message': 'Thread to get completions seems to have resumed already.'
            })
        return NetCommand(CMD_RETURN, 0, variables_response, is_json=True)

    # Note: line and column are 1-based (convert to 0-based for pydevd).
    column = arguments.column - 1

    if arguments.line is None:
        # line is optional
        line = -1
    else:
        line = arguments.line - 1

    self.api.request_completions(py_db, seq, thread_id, frame_id, text,
                                 line=line, column=column)
[ "def", "on_completions_request", "(", "self", ",", "py_db", ",", "request", ")", ":", "arguments", "=", "request", ".", "arguments", "# : :type arguments: CompletionsArguments", "seq", "=", "request", ".", "seq", "text", "=", "arguments", ".", "text", "frame_id", "=", "arguments", ".", "frameId", "thread_id", "=", "py_db", ".", "suspended_frames_manager", ".", "get_thread_id_for_variable_reference", "(", "frame_id", ")", "if", "thread_id", "is", "None", ":", "body", "=", "CompletionsResponseBody", "(", "[", "]", ")", "variables_response", "=", "pydevd_base_schema", ".", "build_response", "(", "request", ",", "kwargs", "=", "{", "'body'", ":", "body", ",", "'success'", ":", "False", ",", "'message'", ":", "'Thread to get completions seems to have resumed already.'", "}", ")", "return", "NetCommand", "(", "CMD_RETURN", ",", "0", ",", "variables_response", ",", "is_json", "=", "True", ")", "# Note: line and column are 1-based (convert to 0-based for pydevd).", "column", "=", "arguments", ".", "column", "-", "1", "if", "arguments", ".", "line", "is", "None", ":", "# line is optional", "line", "=", "-", "1", "else", ":", "line", "=", "arguments", ".", "line", "-", "1", "self", ".", "api", ".", "request_completions", "(", "py_db", ",", "seq", ",", "thread_id", ",", "frame_id", ",", "text", ",", "line", "=", "line", ",", "column", "=", "column", ")" ]
:param CompletionsRequest request:
[ ":", "param", "CompletionsRequest", "request", ":" ]
python
train
dmippolitov/pydnsbl
pydnsbl/checker.py
https://github.com/dmippolitov/pydnsbl/blob/76c460f1118213d66498ddafde2053d8de4ccbdb/pydnsbl/checker.py#L150-L157
def check_ips(self, addrs):
    """
    sync check multiple ips
    """
    tasks = []
    for addr in addrs:
        tasks.append(self._check_ip(addr))
    return self._loop.run_until_complete(asyncio.gather(*tasks))
[ "def", "check_ips", "(", "self", ",", "addrs", ")", ":", "tasks", "=", "[", "]", "for", "addr", "in", "addrs", ":", "tasks", ".", "append", "(", "self", ".", "_check_ip", "(", "addr", ")", ")", "return", "self", ".", "_loop", ".", "run_until_complete", "(", "asyncio", ".", "gather", "(", "*", "tasks", ")", ")" ]
sync check multiple ips
[ "sync", "check", "multiple", "ips" ]
python
train
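A usage sketch; DNSBLChecker is the public checker class in pydnsbl's README, and the result attribute names (.addr, .blacklisted, .detected_by) are taken from the same source, so treat them as assumptions for this particular version.

from pydnsbl import DNSBLChecker

checker = DNSBLChecker()
# 127.0.0.2 is the conventional DNSBL test address and should come back listed.
results = checker.check_ips(['8.8.8.8', '127.0.0.2'])
for result in results:
    print(result.addr, result.blacklisted, result.detected_by)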
aliyun/aliyun-odps-python-sdk
odps/console.py
https://github.com/aliyun/aliyun-odps-python-sdk/blob/4b0de18f5864386df6068f26f026e62f932c41e4/odps/console.py#L368-L391
def isatty(file):
    """
    Returns `True` if `file` is a tty.

    Most built-in Python file-like objects have an `isatty` member,
    but some user-defined types may not, so this assumes those are
    not ttys.
    """
    if (multiprocessing.current_process().name != 'MainProcess' or
            threading.current_thread().getName() != 'MainThread'):
        return False

    if hasattr(file, 'isatty'):
        return file.isatty()
    elif (OutStream is not None and
          isinstance(file, (OutStream, IPythonIOStream)) and
          ((hasattr(file, 'name') and file.name == 'stdout') or
           (hasattr(file, 'stream') and
            isinstance(file.stream, PyreadlineConsole)))):
        # File is an IPython OutStream or IOStream and
        # File name is 'stdout' or
        # File wraps a Console
        return True
    return False
[ "def", "isatty", "(", "file", ")", ":", "if", "(", "multiprocessing", ".", "current_process", "(", ")", ".", "name", "!=", "'MainProcess'", "or", "threading", ".", "current_thread", "(", ")", ".", "getName", "(", ")", "!=", "'MainThread'", ")", ":", "return", "False", "if", "hasattr", "(", "file", ",", "'isatty'", ")", ":", "return", "file", ".", "isatty", "(", ")", "elif", "(", "OutStream", "is", "not", "None", "and", "isinstance", "(", "file", ",", "(", "OutStream", ",", "IPythonIOStream", ")", ")", "and", "(", "(", "hasattr", "(", "file", ",", "'name'", ")", "and", "file", ".", "name", "==", "'stdout'", ")", "or", "(", "hasattr", "(", "file", ",", "'stream'", ")", "and", "isinstance", "(", "file", ".", "stream", ",", "PyreadlineConsole", ")", ")", ")", ")", ":", "# File is an IPython OutStream or IOStream and", "# File name is 'stdout' or", "# File wraps a Console", "return", "True", "return", "False" ]
Returns `True` if `file` is a tty. Most built-in Python file-like objects have an `isatty` member, but some user-defined types may not, so this assumes those are not ttys.
[ "Returns", "True", "if", "file", "is", "a", "tty", "." ]
python
train
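The helper degrades gracefully for objects that lack `isatty`. A quick demonstration with stdlib objects only, assuming the `isatty` function above is importable and the script runs in the main thread:

import io
import sys

class Plain:
    pass  # no isatty attribute at all

print(isatty(sys.stdout))     # True in a terminal, False when piped
print(isatty(io.StringIO()))  # False: StringIO.isatty() returns False
print(isatty(Plain()))        # False: falls through every branch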
yyuu/botornado
boto/cloudfront/distribution.py
https://github.com/yyuu/botornado/blob/fffb056f5ff2324d1d5c1304014cfb1d899f602e/boto/cloudfront/distribution.py#L448-L463
def set_permissions_all(self, replace=False): """ Sets the S3 ACL grants for all objects in the Distribution to the appropriate value based on the type of Distribution. :type replace: bool :param replace: If False, the Origin Access Identity will be appended to the existing ACL for the object. If True, the ACL for the object will be completely replaced with one that grants READ permission to the Origin Access Identity. """ bucket = self._get_bucket() for key in bucket: self.set_permissions(key, replace)
[ "def", "set_permissions_all", "(", "self", ",", "replace", "=", "False", ")", ":", "bucket", "=", "self", ".", "_get_bucket", "(", ")", "for", "key", "in", "bucket", ":", "self", ".", "set_permissions", "(", "key", ",", "replace", ")" ]
Sets the S3 ACL grants for all objects in the Distribution to the appropriate value based on the type of Distribution. :type replace: bool :param replace: If False, the Origin Access Identity will be appended to the existing ACL for the object. If True, the ACL for the object will be completely replaced with one that grants READ permission to the Origin Access Identity.
[ "Sets", "the", "S3", "ACL", "grants", "for", "all", "objects", "in", "the", "Distribution", "to", "the", "appropriate", "value", "based", "on", "the", "type", "of", "Distribution", "." ]
python
train
sivakov512/python-static-api-generator
static_api_generator/generator.py
https://github.com/sivakov512/python-static-api-generator/blob/0a7ec27324b9b2a3d1fa9894c4cba73af9ebcc01/static_api_generator/generator.py#L58-L68
def dest_fpath(self, source_fpath: str) -> str: """Calculates full path for end json-api file from source file full path.""" relative_fpath = os.path.join(*source_fpath.split(os.sep)[1:]) relative_dirpath = os.path.dirname(relative_fpath) source_fname = relative_fpath.split(os.sep)[-1] base_fname = source_fname.split('.')[0] dest_fname = f'{base_fname}.json' return os.path.join(self.dest_dir, relative_dirpath, dest_fname)
[ "def", "dest_fpath", "(", "self", ",", "source_fpath", ":", "str", ")", "->", "str", ":", "relative_fpath", "=", "os", ".", "path", ".", "join", "(", "*", "source_fpath", ".", "split", "(", "os", ".", "sep", ")", "[", "1", ":", "]", ")", "relative_dirpath", "=", "os", ".", "path", ".", "dirname", "(", "relative_fpath", ")", "source_fname", "=", "relative_fpath", ".", "split", "(", "os", ".", "sep", ")", "[", "-", "1", "]", "base_fname", "=", "source_fname", ".", "split", "(", "'.'", ")", "[", "0", "]", "dest_fname", "=", "f'{base_fname}.json'", "return", "os", ".", "path", ".", "join", "(", "self", ".", "dest_dir", ",", "relative_dirpath", ",", "dest_fname", ")" ]
Calculates full path for end json-api file from source file full path.
[ "Calculates", "full", "path", "for", "end", "json", "-", "api", "file", "from", "source", "file", "full", "path", "." ]
python
train
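A standalone re-creation of the path mapping, for illustration only (the real method reads the destination directory from `self.dest_dir`); the paths below are hypothetical.

import os

def map_source_to_json(source_fpath, dest_dir):
    # drop the top-level source directory, keep the rest of the tree
    relative_fpath = os.path.join(*source_fpath.split(os.sep)[1:])
    relative_dirpath = os.path.dirname(relative_fpath)
    # swap whatever extension the source had for .json
    base_fname = relative_fpath.split(os.sep)[-1].split('.')[0]
    return os.path.join(dest_dir, relative_dirpath, base_fname + '.json')

# 'src/posts/2019/hello.yml' -> 'api/posts/2019/hello.json' on POSIX
print(map_source_to_json(os.path.join('src', 'posts', '2019', 'hello.yml'), 'api'))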
shaiguitar/snowclient.py
snowclient/client.py
https://github.com/shaiguitar/snowclient.py/blob/6bb513576d3b37612a7a4da225140d134f3e1c82/snowclient/client.py#L10-L16
def list(self,table, **kparams): """ get a collection of records by table name. returns a collection of SnowRecord obj. """ records = self.api.list(table, **kparams) return records
[ "def", "list", "(", "self", ",", "table", ",", "*", "*", "kparams", ")", ":", "records", "=", "self", ".", "api", ".", "list", "(", "table", ",", "*", "*", "kparams", ")", "return", "records" ]
get a collection of records by table name. returns a collection of SnowRecord obj.
[ "get", "a", "collection", "of", "records", "by", "table", "name", ".", "returns", "a", "collection", "of", "SnowRecord", "obj", "." ]
python
train
knipknap/SpiffWorkflow
SpiffWorkflow/util/event.py
https://github.com/knipknap/SpiffWorkflow/blob/f0af7f59a332e0619e4f3c00a7d4a3d230760e00/SpiffWorkflow/util/event.py#L219-L237
def disconnect(self, callback): """ Disconnects the signal from the given function. :type callback: object :param callback: The callback function. """ if self.weak_subscribers is not None: with self.lock: index = self._weakly_connected_index(callback) if index is not None: self.weak_subscribers.pop(index)[0] if self.hard_subscribers is not None: try: index = self._hard_callbacks().index(callback) except ValueError: pass else: self.hard_subscribers.pop(index)
[ "def", "disconnect", "(", "self", ",", "callback", ")", ":", "if", "self", ".", "weak_subscribers", "is", "not", "None", ":", "with", "self", ".", "lock", ":", "index", "=", "self", ".", "_weakly_connected_index", "(", "callback", ")", "if", "index", "is", "not", "None", ":", "self", ".", "weak_subscribers", ".", "pop", "(", "index", ")", "[", "0", "]", "if", "self", ".", "hard_subscribers", "is", "not", "None", ":", "try", ":", "index", "=", "self", ".", "_hard_callbacks", "(", ")", ".", "index", "(", "callback", ")", "except", "ValueError", ":", "pass", "else", ":", "self", ".", "hard_subscribers", ".", "pop", "(", "index", ")" ]
Disconnects the signal from the given function. :type callback: object :param callback: The callback function.
[ "Disconnects", "the", "signal", "from", "the", "given", "function", "." ]
python
valid
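A simplified analog of the hard-subscriber bookkeeping, showing why the `ValueError` guard makes a second `disconnect` a no-op; this is a sketch of the pattern, not the SpiffWorkflow API itself.

hard_subscribers = []

def connect(cb):
    hard_subscribers.append(cb)

def disconnect(cb):
    try:
        index = hard_subscribers.index(cb)  # mirrors the ValueError guard above
    except ValueError:
        pass  # unknown callback: silently ignore
    else:
        hard_subscribers.pop(index)

cb = lambda: None
connect(cb)
disconnect(cb)
disconnect(cb)  # second call is a no-op
assert hard_subscribers == []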
estnltk/estnltk
estnltk/text.py
https://github.com/estnltk/estnltk/blob/28ae334a68a0673072febc318635f04da0dcc54a/estnltk/text.py#L564-L568
def word_spans(self): """The list of spans representing ``words`` layer elements.""" if not self.is_tagged(WORDS): self.tokenize_words() return self.spans(WORDS)
[ "def", "word_spans", "(", "self", ")", ":", "if", "not", "self", ".", "is_tagged", "(", "WORDS", ")", ":", "self", ".", "tokenize_words", "(", ")", "return", "self", ".", "spans", "(", "WORDS", ")" ]
The list of spans representing ``words`` layer elements.
[ "The", "list", "of", "spans", "representing", "words", "layer", "elements", "." ]
python
train
nephila/python-taiga
taiga/models/models.py
https://github.com/nephila/python-taiga/blob/5b471d6b8b59e5d410162a6f1c2f0d4188445a56/taiga/models/models.py#L1288-L1298
def import_task(self, subject, status, **attrs): """ Import a Task and return a :class:`Task` object. :param subject: subject of the :class:`Task` :param status: status of the :class:`Task` :param attrs: optional attributes for :class:`Task` """ return Tasks(self.requester).import_( self.id, subject, status, **attrs )
[ "def", "import_task", "(", "self", ",", "subject", ",", "status", ",", "*", "*", "attrs", ")", ":", "return", "Tasks", "(", "self", ".", "requester", ")", ".", "import_", "(", "self", ".", "id", ",", "subject", ",", "status", ",", "*", "*", "attrs", ")" ]
Import a Task and return a :class:`Task` object. :param subject: subject of the :class:`Task` :param status: status of the :class:`Task` :param attrs: optional attributes for :class:`Task`
[ "Import", "a", "Task", "and", "return", "a", ":", "class", ":", "Task", "object", "." ]
python
train
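A hedged usage sketch with python-taiga's `TaigaAPI` client; the credentials, project slug, and status name are placeholders, and `get_by_slug` is assumed from the library's project accessor.

from taiga import TaigaAPI

api = TaigaAPI()
api.auth(username='user', password='secret')  # placeholder credentials
project = api.projects.get_by_slug('my-project')
# extra keyword arguments are forwarded through **attrs
task = project.import_task('Imported task', 'New', description='created via import')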
TeamHG-Memex/eli5
eli5/lime/textutils.py
https://github.com/TeamHG-Memex/eli5/blob/371b402a0676295c05e582a2dd591f7af476b86b/eli5/lime/textutils.py#L112-L144
def replace_random_tokens_bow(self, n_samples, # type: int replacement='', # type: str random_state=None, min_replace=1, # type: Union[int, float] max_replace=1.0, # type: Union[int, float] ): # type: (...) -> List[Tuple[str, int, np.ndarray]] """ Return a list of ``(text, replaced_words_count, mask)`` tuples with n_samples versions of text with some words replaced. If a word is replaced, all duplicate words are also replaced from the text. By default words are replaced with '', i.e. removed. """ if not self.vocab: nomask = np.array([], dtype=int) return [('', 0, nomask)] * n_samples min_replace, max_replace = self._get_min_max(min_replace, max_replace, len(self.vocab)) rng = check_random_state(random_state) replace_sizes = rng.randint(low=min_replace, high=max_replace + 1, size=n_samples) res = [] for num_to_replace in replace_sizes: tokens_to_replace = set(rng.choice(self.vocab, num_to_replace, replace=False)) idx_to_replace = [idx for idx, token in enumerate(self.tokens) if token in tokens_to_replace] mask = indices_to_bool_mask(idx_to_replace, len(self.tokens)) s = self.split.masked(idx_to_replace, replacement) res.append((s.text, num_to_replace, mask)) return res
[ "def", "replace_random_tokens_bow", "(", "self", ",", "n_samples", ",", "# type: int", "replacement", "=", "''", ",", "# type: str", "random_state", "=", "None", ",", "min_replace", "=", "1", ",", "# type: Union[int, float]", "max_replace", "=", "1.0", ",", "# type: Union[int, float]", ")", ":", "# type: (...) -> List[Tuple[str, int, np.ndarray]]", "if", "not", "self", ".", "vocab", ":", "nomask", "=", "np", ".", "array", "(", "[", "]", ",", "dtype", "=", "int", ")", "return", "[", "(", "''", ",", "0", ",", "nomask", ")", "]", "*", "n_samples", "min_replace", ",", "max_replace", "=", "self", ".", "_get_min_max", "(", "min_replace", ",", "max_replace", ",", "len", "(", "self", ".", "vocab", ")", ")", "rng", "=", "check_random_state", "(", "random_state", ")", "replace_sizes", "=", "rng", ".", "randint", "(", "low", "=", "min_replace", ",", "high", "=", "max_replace", "+", "1", ",", "size", "=", "n_samples", ")", "res", "=", "[", "]", "for", "num_to_replace", "in", "replace_sizes", ":", "tokens_to_replace", "=", "set", "(", "rng", ".", "choice", "(", "self", ".", "vocab", ",", "num_to_replace", ",", "replace", "=", "False", ")", ")", "idx_to_replace", "=", "[", "idx", "for", "idx", ",", "token", "in", "enumerate", "(", "self", ".", "tokens", ")", "if", "token", "in", "tokens_to_replace", "]", "mask", "=", "indices_to_bool_mask", "(", "idx_to_replace", ",", "len", "(", "self", ".", "tokens", ")", ")", "s", "=", "self", ".", "split", ".", "masked", "(", "idx_to_replace", ",", "replacement", ")", "res", ".", "append", "(", "(", "s", ".", "text", ",", "num_to_replace", ",", "mask", ")", ")", "return", "res" ]
Return a list of ``(text, replaced_words_count, mask)`` tuples with n_samples versions of text with some words replaced. If a word is replaced, all duplicate words are also replaced from the text. By default words are replaced with '', i.e. removed.
[ "Return", "a", "list", "of", "(", "text", "replaced_words_count", "mask", ")", "tuples", "with", "n_samples", "versions", "of", "text", "with", "some", "words", "replaced", ".", "If", "a", "word", "is", "replaced", "all", "duplicate", "words", "are", "also", "replaced", "from", "the", "text", ".", "By", "default", "words", "are", "replaced", "with", "i", ".", "e", ".", "removed", "." ]
python
train
JukeboxPipeline/jukeboxmaya
src/jukeboxmaya/addons/mayagenesis/mayagenesis.py
https://github.com/JukeboxPipeline/jukeboxmaya/blob/c8d6318d53cdb5493453c4a6b65ef75bdb2d5f2c/src/jukeboxmaya/addons/mayagenesis/mayagenesis.py#L116-L281
def subclass_genesis(self, genesisclass):
        """Subclass the given genesis class and implement all abstract methods

        :param genesisclass: the GenesisWin class to subclass
        :type genesisclass: :class:`GenesisWin`
        :returns: the subclass
        :rtype: subclass of :class:`GenesisWin`
        :raises: None
        """
        class MayaGenesisWin(genesisclass):
            """Implementation of Genesis for maya
            """

            def open_shot(self, taskfile):
                """Open the given taskfile

                :param taskfile: the taskfile for the shot
                :type taskfile: :class:`djadapter.models.TaskFile`
                :returns: True if opening was successful
                :rtype: bool
                :raises: none
                """
                return self.open_file(taskfile)

            def save_shot(self, jbfile, tf):
                """Save the shot to the location of jbfile

                :param jbfile: the jbfile that can be used to query the location
                :type jbfile: :class:`jukebox.core.filesys.JB_File`
                :param tf: the taskfile that is saved
                :type tf: :class:`djadapter.models.TaskFile`
                :returns: None
                :rtype: None
                :raises: None
                """
                self.update_scene_node(tf)
                self.save_file(jbfile)

            def open_asset(self, taskfile):
                """Open the given taskfile

                :param taskfile: the taskfile for the asset
                :type taskfile: :class:`djadapter.models.TaskFile`
                :returns: True if opening was successful
                :rtype: bool
                :raises: None
                """
                return self.open_file(taskfile)

            def save_asset(self, jbfile, tf):
                """Save the asset to the location of jbfile

                :param jbfile: the jbfile that can be used to query the location
                :type jbfile: :class:`jukebox.core.filesys.JB_File`
                :param tf: the taskfile that is saved
                :type tf: :class:`djadapter.models.TaskFile`
                :returns: None
                :rtype: None
                :raises: NotImplementedError
                """
                self.update_scene_node(tf)
                self.save_file(jbfile)

            def save_file(self, jbfile):
                """Physically save current scene to jbfile\'s location

                :param jbfile: the jbfile that can be used to query the location
                :type jbfile: :class:`jukebox.core.filesys.JB_File`
                :returns: None
                :rtype: None
                :raises: None
                """
                p = jbfile.get_fullpath()
                p = os.path.expanduser(p)
                typ = 'mayaBinary'
                if jbfile.get_ext() == 'ma':
                    typ = 'mayaAscii'
                cmds.file(rename = p)
                cmds.file(save=True, defaultExtensions=False, type=typ)

            def open_file(self, taskfile):
                """Open the given jbfile in maya

                :param taskfile: the taskfile for the asset
                :type taskfile: :class:`djadapter.models.TaskFile`
                :returns: True if opening was successful
                :rtype: bool
                :raises: None
                """
                r = self.check_modified()
                if r is False:
                    return False
                cmds.file(taskfile.path, open=True, force=True, ignoreVersion=True)
                return True

            def get_current_file(self, ):
                """Return the taskfile that is currently open or None if no taskfile is open

                :returns: the open taskfile or None if no taskfile is open
                :rtype: :class:`djadapter.models.TaskFile` | None
                :raises: None
                """
                node = jbscene.get_current_scene_node()
                if not node:
                    return
                tfid = cmds.getAttr('%s.taskfile_id' % node)
                try:
                    return djadapter.taskfiles.get(id=tfid)
                except djadapter.models.TaskFile.DoesNotExist:
                    log.error("No taskfile with id %s was found. Get current scene failed. Check your jb_sceneNode \'%s\'."
                              % (tfid, node))
                    return

            def get_scene_node(self, ):
                """Return the current scene node or create one if it does not exist

                :returns: Name of the scene node
                :rtype: str
                :raises: None
                """
                scenenodes = cmds.ls(':jb_sceneNode*')
                if len(scenenodes) > 1:
                    cmds.delete(scenenodes)
                node = jbscene.get_current_scene_node()
                if node is None:
                    cmds.namespace(set=':')
                    node = cmds.createNode('jb_sceneNode')
                return node

            def update_scene_node(self, tf):
                """Update the current scene node

                :param tf: the taskfile that is saved
                :type tf: :class:`djadapter.models.TaskFile`
                :returns: None
                :rtype: None
                :raises: None
                """
                node = self.get_scene_node()
                cmds.setAttr('%s.taskfile_id' % node, lock=False)
                cmds.setAttr('%s.taskfile_id' % node, tf.id)
                cmds.setAttr('%s.taskfile_id' % node, lock=True)

            def check_modified(self, ):
                """Check if the current scene was modified and ask the user to continue

                This might save the scene if the user accepts to save before continuing.

                :returns: True if the user accepted to continue.
                :rtype: bool
                :raises: None
                """
                if not cmds.file(q=1, modified=1):
                    return True
                curfile = cmds.file(q=1, sceneName=1)
                r = cmds.confirmDialog(
                    title='Save Changes',
                    message='Save changes to %s?' % curfile,
                    button=['Save', 'Don\'t Save', 'Cancel'],
                    defaultButton='Save',
                    cancelButton='Cancel',
                    dismissString='Cancel')
                if r == 'Cancel':
                    return False
                if r == 'Save':
                    cmds.file(save=True, force=True)
                return True

        MayaGenesisWin.set_filetype(djadapter.FILETYPES['mayamainscene'],)
        return MayaGenesisWin
[ "def", "subclass_genesis", "(", "self", ",", "genesisclass", ")", ":", "class", "MayaGenesisWin", "(", "genesisclass", ")", ":", "\"\"\"Implementation of Genesis for maya\n \"\"\"", "def", "open_shot", "(", "self", ",", "taskfile", ")", ":", "\"\"\"Open the given taskfile\n\n :param taskfile: the taskfile for the shot\n :type taskfile: :class:`djadapter.models.TaskFile`\n :returns: True if opening was successful\n :rtype: bool\n :raises: none\n \"\"\"", "return", "self", ".", "open_file", "(", "taskfile", ")", "def", "save_shot", "(", "self", ",", "jbfile", ",", "tf", ")", ":", "\"\"\"Save the shot to the location of jbfile\n\n :param jbfile: the jbfile that can be used to query the location\n :type jbfile: :class:`jukebox.core.filesys.JB_File`\n :param tf: the taskfile that is saved\n :type tf: :class:`djadapter.models.TaskFile`\n :returns: None\n :rtype: None\n :raises: None\n \"\"\"", "self", ".", "update_scene_node", "(", "tf", ")", "self", ".", "save_file", "(", "jbfile", ")", "def", "open_asset", "(", "self", ",", "taskfile", ")", ":", "\"\"\"Open the given taskfile\n\n :param taskfile: the taskfile for the asset\n :type taskfile: :class:`djadapter.models.TaskFile`\n :returns: True if opening was successful\n :rtype: bool\n :raises: None\n \"\"\"", "return", "self", ".", "open_file", "(", "taskfile", ")", "def", "save_asset", "(", "self", ",", "jbfile", ",", "tf", ")", ":", "\"\"\"Save the asset to the location of jbfile\n\n :param jbfile: the jbfile that can be used to query the location\n :type jbfile: :class:`jukebox.core.filesys.JB_File`\n :param tf: the taskfile that is saved\n :type tf: :class:`djadapter.models.TaskFile`\n :returns: None\n :rtype: None\n :raises: NotImplementedError\n \"\"\"", "self", ".", "update_scene_node", "(", "tf", ")", "self", ".", "save_file", "(", "jbfile", ")", "def", "save_file", "(", "self", ",", "jbfile", ")", ":", "\"\"\"Physically save current scene to jbfile\\'s location\n\n :param jbfile: the jbfile that can be used to query the location\n :type jbfile: :class:`jukebox.core.filesys.JB_File`\n :returns: None\n :rtype: None\n :raises: None\n \"\"\"", "p", "=", "jbfile", ".", "get_fullpath", "(", ")", "p", "=", "os", ".", "path", ".", "expanduser", "(", "p", ")", "typ", "=", "'mayaBinary'", "if", "jbfile", ".", "get_ext", "(", ")", "==", "'ma'", ":", "typ", "=", "'mayaAscii'", "cmds", ".", "file", "(", "rename", "=", "p", ")", "cmds", ".", "file", "(", "save", "=", "True", ",", "defaultExtensions", "=", "False", ",", "type", "=", "typ", ")", "def", "open_file", "(", "self", ",", "taskfile", ")", ":", "\"\"\"Open the given jbfile in maya\n\n :param taskfile: the taskfile for the asset\n :type taskfile: :class:`djadapter.models.TaskFile`\n :returns: True if opening was successful\n :rtype: bool\n :raises: None\n \"\"\"", "r", "=", "self", ".", "check_modified", "(", ")", "if", "r", "is", "False", ":", "return", "False", "cmds", ".", "file", "(", "taskfile", ".", "path", ",", "open", "=", "True", ",", "force", "=", "True", ",", "ignoreVersion", "=", "True", ")", "return", "True", "def", "get_current_file", "(", "self", ",", ")", ":", "\"\"\"Return the taskfile that is currently open or None if no taskfile is open\n\n :returns: the open taskfile or None if no taskfile is open\n :rtype: :class:`djadapter.models.TaskFile` | None\n :raises: None\n \"\"\"", "node", "=", "jbscene", ".", "get_current_scene_node", "(", ")", "if", "not", "node", ":", "return", "tfid", "=", "cmds", ".", "getAttr", "(", "'%s.taskfile_id'", "%", "node", ")", "try", ":", 
"return", "djadapter", ".", "taskfiles", ".", "get", "(", "id", "=", "tfid", ")", "except", "djadapter", ".", "models", ".", "TaskFile", ".", "DoesNotExist", ":", "log", ".", "error", "(", "\"No taskfile with id %s was found. Get current scene failed. Check your jb_sceneNode \\'%s\\'.\"", "%", "(", "tfid", ",", "node", ")", ")", "return", "def", "get_scene_node", "(", "self", ",", ")", ":", "\"\"\"Return the current scenen node or create one if it does not exist\n\n :returns: Name of the scene node\n :rtype: str\n :raises: None\n \"\"\"", "scenenodes", "=", "cmds", ".", "ls", "(", "':jb_sceneNode*'", ")", "if", "len", "(", "scenenodes", ")", ">", "1", ":", "cmds", ".", "delete", "(", "scenenodes", ")", "node", "=", "jbscene", ".", "get_current_scene_node", "(", ")", "if", "node", "is", "None", ":", "cmds", ".", "namespace", "(", "set", "=", "':'", ")", "node", "=", "cmds", ".", "createNode", "(", "'jb_sceneNode'", ")", "return", "node", "def", "update_scene_node", "(", "self", ",", "tf", ")", ":", "\"\"\"Update the current scene node\n\n :param tf: the taskfile that is saved\n :type tf: :class:`djadapter.models.TaskFile`\n :returns: None\n :rtype: None\n :raises: None\n \"\"\"", "node", "=", "self", ".", "get_scene_node", "(", ")", "cmds", ".", "setAttr", "(", "'%s.taskfile_id'", "%", "node", ",", "lock", "=", "False", ")", "cmds", ".", "setAttr", "(", "'%s.taskfile_id'", "%", "node", ",", "tf", ".", "id", ")", "cmds", ".", "setAttr", "(", "'%s.taskfile_id'", "%", "node", ",", "lock", "=", "True", ")", "def", "check_modified", "(", "self", ",", ")", ":", "\"\"\"Check if the current scene was modified and ask the user to continue\n\n This might save the scene if the user accepts to save before continuing.\n\n :returns: True if the user accepted to continue.\n :rtype: bool\n :raises: None\n \"\"\"", "if", "not", "cmds", ".", "file", "(", "q", "=", "1", ",", "modified", "=", "1", ")", ":", "return", "True", "curfile", "=", "cmds", ".", "file", "(", "q", "=", "1", ",", "sceneName", "=", "1", ")", "r", "=", "cmds", ".", "confirmDialog", "(", "title", "=", "'Save Changes'", ",", "message", "=", "'Save changes to %s?'", "%", "curfile", ",", "button", "=", "[", "'Save'", ",", "'Don\\'t Save'", ",", "'Cancel'", "]", ",", "defaultButton", "=", "'Save'", ",", "cancelButton", "=", "'Cancel'", ",", "dismissString", "=", "'Cancel'", ")", "if", "r", "==", "'Cancel'", ":", "return", "False", "if", "r", "==", "'Save'", ":", "cmds", ".", "file", "(", "save", "=", "True", ",", "force", "=", "True", ")", "return", "True", "MayaGenesisWin", ".", "set_filetype", "(", "djadapter", ".", "FILETYPES", "[", "'mayamainscene'", "]", ",", ")", "return", "MayaGenesisWin" ]
Subclass the given genesis class and implement all abstract methods :param genesisclass: the GenesisWin class to subclass :type genesisclass: :class:`GenesisWin` :returns: the subclass :rtype: subclass of :class:`GenesisWin` :raises: None
[ "Subclass", "the", "given", "genesis", "class", "and", "implement", "all", "abstract", "methods" ]
python
train
andialbrecht/sqlparse
sqlparse/sql.py
https://github.com/andialbrecht/sqlparse/blob/913b56e34edc7e3025feea4744dbd762774805c3/sqlparse/sql.py#L470-L477
def get_identifiers(self): """Returns the identifiers. Whitespaces and punctuations are not included in this generator. """ for token in self.tokens: if not (token.is_whitespace or token.match(T.Punctuation, ',')): yield token
[ "def", "get_identifiers", "(", "self", ")", ":", "for", "token", "in", "self", ".", "tokens", ":", "if", "not", "(", "token", ".", "is_whitespace", "or", "token", ".", "match", "(", "T", ".", "Punctuation", ",", "','", ")", ")", ":", "yield", "token" ]
Returns the identifiers. Whitespaces and punctuations are not included in this generator.
[ "Returns", "the", "identifiers", "." ]
python
train
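Usage with sqlparse's public API: parse a statement, locate the `IdentifierList` group in the select list, and iterate it.

import sqlparse
from sqlparse.sql import IdentifierList

stmt = sqlparse.parse('SELECT a, b, c FROM t')[0]
for token in stmt.tokens:
    if isinstance(token, IdentifierList):
        # whitespace and the separating commas are skipped by the generator
        print([str(t) for t in token.get_identifiers()])  # ['a', 'b', 'c']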
Shizmob/pydle
pydle/features/tls.py
https://github.com/Shizmob/pydle/blob/7ec7d65d097318ed0bcdc5d8401470287d8c7cf7/pydle/features/tls.py#L28-L35
async def connect(self, hostname=None, port=None, tls=False, **kwargs): """ Connect to a server, optionally over TLS. See pydle.features.RFC1459Support.connect for misc parameters. """ if not port: if tls: port = DEFAULT_TLS_PORT else: port = rfc1459.protocol.DEFAULT_PORT return await super().connect(hostname, port, tls=tls, **kwargs)
[ "async", "def", "connect", "(", "self", ",", "hostname", "=", "None", ",", "port", "=", "None", ",", "tls", "=", "False", ",", "*", "*", "kwargs", ")", ":", "if", "not", "port", ":", "if", "tls", ":", "port", "=", "DEFAULT_TLS_PORT", "else", ":", "port", "=", "rfc1459", ".", "protocol", ".", "DEFAULT_PORT", "return", "await", "super", "(", ")", ".", "connect", "(", "hostname", ",", "port", ",", "tls", "=", "tls", ",", "*", "*", "kwargs", ")" ]
Connect to a server, optionally over TLS. See pydle.features.RFC1459Support.connect for misc parameters.
[ "Connect", "to", "a", "server", "optionally", "over", "TLS", ".", "See", "pydle", ".", "features", ".", "RFC1459Support", ".", "connect", "for", "misc", "parameters", "." ]
python
train
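A connection sketch; `pydle.Client` bundles this TLS feature in its default feature set, and `run()` drives the `connect()` coroutine shown above. Server and channel names are placeholders.

import pydle

class MyBot(pydle.Client):
    async def on_connect(self):
        await self.join('#example')

client = MyBot('mybot')
# tls=True with no explicit port falls back to DEFAULT_TLS_PORT per the method above
client.run('irc.libera.chat', tls=True)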
raamana/hiwenet
docs/example_thickness_hiwenet.py
https://github.com/raamana/hiwenet/blob/b12699b3722fd0a6a835e7d7ca4baf58fb181809/docs/example_thickness_hiwenet.py#L25-L31
def get_parcellation(atlas, parcel_param): "Placeholder to insert your own function to return parcellation in reference space." parc_path = os.path.join(atlas, 'parcellation_param{}.mgh'.format(parcel_param)) parcel = nibabel.freesurfer.io.read_geometry(parc_path) return parcel
[ "def", "get_parcellation", "(", "atlas", ",", "parcel_param", ")", ":", "parc_path", "=", "os", ".", "path", ".", "join", "(", "atlas", ",", "'parcellation_param{}.mgh'", ".", "format", "(", "parcel_param", ")", ")", "parcel", "=", "nibabel", ".", "freesurfer", ".", "io", ".", "read_geometry", "(", "parc_path", ")", "return", "parcel" ]
Placeholder to insert your own function to return parcellation in reference space.
[ "Placeholder", "to", "insert", "your", "own", "function", "to", "return", "parcellation", "in", "reference", "space", "." ]
python
train
materialsproject/pymatgen
pymatgen/io/lammps/data.py
https://github.com/materialsproject/pymatgen/blob/4ca558cf72f8d5f8a1f21dfdfc0181a971c186da/pymatgen/io/lammps/data.py#L128-L150
def get_string(self, significant_figures=6): """ Returns the string representation of simulation box in LAMMPS data file format. Args: significant_figures (int): No. of significant figures to output for box settings. Default to 6. Returns: String representation """ ph = "{:.%df}" % significant_figures lines = [] for bound, d in zip(self.bounds, "xyz"): fillers = bound + [d] * 2 bound_format = " ".join([ph] * 2 + [" {}lo {}hi"]) lines.append(bound_format.format(*fillers)) if self.tilt: tilt_format = " ".join([ph] * 3 + [" xy xz yz"]) lines.append(tilt_format.format(*self.tilt)) return "\n".join(lines)
[ "def", "get_string", "(", "self", ",", "significant_figures", "=", "6", ")", ":", "ph", "=", "\"{:.%df}\"", "%", "significant_figures", "lines", "=", "[", "]", "for", "bound", ",", "d", "in", "zip", "(", "self", ".", "bounds", ",", "\"xyz\"", ")", ":", "fillers", "=", "bound", "+", "[", "d", "]", "*", "2", "bound_format", "=", "\" \"", ".", "join", "(", "[", "ph", "]", "*", "2", "+", "[", "\" {}lo {}hi\"", "]", ")", "lines", ".", "append", "(", "bound_format", ".", "format", "(", "*", "fillers", ")", ")", "if", "self", ".", "tilt", ":", "tilt_format", "=", "\" \"", ".", "join", "(", "[", "ph", "]", "*", "3", "+", "[", "\" xy xz yz\"", "]", ")", "lines", ".", "append", "(", "tilt_format", ".", "format", "(", "*", "self", ".", "tilt", ")", ")", "return", "\"\\n\"", ".", "join", "(", "lines", ")" ]
Returns the string representation of simulation box in LAMMPS data file format. Args: significant_figures (int): No. of significant figures to output for box settings. Default to 6. Returns: String representation
[ "Returns", "the", "string", "representation", "of", "simulation", "box", "in", "LAMMPS", "data", "file", "format", "." ]
python
train
tomer8007/kik-bot-api-unofficial
kik_unofficial/client.py
https://github.com/tomer8007/kik-bot-api-unofficial/blob/2ae5216bc05e7099a41895382fc8e428a7a5c3ac/kik_unofficial/client.py#L191-L201
def send_is_typing(self, peer_jid: str, is_typing: bool): """ Updates the 'is typing' status of the bot during a conversation. :param peer_jid: The JID that the notification will be sent to :param is_typing: If true, indicates that we're currently typing, or False otherwise. """ if self.is_group_jid(peer_jid): return self._send_xmpp_element(chatting.OutgoingGroupIsTypingEvent(peer_jid, is_typing)) else: return self._send_xmpp_element(chatting.OutgoingIsTypingEvent(peer_jid, is_typing))
[ "def", "send_is_typing", "(", "self", ",", "peer_jid", ":", "str", ",", "is_typing", ":", "bool", ")", ":", "if", "self", ".", "is_group_jid", "(", "peer_jid", ")", ":", "return", "self", ".", "_send_xmpp_element", "(", "chatting", ".", "OutgoingGroupIsTypingEvent", "(", "peer_jid", ",", "is_typing", ")", ")", "else", ":", "return", "self", ".", "_send_xmpp_element", "(", "chatting", ".", "OutgoingIsTypingEvent", "(", "peer_jid", ",", "is_typing", ")", ")" ]
Updates the 'is typing' status of the bot during a conversation. :param peer_jid: The JID that the notification will be sent to :param is_typing: If true, indicates that we're currently typing, or False otherwise.
[ "Updates", "the", "is", "typing", "status", "of", "the", "bot", "during", "a", "conversation", "." ]
python
train
profitbricks/profitbricks-sdk-python
profitbricks/client.py
https://github.com/profitbricks/profitbricks-sdk-python/blob/2c804b141688eccb07d6ae56601d5c60a62abebd/profitbricks/client.py#L144-L202
def _get_password(self, password, use_config=True, config_filename=None, use_keyring=HAS_KEYRING): """ Determine the user password If the password is given, this password is used. Otherwise this function will try to get the password from the user's keyring if `use_keyring` is set to True. :param username: Username (used directly if given) :type username: ``str`` :param use_config: Whether to read username from configuration file :type use_config: ``bool`` :param config_filename: Path to the configuration file :type config_filename: ``str`` """ if not password and use_config: if self._config is None: self._read_config(config_filename) password = self._config.get("credentials", "password", fallback=None) if not password and use_keyring: logger = logging.getLogger(__name__) question = ("Please enter your password for {} on {}: " .format(self.username, self.host_base)) if HAS_KEYRING: password = keyring.get_password(self.keyring_identificator, self.username) if password is None: password = getpass.getpass(question) try: keyring.set_password(self.keyring_identificator, self.username, password) except keyring.errors.PasswordSetError as error: logger.warning("Storing password in keyring '%s' failed: %s", self.keyring_identificator, error) else: logger.warning("Install the 'keyring' Python module to store your password " "securely in your keyring!") password = self._config.get("credentials", "password", fallback=None) if password is None: password = getpass.getpass(question) store_plaintext_passwords = self._config.get( "preferences", "store-plaintext-passwords", fallback=None) if store_plaintext_passwords != "no": question = ("Do you want to store your password in plain text in " + self._config_filename()) answer = ask(question, ["yes", "no", "never"], "no") if answer == "yes": self._config.set("credentials", "password", password) self._save_config() elif answer == "never": if "preferences" not in self._config: self._config.add_section("preferences") self._config.set("preferences", "store-plaintext-passwords", "no") self._save_config() return password
[ "def", "_get_password", "(", "self", ",", "password", ",", "use_config", "=", "True", ",", "config_filename", "=", "None", ",", "use_keyring", "=", "HAS_KEYRING", ")", ":", "if", "not", "password", "and", "use_config", ":", "if", "self", ".", "_config", "is", "None", ":", "self", ".", "_read_config", "(", "config_filename", ")", "password", "=", "self", ".", "_config", ".", "get", "(", "\"credentials\"", ",", "\"password\"", ",", "fallback", "=", "None", ")", "if", "not", "password", "and", "use_keyring", ":", "logger", "=", "logging", ".", "getLogger", "(", "__name__", ")", "question", "=", "(", "\"Please enter your password for {} on {}: \"", ".", "format", "(", "self", ".", "username", ",", "self", ".", "host_base", ")", ")", "if", "HAS_KEYRING", ":", "password", "=", "keyring", ".", "get_password", "(", "self", ".", "keyring_identificator", ",", "self", ".", "username", ")", "if", "password", "is", "None", ":", "password", "=", "getpass", ".", "getpass", "(", "question", ")", "try", ":", "keyring", ".", "set_password", "(", "self", ".", "keyring_identificator", ",", "self", ".", "username", ",", "password", ")", "except", "keyring", ".", "errors", ".", "PasswordSetError", "as", "error", ":", "logger", ".", "warning", "(", "\"Storing password in keyring '%s' failed: %s\"", ",", "self", ".", "keyring_identificator", ",", "error", ")", "else", ":", "logger", ".", "warning", "(", "\"Install the 'keyring' Python module to store your password \"", "\"securely in your keyring!\"", ")", "password", "=", "self", ".", "_config", ".", "get", "(", "\"credentials\"", ",", "\"password\"", ",", "fallback", "=", "None", ")", "if", "password", "is", "None", ":", "password", "=", "getpass", ".", "getpass", "(", "question", ")", "store_plaintext_passwords", "=", "self", ".", "_config", ".", "get", "(", "\"preferences\"", ",", "\"store-plaintext-passwords\"", ",", "fallback", "=", "None", ")", "if", "store_plaintext_passwords", "!=", "\"no\"", ":", "question", "=", "(", "\"Do you want to store your password in plain text in \"", "+", "self", ".", "_config_filename", "(", ")", ")", "answer", "=", "ask", "(", "question", ",", "[", "\"yes\"", ",", "\"no\"", ",", "\"never\"", "]", ",", "\"no\"", ")", "if", "answer", "==", "\"yes\"", ":", "self", ".", "_config", ".", "set", "(", "\"credentials\"", ",", "\"password\"", ",", "password", ")", "self", ".", "_save_config", "(", ")", "elif", "answer", "==", "\"never\"", ":", "if", "\"preferences\"", "not", "in", "self", ".", "_config", ":", "self", ".", "_config", ".", "add_section", "(", "\"preferences\"", ")", "self", ".", "_config", ".", "set", "(", "\"preferences\"", ",", "\"store-plaintext-passwords\"", ",", "\"no\"", ")", "self", ".", "_save_config", "(", ")", "return", "password" ]
Determine the user password

If the password is given, this password is used. Otherwise this function will try to get the password from the user's keyring if `use_keyring` is set to True.

:param password: Password (used directly if given)
:type password: ``str``
:param use_config: Whether to read the password from the configuration file
:type use_config: ``bool``
:param config_filename: Path to the configuration file
:type config_filename: ``str``
[ "Determine", "the", "user", "password" ]
python
valid
webadmin87/midnight
midnight_main/services.py
https://github.com/webadmin87/midnight/blob/b60b3b257b4d633550b82a692f3ea3756c62a0a9/midnight_main/services.py#L85-L104
def get_by_page(query, page, page_size):
    """
    Performs pagination

    :param query: the query
    :param page: the page number
    :param page_size: the number of objects per page
    :return:
    """
    pager = Paginator(query, page_size)
    try:
        models = pager.page(page)
    except PageNotAnInteger:
        # If page is not an integer, deliver first page.
        models = pager.page(1)
    except EmptyPage:
        # If page is out of range (e.g. 9999), deliver last page of results.
        models = pager.page(pager.num_pages)
    return models
[ "def", "get_by_page", "(", "query", ",", "page", ",", "page_size", ")", ":", "pager", "=", "Paginator", "(", "query", ",", "page_size", ")", "try", ":", "models", "=", "pager", ".", "page", "(", "page", ")", "except", "PageNotAnInteger", ":", "# If page is not an integer, deliver first page.", "models", "=", "pager", ".", "page", "(", "1", ")", "except", "EmptyPage", ":", "# If page is out of range (e.g. 9999), deliver last page of results.", "models", "=", "pager", ".", "page", "(", "pager", ".", "num_pages", ")", "return", "models" ]
Performs pagination

:param query: the query
:param page: the page number
:param page_size: the number of objects per page
:return:
[ "Performs", "pagination", ":", "param", "query", ":", "the", "query", ":", "param", "page", ":", "the", "page", "number", ":", "param", "page_size", ":", "the", "number", "of", "objects", "per", "page", ":", "return", ":" ]
python
train
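The translated docstring describes a standard never-fail guard around Django's paginator; the same pattern, standalone and reusable outside this module:

from django.core.paginator import EmptyPage, PageNotAnInteger, Paginator

def safe_page(queryset, page, page_size):
    pager = Paginator(queryset, page_size)
    try:
        return pager.page(page)
    except PageNotAnInteger:
        return pager.page(1)  # non-numeric page -> first page
    except EmptyPage:
        return pager.page(pager.num_pages)  # out of range -> last page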
delfick/harpoon
harpoon/actions.py
https://github.com/delfick/harpoon/blob/a2d39311d6127b7da2e15f40468bf320d598e461/harpoon/actions.py#L102-L112
def pull(collector, image, **kwargs):
    """Pull an image"""
    if not image.image_index:
        raise BadOption("The chosen image does not have a image_index configuration", wanted=image.name)
    tag = kwargs["artifact"]
    if tag is NotSpecified:
        tag = collector.configuration["harpoon"].tag
    if tag is not NotSpecified:
        image.tag = tag
        log.info("Pulling tag: %s", tag)
    Syncer().pull(image, ignore_missing=image.harpoon.ignore_missing)
[ "def", "pull", "(", "collector", ",", "image", ",", "*", "*", "kwargs", ")", ":", "if", "not", "image", ".", "image_index", ":", "raise", "BadOption", "(", "\"The chosen image does not have a image_index configuration\"", ",", "wanted", "=", "image", ".", "name", ")", "tag", "=", "kwargs", "[", "\"artifact\"", "]", "if", "tag", "is", "NotSpecified", ":", "tag", "=", "collector", ".", "configuration", "[", "\"harpoon\"", "]", ".", "tag", "if", "tag", "is", "not", "NotSpecified", ":", "image", ".", "tag", "=", "tag", "log", ".", "info", "(", "\"Pulling tag: %s\"", ",", "tag", ")", "Syncer", "(", ")", ".", "pull", "(", "image", ",", "ignore_missing", "=", "image", ".", "harpoon", ".", "ignore_missing", ")" ]
Pull an image
[ "Pull", "an", "image" ]
python
train
secynic/ipwhois
ipwhois/rdap.py
https://github.com/secynic/ipwhois/blob/b5d634d36b0b942d538d38d77b3bdcd815f155a0/ipwhois/rdap.py#L182-L212
def _parse_email(self, val): """ The function for parsing the vcard email addresses. Args: val (:obj:`list`): The value to parse. """ ret = { 'type': None, 'value': None } try: ret['type'] = val[1]['type'] except (KeyError, ValueError, TypeError): pass ret['value'] = val[3].strip() try: self.vars['email'].append(ret) except AttributeError: self.vars['email'] = [] self.vars['email'].append(ret)
[ "def", "_parse_email", "(", "self", ",", "val", ")", ":", "ret", "=", "{", "'type'", ":", "None", ",", "'value'", ":", "None", "}", "try", ":", "ret", "[", "'type'", "]", "=", "val", "[", "1", "]", "[", "'type'", "]", "except", "(", "KeyError", ",", "ValueError", ",", "TypeError", ")", ":", "pass", "ret", "[", "'value'", "]", "=", "val", "[", "3", "]", ".", "strip", "(", ")", "try", ":", "self", ".", "vars", "[", "'email'", "]", ".", "append", "(", "ret", ")", "except", "AttributeError", ":", "self", ".", "vars", "[", "'email'", "]", "=", "[", "]", "self", ".", "vars", "[", "'email'", "]", ".", "append", "(", "ret", ")" ]
The function for parsing the vcard email addresses. Args: val (:obj:`list`): The value to parse.
[ "The", "function", "for", "parsing", "the", "vcard", "email", "addresses", "." ]
python
train
teepark/greenhouse
greenhouse/scheduler.py
https://github.com/teepark/greenhouse/blob/8fd1be4f5443ba090346b5ec82fdbeb0a060d956/greenhouse/scheduler.py#L381-L442
def schedule_recurring(interval, target=None, maxtimes=0, starting_at=0, args=(), kwargs=None): """insert a greenlet into the scheduler to run regularly at an interval If provided a function, it is wrapped in a new greenlet :param interval: the number of seconds between invocations :type interval: int or float :param target: what to schedule :type target: function or greenlet :param maxtimes: if provided, do not run more than ``maxtimes`` iterations :type maxtimes: int :param starting_at: the unix timestamp of when to schedule it for the first time (defaults to the time of the ``schedule_recurring`` call) :type starting_at: int or float :param args: arguments for the function :type args: tuple :param kwargs: keyword arguments for the function :type kwargs: dict or None :returns: the ``target`` argument This function can also be used as a decorator: >>> @schedule_recurring(30) >>> def f(): ... print "the regular 'hello' from f" and args/kwargs can also be preloaded: >>> @schedule_recurring(30, args=('world',)) >>> def f(name): ... print 'the regular hello %s' % name """ starting_at = starting_at or time.time() if target is None: def decorator(target): return schedule_recurring( interval, target, maxtimes, starting_at, args, kwargs) return decorator func = target if isinstance(target, compat.greenlet) or target is compat.main_greenlet: if target.dead: raise TypeError("can't schedule a dead greenlet") func = target.run def run_and_schedule_one(tstamp, count): # pass in the time scheduled instead of just checking # time.time() so that delays don't add up if not maxtimes or count < maxtimes: tstamp += interval func(*args, **(kwargs or {})) schedule_at(tstamp, run_and_schedule_one, args=(tstamp, count + 1)) firstrun = starting_at + interval schedule_at(firstrun, run_and_schedule_one, args=(firstrun, 0)) return target
[ "def", "schedule_recurring", "(", "interval", ",", "target", "=", "None", ",", "maxtimes", "=", "0", ",", "starting_at", "=", "0", ",", "args", "=", "(", ")", ",", "kwargs", "=", "None", ")", ":", "starting_at", "=", "starting_at", "or", "time", ".", "time", "(", ")", "if", "target", "is", "None", ":", "def", "decorator", "(", "target", ")", ":", "return", "schedule_recurring", "(", "interval", ",", "target", ",", "maxtimes", ",", "starting_at", ",", "args", ",", "kwargs", ")", "return", "decorator", "func", "=", "target", "if", "isinstance", "(", "target", ",", "compat", ".", "greenlet", ")", "or", "target", "is", "compat", ".", "main_greenlet", ":", "if", "target", ".", "dead", ":", "raise", "TypeError", "(", "\"can't schedule a dead greenlet\"", ")", "func", "=", "target", ".", "run", "def", "run_and_schedule_one", "(", "tstamp", ",", "count", ")", ":", "# pass in the time scheduled instead of just checking", "# time.time() so that delays don't add up", "if", "not", "maxtimes", "or", "count", "<", "maxtimes", ":", "tstamp", "+=", "interval", "func", "(", "*", "args", ",", "*", "*", "(", "kwargs", "or", "{", "}", ")", ")", "schedule_at", "(", "tstamp", ",", "run_and_schedule_one", ",", "args", "=", "(", "tstamp", ",", "count", "+", "1", ")", ")", "firstrun", "=", "starting_at", "+", "interval", "schedule_at", "(", "firstrun", ",", "run_and_schedule_one", ",", "args", "=", "(", "firstrun", ",", "0", ")", ")", "return", "target" ]
insert a greenlet into the scheduler to run regularly at an interval If provided a function, it is wrapped in a new greenlet :param interval: the number of seconds between invocations :type interval: int or float :param target: what to schedule :type target: function or greenlet :param maxtimes: if provided, do not run more than ``maxtimes`` iterations :type maxtimes: int :param starting_at: the unix timestamp of when to schedule it for the first time (defaults to the time of the ``schedule_recurring`` call) :type starting_at: int or float :param args: arguments for the function :type args: tuple :param kwargs: keyword arguments for the function :type kwargs: dict or None :returns: the ``target`` argument This function can also be used as a decorator: >>> @schedule_recurring(30) >>> def f(): ... print "the regular 'hello' from f" and args/kwargs can also be preloaded: >>> @schedule_recurring(30, args=('world',)) >>> def f(name): ... print 'the regular hello %s' % name
[ "insert", "a", "greenlet", "into", "the", "scheduler", "to", "run", "regularly", "at", "an", "interval" ]
python
train
saltstack/salt
salt/cloud/clouds/linode.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/cloud/clouds/linode.py#L1455-L1469
def _clean_data(api_response): ''' Returns the DATA response from a Linode API query as a single pre-formatted dictionary api_response The query to be cleaned. ''' data = {} data.update(api_response['DATA']) if not data: response_data = api_response['DATA'] data.update(response_data) return data
[ "def", "_clean_data", "(", "api_response", ")", ":", "data", "=", "{", "}", "data", ".", "update", "(", "api_response", "[", "'DATA'", "]", ")", "if", "not", "data", ":", "response_data", "=", "api_response", "[", "'DATA'", "]", "data", ".", "update", "(", "response_data", ")", "return", "data" ]
Returns the DATA response from a Linode API query as a single pre-formatted dictionary api_response The query to be cleaned.
[ "Returns", "the", "DATA", "response", "from", "a", "Linode", "API", "query", "as", "a", "single", "pre", "-", "formatted", "dictionary" ]
python
train
netleibi/fastchunking
fastchunking/__init__.py
https://github.com/netleibi/fastchunking/blob/069b7689d26bc067120907f01d9453ab3d2efa74/fastchunking/__init__.py#L214-L230
def create_multilevel_chunker(self, chunk_sizes): """Create a multi-level chunker performing content-defined chunking (CDC) using Rabin Karp's rolling hash scheme with different specific, expected chunk sizes. Args: chunk_sizes (list): List of (expected) target chunk sizes. Warning: For performance reasons, behavior is only defined if chunk sizes are passed in order, i.e., from lowest to highest value. Returns: BaseMultiLevelChunker: A multi-level chunker object. """ rolling_hash = _rabinkarprh.RabinKarpMultiThresholdHash(self.window_size, self._seed, [1.0 / chunk_size for chunk_size in chunk_sizes]) return RabinKarpCDC._MultiLevelChunker(rolling_hash)
[ "def", "create_multilevel_chunker", "(", "self", ",", "chunk_sizes", ")", ":", "rolling_hash", "=", "_rabinkarprh", ".", "RabinKarpMultiThresholdHash", "(", "self", ".", "window_size", ",", "self", ".", "_seed", ",", "[", "1.0", "/", "chunk_size", "for", "chunk_size", "in", "chunk_sizes", "]", ")", "return", "RabinKarpCDC", ".", "_MultiLevelChunker", "(", "rolling_hash", ")" ]
Create a multi-level chunker performing content-defined chunking (CDC) using Rabin Karp's rolling hash scheme with different specific, expected chunk sizes. Args: chunk_sizes (list): List of (expected) target chunk sizes. Warning: For performance reasons, behavior is only defined if chunk sizes are passed in order, i.e., from lowest to highest value. Returns: BaseMultiLevelChunker: A multi-level chunker object.
[ "Create", "a", "multi", "-", "level", "chunker", "performing", "content", "-", "defined", "chunking", "(", "CDC", ")", "using", "Rabin", "Karp", "s", "rolling", "hash", "scheme", "with", "different", "specific", "expected", "chunk", "sizes", "." ]
python
valid
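A usage sketch; the constructor arguments mirror the attributes read above (`window_size` and the seed), but the exact parameter names and the boundary-iteration method name are assumptions to verify against the installed fastchunking version.

import fastchunking  # pip install fastchunking

cdc = fastchunking.RabinKarpCDC(window_size=48, seed=0)
# chunk sizes must be passed lowest to highest, per the warning above
chunker = cdc.create_multilevel_chunker([1024, 4096, 16384])
# next_chunk_boundaries_levels is assumed here; in current releases it
# yields (boundary, level) pairs for the multi-threshold rolling hash
print(list(chunker.next_chunk_boundaries_levels(b'\x00' * 65536)))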
dpgaspar/Flask-AppBuilder
flask_appbuilder/api/manager.py
https://github.com/dpgaspar/Flask-AppBuilder/blob/c293734c1b86e176a3ba57ee2deab6676d125576/flask_appbuilder/api/manager.py#L18-L49
def get(self, version): """ Endpoint that renders an OpenApi spec for all views that belong to a certain version --- get: parameters: - in: path schema: type: string name: version responses: 200: description: Item from Model content: application/json: schema: type: object 404: $ref: '#/components/responses/404' 500: $ref: '#/components/responses/500' """ version_found = False api_spec = self._create_api_spec(version) for base_api in current_app.appbuilder.baseviews: if isinstance(base_api, BaseApi) and base_api.version == version: base_api.add_api_spec(api_spec) version_found = True if version_found: return self.response(200, **api_spec.to_dict()) else: return self.response_404()
[ "def", "get", "(", "self", ",", "version", ")", ":", "version_found", "=", "False", "api_spec", "=", "self", ".", "_create_api_spec", "(", "version", ")", "for", "base_api", "in", "current_app", ".", "appbuilder", ".", "baseviews", ":", "if", "isinstance", "(", "base_api", ",", "BaseApi", ")", "and", "base_api", ".", "version", "==", "version", ":", "base_api", ".", "add_api_spec", "(", "api_spec", ")", "version_found", "=", "True", "if", "version_found", ":", "return", "self", ".", "response", "(", "200", ",", "*", "*", "api_spec", ".", "to_dict", "(", ")", ")", "else", ":", "return", "self", ".", "response_404", "(", ")" ]
Endpoint that renders an OpenApi spec for all views that belong to a certain version --- get: parameters: - in: path schema: type: string name: version responses: 200: description: Item from Model content: application/json: schema: type: object 404: $ref: '#/components/responses/404' 500: $ref: '#/components/responses/500'
[ "Endpoint", "that", "renders", "an", "OpenApi", "spec", "for", "all", "views", "that", "belong", "to", "a", "certain", "version", "---", "get", ":", "parameters", ":", "-", "in", ":", "path", "schema", ":", "type", ":", "string", "name", ":", "version", "responses", ":", "200", ":", "description", ":", "Item", "from", "Model", "content", ":", "application", "/", "json", ":", "schema", ":", "type", ":", "object", "404", ":", "$ref", ":", "#", "/", "components", "/", "responses", "/", "404", "500", ":", "$ref", ":", "#", "/", "components", "/", "responses", "/", "500" ]
python
train
ewels/MultiQC
multiqc/modules/damageprofiler/damageprofiler.py
https://github.com/ewels/MultiQC/blob/2037d6322b2554146a74efbf869156ad20d4c4ec/multiqc/modules/damageprofiler/damageprofiler.py#L242-L267
def threeprime_plot(self): """Generate a 3' G>A linegraph plot""" data = dict() dict_to_add = dict() # Create tuples out of entries for key in self.threepGtoAfreq_data: pos = list(range(1,len(self.threepGtoAfreq_data.get(key)))) #Multiply values by 100 to get % tmp = [i * 100.0 for i in self.threepGtoAfreq_data.get(key)] tuples = list(zip(pos,tmp)) # Get a dictionary out of it data = dict((x, y) for x, y in tuples) dict_to_add[key] = data config = { 'id': 'threeprime_misinc_plot', 'title': 'DamageProfiler: 3P G>A misincorporation plot', 'ylab': '% G to A substituted', 'xlab': 'Nucleotide position from 3\'', 'tt_label': '{point.y:.2f} % G>A misincorporations at nucleotide position {point.x}', 'ymin': 0, 'xmin': 1 } return linegraph.plot(dict_to_add,config)
[ "def", "threeprime_plot", "(", "self", ")", ":", "data", "=", "dict", "(", ")", "dict_to_add", "=", "dict", "(", ")", "# Create tuples out of entries", "for", "key", "in", "self", ".", "threepGtoAfreq_data", ":", "pos", "=", "list", "(", "range", "(", "1", ",", "len", "(", "self", ".", "threepGtoAfreq_data", ".", "get", "(", "key", ")", ")", ")", ")", "#Multiply values by 100 to get %", "tmp", "=", "[", "i", "*", "100.0", "for", "i", "in", "self", ".", "threepGtoAfreq_data", ".", "get", "(", "key", ")", "]", "tuples", "=", "list", "(", "zip", "(", "pos", ",", "tmp", ")", ")", "# Get a dictionary out of it", "data", "=", "dict", "(", "(", "x", ",", "y", ")", "for", "x", ",", "y", "in", "tuples", ")", "dict_to_add", "[", "key", "]", "=", "data", "config", "=", "{", "'id'", ":", "'threeprime_misinc_plot'", ",", "'title'", ":", "'DamageProfiler: 3P G>A misincorporation plot'", ",", "'ylab'", ":", "'% G to A substituted'", ",", "'xlab'", ":", "'Nucleotide position from 3\\''", ",", "'tt_label'", ":", "'{point.y:.2f} % G>A misincorporations at nucleotide position {point.x}'", ",", "'ymin'", ":", "0", ",", "'xmin'", ":", "1", "}", "return", "linegraph", ".", "plot", "(", "dict_to_add", ",", "config", ")" ]
Generate a 3' G>A linegraph plot
[ "Generate", "a", "3", "G", ">", "A", "linegraph", "plot" ]
python
train
Erotemic/utool
utool/util_numpy.py
https://github.com/Erotemic/utool/blob/3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a/utool/util_numpy.py#L55-L81
def _pystate_to_npstate(pystate): """ Convert state of a Python Random object to state usable by NumPy RandomState. References: https://stackoverflow.com/questions/44313620/converting-randomstate Example: >>> # ENABLE_DOCTEST >>> from utool.util_numpy import * # NOQA >>> from utool.util_numpy import _pystate_to_npstate >>> py_rng = random.Random(0) >>> np_rng = np.random.RandomState(seed=0) >>> pystate = py_rng.getstate() >>> npstate = _pystate_to_npstate(pystate) >>> np_rng.set_state(npstate) >>> assert np_rng.rand() == py_rng.random() """ NP_VERSION = 'MT19937' version, keys_pos_, cached_gaussian_ = pystate keys, pos = keys_pos_[:-1], keys_pos_[-1] keys = np.array(keys, dtype=np.uint32) has_gauss = cached_gaussian_ is not None cached_gaussian = cached_gaussian_ if has_gauss else 0.0 npstate = (NP_VERSION, keys, pos, has_gauss, cached_gaussian) return npstate
[ "def", "_pystate_to_npstate", "(", "pystate", ")", ":", "NP_VERSION", "=", "'MT19937'", "version", ",", "keys_pos_", ",", "cached_gaussian_", "=", "pystate", "keys", ",", "pos", "=", "keys_pos_", "[", ":", "-", "1", "]", ",", "keys_pos_", "[", "-", "1", "]", "keys", "=", "np", ".", "array", "(", "keys", ",", "dtype", "=", "np", ".", "uint32", ")", "has_gauss", "=", "cached_gaussian_", "is", "not", "None", "cached_gaussian", "=", "cached_gaussian_", "if", "has_gauss", "else", "0.0", "npstate", "=", "(", "NP_VERSION", ",", "keys", ",", "pos", ",", "has_gauss", ",", "cached_gaussian", ")", "return", "npstate" ]
Convert state of a Python Random object to state usable by NumPy RandomState. References: https://stackoverflow.com/questions/44313620/converting-randomstate Example: >>> # ENABLE_DOCTEST >>> from utool.util_numpy import * # NOQA >>> from utool.util_numpy import _pystate_to_npstate >>> py_rng = random.Random(0) >>> np_rng = np.random.RandomState(seed=0) >>> pystate = py_rng.getstate() >>> npstate = _pystate_to_npstate(pystate) >>> np_rng.set_state(npstate) >>> assert np_rng.rand() == py_rng.random()
[ "Convert", "state", "of", "a", "Python", "Random", "object", "to", "state", "usable", "by", "NumPy", "RandomState", "." ]
python
train
loli/medpy
medpy/graphcut/wrapper.py
https://github.com/loli/medpy/blob/95216b9e22e7ce301f0edf953ee2a2f1b6c6aee5/medpy/graphcut/wrapper.py#L203-L262
def graphcut_stawiaski(regions, gradient = False, foreground = False, background = False): """ Executes a Stawiaski label graph cut. Parameters ---------- regions : ndarray The regions image / label map. gradient : ndarray The gradient image. foreground : ndarray The foreground markers. background : ndarray The background markers. Returns ------- segmentation : ndarray The graph-cut segmentation result as boolean array. Raises ------ ArgumentError When the supplied data is erroneous. """ # initialize logger logger = Logger.getInstance() # unpack images if required # !TODO: This is an ugly hack, especially since it can be seen inside the function definition # How to overcome this, since I can not use a wrapper function as the whole thing must be pickable if not gradient and not foreground and not background: regions, gradient, foreground, background = regions # ensure that input images are scipy arrays img_region = scipy.asarray(regions) img_gradient = scipy.asarray(gradient) img_fg = scipy.asarray(foreground, dtype=scipy.bool_) img_bg = scipy.asarray(background, dtype=scipy.bool_) # ensure correctness of supplied images if not (img_region.shape == img_gradient.shape == img_fg.shape == img_bg.shape): raise ArgumentError('All supplied images must be of the same shape.') # recompute the label ids to start from id = 1 img_region = relabel(img_region) # generate graph gcgraph = graph_from_labels(img_region, img_fg, img_bg, boundary_term = boundary_stawiaski, boundary_term_args = (img_gradient)) # execute min-cut maxflow = gcgraph.maxflow() # executes the cut and returns the maxflow value logger.debug('Graph-cut terminated successfully with maxflow of {}.'.format(maxflow)) # apply results to the region image mapping = [0] # no regions with id 1 exists in mapping, entry used as padding mapping.extend([0 if gcgraph.termtype.SINK == gcgraph.what_segment(int(x) - 1) else 1 for x in scipy.unique(img_region)]) img_results = relabel_map(img_region, mapping) return img_results.astype(scipy.bool_)
[ "def", "graphcut_stawiaski", "(", "regions", ",", "gradient", "=", "False", ",", "foreground", "=", "False", ",", "background", "=", "False", ")", ":", "# initialize logger", "logger", "=", "Logger", ".", "getInstance", "(", ")", "# unpack images if required", "# !TODO: This is an ugly hack, especially since it can be seen inside the function definition", "# How to overcome this, since I can not use a wrapper function as the whole thing must be pickable", "if", "not", "gradient", "and", "not", "foreground", "and", "not", "background", ":", "regions", ",", "gradient", ",", "foreground", ",", "background", "=", "regions", "# ensure that input images are scipy arrays", "img_region", "=", "scipy", ".", "asarray", "(", "regions", ")", "img_gradient", "=", "scipy", ".", "asarray", "(", "gradient", ")", "img_fg", "=", "scipy", ".", "asarray", "(", "foreground", ",", "dtype", "=", "scipy", ".", "bool_", ")", "img_bg", "=", "scipy", ".", "asarray", "(", "background", ",", "dtype", "=", "scipy", ".", "bool_", ")", "# ensure correctness of supplied images", "if", "not", "(", "img_region", ".", "shape", "==", "img_gradient", ".", "shape", "==", "img_fg", ".", "shape", "==", "img_bg", ".", "shape", ")", ":", "raise", "ArgumentError", "(", "'All supplied images must be of the same shape.'", ")", "# recompute the label ids to start from id = 1", "img_region", "=", "relabel", "(", "img_region", ")", "# generate graph", "gcgraph", "=", "graph_from_labels", "(", "img_region", ",", "img_fg", ",", "img_bg", ",", "boundary_term", "=", "boundary_stawiaski", ",", "boundary_term_args", "=", "(", "img_gradient", ")", ")", "# execute min-cut", "maxflow", "=", "gcgraph", ".", "maxflow", "(", ")", "# executes the cut and returns the maxflow value", "logger", ".", "debug", "(", "'Graph-cut terminated successfully with maxflow of {}.'", ".", "format", "(", "maxflow", ")", ")", "# apply results to the region image", "mapping", "=", "[", "0", "]", "# no regions with id 1 exists in mapping, entry used as padding", "mapping", ".", "extend", "(", "[", "0", "if", "gcgraph", ".", "termtype", ".", "SINK", "==", "gcgraph", ".", "what_segment", "(", "int", "(", "x", ")", "-", "1", ")", "else", "1", "for", "x", "in", "scipy", ".", "unique", "(", "img_region", ")", "]", ")", "img_results", "=", "relabel_map", "(", "img_region", ",", "mapping", ")", "return", "img_results", ".", "astype", "(", "scipy", ".", "bool_", ")" ]
Executes a Stawiaski label graph cut. Parameters ---------- regions : ndarray The regions image / label map. gradient : ndarray The gradient image. foreground : ndarray The foreground markers. background : ndarray The background markers. Returns ------- segmentation : ndarray The graph-cut segmentation result as boolean array. Raises ------ ArgumentError When the supplied data is erroneous.
[ "Executes", "a", "Stawiaski", "label", "graph", "cut", ".", "Parameters", "----------", "regions", ":", "ndarray", "The", "regions", "image", "/", "label", "map", ".", "gradient", ":", "ndarray", "The", "gradient", "image", ".", "foreground", ":", "ndarray", "The", "foreground", "markers", ".", "background", ":", "ndarray", "The", "background", "markers", ".", "Returns", "-------", "segmentation", ":", "ndarray", "The", "graph", "-", "cut", "segmentation", "result", "as", "boolean", "array", ".", "Raises", "------", "ArgumentError", "When", "the", "supplied", "data", "is", "erroneous", "." ]
python
train
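Because of the tuple-unpacking hack noted in the function, the four images are typically packed into a single argument. A minimal invocation sketch (the array contents and seed placement are illustrative assumptions, not from the source):

import numpy as np

regions = np.array([[1, 1, 2, 2],
                    [1, 1, 2, 2]])        # label map, ids starting at 1
gradient = np.zeros((2, 4))               # gradient magnitude image
foreground = np.zeros((2, 4), dtype=bool)
background = np.zeros((2, 4), dtype=bool)
foreground[0, 0] = True                   # seed pixel inside the object
background[0, 3] = True                   # seed pixel in the background

# passing one tuple triggers the internal unpacking branch
segmentation = graphcut_stawiaski((regions, gradient, foreground, background))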
allenai/allennlp
allennlp/training/scheduler.py
https://github.com/allenai/allennlp/blob/648a36f77db7e45784c047176074f98534c76636/allennlp/training/scheduler.py#L49-L53
def state_dict(self) -> Dict[str, Any]: """ Returns the state of the scheduler as a ``dict``. """ return {key: value for key, value in self.__dict__.items() if key != 'optimizer'}
[ "def", "state_dict", "(", "self", ")", "->", "Dict", "[", "str", ",", "Any", "]", ":", "return", "{", "key", ":", "value", "for", "key", ",", "value", "in", "self", ".", "__dict__", ".", "items", "(", ")", "if", "key", "!=", "'optimizer'", "}" ]
Returns the state of the scheduler as a ``dict``.
[ "Returns", "the", "state", "of", "the", "scheduler", "as", "a", "dict", "." ]
python
train
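A sketch of the persistence round-trip this enables, assuming an already-built scheduler instance and the usual companion load_state_dict (both are assumptions here):

import torch

state = scheduler.state_dict()                 # everything in __dict__ except 'optimizer'
torch.save(state, 'scheduler.th')
scheduler.load_state_dict(torch.load('scheduler.th'))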
websocket-client/websocket-client
websocket/_core.py
https://github.com/websocket-client/websocket-client/blob/3c25814664fef5b78716ed8841123ed1c0d17824/websocket/_core.py#L138-L146
def settimeout(self, timeout): """ Set the timeout to the websocket. timeout: timeout time (in seconds). """ self.sock_opt.timeout = timeout if self.sock: self.sock.settimeout(timeout)
[ "def", "settimeout", "(", "self", ",", "timeout", ")", ":", "self", ".", "sock_opt", ".", "timeout", "=", "timeout", "if", "self", ".", "sock", ":", "self", ".", "sock", ".", "settimeout", "(", "timeout", ")" ]
Set the timeout to the websocket. timeout: timeout time (in seconds).
[ "Set", "the", "timeout", "to", "the", "websocket", "." ]
python
train
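A usage sketch with websocket-client (the endpoint URL is a placeholder):

import websocket

ws = websocket.WebSocket()
ws.settimeout(5)                     # stored in sock_opt, applied to the socket once it exists
ws.connect('ws://echo.example.org')  # hypothetical endpoint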
KrishnaswamyLab/graphtools
graphtools/base.py
https://github.com/KrishnaswamyLab/graphtools/blob/44685352be7df2005d44722903092207967457f2/graphtools/base.py#L494-L510
def P(self): """Diffusion operator (cached) Return or calculate the diffusion operator Returns ------- P : array-like, shape=[n_samples, n_samples] diffusion operator defined as a row-stochastic form of the kernel matrix """ try: return self._diff_op except AttributeError: self._diff_op = normalize(self.kernel, 'l1', axis=1) return self._diff_op
[ "def", "P", "(", "self", ")", ":", "try", ":", "return", "self", ".", "_diff_op", "except", "AttributeError", ":", "self", ".", "_diff_op", "=", "normalize", "(", "self", ".", "kernel", ",", "'l1'", ",", "axis", "=", "1", ")", "return", "self", ".", "_diff_op" ]
Diffusion operator (cached) Return or calculate the diffusion operator Returns ------- P : array-like, shape=[n_samples, n_samples] diffusion operator defined as a row-stochastic form of the kernel matrix
[ "Diffusion", "operator", "(", "cached", ")" ]
python
train
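What the l1 row normalization amounts to, sketched on a plain dense kernel rather than the graphtools object (the values are illustrative):

import numpy as np

K = np.array([[1.0, 0.5],
              [0.5, 1.0]])                 # symmetric kernel matrix
P = K / K.sum(axis=1, keepdims=True)       # row-stochastic, like normalize(K, 'l1', axis=1)
assert np.allclose(P.sum(axis=1), 1.0)     # every row sums to 1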
peri-source/peri
peri/opt/optimize.py
https://github.com/peri-source/peri/blob/61beed5deaaf978ab31ed716e8470d86ba639867/peri/opt/optimize.py#L1684-L1707
def find_expected_error(self, delta_params='calc', adjust=True): """ Returns the error expected after an update if the model were linear. Parameters ---------- delta_params : {numpy.ndarray, 'calc', or 'perfect'}, optional The relative change in parameters. If 'calc', uses update calculated from the current damping, J, etc; if 'perfect', uses the update calculated with zero damping. Returns ------- numpy.float64 The expected error after the update with `delta_params` """ expected_error = super(LMGlobals, self).find_expected_error( delta_params=delta_params) if adjust: #adjust for num_pix derr = (expected_error - self.error) * (self.state.residuals.size / float(self.num_pix)) expected_error = self.error + derr return expected_error
[ "def", "find_expected_error", "(", "self", ",", "delta_params", "=", "'calc'", ",", "adjust", "=", "True", ")", ":", "expected_error", "=", "super", "(", "LMGlobals", ",", "self", ")", ".", "find_expected_error", "(", "delta_params", "=", "delta_params", ")", "if", "adjust", ":", "#adjust for num_pix", "derr", "=", "(", "expected_error", "-", "self", ".", "error", ")", "*", "(", "self", ".", "state", ".", "residuals", ".", "size", "/", "float", "(", "self", ".", "num_pix", ")", ")", "expected_error", "=", "self", ".", "error", "+", "derr", "return", "expected_error" ]
Returns the error expected after an update if the model were linear. Parameters ---------- delta_params : {numpy.ndarray, 'calc', or 'perfect'}, optional The relative change in parameters. If 'calc', uses update calculated from the current damping, J, etc; if 'perfect', uses the update calculated with zero damping. Returns ------- numpy.float64 The expected error after the update with `delta_params`
[ "Returns", "the", "error", "expected", "after", "an", "update", "if", "the", "model", "were", "linear", "." ]
python
valid
iotile/coretools
iotilebuild/iotile/build/config/scons-local-3.0.1/SCons/Environment.py
https://github.com/iotile/coretools/blob/2d794f5f1346b841b0dcd16c9d284e9bf2f3c6ec/iotilebuild/iotile/build/config/scons-local-3.0.1/SCons/Environment.py#L2027-L2039
def Execute(self, action, *args, **kw): """Directly execute an action through an Environment """ action = self.Action(action, *args, **kw) result = action([], [], self) if isinstance(result, SCons.Errors.BuildError): errstr = result.errstr if result.filename: errstr = result.filename + ': ' + errstr sys.stderr.write("scons: *** %s\n" % errstr) return result.status else: return result
[ "def", "Execute", "(", "self", ",", "action", ",", "*", "args", ",", "*", "*", "kw", ")", ":", "action", "=", "self", ".", "Action", "(", "action", ",", "*", "args", ",", "*", "*", "kw", ")", "result", "=", "action", "(", "[", "]", ",", "[", "]", ",", "self", ")", "if", "isinstance", "(", "result", ",", "SCons", ".", "Errors", ".", "BuildError", ")", ":", "errstr", "=", "result", ".", "errstr", "if", "result", ".", "filename", ":", "errstr", "=", "result", ".", "filename", "+", "': '", "+", "errstr", "sys", ".", "stderr", ".", "write", "(", "\"scons: *** %s\\n\"", "%", "errstr", ")", "return", "result", ".", "status", "else", ":", "return", "result" ]
Directly execute an action through an Environment
[ "Directly", "execute", "an", "action", "through", "an", "Environment" ]
python
train
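An illustrative SConstruct fragment (the shell command itself is an assumption):

env = Environment()
status = env.Execute('mkdir -p build')   # runs immediately, unlike a deferred builder
if status:                               # non-zero exit status is returned on failure
    print('setup command failed with status %s' % status)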
rGunti/CarPi-OBDDaemon
obddaemon/custom/Obd2DataParser.py
https://github.com/rGunti/CarPi-OBDDaemon/blob/6831c477b2a00617a0d2ea98b28f3bc5c1ba8e5f/obddaemon/custom/Obd2DataParser.py#L102-L118
def parse_obj(o): """ Parses a given dictionary with the key being the OBD PID and the value its returned value by the OBD interface :param dict o: dictionary mapping OBD PIDs to raw response values :return: dictionary mapping each PID to its parsed value (None on failure) """ r = {} for k, v in o.items(): if is_unable_to_connect(v): r[k] = None continue try: r[k] = parse_value(k, v) except (ObdPidParserUnknownError, AttributeError, TypeError): r[k] = None return r
[ "def", "parse_obj", "(", "o", ")", ":", "r", "=", "{", "}", "for", "k", ",", "v", "in", "o", ".", "items", "(", ")", ":", "if", "is_unable_to_connect", "(", "v", ")", ":", "r", "[", "k", "]", "=", "None", "try", ":", "r", "[", "k", "]", "=", "parse_value", "(", "k", ",", "v", ")", "except", "(", "ObdPidParserUnknownError", ",", "AttributeError", ",", "TypeError", ")", ":", "r", "[", "k", "]", "=", "None", "return", "r" ]
Parses a given dictionary with the key being the OBD PID and the value its returned value by the OBD interface :param dict o: dictionary mapping OBD PIDs to raw response values :return: dictionary mapping each PID to its parsed value (None on failure)
[ "Parses", "a", "given", "dictionary", "with", "the", "key", "being", "the", "OBD", "PID", "and", "the", "value", "its", "returned", "value", "by", "the", "OBD", "interface", ":", "param", "dict", "o", ":", ":", "return", ":" ]
python
train
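A hypothetical input/output pair; the PID keys and raw response strings below are invented for illustration:

raw = {'010C': '41 0C 1A F8',        # made-up RPM response
       '010D': 'UNABLE TO CONNECT'}  # made-up failure response
parsed = parse_obj(raw)              # each value parsed, or None where parsing fails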
SuperCowPowers/chains
chains/sources/packet_streamer.py
https://github.com/SuperCowPowers/chains/blob/b0227847b0c43083b456f0bae52daee0b62a3e03/chains/sources/packet_streamer.py#L64-L114
def read_interface(self): """Read Packets from the packet capture interface""" # Spin up the packet capture if self._iface_is_file(): self.pcap = pcapy.open_offline(self.iface_name) else: try: # self.pcap = pcap.pcap(name=self.iface_name, promisc=True, immediate=True) # snaplen (maximum number of bytes to capture _per_packet_) # promiscuous mode (1 for true) # timeout (in milliseconds) self.pcap = pcapy.open_live(self.iface_name, 65536 , 1 , 0) except OSError: try: logger.warning('Could not get promisc mode, turning flag off') self.pcap = pcapy.open_live(self.iface_name, 65536 , 0 , 0) except OSError: log_utils.panic('Could not open interface with any options (may need to be sudo)') # Add the BPF if it's specified if self.bpf: self.pcap.setfilter(self.bpf) print('listening on %s: %s' % (self.iface_name, self.bpf)) # For each packet in the pcap process the contents _packets = 0 while True: # Grab the next header and packet buffer header, raw_buf = self.pcap.next() # If we don't get a packet header break out of the loop if not header: break # Extract the timestamp from the header and yield the packet seconds, micro_sec = header.getts() timestamp = seconds + micro_sec * 10**-6 yield {'timestamp': timestamp, 'raw_buf': raw_buf, 'packet_num': _packets} _packets += 1 # If a max packet count is set, break on it if self.max_packets and _packets >= self.max_packets: break # All done so report and raise a StopIteration try: print('Packet stats: %d received, %d dropped, %d dropped by interface' % self.pcap.stats()) except pcapy.PcapError: print('No stats available...') raise StopIteration
[ "def", "read_interface", "(", "self", ")", ":", "# Spin up the packet capture", "if", "self", ".", "_iface_is_file", "(", ")", ":", "self", ".", "pcap", "=", "pcapy", ".", "open_offline", "(", "self", ".", "iface_name", ")", "else", ":", "try", ":", "# self.pcap = pcap.pcap(name=self.iface_name, promisc=True, immediate=True)", "# snaplen (maximum number of bytes to capture _per_packet_)", "# promiscious mode (1 for true)", "# timeout (in milliseconds)", "self", ".", "pcap", "=", "pcapy", ".", "open_live", "(", "self", ".", "iface_name", ",", "65536", ",", "1", ",", "0", ")", "except", "OSError", ":", "try", ":", "logger", ".", "warning", "(", "'Could not get promisc mode, turning flag off'", ")", "self", ".", "pcap", "=", "pcapy", ".", "open_live", "(", "self", ".", "iface_name", ",", "65536", ",", "0", ",", "0", ")", "except", "OSError", ":", "log_utils", ".", "panic", "(", "'Could no open interface with any options (may need to be sudo)'", ")", "# Add the BPF if it's specified", "if", "self", ".", "bpf", ":", "self", ".", "pcap", ".", "setfilter", "(", "self", ".", "bpf", ")", "print", "(", "'listening on %s: %s'", "%", "(", "self", ".", "iface_name", ",", "self", ".", "bpf", ")", ")", "# For each packet in the pcap process the contents", "_packets", "=", "0", "while", "True", ":", "# Grab the next header and packet buffer", "header", ",", "raw_buf", "=", "self", ".", "pcap", ".", "next", "(", ")", "# If we don't get a packet header break out of the loop", "if", "not", "header", ":", "break", "# Extract the timestamp from the header and yield the packet", "seconds", ",", "micro_sec", "=", "header", ".", "getts", "(", ")", "timestamp", "=", "seconds", "+", "micro_sec", "*", "10", "**", "-", "6", "yield", "{", "'timestamp'", ":", "timestamp", ",", "'raw_buf'", ":", "raw_buf", ",", "'packet_num'", ":", "_packets", "}", "_packets", "+=", "1", "# Is there a max packets set if so break on it", "if", "self", ".", "max_packets", "and", "_packets", ">=", "self", ".", "max_packets", ":", "break", "# All done so report and raise a StopIteration", "try", ":", "print", "(", "'Packet stats: %d received, %d dropped, %d dropped by interface'", "%", "self", ".", "pcap", ".", "stats", "(", ")", ")", "except", "pcapy", ".", "PcapError", ":", "print", "(", "'No stats available...'", ")", "raise", "StopIteration" ]
Read Packets from the packet capture interface
[ "Read", "Packets", "from", "the", "packet", "capture", "interface" ]
python
train
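A consumption sketch; the class name and constructor arguments are assumptions about how this generator is reached:

streamer = PacketStreamer(iface_name='eth0', max_packets=10)  # hypothetical construction
for packet in streamer.read_interface():
    print(packet['packet_num'], packet['timestamp'], len(packet['raw_buf']))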
dw/mitogen
mitogen/core.py
https://github.com/dw/mitogen/blob/a7fdb55e1300a7e0a5e404b09eb730cf9a525da7/mitogen/core.py#L1992-L1997
def start_transmit(self, fd, data=None): """ Cause :meth:`poll` to yield `data` when `fd` is writeable. """ self._wfds[fd] = (data or fd, self._generation) self._update(fd)
[ "def", "start_transmit", "(", "self", ",", "fd", ",", "data", "=", "None", ")", ":", "self", ".", "_wfds", "[", "fd", "]", "=", "(", "data", "or", "fd", ",", "self", ".", "_generation", ")", "self", ".", "_update", "(", "fd", ")" ]
Cause :meth:`poll` to yield `data` when `fd` is writeable.
[ "Cause", ":", "meth", ":", "poll", "to", "yield", "data", "when", "fd", "is", "writeable", "." ]
python
train
intuition-io/intuition
intuition/finance.py
https://github.com/intuition-io/intuition/blob/cd517e6b3b315a743eb4d0d0dc294e264ab913ce/intuition/finance.py#L116-L136
def average_returns(ts, **kwargs): ''' Compute geometric average returns from a returns time series''' average_type = kwargs.get('type', 'net') if average_type == 'net': relative = 0 else: relative = -1 # gross #start = kwargs.get('start', ts.index[0]) #end = kwargs.get('end', ts.index[len(ts.index) - 1]) #delta = kwargs.get('delta', ts.index[1] - ts.index[0]) period = kwargs.get('period', None) if isinstance(period, int): pass #else: #ts = reIndexDF(ts, start=start, end=end, delta=delta) #period = 1 avg_ret = 1 for idx in range(len(ts.index)): if idx % period == 0: avg_ret *= (1 + ts[idx] + relative) return avg_ret - 1
[ "def", "average_returns", "(", "ts", ",", "*", "*", "kwargs", ")", ":", "average_type", "=", "kwargs", ".", "get", "(", "'type'", ",", "'net'", ")", "if", "average_type", "==", "'net'", ":", "relative", "=", "0", "else", ":", "relative", "=", "-", "1", "# gross", "#start = kwargs.get('start', ts.index[0])", "#end = kwargs.get('end', ts.index[len(ts.index) - 1])", "#delta = kwargs.get('delta', ts.index[1] - ts.index[0])", "period", "=", "kwargs", ".", "get", "(", "'period'", ",", "None", ")", "if", "isinstance", "(", "period", ",", "int", ")", ":", "pass", "#else:", "#ts = reIndexDF(ts, start=start, end=end, delta=delta)", "#period = 1", "avg_ret", "=", "1", "for", "idx", "in", "range", "(", "len", "(", "ts", ".", "index", ")", ")", ":", "if", "idx", "%", "period", "==", "0", ":", "avg_ret", "*=", "(", "1", "+", "ts", "[", "idx", "]", "+", "relative", ")", "return", "avg_ret", "-", "1" ]
Compute geometric average returns from a returns time series
[ "Compute", "geometric", "average", "returns", "from", "a", "returns", "time", "serie" ]
python
train
Asana/python-asana
asana/resources/gen/custom_fields.py
https://github.com/Asana/python-asana/blob/6deb7a34495db23f44858e53b6bb2c9eccff7872/asana/resources/gen/custom_fields.py#L73-L83
def delete(self, custom_field, params={}, **options): """A specific, existing custom field can be deleted by making a DELETE request on the URL for that custom field. Returns an empty data record. Parameters ---------- custom_field : {Id} Globally unique identifier for the custom field. """ path = "/custom_fields/%s" % (custom_field) return self.client.delete(path, params, **options)
[ "def", "delete", "(", "self", ",", "custom_field", ",", "params", "=", "{", "}", ",", "*", "*", "options", ")", ":", "path", "=", "\"/custom_fields/%s\"", "%", "(", "custom_field", ")", "return", "self", ".", "client", ".", "delete", "(", "path", ",", "params", ",", "*", "*", "options", ")" ]
A specific, existing custom field can be deleted by making a DELETE request on the URL for that custom field. Returns an empty data record. Parameters ---------- custom_field : {Id} Globally unique identifier for the custom field.
[ "A", "specific", "existing", "custom", "field", "can", "be", "deleted", "by", "making", "a", "DELETE", "request", "on", "the", "URL", "for", "that", "custom", "field", ".", "Returns", "an", "empty", "data", "record", "." ]
python
train
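A usage sketch with python-asana; the access token and custom field id are placeholders:

import asana

client = asana.Client.access_token('PERSONAL_ACCESS_TOKEN')
client.custom_fields.delete(124578)   # returns an empty data record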
nerdvegas/rez
src/rez/util.py
https://github.com/nerdvegas/rez/blob/1d3b846d53b5b5404edfe8ddb9083f9ceec8c5e7/src/rez/util.py#L63-L80
def create_forwarding_script(filepath, module, func_name, *nargs, **kwargs): """Create a 'forwarding' script. A forwarding script is one that executes some arbitrary Rez function. This is used internally by Rez to dynamically create a script that uses Rez, even though the parent environment may not be configured to do so. """ doc = dict( module=module, func_name=func_name) if nargs: doc["nargs"] = nargs if kwargs: doc["kwargs"] = kwargs body = dump_yaml(doc) create_executable_script(filepath, body, "_rez_fwd")
[ "def", "create_forwarding_script", "(", "filepath", ",", "module", ",", "func_name", ",", "*", "nargs", ",", "*", "*", "kwargs", ")", ":", "doc", "=", "dict", "(", "module", "=", "module", ",", "func_name", "=", "func_name", ")", "if", "nargs", ":", "doc", "[", "\"nargs\"", "]", "=", "nargs", "if", "kwargs", ":", "doc", "[", "\"kwargs\"", "]", "=", "kwargs", "body", "=", "dump_yaml", "(", "doc", ")", "create_executable_script", "(", "filepath", ",", "body", ",", "\"_rez_fwd\"", ")" ]
Create a 'forwarding' script. A forwarding script is one that executes some arbitrary Rez function. This is used internally by Rez to dynamically create a script that uses Rez, even though the parent environment may not be configured to do so.
[ "Create", "a", "forwarding", "script", "." ]
python
train
bapakode/OmMongo
ommongo/query.py
https://github.com/bapakode/OmMongo/blob/52b5a5420516dc709f2d2eb065818c7973991ce3/ommongo/query.py#L337-L347
def in_(self, qfield, *values): ''' Check to see that the value of ``qfield`` is one of ``values`` :param qfield: Instances of :class:`ommongo.query_expression.QueryExpression` :param values: Values should be python values which ``qfield`` \ understands ''' # TODO: make sure that this field represents a list qfield = resolve_name(self.type, qfield) self.filter(QueryExpression({ qfield : { '$in' : [qfield.wrap_value(value) for value in values]}})) return self
[ "def", "in_", "(", "self", ",", "qfield", ",", "*", "values", ")", ":", "# TODO: make sure that this field represents a list", "qfield", "=", "resolve_name", "(", "self", ".", "type", ",", "qfield", ")", "self", ".", "filter", "(", "QueryExpression", "(", "{", "qfield", ":", "{", "'$in'", ":", "[", "qfield", ".", "wrap_value", "(", "value", ")", "for", "value", "in", "values", "]", "}", "}", ")", ")", "return", "self" ]
Check to see that the value of ``qfield`` is one of ``values`` :param qfield: Instances of :class:`ommongo.query_expression.QueryExpression` :param values: Values should be python values which ``qfield`` \ understands
[ "Check", "to", "see", "that", "the", "value", "of", "qfield", "is", "one", "of", "values" ]
python
train
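A query sketch, assuming a MongoAlchemy-style session and a hypothetical Post document class with a tags list field:

query = session.query(Post).in_(Post.tags, 'python', 'mongodb')
for post in query:
    print(post.title)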
materialsproject/pymatgen
pymatgen/io/abinit/pseudos.py
https://github.com/materialsproject/pymatgen/blob/4ca558cf72f8d5f8a1f21dfdfc0181a971c186da/pymatgen/io/abinit/pseudos.py#L300-L304
def djrepo_path(self): """The path of the djrepo file. The file itself may not exist.""" root, ext = os.path.splitext(self.filepath) path = root + ".djrepo" return path
[ "def", "djrepo_path", "(", "self", ")", ":", "root", ",", "ext", "=", "os", ".", "path", ".", "splitext", "(", "self", ".", "filepath", ")", "path", "=", "root", "+", "\".djrepo\"", "return", "path" ]
The path of the djrepo file. The file itself may not exist.
[ "The", "path", "of", "the", "djrepo", "file", ".", "None", "if", "file", "does", "not", "exist", "." ]
python
train
PSPC-SPAC-buyandsell/von_anchor
von_anchor/nodepool/protocol.py
https://github.com/PSPC-SPAC-buyandsell/von_anchor/blob/78ac1de67be42a676274f4bf71fe12f66e72f309/von_anchor/nodepool/protocol.py#L43-L48
def get(version: str) -> 'Protocol': """ Return enum instance corresponding to input version value ('1.6' etc.) """ return Protocol.V_13 if version == Protocol.V_13.value.name else Protocol.DEFAULT
[ "def", "get", "(", "version", ":", "str", ")", "->", "'Protocol'", ":", "return", "Protocol", ".", "V_13", "if", "version", "==", "Protocol", ".", "V_13", ".", "value", ".", "name", "else", "Protocol", ".", "DEFAULT" ]
Return enum instance corresponding to input version value ('1.6' etc.)
[ "Return", "enum", "instance", "corresponding", "to", "input", "version", "value", "(", "1", ".", "6", "etc", ".", ")" ]
python
train
RedFantom/ttkwidgets
ttkwidgets/itemscanvas.py
https://github.com/RedFantom/ttkwidgets/blob/02150322060f867b6e59a175522ef84b09168019/ttkwidgets/itemscanvas.py#L75-L93
def left_press(self, event): """ Callback for the press of the left mouse button. Selects a new item and sets its highlightcolor. :param event: Tkinter event """ self.current_coords = self.canvas.canvasx(event.x), self.canvas.canvasy(event.y) self.set_current() if self.current: self.canvas.itemconfigure(self.current, fill=self.item_colors[self.current][1]) self.current = None return results = self.canvas.find_withtag(tk.CURRENT) if len(results) == 0: return self.current = results[0] self.canvas.itemconfigure(self.current, fill=self.item_colors[self.current][2])
[ "def", "left_press", "(", "self", ",", "event", ")", ":", "self", ".", "current_coords", "=", "self", ".", "canvas", ".", "canvasx", "(", "event", ".", "x", ")", ",", "self", ".", "canvas", ".", "canvasy", "(", "event", ".", "y", ")", "self", ".", "set_current", "(", ")", "if", "self", ".", "current", ":", "self", ".", "canvas", ".", "itemconfigure", "(", "self", ".", "current", ",", "fill", "=", "self", ".", "item_colors", "[", "self", ".", "current", "]", "[", "1", "]", ")", "self", ".", "current", "=", "None", "return", "results", "=", "self", ".", "canvas", ".", "find_withtag", "(", "tk", ".", "CURRENT", ")", "if", "len", "(", "results", ")", "is", "0", ":", "return", "self", ".", "current", "=", "results", "[", "0", "]", "self", ".", "canvas", ".", "itemconfigure", "(", "self", ".", "current", ",", "fill", "=", "self", ".", "item_colors", "[", "self", ".", "current", "]", "[", "2", "]", ")" ]
Callback for the press of the left mouse button. Selects a new item and sets its highlightcolor. :param event: Tkinter event
[ "Callback", "for", "the", "press", "of", "the", "left", "mouse", "button", "." ]
python
train
ungarj/mapchete
mapchete/io/vector.py
https://github.com/ungarj/mapchete/blob/d482918d0e66a5b414dff6aa7cc854e01fc60ee4/mapchete/io/vector.py#L221-L292
def write_vector_window( in_data=None, out_schema=None, out_tile=None, out_path=None, bucket_resource=None ): """ Write features to GeoJSON file. Parameters ---------- in_data : features out_schema : dictionary output schema for fiona out_tile : ``BufferedTile`` tile used for output extent out_path : string output path for GeoJSON file """ # Delete existing file. try: os.remove(out_path) except OSError: pass out_features = [] for feature in in_data: try: # clip feature geometry to tile bounding box and append for writing # if the clipped feature still contains a valid geometry for out_geom in multipart_to_singleparts( clean_geometry_type( to_shape(feature["geometry"]).intersection(out_tile.bbox), out_schema["geometry"] ) ): out_features.append({ "geometry": mapping(out_geom), "properties": feature["properties"] }) except Exception as e: logger.warning("failed to prepare geometry for writing: %s", e) continue # write if there are output features if out_features: try: if out_path.startswith("s3://"): # write data to remote file with VectorWindowMemoryFile( tile=out_tile, features=out_features, schema=out_schema, driver="GeoJSON" ) as memfile: logger.debug((out_tile.id, "upload tile", out_path)) bucket_resource.put_object( Key="/".join(out_path.split("/")[3:]), Body=memfile ) else: # write data to local file with fiona.open( out_path, 'w', schema=out_schema, driver="GeoJSON", crs=out_tile.crs.to_dict() ) as dst: logger.debug((out_tile.id, "write tile", out_path)) dst.writerecords(out_features) except Exception as e: logger.error("error while writing file %s: %s", out_path, e) raise else: logger.debug((out_tile.id, "nothing to write", out_path))
[ "def", "write_vector_window", "(", "in_data", "=", "None", ",", "out_schema", "=", "None", ",", "out_tile", "=", "None", ",", "out_path", "=", "None", ",", "bucket_resource", "=", "None", ")", ":", "# Delete existing file.", "try", ":", "os", ".", "remove", "(", "out_path", ")", "except", "OSError", ":", "pass", "out_features", "=", "[", "]", "for", "feature", "in", "in_data", ":", "try", ":", "# clip feature geometry to tile bounding box and append for writing", "# if clipped feature still", "for", "out_geom", "in", "multipart_to_singleparts", "(", "clean_geometry_type", "(", "to_shape", "(", "feature", "[", "\"geometry\"", "]", ")", ".", "intersection", "(", "out_tile", ".", "bbox", ")", ",", "out_schema", "[", "\"geometry\"", "]", ")", ")", ":", "out_features", ".", "append", "(", "{", "\"geometry\"", ":", "mapping", "(", "out_geom", ")", ",", "\"properties\"", ":", "feature", "[", "\"properties\"", "]", "}", ")", "except", "Exception", "as", "e", ":", "logger", ".", "warning", "(", "\"failed to prepare geometry for writing: %s\"", ",", "e", ")", "continue", "# write if there are output features", "if", "out_features", ":", "try", ":", "if", "out_path", ".", "startswith", "(", "\"s3://\"", ")", ":", "# write data to remote file", "with", "VectorWindowMemoryFile", "(", "tile", "=", "out_tile", ",", "features", "=", "out_features", ",", "schema", "=", "out_schema", ",", "driver", "=", "\"GeoJSON\"", ")", "as", "memfile", ":", "logger", ".", "debug", "(", "(", "out_tile", ".", "id", ",", "\"upload tile\"", ",", "out_path", ")", ")", "bucket_resource", ".", "put_object", "(", "Key", "=", "\"/\"", ".", "join", "(", "out_path", ".", "split", "(", "\"/\"", ")", "[", "3", ":", "]", ")", ",", "Body", "=", "memfile", ")", "else", ":", "# write data to local file", "with", "fiona", ".", "open", "(", "out_path", ",", "'w'", ",", "schema", "=", "out_schema", ",", "driver", "=", "\"GeoJSON\"", ",", "crs", "=", "out_tile", ".", "crs", ".", "to_dict", "(", ")", ")", "as", "dst", ":", "logger", ".", "debug", "(", "(", "out_tile", ".", "id", ",", "\"write tile\"", ",", "out_path", ")", ")", "dst", ".", "writerecords", "(", "out_features", ")", "except", "Exception", "as", "e", ":", "logger", ".", "error", "(", "\"error while writing file %s: %s\"", ",", "out_path", ",", "e", ")", "raise", "else", ":", "logger", ".", "debug", "(", "(", "out_tile", ".", "id", ",", "\"nothing to write\"", ",", "out_path", ")", ")" ]
Write features to GeoJSON file. Parameters ---------- in_data : features out_schema : dictionary output schema for fiona out_tile : ``BufferedTile`` tile used for output extent out_path : string output path for GeoJSON file
[ "Write", "features", "to", "GeoJSON", "file", "." ]
python
valid
pudo-attic/loadkit
loadkit/types/table.py
https://github.com/pudo-attic/loadkit/blob/1fb17e69e2ffaf3dac4f40b574c3b7afb2198b7c/loadkit/types/table.py#L21-L40
def store(self): """ Create a context manager to store records in the cleaned table. """ output = tempfile.NamedTemporaryFile(suffix='.json') try: def write(o): line = json.dumps(o, default=json_default) return output.write(line + '\n') yield write output.seek(0) log.info("Uploading generated table (%s)...", self._obj) self.save_file(output.name, destructive=True) finally: try: output.close() except: pass
[ "def", "store", "(", "self", ")", ":", "output", "=", "tempfile", ".", "NamedTemporaryFile", "(", "suffix", "=", "'.json'", ")", "try", ":", "def", "write", "(", "o", ")", ":", "line", "=", "json", ".", "dumps", "(", "o", ",", "default", "=", "json_default", ")", "return", "output", ".", "write", "(", "line", "+", "'\\n'", ")", "yield", "write", "output", ".", "seek", "(", "0", ")", "log", ".", "info", "(", "\"Uploading generated table (%s)...\"", ",", "self", ".", "_obj", ")", "self", ".", "save_file", "(", "output", ".", "name", ",", "destructive", "=", "True", ")", "finally", ":", "try", ":", "output", ".", "close", "(", ")", "except", ":", "pass" ]
Create a context manager to store records in the cleaned table.
[ "Create", "a", "context", "manager", "to", "store", "records", "in", "the", "cleaned", "table", "." ]
python
train
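A usage sketch, assuming store() is exposed as a context manager (the yield suggests a contextlib.contextmanager wrapper):

with table.store() as write:
    write({'name': 'Ada', 'year': 1842})     # rows are illustrative
    write({'name': 'Grace', 'year': 1906})
# on exit, the temporary JSON file is uploaded via save_file()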
tensorflow/cleverhans
cleverhans/utils_tf.py
https://github.com/tensorflow/cleverhans/blob/97488e215760547b81afc53f5e5de8ba7da5bd98/cleverhans/utils_tf.py#L667-L722
def jacobian_augmentation(sess, x, X_sub_prev, Y_sub, grads, lmbda, aug_batch_size=512, feed=None): """ Augment an adversary's substitute training set using the Jacobian of a substitute model to generate new synthetic inputs. See https://arxiv.org/abs/1602.02697 for more details. See cleverhans_tutorials/mnist_blackbox.py for example use case :param sess: TF session in which the substitute model is defined :param x: input TF placeholder for the substitute model :param X_sub_prev: substitute training data available to the adversary at the previous iteration :param Y_sub: substitute training labels available to the adversary at the previous iteration :param grads: Jacobian symbolic graph for the substitute (should be generated using utils_tf.jacobian_graph) :return: augmented substitute data (will need to be labeled by oracle) """ assert len(x.get_shape()) == len(np.shape(X_sub_prev)) assert len(grads) >= np.max(Y_sub) + 1 assert len(X_sub_prev) == len(Y_sub) aug_batch_size = min(aug_batch_size, X_sub_prev.shape[0]) # Prepare input_shape (outside loop) for feeding dictionary below input_shape = list(x.get_shape()) input_shape[0] = 1 # Create new numpy array for adversary training data # with twice as many components on the first dimension. X_sub = np.vstack([X_sub_prev, X_sub_prev]) num_samples = X_sub_prev.shape[0] # Creating and processing as batch for p_idxs in range(0, num_samples, aug_batch_size): X_batch = X_sub_prev[p_idxs:p_idxs + aug_batch_size, ...] feed_dict = {x: X_batch} if feed is not None: feed_dict.update(feed) # Compute sign matrix grad_val = sess.run([tf.sign(grads)], feed_dict=feed_dict)[0] # Create new synthetic point in adversary substitute training set for (indx, ind) in zip(range(p_idxs, p_idxs + X_batch.shape[0]), range(X_batch.shape[0])): X_sub[num_samples + indx] = ( X_batch[ind] + lmbda * grad_val[Y_sub[indx], ind, ...]) # Return augmented training data (needs to be labeled afterwards) return X_sub
[ "def", "jacobian_augmentation", "(", "sess", ",", "x", ",", "X_sub_prev", ",", "Y_sub", ",", "grads", ",", "lmbda", ",", "aug_batch_size", "=", "512", ",", "feed", "=", "None", ")", ":", "assert", "len", "(", "x", ".", "get_shape", "(", ")", ")", "==", "len", "(", "np", ".", "shape", "(", "X_sub_prev", ")", ")", "assert", "len", "(", "grads", ")", ">=", "np", ".", "max", "(", "Y_sub", ")", "+", "1", "assert", "len", "(", "X_sub_prev", ")", "==", "len", "(", "Y_sub", ")", "aug_batch_size", "=", "min", "(", "aug_batch_size", ",", "X_sub_prev", ".", "shape", "[", "0", "]", ")", "# Prepare input_shape (outside loop) for feeding dictionary below", "input_shape", "=", "list", "(", "x", ".", "get_shape", "(", ")", ")", "input_shape", "[", "0", "]", "=", "1", "# Create new numpy array for adversary training data", "# with twice as many components on the first dimension.", "X_sub", "=", "np", ".", "vstack", "(", "[", "X_sub_prev", ",", "X_sub_prev", "]", ")", "num_samples", "=", "X_sub_prev", ".", "shape", "[", "0", "]", "# Creating and processing as batch", "for", "p_idxs", "in", "range", "(", "0", ",", "num_samples", ",", "aug_batch_size", ")", ":", "X_batch", "=", "X_sub_prev", "[", "p_idxs", ":", "p_idxs", "+", "aug_batch_size", ",", "...", "]", "feed_dict", "=", "{", "x", ":", "X_batch", "}", "if", "feed", "is", "not", "None", ":", "feed_dict", ".", "update", "(", "feed", ")", "# Compute sign matrix", "grad_val", "=", "sess", ".", "run", "(", "[", "tf", ".", "sign", "(", "grads", ")", "]", ",", "feed_dict", "=", "feed_dict", ")", "[", "0", "]", "# Create new synthetic point in adversary substitute training set", "for", "(", "indx", ",", "ind", ")", "in", "zip", "(", "range", "(", "p_idxs", ",", "p_idxs", "+", "X_batch", ".", "shape", "[", "0", "]", ")", ",", "range", "(", "X_batch", ".", "shape", "[", "0", "]", ")", ")", ":", "X_sub", "[", "num_samples", "+", "indx", "]", "=", "(", "X_batch", "[", "ind", "]", "+", "lmbda", "*", "grad_val", "[", "Y_sub", "[", "indx", "]", ",", "ind", ",", "...", "]", ")", "# Return augmented training data (needs to be labeled afterwards)", "return", "X_sub" ]
Augment an adversary's substitute training set using the Jacobian of a substitute model to generate new synthetic inputs. See https://arxiv.org/abs/1602.02697 for more details. See cleverhans_tutorials/mnist_blackbox.py for example use case :param sess: TF session in which the substitute model is defined :param x: input TF placeholder for the substitute model :param X_sub_prev: substitute training data available to the adversary at the previous iteration :param Y_sub: substitute training labels available to the adversary at the previous iteration :param grads: Jacobian symbolic graph for the substitute (should be generated using utils_tf.jacobian_graph) :return: augmented substitute data (will need to be labeled by oracle)
[ "Augment", "an", "adversary", "s", "substitute", "training", "set", "using", "the", "Jacobian", "of", "a", "substitute", "model", "to", "generate", "new", "synthetic", "inputs", ".", "See", "https", ":", "//", "arxiv", ".", "org", "/", "abs", "/", "1602", ".", "02697", "for", "more", "details", ".", "See", "cleverhans_tutorials", "/", "mnist_blackbox", ".", "py", "for", "example", "use", "case", ":", "param", "sess", ":", "TF", "session", "in", "which", "the", "substitute", "model", "is", "defined", ":", "param", "x", ":", "input", "TF", "placeholder", "for", "the", "substitute", "model", ":", "param", "X_sub_prev", ":", "substitute", "training", "data", "available", "to", "the", "adversary", "at", "the", "previous", "iteration", ":", "param", "Y_sub", ":", "substitute", "training", "labels", "available", "to", "the", "adversary", "at", "the", "previous", "iteration", ":", "param", "grads", ":", "Jacobian", "symbolic", "graph", "for", "the", "substitute", "(", "should", "be", "generated", "using", "utils_tf", ".", "jacobian_graph", ")", ":", "return", ":", "augmented", "substitute", "data", "(", "will", "need", "to", "be", "labeled", "by", "oracle", ")" ]
python
train
jjgomera/iapws
iapws/iapws97.py
https://github.com/jjgomera/iapws/blob/1e5812aab38212fb8a63736f61cdcfa427d223b1/iapws/iapws97.py#L3854-L3925
def _Bound_Ph(P, h): """Region definition for input P and h Parameters ---------- P : float Pressure, [MPa] h : float Specific enthalpy, [kJ/kg] Returns ------- region : float IAPWS-97 region code References ---------- Wagner, W; Kretzschmar, H-J: International Steam Tables: Properties of Water and Steam Based on the Industrial Formulation IAPWS-IF97; Springer, 2008; doi: 10.1007/978-3-540-74234-0. Fig. 2.5 """ region = None if Pmin <= P <= Ps_623: h14 = _Region1(_TSat_P(P), P)["h"] h24 = _Region2(_TSat_P(P), P)["h"] h25 = _Region2(1073.15, P)["h"] hmin = _Region1(273.15, P)["h"] hmax = _Region5(2273.15, P)["h"] if hmin <= h <= h14: region = 1 elif h14 < h < h24: region = 4 elif h24 <= h <= h25: region = 2 elif h25 < h <= hmax: region = 5 elif Ps_623 < P < Pc: hmin = _Region1(273.15, P)["h"] h13 = _Region1(623.15, P)["h"] h32 = _Region2(_t_P(P), P)["h"] h25 = _Region2(1073.15, P)["h"] hmax = _Region5(2273.15, P)["h"] if hmin <= h <= h13: region = 1 elif h13 < h < h32: try: p34 = _PSat_h(h) except NotImplementedError: p34 = Pc if P < p34: region = 4 else: region = 3 elif h32 <= h <= h25: region = 2 elif h25 < h <= hmax: region = 5 elif Pc <= P <= 100: hmin = _Region1(273.15, P)["h"] h13 = _Region1(623.15, P)["h"] h32 = _Region2(_t_P(P), P)["h"] h25 = _Region2(1073.15, P)["h"] hmax = _Region5(2273.15, P)["h"] if hmin <= h <= h13: region = 1 elif h13 < h < h32: region = 3 elif h32 <= h <= h25: region = 2 elif P <= 50 and h25 <= h <= hmax: region = 5 return region
[ "def", "_Bound_Ph", "(", "P", ",", "h", ")", ":", "region", "=", "None", "if", "Pmin", "<=", "P", "<=", "Ps_623", ":", "h14", "=", "_Region1", "(", "_TSat_P", "(", "P", ")", ",", "P", ")", "[", "\"h\"", "]", "h24", "=", "_Region2", "(", "_TSat_P", "(", "P", ")", ",", "P", ")", "[", "\"h\"", "]", "h25", "=", "_Region2", "(", "1073.15", ",", "P", ")", "[", "\"h\"", "]", "hmin", "=", "_Region1", "(", "273.15", ",", "P", ")", "[", "\"h\"", "]", "hmax", "=", "_Region5", "(", "2273.15", ",", "P", ")", "[", "\"h\"", "]", "if", "hmin", "<=", "h", "<=", "h14", ":", "region", "=", "1", "elif", "h14", "<", "h", "<", "h24", ":", "region", "=", "4", "elif", "h24", "<=", "h", "<=", "h25", ":", "region", "=", "2", "elif", "h25", "<", "h", "<=", "hmax", ":", "region", "=", "5", "elif", "Ps_623", "<", "P", "<", "Pc", ":", "hmin", "=", "_Region1", "(", "273.15", ",", "P", ")", "[", "\"h\"", "]", "h13", "=", "_Region1", "(", "623.15", ",", "P", ")", "[", "\"h\"", "]", "h32", "=", "_Region2", "(", "_t_P", "(", "P", ")", ",", "P", ")", "[", "\"h\"", "]", "h25", "=", "_Region2", "(", "1073.15", ",", "P", ")", "[", "\"h\"", "]", "hmax", "=", "_Region5", "(", "2273.15", ",", "P", ")", "[", "\"h\"", "]", "if", "hmin", "<=", "h", "<=", "h13", ":", "region", "=", "1", "elif", "h13", "<", "h", "<", "h32", ":", "try", ":", "p34", "=", "_PSat_h", "(", "h", ")", "except", "NotImplementedError", ":", "p34", "=", "Pc", "if", "P", "<", "p34", ":", "region", "=", "4", "else", ":", "region", "=", "3", "elif", "h32", "<=", "h", "<=", "h25", ":", "region", "=", "2", "elif", "h25", "<", "h", "<=", "hmax", ":", "region", "=", "5", "elif", "Pc", "<=", "P", "<=", "100", ":", "hmin", "=", "_Region1", "(", "273.15", ",", "P", ")", "[", "\"h\"", "]", "h13", "=", "_Region1", "(", "623.15", ",", "P", ")", "[", "\"h\"", "]", "h32", "=", "_Region2", "(", "_t_P", "(", "P", ")", ",", "P", ")", "[", "\"h\"", "]", "h25", "=", "_Region2", "(", "1073.15", ",", "P", ")", "[", "\"h\"", "]", "hmax", "=", "_Region5", "(", "2273.15", ",", "P", ")", "[", "\"h\"", "]", "if", "hmin", "<=", "h", "<=", "h13", ":", "region", "=", "1", "elif", "h13", "<", "h", "<", "h32", ":", "region", "=", "3", "elif", "h32", "<=", "h", "<=", "h25", ":", "region", "=", "2", "elif", "P", "<=", "50", "and", "h25", "<=", "h", "<=", "hmax", ":", "region", "=", "5", "return", "region" ]
Region definition for input P and h Parameters ---------- P : float Pressure, [MPa] h : float Specific enthalpy, [kJ/kg] Returns ------- region : float IAPWS-97 region code References ---------- Wagner, W; Kretzschmar, H-J: International Steam Tables: Properties of Water and Steam Based on the Industrial Formulation IAPWS-IF97; Springer, 2008; doi: 10.1007/978-3-540-74234-0. Fig. 2.5
[ "Region", "definition", "for", "input", "P", "y", "h" ]
python
train
workforce-data-initiative/skills-utils
skills_utils/io.py
https://github.com/workforce-data-initiative/skills-utils/blob/4cf9b7c2938984f34bbcc33d45482d23c52c7539/skills_utils/io.py#L6-L21
def stream_json_file(local_file): """Stream a JSON file (in JSON-per-line format) Args: local_file (file-like object) an open file-handle that contains a JSON string on each line Yields: (dict) JSON objects """ for i, line in enumerate(local_file): try: data = json.loads(line.decode('utf-8')) yield data except ValueError as e: logging.warning("Skipping line %d due to error: %s", i, e) continue
[ "def", "stream_json_file", "(", "local_file", ")", ":", "for", "i", ",", "line", "in", "enumerate", "(", "local_file", ")", ":", "try", ":", "data", "=", "json", ".", "loads", "(", "line", ".", "decode", "(", "'utf-8'", ")", ")", "yield", "data", "except", "ValueError", "as", "e", ":", "logging", ".", "warning", "(", "\"Skipping line %d due to error: %s\"", ",", "i", ",", "e", ")", "continue" ]
Stream a JSON file (in JSON-per-line format) Args: local_file (file-like object) an open file-handle that contains a JSON string on each line Yields: (dict) JSON objects
[ "Stream", "a", "JSON", "file", "(", "in", "JSON", "-", "per", "-", "line", "format", ")" ]
python
train
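A usage sketch; the file name is an assumption. The generator calls .decode('utf-8') on each line, so the handle should be opened in binary mode:

with open('records.jsonl', 'rb') as f:
    for record in stream_json_file(f):
        print(record)                 # one dict per well-formed JSON line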
tonyfischetti/sake
sakelib/acts.py
https://github.com/tonyfischetti/sake/blob/b7ad20fe8e7137db99a20ac06b8da26492601b00/sakelib/acts.py#L599-L623
def write_dot_file(G, filename): """ Writes the graph G in dot file format for graphviz visualization. Args: G: a Networkx graph filename: a filename to name the dot file """ with io.open(filename, "w") as fh: fh.write("strict digraph DependencyDiagram {\n") edge_list = G.edges() node_list = set(G.nodes()) if edge_list: for edge in sorted(edge_list): source, targ = edge node_list = node_list - {source} node_list = node_list - {targ} line = '"{}" -> "{}";\n' fh.write(line.format(source, targ)) # draw nodes with no links if node_list: for node in sorted(node_list): line = '"{}"\n'.format(node) fh.write(line) fh.write("}")
[ "def", "write_dot_file", "(", "G", ",", "filename", ")", ":", "with", "io", ".", "open", "(", "filename", ",", "\"w\"", ")", "as", "fh", ":", "fh", ".", "write", "(", "\"strict digraph DependencyDiagram {\\n\"", ")", "edge_list", "=", "G", ".", "edges", "(", ")", "node_list", "=", "set", "(", "G", ".", "nodes", "(", ")", ")", "if", "edge_list", ":", "for", "edge", "in", "sorted", "(", "edge_list", ")", ":", "source", ",", "targ", "=", "edge", "node_list", "=", "node_list", "-", "set", "(", "source", ")", "node_list", "=", "node_list", "-", "set", "(", "targ", ")", "line", "=", "'\"{}\" -> \"{}\";\\n'", "fh", ".", "write", "(", "line", ".", "format", "(", "source", ",", "targ", ")", ")", "# draw nodes with no links", "if", "node_list", ":", "for", "node", "in", "sorted", "(", "node_list", ")", ":", "line", "=", "'\"{}\"\\n'", ".", "format", "(", "node", ")", "fh", ".", "write", "(", "line", ")", "fh", ".", "write", "(", "\"}\"", ")" ]
Writes the graph G in dot file format for graphviz visualization. Args: G: a Networkx graph filename: a filename to name the dot file
[ "Writes", "the", "graph", "G", "in", "dot", "file", "format", "for", "graphviz", "visualization", "." ]
python
valid
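A usage sketch with networkx (the graph contents are illustrative):

import networkx as nx

G = nx.DiGraph()
G.add_edge('build', 'test')
G.add_edge('test', 'deploy')
write_dot_file(G, 'dependencies.dot')   # render with: dot -Tpng dependencies.dot -o deps.png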
antiboredom/videogrep
videogrep/videogrep.py
https://github.com/antiboredom/videogrep/blob/faffd3446d96242677757f1af7db23b6dfc429cf/videogrep/videogrep.py#L218-L248
def create_supercut_in_batches(composition, outputfile, padding): """Create & concatenate video clips in groups of size BATCH_SIZE and output finished video file to output directory. """ total_clips = len(composition) start_index = 0 end_index = BATCH_SIZE batch_comp = [] while start_index < total_clips: filename = outputfile + '.tmp' + str(start_index) + '.mp4' try: create_supercut(composition[start_index:end_index], filename, padding) batch_comp.append(filename) gc.collect() start_index += BATCH_SIZE end_index += BATCH_SIZE except: start_index += BATCH_SIZE end_index += BATCH_SIZE clips = [VideoFileClip(filename) for filename in batch_comp] video = concatenate(clips) video.to_videofile(outputfile, codec="libx264", temp_audiofile='temp-audio.m4a', remove_temp=True, audio_codec='aac') # remove partial video files for filename in batch_comp: os.remove(filename) cleanup_log_files(outputfile)
[ "def", "create_supercut_in_batches", "(", "composition", ",", "outputfile", ",", "padding", ")", ":", "total_clips", "=", "len", "(", "composition", ")", "start_index", "=", "0", "end_index", "=", "BATCH_SIZE", "batch_comp", "=", "[", "]", "while", "start_index", "<", "total_clips", ":", "filename", "=", "outputfile", "+", "'.tmp'", "+", "str", "(", "start_index", ")", "+", "'.mp4'", "try", ":", "create_supercut", "(", "composition", "[", "start_index", ":", "end_index", "]", ",", "filename", ",", "padding", ")", "batch_comp", ".", "append", "(", "filename", ")", "gc", ".", "collect", "(", ")", "start_index", "+=", "BATCH_SIZE", "end_index", "+=", "BATCH_SIZE", "except", ":", "start_index", "+=", "BATCH_SIZE", "end_index", "+=", "BATCH_SIZE", "next", "clips", "=", "[", "VideoFileClip", "(", "filename", ")", "for", "filename", "in", "batch_comp", "]", "video", "=", "concatenate", "(", "clips", ")", "video", ".", "to_videofile", "(", "outputfile", ",", "codec", "=", "\"libx264\"", ",", "temp_audiofile", "=", "'temp-audio.m4a'", ",", "remove_temp", "=", "True", ",", "audio_codec", "=", "'aac'", ")", "# remove partial video files", "for", "filename", "in", "batch_comp", ":", "os", ".", "remove", "(", "filename", ")", "cleanup_log_files", "(", "outputfile", ")" ]
Create & concatenate video clips in groups of size BATCH_SIZE and output finished video file to output directory.
[ "Create", "&", "concatenate", "video", "clips", "in", "groups", "of", "size", "BATCH_SIZE", "and", "output", "finished", "video", "file", "to", "output", "directory", "." ]
python
train
paulovn/sparql-kernel
sparqlkernel/utils.py
https://github.com/paulovn/sparql-kernel/blob/1d2d155ff5da72070cb2a98fae33ea8113fac782/sparqlkernel/utils.py#L28-L47
def escape( x, lb=False ): """ Ensure a string does not contain HTML-reserved characters (including double quotes) Optionally also insert a linebreak if the string is too long """ # Insert a linebreak? Roughly around the middle of the string, if lb: l = len(x) if l >= 10: l >>= 1 # middle of the string s1 = x.find( ' ', l ) # first ws to the right s2 = x.rfind( ' ', 0, l ) # first ws to the left if s2 > 0: s = s2 if s1<0 or l-s1 > s2-l else s1 x = x[:s] + '\\n' + x[s+1:] elif s1 > 0: x = x[:s1] + '\\n' + x[s1+1:] # Escape HTML reserved characters return x.replace("&", "&amp;").replace("<", "&lt;").replace(">", "&gt;").replace('"', "&quot;")
[ "def", "escape", "(", "x", ",", "lb", "=", "False", ")", ":", "# Insert a linebreak? Roughly around the middle of the string,", "if", "lb", ":", "l", "=", "len", "(", "x", ")", "if", "l", ">=", "10", ":", "l", ">>=", "1", "# middle of the string", "s1", "=", "x", ".", "find", "(", "' '", ",", "l", ")", "# first ws to the right", "s2", "=", "x", ".", "rfind", "(", "' '", ",", "0", ",", "l", ")", "# first ws to the left", "if", "s2", ">", "0", ":", "s", "=", "s2", "if", "s1", "<", "0", "or", "l", "-", "s1", ">", "s2", "-", "l", "else", "s1", "x", "=", "x", "[", ":", "s", "]", "+", "'\\\\n'", "+", "x", "[", "s", "+", "1", ":", "]", "elif", "s1", ">", "0", ":", "x", "=", "x", "[", ":", "s1", "]", "+", "'\\\\n'", "+", "x", "[", "s1", "+", "1", ":", "]", "# Escape HTML reserved characters", "return", "x", ".", "replace", "(", "\"&\"", ",", "\"&amp;\"", ")", ".", "replace", "(", "\"<\"", ",", "\"&lt;\"", ")", ".", "replace", "(", "\">\"", ",", "\"&gt;\"", ")", ".", "replace", "(", "'\"'", ",", "\"&quot;\"", ")" ]
Ensure a string does not contain HTML-reserved characters (including double quotes) Optionally also insert a linebreak if the string is too long
[ "Ensure", "a", "string", "does", "not", "contain", "HTML", "-", "reserved", "characters", "(", "including", "double", "quotes", ")", "Optionally", "also", "insert", "a", "linebreak", "if", "the", "string", "is", "too", "long" ]
python
train
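A behavior sketch; the outputs in the comments follow directly from the replacement chain:

escape('size < 10 & size > 2')               # -> 'size &lt; 10 &amp; size &gt; 2'
escape('a fairly long axis label', lb=True)  # also inserts a literal '\\n' near a middle space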
cpenv/cpenv
cpenv/utils.py
https://github.com/cpenv/cpenv/blob/afbb569ae04002743db041d3629a5be8c290bd89/cpenv/utils.py#L304-L320
def expand_envvars(env): ''' Expand all environment variables in an environment dict :param env: Environment dict ''' out_env = {} for k, v in env.iteritems(): out_env[k] = Template(v).safe_substitute(env) # Expand twice to make sure we expand everything we possibly can for k, v in out_env.items(): out_env[k] = Template(v).safe_substitute(out_env) return out_env
[ "def", "expand_envvars", "(", "env", ")", ":", "out_env", "=", "{", "}", "for", "k", ",", "v", "in", "env", ".", "iteritems", "(", ")", ":", "out_env", "[", "k", "]", "=", "Template", "(", "v", ")", ".", "safe_substitute", "(", "env", ")", "# Expand twice to make sure we expand everything we possibly can", "for", "k", ",", "v", "in", "out_env", ".", "items", "(", ")", ":", "out_env", "[", "k", "]", "=", "Template", "(", "v", ")", ".", "safe_substitute", "(", "out_env", ")", "return", "out_env" ]
Expand all environment variables in an environment dict :param env: Environment dict
[ "Expand", "all", "environment", "variables", "in", "an", "environment", "dict" ]
python
valid
rkargon/pixelsorter
pixelsorter/paths.py
https://github.com/rkargon/pixelsorter/blob/0775d1e487fbcb023e411e1818ba3290b0e8665e/pixelsorter/paths.py#L28-L36
def horizontal_path(size): """ Creates a generator for progressing horizontally through an image. :param size: A tuple (width, height) of the image size :return: A generator that yields a set of rows through the image. Each row is a generator that yields pixel coordinates. """ width, height = size return (((x, y) for x in range(width)) for y in range(height))
[ "def", "horizontal_path", "(", "size", ")", ":", "width", ",", "height", "=", "size", "return", "(", "(", "(", "x", ",", "y", ")", "for", "x", "in", "range", "(", "width", ")", ")", "for", "y", "in", "range", "(", "height", ")", ")" ]
Creates a generator for progressing horizontally through an image. :param size: A tuple (width, height) of the image size :return: A generator that yields a set of rows through the image. Each row is a generator that yields pixel coordinates.
[ "Creates", "a", "generator", "for", "progressing", "horizontally", "through", "an", "image", ".", ":", "param", "size", ":", "A", "tuple", "(", "width", "height", ")", "of", "the", "image", "size", ":", "return", ":", "A", "generator", "that", "yields", "a", "set", "of", "rows", "through", "the", "image", ".", "Each", "row", "is", "a", "generator", "that", "yields", "pixel", "coordinates", "." ]
python
train
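A concrete trace for a 3x2 image:

for row in horizontal_path((3, 2)):
    print(list(row))
# [(0, 0), (1, 0), (2, 0)]
# [(0, 1), (1, 1), (2, 1)]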
Cue/scales
src/greplin/scales/__init__.py
https://github.com/Cue/scales/blob/0aced26eb050ceb98ee9d5d6cdca8db448666986/src/greplin/scales/__init__.py#L193-L201
def getStat(cls, obj, name): """Gets the stat for the given object with the given name, or None if no such stat exists.""" objClass = type(obj) for theClass in objClass.__mro__: if theClass == object: break for value in theClass.__dict__.values(): if isinstance(value, Stat) and value.getName() == name: return value
[ "def", "getStat", "(", "cls", ",", "obj", ",", "name", ")", ":", "objClass", "=", "type", "(", "obj", ")", "for", "theClass", "in", "objClass", ".", "__mro__", ":", "if", "theClass", "==", "object", ":", "break", "for", "value", "in", "theClass", ".", "__dict__", ".", "values", "(", ")", ":", "if", "isinstance", "(", "value", ",", "Stat", ")", "and", "value", ".", "getName", "(", ")", "==", "name", ":", "return", "value" ]
Gets the stat for the given object with the given name, or None if no such stat exists.
[ "Gets", "the", "stat", "for", "the", "given", "object", "with", "the", "given", "name", "or", "None", "if", "no", "such", "stat", "exists", "." ]
python
train
ArduPilot/MAVProxy
MAVProxy/modules/mavproxy_firmware.py
https://github.com/ArduPilot/MAVProxy/blob/f50bdeff33064876f7dc8dc4683d278ff47f75d5/MAVProxy/modules/mavproxy_firmware.py#L253-L257
def cmd_fw_manifest_purge(self): '''remove all downloaded manifests''' for filepath in self.find_manifests(): os.unlink(filepath) self.manifests_parse()
[ "def", "cmd_fw_manifest_purge", "(", "self", ")", ":", "for", "filepath", "in", "self", ".", "find_manifests", "(", ")", ":", "os", ".", "unlink", "(", "filepath", ")", "self", ".", "manifests_parse", "(", ")" ]
remove all downloaded manifests
[ "remove", "all", "downloaded", "manifests" ]
python
train
bretth/djset
djset/djset.py
https://github.com/bretth/djset/blob/e04cbcadc311f6edec50a718415d0004aa304034/djset/djset.py#L38-L57
def get(self, key, prompt_default='', prompt_help=''): """Return a value from the environ or keyring""" value = os.getenv(key) if not value: ns = self.namespace(key) value = self.keyring.get_password(ns, key) else: ns = 'environ' if not value: ns = self.namespace(key, glob=True) value = self.keyring.get_password(ns, key) if not value: ns = '' if not value and self.prompt: value = self._prompt_for_value(key, prompt_default, prompt_help) if value: self.set(key, value) if ns: self.kns[key] = ns return value
[ "def", "get", "(", "self", ",", "key", ",", "prompt_default", "=", "''", ",", "prompt_help", "=", "''", ")", ":", "value", "=", "os", ".", "getenv", "(", "key", ")", "if", "not", "value", ":", "ns", "=", "self", ".", "namespace", "(", "key", ")", "value", "=", "self", ".", "keyring", ".", "get_password", "(", "ns", ",", "key", ")", "else", ":", "ns", "=", "'environ'", "if", "not", "value", ":", "ns", "=", "self", ".", "namespace", "(", "key", ",", "glob", "=", "True", ")", "value", "=", "self", ".", "keyring", ".", "get_password", "(", "ns", ",", "key", ")", "if", "not", "value", ":", "ns", "=", "''", "if", "not", "value", "and", "self", ".", "prompt", ":", "value", "=", "self", ".", "_prompt_for_value", "(", "key", ",", "prompt_default", ",", "prompt_help", ")", "if", "value", ":", "self", ".", "set", "(", "key", ",", "value", ")", "if", "ns", ":", "self", ".", "kns", "[", "key", "]", "=", "ns", "return", "value" ]
Return a value from the environ or keyring
[ "Return", "a", "value", "from", "the", "environ", "or", "keyring" ]
python
train
swharden/SWHLab
swhlab/indexing/index_OLD.py
https://github.com/swharden/SWHLab/blob/a86c3c65323cec809a4bd4f81919644927094bf5/swhlab/indexing/index_OLD.py#L107-L131
def html_index(self,launch=False,showChildren=False): """ generate list of cells with links. keep this simple. automatically generates splash page and regenerates frames. """ self.makePics() # ensure all pics are converted # generate menu html='<a href="index_splash.html" target="content">./%s/</a><br>'%os.path.basename(self.abfFolder) for ID in smartSort(self.fnamesByCell.keys()): link='' if ID+".html" in self.fnames2: link='href="%s.html" target="content"'%ID html+=('<a %s>%s</a><br>'%(link,ID)) # show the parent ABF (ID) if showChildren: for fname in self.fnamesByCell[ID]: thisID=os.path.splitext(fname)[0] files2=[x for x in self.fnames2 if x.startswith(thisID) and not x.endswith(".html")] html+='<i>%s</i>'%thisID # show the child ABF if len(files2): html+=' (%s)'%len(files2) # show number of supporting files html+='<br>' html+="<br>" style.save(html,self.abfFolder2+"/index_menu.html") self.html_index_splash() # make splash page style.frames(self.abfFolder2+"/index.html",launch=launch)
[ "def", "html_index", "(", "self", ",", "launch", "=", "False", ",", "showChildren", "=", "False", ")", ":", "self", ".", "makePics", "(", ")", "# ensure all pics are converted", "# generate menu", "html", "=", "'<a href=\"index_splash.html\" target=\"content\">./%s/</a><br>'", "%", "os", ".", "path", ".", "basename", "(", "self", ".", "abfFolder", ")", "for", "ID", "in", "smartSort", "(", "self", ".", "fnamesByCell", ".", "keys", "(", ")", ")", ":", "link", "=", "''", "if", "ID", "+", "\".html\"", "in", "self", ".", "fnames2", ":", "link", "=", "'href=\"%s.html\" target=\"content\"'", "%", "ID", "html", "+=", "(", "'<a %s>%s</a><br>'", "%", "(", "link", ",", "ID", ")", ")", "# show the parent ABF (ID)", "if", "showChildren", ":", "for", "fname", "in", "self", ".", "fnamesByCell", "[", "ID", "]", ":", "thisID", "=", "os", ".", "path", ".", "splitext", "(", "fname", ")", "[", "0", "]", "files2", "=", "[", "x", "for", "x", "in", "self", ".", "fnames2", "if", "x", ".", "startswith", "(", "thisID", ")", "and", "not", "x", ".", "endswith", "(", "\".html\"", ")", "]", "html", "+=", "'<i>%s</i>'", "%", "thisID", "# show the child ABF", "if", "len", "(", "files2", ")", ":", "html", "+=", "' (%s)'", "%", "len", "(", "files2", ")", "# show number of supporting files", "html", "+=", "'<br>'", "html", "+=", "\"<br>\"", "style", ".", "save", "(", "html", ",", "self", ".", "abfFolder2", "+", "\"/index_menu.html\"", ")", "self", ".", "html_index_splash", "(", ")", "# make splash page", "style", ".", "frames", "(", "self", ".", "abfFolder2", "+", "\"/index.html\"", ",", "launch", "=", "launch", ")" ]
generate list of cells with links. keep this simple. automatically generates splash page and regenerates frames.
[ "generate", "list", "of", "cells", "with", "links", ".", "keep", "this", "simple", ".", "automatically", "generates", "splash", "page", "and", "regnerates", "frames", "." ]
python
valid
iotile/coretools
iotilebuild/iotile/build/config/scons-local-3.0.1/SCons/Tool/docbook/__init__.py
https://github.com/iotile/coretools/blob/2d794f5f1346b841b0dcd16c9d284e9bf2f3c6ec/iotilebuild/iotile/build/config/scons-local-3.0.1/SCons/Tool/docbook/__init__.py#L809-L829
def DocbookXslt(env, target, source=None, *args, **kw): """ A pseudo-Builder, applying a simple XSL transformation to the input file. """ # Init list of targets/sources target, source = __extend_targets_sources(target, source) # Init XSL stylesheet kw['DOCBOOK_XSL'] = kw.get('xsl', 'transform.xsl') # Setup builder __builder = __select_builder(__lxml_builder, __libxml2_builder, __xsltproc_builder) # Create targets result = [] for t,s in zip(target,source): r = __builder.__call__(env, t, s, **kw) env.Depends(r, kw['DOCBOOK_XSL']) result.extend(r) return result
[ "def", "DocbookXslt", "(", "env", ",", "target", ",", "source", "=", "None", ",", "*", "args", ",", "*", "*", "kw", ")", ":", "# Init list of targets/sources", "target", ",", "source", "=", "__extend_targets_sources", "(", "target", ",", "source", ")", "# Init XSL stylesheet", "kw", "[", "'DOCBOOK_XSL'", "]", "=", "kw", ".", "get", "(", "'xsl'", ",", "'transform.xsl'", ")", "# Setup builder", "__builder", "=", "__select_builder", "(", "__lxml_builder", ",", "__libxml2_builder", ",", "__xsltproc_builder", ")", "# Create targets", "result", "=", "[", "]", "for", "t", ",", "s", "in", "zip", "(", "target", ",", "source", ")", ":", "r", "=", "__builder", ".", "__call__", "(", "env", ",", "t", ",", "s", ",", "*", "*", "kw", ")", "env", ".", "Depends", "(", "r", ",", "kw", "[", "'DOCBOOK_XSL'", "]", ")", "result", ".", "extend", "(", "r", ")", "return", "result" ]
A pseudo-Builder, applying a simple XSL transformation to the input file.
[ "A", "pseudo", "-", "Builder", "applying", "a", "simple", "XSL", "transformation", "to", "the", "input", "file", "." ]
python
train
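A sketch of how this pseudo-builder might be invoked; the file names are made up, the 'xsl' keyword maps to DOCBOOK_XSL as shown in the code above, and the fragment runs inside an SConstruct (where Environment is predefined), not as plain Python.

# Hypothetical SConstruct fragment
env = Environment(tools=['docbook'])
env.DocbookXslt('manual.html', 'manual.xml', xsl='html.xsl')  # defaults to 'transform.xsl' if omitted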
gem/oq-engine
openquake/hazardlib/gsim/utils_swiss_gmpe.py
https://github.com/gem/oq-engine/blob/8294553a0b8aba33fd96437a35065d03547d0040/openquake/hazardlib/gsim/utils_swiss_gmpe.py#L81-L99
def _get_corr_stddevs(C, tau_ss, stddev_types, num_sites, phi_ss, NL=None, tau_value=None): """ Return standard deviations adjusted for single station sigma as the total standard deviation - as proposed to be used in the Swiss Hazard Model [2014]. """ stddevs = [] temp_stddev = phi_ss * phi_ss if tau_value is not None and NL is not None: temp_stddev = temp_stddev + tau_value * tau_value * ((1 + NL) ** 2) else: temp_stddev = temp_stddev + C[tau_ss] * C[tau_ss] for stddev_type in stddev_types: if stddev_type == const.StdDev.TOTAL: stddevs.append(np.sqrt(temp_stddev) + np.zeros(num_sites)) return stddevs
[ "def", "_get_corr_stddevs", "(", "C", ",", "tau_ss", ",", "stddev_types", ",", "num_sites", ",", "phi_ss", ",", "NL", "=", "None", ",", "tau_value", "=", "None", ")", ":", "stddevs", "=", "[", "]", "temp_stddev", "=", "phi_ss", "*", "phi_ss", "if", "tau_value", "is", "not", "None", "and", "NL", "is", "not", "None", ":", "temp_stddev", "=", "temp_stddev", "+", "tau_value", "*", "tau_value", "*", "(", "(", "1", "+", "NL", ")", "**", "2", ")", "else", ":", "temp_stddev", "=", "temp_stddev", "+", "C", "[", "tau_ss", "]", "*", "C", "[", "tau_ss", "]", "for", "stddev_type", "in", "stddev_types", ":", "if", "stddev_type", "==", "const", ".", "StdDev", ".", "TOTAL", ":", "stddevs", ".", "append", "(", "np", ".", "sqrt", "(", "temp_stddev", ")", "+", "np", ".", "zeros", "(", "num_sites", ")", ")", "return", "stddevs" ]
Return standard deviations adjusted for single station sigma as the total standard deviation - as proposed to be used in the Swiss Hazard Model [2014].
[ "Return", "standard", "deviations", "adjusted", "for", "single", "station", "sigma", "as", "the", "total", "standard", "deviation", "-", "as", "proposed", "to", "be", "used", "in", "the", "Swiss", "Hazard", "Model", "[", "2014", "]", "." ]
python
train
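The adjusted total-sigma formula from the record above, evaluated in isolation; the phi_ss, tau, and NL values are invented for illustration.

import numpy as np

phi_ss, tau, NL = 0.46, 0.35, 0.1              # hypothetical single-station terms
total = np.sqrt(phi_ss ** 2 + tau ** 2 * (1 + NL) ** 2)  # total sigma with (1 + NL)^2 scaling
print(total)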
mokelly/wabbit_wappa
wabbit_wappa/__init__.py
https://github.com/mokelly/wabbit_wappa/blob/dfe5bf6d6036079e473c4148335cd6f339d0299b/wabbit_wappa/__init__.py#L101-L114
def add_features(self, features): """Add features to this namespace. features: An iterable of features. A feature may be either 1) A VW label (not containing characters from escape_dict.keys(), unless 'escape' mode is on) 2) A tuple (label, value) where value is any float """ for feature in features: if isinstance(feature, basestring): label = feature value = None else: label, value = feature self.add_feature(label, value)
[ "def", "add_features", "(", "self", ",", "features", ")", ":", "for", "feature", "in", "features", ":", "if", "isinstance", "(", "feature", ",", "basestring", ")", ":", "label", "=", "feature", "value", "=", "None", "else", ":", "label", ",", "value", "=", "feature", "self", ".", "add_feature", "(", "label", ",", "value", ")" ]
Add features to this namespace. features: An iterable of features. A feature may be either 1) A VW label (not containing characters from escape_dict.keys(), unless 'escape' mode is on) 2) A tuple (label, value) where value is any float
[ "Add", "features", "to", "this", "namespace", ".", "features", ":", "An", "iterable", "of", "features", ".", "A", "feature", "may", "be", "either", "1", ")", "A", "VW", "label", "(", "not", "containing", "characters", "from", "escape_dict", ".", "keys", "()", "unless", "escape", "mode", "is", "on", ")", "2", ")", "A", "tuple", "(", "label", "value", ")", "where", "value", "is", "any", "float" ]
python
train
pypa/pipenv
pipenv/vendor/distlib/manifest.py
https://github.com/pypa/pipenv/blob/cae8d76c210b9777e90aab76e9c4b0e53bb19cde/pipenv/vendor/distlib/manifest.py#L57-L82
def findall(self): """Find all files under the base and set ``allfiles`` to the absolute pathnames of files found. """ from stat import S_ISREG, S_ISDIR, S_ISLNK self.allfiles = allfiles = [] root = self.base stack = [root] pop = stack.pop push = stack.append while stack: root = pop() names = os.listdir(root) for name in names: fullname = os.path.join(root, name) # Avoid excess stat calls -- just one will do, thank you! stat = os.stat(fullname) mode = stat.st_mode if S_ISREG(mode): allfiles.append(fsdecode(fullname)) elif S_ISDIR(mode) and not S_ISLNK(mode): push(fullname)
[ "def", "findall", "(", "self", ")", ":", "from", "stat", "import", "S_ISREG", ",", "S_ISDIR", ",", "S_ISLNK", "self", ".", "allfiles", "=", "allfiles", "=", "[", "]", "root", "=", "self", ".", "base", "stack", "=", "[", "root", "]", "pop", "=", "stack", ".", "pop", "push", "=", "stack", ".", "append", "while", "stack", ":", "root", "=", "pop", "(", ")", "names", "=", "os", ".", "listdir", "(", "root", ")", "for", "name", "in", "names", ":", "fullname", "=", "os", ".", "path", ".", "join", "(", "root", ",", "name", ")", "# Avoid excess stat calls -- just one will do, thank you!", "stat", "=", "os", ".", "stat", "(", "fullname", ")", "mode", "=", "stat", ".", "st_mode", "if", "S_ISREG", "(", "mode", ")", ":", "allfiles", ".", "append", "(", "fsdecode", "(", "fullname", ")", ")", "elif", "S_ISDIR", "(", "mode", ")", "and", "not", "S_ISLNK", "(", "mode", ")", ":", "push", "(", "fullname", ")" ]
Find all files under the base and set ``allfiles`` to the absolute pathnames of files found.
[ "Find", "all", "files", "under", "the", "base", "and", "set", "allfiles", "to", "the", "absolute", "pathnames", "of", "files", "found", "." ]
python
train
gofed/gofedlib
gofedlib/distribution/clients/pkgdb/client.py
https://github.com/gofed/gofedlib/blob/0674c248fe3d8706f98f912996b65af469f96b10/gofedlib/distribution/clients/pkgdb/client.py#L27-L39
def packageExists(self, package): """Check if the package already exists :param package: package name :type package: string """ url = "%s/packages" % self.base_url params = {"pattern": package} response = requests.get(url, params=params) if response.status_code != requests.codes.ok: return False return True
[ "def", "packageExists", "(", "self", ",", "package", ")", ":", "url", "=", "\"%s/packages\"", "%", "self", ".", "base_url", "params", "=", "{", "\"pattern\"", ":", "package", "}", "response", "=", "requests", ".", "get", "(", "url", ",", "params", "=", "params", ")", "if", "response", ".", "status_code", "!=", "requests", ".", "codes", ".", "ok", ":", "return", "False", "return", "True" ]
Check if the package already exists :param package: package name :type package: string
[ "Check", "if", "the", "package", "already", "exists" ]
python
train
kensho-technologies/graphql-compiler
graphql_compiler/compiler/ir_lowering_match/between_lowering.py
https://github.com/kensho-technologies/graphql-compiler/blob/f6079c6d10f64932f6b3af309b79bcea2123ca8f/graphql_compiler/compiler/ir_lowering_match/between_lowering.py#L36-L70
def _construct_field_operator_expression_dict(expression_list): """Construct a mapping from local fields to specified operators, and corresponding expressions. Args: expression_list: list of expressions to analyze Returns: local_field_to_expressions: dict mapping local field names to "operator -> list of BinaryComposition" dictionaries, for each BinaryComposition operator involving the LocalField remaining_expression_list: list of remaining expressions that were *not* BinaryCompositions on a LocalField using any of the between operators """ between_operators = (u'<=', u'>=') inverse_operator = {u'>=': u'<=', u'<=': u'>='} local_field_to_expressions = {} remaining_expression_list = deque([]) for expression in expression_list: if all(( isinstance(expression, BinaryComposition), expression.operator in between_operators, isinstance(expression.left, LocalField) or isinstance(expression.right, LocalField) )): if isinstance(expression.right, LocalField): new_operator = inverse_operator[expression.operator] new_expression = BinaryComposition(new_operator, expression.right, expression.left) else: new_expression = expression field_name = new_expression.left.field_name expressions_dict = local_field_to_expressions.setdefault(field_name, {}) expressions_dict.setdefault(new_expression.operator, []).append(new_expression) else: remaining_expression_list.append(expression) return local_field_to_expressions, remaining_expression_list
[ "def", "_construct_field_operator_expression_dict", "(", "expression_list", ")", ":", "between_operators", "=", "(", "u'<='", ",", "u'>='", ")", "inverse_operator", "=", "{", "u'>='", ":", "u'<='", ",", "u'<='", ":", "u'>='", "}", "local_field_to_expressions", "=", "{", "}", "remaining_expression_list", "=", "deque", "(", "[", "]", ")", "for", "expression", "in", "expression_list", ":", "if", "all", "(", "(", "isinstance", "(", "expression", ",", "BinaryComposition", ")", ",", "expression", ".", "operator", "in", "between_operators", ",", "isinstance", "(", "expression", ".", "left", ",", "LocalField", ")", "or", "isinstance", "(", "expression", ".", "right", ",", "LocalField", ")", ")", ")", ":", "if", "isinstance", "(", "expression", ".", "right", ",", "LocalField", ")", ":", "new_operator", "=", "inverse_operator", "[", "expression", ".", "operator", "]", "new_expression", "=", "BinaryComposition", "(", "new_operator", ",", "expression", ".", "right", ",", "expression", ".", "left", ")", "else", ":", "new_expression", "=", "expression", "field_name", "=", "new_expression", ".", "left", ".", "field_name", "expressions_dict", "=", "local_field_to_expressions", ".", "setdefault", "(", "field_name", ",", "{", "}", ")", "expressions_dict", ".", "setdefault", "(", "new_expression", ".", "operator", ",", "[", "]", ")", ".", "append", "(", "new_expression", ")", "else", ":", "remaining_expression_list", ".", "append", "(", "expression", ")", "return", "local_field_to_expressions", ",", "remaining_expression_list" ]
Construct a mapping from local fields to specified operators, and corresponding expressions. Args: expression_list: list of expressions to analyze Returns: local_field_to_expressions: dict mapping local field names to "operator -> list of BinaryComposition" dictionaries, for each BinaryComposition operator involving the LocalField remaining_expression_list: list of remaining expressions that were *not* BinaryCompositions on a LocalField using any of the between operators
[ "Construct", "a", "mapping", "from", "local", "fields", "to", "specified", "operators", "and", "corresponding", "expressions", "." ]
python
train
rgs1/zk_shell
zk_shell/shell.py
https://github.com/rgs1/zk_shell/blob/bbf34fdfcf1f81100e2a5816fad8af6afc782a54/zk_shell/shell.py#L2503-L2588
def do_json_dupes_for_keys(self, params): """ \x1b[1mNAME\x1b[0m json_dupes_for_keys - Gets the duplicate znodes for the given keys \x1b[1mSYNOPSIS\x1b[0m json_dupes_for_keys <path> <keys> [prefix] [report_errors] [first] \x1b[1mDESCRIPTION\x1b[0m Znodes with duplicated keys are sorted and all but the first (original) one are printed. \x1b[1mOPTIONS\x1b[0m * prefix: only include matching znodes * report_errors: turn on error reporting (i.e.: bad JSON in a znode) * first: print the first, non duplicated, znode too. \x1b[1mEXAMPLES\x1b[0m > json_cat /configs/primary_service true member_0000000186 { "status": "ALIVE", "serviceEndpoint": { "http": { "host": "10.0.0.2", "port": 31994 } }, "shard": 0 } member_0000000187 { "status": "ALIVE", "serviceEndpoint": { "http": { "host": "10.0.0.2", "port": 31994 } }, "shard": 0 } > json_dupes_for_keys /configs/primary_service shard member_0000000187 """ try: Keys.validate(params.keys) except Keys.Bad as ex: self.show_output(str(ex)) return path_map = PathMap(self._zk, params.path) dupes_by_path = defaultdict(lambda: defaultdict(list)) for path, data in path_map.get(): parent, child = split(path) if not child.startswith(params.prefix): continue try: value = Keys.value(json_deserialize(data), params.keys) dupes_by_path[parent][value].append(path) except BadJSON as ex: if params.report_errors: self.show_output("Path %s has bad JSON.", path) except Keys.Missing as ex: if params.report_errors: self.show_output("Path %s is missing key %s.", path, ex) dupes = [] for _, paths_by_value in dupes_by_path.items(): for _, paths in paths_by_value.items(): if len(paths) > 1: paths.sort() paths = paths if params.first else paths[1:] for path in paths: idx = bisect.bisect(dupes, path) dupes.insert(idx, path) for dup in dupes: self.show_output(dup) # if no dupes were found we call it a failure (i.e.: exit(1) from --run-once) if len(dupes) == 0: return False
[ "def", "do_json_dupes_for_keys", "(", "self", ",", "params", ")", ":", "try", ":", "Keys", ".", "validate", "(", "params", ".", "keys", ")", "except", "Keys", ".", "Bad", "as", "ex", ":", "self", ".", "show_output", "(", "str", "(", "ex", ")", ")", "return", "path_map", "=", "PathMap", "(", "self", ".", "_zk", ",", "params", ".", "path", ")", "dupes_by_path", "=", "defaultdict", "(", "lambda", ":", "defaultdict", "(", "list", ")", ")", "for", "path", ",", "data", "in", "path_map", ".", "get", "(", ")", ":", "parent", ",", "child", "=", "split", "(", "path", ")", "if", "not", "child", ".", "startswith", "(", "params", ".", "prefix", ")", ":", "continue", "try", ":", "value", "=", "Keys", ".", "value", "(", "json_deserialize", "(", "data", ")", ",", "params", ".", "keys", ")", "dupes_by_path", "[", "parent", "]", "[", "value", "]", ".", "append", "(", "path", ")", "except", "BadJSON", "as", "ex", ":", "if", "params", ".", "report_errors", ":", "self", ".", "show_output", "(", "\"Path %s has bad JSON.\"", ",", "path", ")", "except", "Keys", ".", "Missing", "as", "ex", ":", "if", "params", ".", "report_errors", ":", "self", ".", "show_output", "(", "\"Path %s is missing key %s.\"", ",", "path", ",", "ex", ")", "dupes", "=", "[", "]", "for", "_", ",", "paths_by_value", "in", "dupes_by_path", ".", "items", "(", ")", ":", "for", "_", ",", "paths", "in", "paths_by_value", ".", "items", "(", ")", ":", "if", "len", "(", "paths", ")", ">", "1", ":", "paths", ".", "sort", "(", ")", "paths", "=", "paths", "if", "params", ".", "first", "else", "paths", "[", "1", ":", "]", "for", "path", "in", "paths", ":", "idx", "=", "bisect", ".", "bisect", "(", "dupes", ",", "path", ")", "dupes", ".", "insert", "(", "idx", ",", "path", ")", "for", "dup", "in", "dupes", ":", "self", ".", "show_output", "(", "dup", ")", "# if no dupes were found we call it a failure (i.e.: exit(1) from --run-once)", "if", "len", "(", "dupes", ")", "==", "0", ":", "return", "False" ]
\x1b[1mNAME\x1b[0m json_dupes_for_keys - Gets the duplicate znodes for the given keys \x1b[1mSYNOPSIS\x1b[0m json_dupes_for_keys <path> <keys> [prefix] [report_errors] [first] \x1b[1mDESCRIPTION\x1b[0m Znodes with duplicated keys are sorted and all but the first (original) one are printed. \x1b[1mOPTIONS\x1b[0m * prefix: only include matching znodes * report_errors: turn on error reporting (i.e.: bad JSON in a znode) * first: print the first, non duplicated, znode too. \x1b[1mEXAMPLES\x1b[0m > json_cat /configs/primary_service true member_0000000186 { "status": "ALIVE", "serviceEndpoint": { "http": { "host": "10.0.0.2", "port": 31994 } }, "shard": 0 } member_0000000187 { "status": "ALIVE", "serviceEndpoint": { "http": { "host": "10.0.0.2", "port": 31994 } }, "shard": 0 } > json_dupes_for_keys /configs/primary_service shard member_0000000187
[ "\\", "x1b", "[", "1mNAME", "\\", "x1b", "[", "0m", "json_duples_for_keys", "-", "Gets", "the", "duplicate", "znodes", "for", "the", "given", "keys" ]
python
train
marshmallow-code/apispec
src/apispec/ext/marshmallow/__init__.py
https://github.com/marshmallow-code/apispec/blob/e92ceffd12b2e392b8d199ed314bd2a7e6512dff/src/apispec/ext/marshmallow/__init__.py#L100-L107
def resolve_schema_in_request_body(self, request_body): """Function to resolve a schema in a requestBody object - modifies the request body dict to convert Marshmallow Schema object or class into dict """ content = request_body["content"] for content_type in content: schema = content[content_type]["schema"] content[content_type]["schema"] = self.openapi.resolve_schema_dict(schema)
[ "def", "resolve_schema_in_request_body", "(", "self", ",", "request_body", ")", ":", "content", "=", "request_body", "[", "\"content\"", "]", "for", "content_type", "in", "content", ":", "schema", "=", "content", "[", "content_type", "]", "[", "\"schema\"", "]", "content", "[", "content_type", "]", "[", "\"schema\"", "]", "=", "self", ".", "openapi", ".", "resolve_schema_dict", "(", "schema", ")" ]
Function to resolve a schema in a requestBody object - modifies the request body dict to convert Marshmallow Schema object or class into dict
[ "Function", "to", "resolve", "a", "schema", "in", "a", "requestBody", "object", "-", "modifies", "then", "response", "dict", "to", "convert", "Marshmallow", "Schema", "object", "or", "class", "into", "dict" ]
python
train
tanghaibao/jcvi
jcvi/utils/webcolors.py
https://github.com/tanghaibao/jcvi/blob/d2e31a77b6ade7f41f3b321febc2b4744d1cdeca/jcvi/utils/webcolors.py#L154-L160
def _reversedict(d): """ Internal helper for generating reverse mappings; given a dictionary, returns a new dictionary with keys and values swapped. """ return dict(list(zip(list(d.values()), list(d.keys()))))
[ "def", "_reversedict", "(", "d", ")", ":", "return", "dict", "(", "list", "(", "zip", "(", "list", "(", "d", ".", "values", "(", ")", ")", ",", "list", "(", "d", ".", "keys", "(", ")", ")", ")", ")", ")" ]
Internal helper for generating reverse mappings; given a dictionary, returns a new dictionary with keys and values swapped.
[ "Internal", "helper", "for", "generating", "reverse", "mappings", ";", "given", "a", "dictionary", "returns", "a", "new", "dictionary", "with", "keys", "and", "values", "swapped", "." ]
python
train
libtcod/python-tcod
tcod/libtcodpy.py
https://github.com/libtcod/python-tcod/blob/8ba10c5cfb813eaf3e834de971ba2d6acb7838e4/tcod/libtcodpy.py#L3187-L3198
def map_set_properties( m: tcod.map.Map, x: int, y: int, isTrans: bool, isWalk: bool ) -> None: """Set the properties of a single cell. .. note:: This function is slow. .. deprecated:: 4.5 Use :any:`tcod.map.Map.transparent` and :any:`tcod.map.Map.walkable` arrays to set these properties. """ lib.TCOD_map_set_properties(m.map_c, x, y, isTrans, isWalk)
[ "def", "map_set_properties", "(", "m", ":", "tcod", ".", "map", ".", "Map", ",", "x", ":", "int", ",", "y", ":", "int", ",", "isTrans", ":", "bool", ",", "isWalk", ":", "bool", ")", "->", "None", ":", "lib", ".", "TCOD_map_set_properties", "(", "m", ".", "map_c", ",", "x", ",", "y", ",", "isTrans", ",", "isWalk", ")" ]
Set the properties of a single cell. .. note:: This function is slow. .. deprecated:: 4.5 Use :any:`tcod.map.Map.transparent` and :any:`tcod.map.Map.walkable` arrays to set these properties.
[ "Set", "the", "properties", "of", "a", "single", "cell", "." ]
python
train
coursera-dl/coursera-dl
coursera/api.py
https://github.com/coursera-dl/coursera-dl/blob/9b434bcf3c4011bf3181429fe674633ae5fb7d4d/coursera/api.py#L225-L249
def _convert_markup_images(self, soup): """ Convert images of instructions markup. Images are downloaded, base64-encoded and inserted into <img> tags. @param soup: BeautifulSoup instance. @type soup: BeautifulSoup """ # 6. Replace <img> assets with actual image contents images = [image for image in soup.find_all('img') if image.attrs.get('assetid') is not None] if not images: return # Get assetid attribute from all images asset_ids = [image.attrs.get('assetid') for image in images] self._asset_retriever(asset_ids) for image in images: # Encode each image using base64 asset = self._asset_retriever[image['assetid']] if asset.data is not None: encoded64 = base64.b64encode(asset.data).decode() image['src'] = 'data:%s;base64,%s' % ( asset.content_type, encoded64)
[ "def", "_convert_markup_images", "(", "self", ",", "soup", ")", ":", "# 6. Replace <img> assets with actual image contents", "images", "=", "[", "image", "for", "image", "in", "soup", ".", "find_all", "(", "'img'", ")", "if", "image", ".", "attrs", ".", "get", "(", "'assetid'", ")", "is", "not", "None", "]", "if", "not", "images", ":", "return", "# Get assetid attribute from all images", "asset_ids", "=", "[", "image", ".", "attrs", ".", "get", "(", "'assetid'", ")", "for", "image", "in", "images", "]", "self", ".", "_asset_retriever", "(", "asset_ids", ")", "for", "image", "in", "images", ":", "# Encode each image using base64", "asset", "=", "self", ".", "_asset_retriever", "[", "image", "[", "'assetid'", "]", "]", "if", "asset", ".", "data", "is", "not", "None", ":", "encoded64", "=", "base64", ".", "b64encode", "(", "asset", ".", "data", ")", ".", "decode", "(", ")", "image", "[", "'src'", "]", "=", "'data:%s;base64,%s'", "%", "(", "asset", ".", "content_type", ",", "encoded64", ")" ]
Convert images of instructions markup. Images are downloaded, base64-encoded and inserted into <img> tags. @param soup: BeautifulSoup instance. @type soup: BeautifulSoup
[ "Convert", "images", "of", "instructions", "markup", ".", "Images", "are", "downloaded", "base64", "-", "encoded", "and", "inserted", "into", "<img", ">", "tags", "." ]
python
train
dancsalo/TensorBase
tensorbase/base.py
https://github.com/dancsalo/TensorBase/blob/3d42a326452bd03427034916ff2fb90730020204/tensorbase/base.py#L373-L380
def check_str(obj): """ Returns a string for various input types """ if isinstance(obj, str): return obj if isinstance(obj, float): return str(int(obj)) else: return str(obj)
[ "def", "check_str", "(", "obj", ")", ":", "if", "isinstance", "(", "obj", ",", "str", ")", ":", "return", "obj", "if", "isinstance", "(", "obj", ",", "float", ")", ":", "return", "str", "(", "int", "(", "obj", ")", ")", "else", ":", "return", "str", "(", "obj", ")" ]
Returns a string for various input types
[ "Returns", "a", "string", "for", "various", "input", "types" ]
python
train
mapbox/rio-color
rio_color/operations.py
https://github.com/mapbox/rio-color/blob/4e9d7a9348608e66f9381fcdba98c13050e91c83/rio_color/operations.py#L9-L97
def sigmoidal(arr, contrast, bias): r""" Sigmoidal contrast is a type of contrast control that adjusts the contrast without saturating highlights or shadows. It allows control over two factors: the contrast range from light to dark, and where the middle value of the mid-tones falls. The result is a non-linear and smooth contrast change. Parameters ---------- arr : ndarray, float, 0 .. 1 Array of color values to adjust contrast : integer Enhances the intensity differences between the lighter and darker elements of the image. For example, 0 is none, 3 is typical and 20 is a lot. bias : float, between 0 and 1 Threshold level for the contrast function to center on (typically centered at 0.5) Notes ---------- Sigmoidal contrast is based on the sigmoidal transfer function: .. math:: g(u) = 1/(1 + e^{\beta (\alpha - u)}) This sigmoid function is scaled so that the output is bound by the interval [0, 1]. .. math:: \frac{1/(1 + e^{\beta (\alpha - u)}) - 1/(1 + e^{\beta \alpha})}{1/(1 + e^{\beta (\alpha - 1)}) - 1/(1 + e^{\beta \alpha})} Where :math:`\alpha` is the threshold level, and :math:`\beta` the contrast factor to be applied. References ---------- .. [CT] Hany Farid "Fundamentals of Image Processing" http://www.cs.dartmouth.edu/farid/downloads/tutorials/fip.pdf """ if (arr.max() > 1.0 + epsilon) or (arr.min() < 0 - epsilon): raise ValueError("Input array must have float values between 0 and 1") if (bias > 1.0 + epsilon) or (bias < 0 - epsilon): raise ValueError("bias must be a scalar float between 0 and 1") alpha, beta = bias, contrast # We use the names a and b to match documentation. if alpha == 0: alpha = epsilon if beta == 0: return arr np.seterr(divide="ignore", invalid="ignore") if beta > 0: numerator = 1 / (1 + np.exp(beta * (alpha - arr))) - 1 / ( 1 + np.exp(beta * alpha) ) denominator = 1 / (1 + np.exp(beta * (alpha - 1))) - 1 / ( 1 + np.exp(beta * alpha) ) output = numerator / denominator else: # Inverse sigmoidal function: # todo: account for 0s # todo: formatting ;) output = ( (beta * alpha) - np.log( ( 1 / ( (arr / (1 + np.exp(beta * alpha - beta))) - (arr / (1 + np.exp(beta * alpha))) + (1 / (1 + np.exp(beta * alpha))) ) ) - 1 ) ) / beta return output
[ "def", "sigmoidal", "(", "arr", ",", "contrast", ",", "bias", ")", ":", "if", "(", "arr", ".", "max", "(", ")", ">", "1.0", "+", "epsilon", ")", "or", "(", "arr", ".", "min", "(", ")", "<", "0", "-", "epsilon", ")", ":", "raise", "ValueError", "(", "\"Input array must have float values between 0 and 1\"", ")", "if", "(", "bias", ">", "1.0", "+", "epsilon", ")", "or", "(", "bias", "<", "0", "-", "epsilon", ")", ":", "raise", "ValueError", "(", "\"bias must be a scalar float between 0 and 1\"", ")", "alpha", ",", "beta", "=", "bias", ",", "contrast", "# We use the names a and b to match documentation.", "if", "alpha", "==", "0", ":", "alpha", "=", "epsilon", "if", "beta", "==", "0", ":", "return", "arr", "np", ".", "seterr", "(", "divide", "=", "\"ignore\"", ",", "invalid", "=", "\"ignore\"", ")", "if", "beta", ">", "0", ":", "numerator", "=", "1", "/", "(", "1", "+", "np", ".", "exp", "(", "beta", "*", "(", "alpha", "-", "arr", ")", ")", ")", "-", "1", "/", "(", "1", "+", "np", ".", "exp", "(", "beta", "*", "alpha", ")", ")", "denominator", "=", "1", "/", "(", "1", "+", "np", ".", "exp", "(", "beta", "*", "(", "alpha", "-", "1", ")", ")", ")", "-", "1", "/", "(", "1", "+", "np", ".", "exp", "(", "beta", "*", "alpha", ")", ")", "output", "=", "numerator", "/", "denominator", "else", ":", "# Inverse sigmoidal function:", "# todo: account for 0s", "# todo: formatting ;)", "output", "=", "(", "(", "beta", "*", "alpha", ")", "-", "np", ".", "log", "(", "(", "1", "/", "(", "(", "arr", "/", "(", "1", "+", "np", ".", "exp", "(", "beta", "*", "alpha", "-", "beta", ")", ")", ")", "-", "(", "arr", "/", "(", "1", "+", "np", ".", "exp", "(", "beta", "*", "alpha", ")", ")", ")", "+", "(", "1", "/", "(", "1", "+", "np", ".", "exp", "(", "beta", "*", "alpha", ")", ")", ")", ")", ")", "-", "1", ")", ")", "/", "beta", "return", "output" ]
r""" Sigmoidal contrast is type of contrast control that adjusts the contrast without saturating highlights or shadows. It allows control over two factors: the contrast range from light to dark, and where the middle value of the mid-tones falls. The result is a non-linear and smooth contrast change. Parameters ---------- arr : ndarray, float, 0 .. 1 Array of color values to adjust contrast : integer Enhances the intensity differences between the lighter and darker elements of the image. For example, 0 is none, 3 is typical and 20 is a lot. bias : float, between 0 and 1 Threshold level for the contrast function to center on (typically centered at 0.5) Notes ---------- Sigmoidal contrast is based on the sigmoidal transfer function: .. math:: g(u) = ( 1/(1 + e^{- \alpha * u + \beta)}) This sigmoid function is scaled so that the output is bound by the interval [0, 1]. .. math:: ( 1/(1 + e^(\beta * (\alpha - u))) - 1/(1 + e^(\beta * \alpha)))/ ( 1/(1 + e^(\beta*(\alpha - 1))) - 1/(1 + e^(\beta * \alpha)) ) Where :math: `\alpha` is the threshold level, and :math: `\beta` the contrast factor to be applied. References ---------- .. [CT] Hany Farid "Fundamentals of Image Processing" http://www.cs.dartmouth.edu/farid/downloads/tutorials/fip.pdf
[ "r", "Sigmoidal", "contrast", "is", "type", "of", "contrast", "control", "that", "adjusts", "the", "contrast", "without", "saturating", "highlights", "or", "shadows", ".", "It", "allows", "control", "over", "two", "factors", ":", "the", "contrast", "range", "from", "light", "to", "dark", "and", "where", "the", "middle", "value", "of", "the", "mid", "-", "tones", "falls", ".", "The", "result", "is", "a", "non", "-", "linear", "and", "smooth", "contrast", "change", "." ]
python
train
rene-aguirre/pywinusb
pywinusb/hid/core.py
https://github.com/rene-aguirre/pywinusb/blob/954c4b2105d9f01cb0c50e24500bb747d4ecdc43/pywinusb/hid/core.py#L1318-L1375
def set_raw_data(self, raw_data): """Set usage values based on given raw data, item[0] is report_id, length should match 'raw_data_length' value, best performance if raw_data is c_ubyte ctypes array object type """ #pre-parsed data should exist assert(self.__hid_object.is_opened()) #valid length if len(raw_data) != self.__raw_report_size: raise HIDError( "Report size has to be %d elements (bytes)" \ % self.__raw_report_size ) # copy to internal storage self.__alloc_raw_data(raw_data) if not self.__usage_data_list: # create HIDP_DATA buffer max_items = hid_dll.HidP_MaxDataListLength(self.__report_kind, self.__hid_object.ptr_preparsed_data) data_list_type = winapi.HIDP_DATA * max_items self.__usage_data_list = data_list_type() #reference HIDP_DATA buffer data_list = self.__usage_data_list data_len = c_ulong(len(data_list)) #reset old values for item in self.values(): if item.is_value_array(): item.value = [0, ]*len(item) else: item.value = 0 #ready, parse raw data HidStatus( hid_dll.HidP_GetData(self.__report_kind, byref(data_list), byref(data_len), self.__hid_object.ptr_preparsed_data, byref(self.__raw_data), len(self.__raw_data)) ) #set values on internal report item objects for idx in range(data_len.value): value_item = data_list[idx] report_item = self.__idx_items.get(value_item.data_index) if not report_item: # This is not expected to happen continue if report_item.is_value(): report_item.value = value_item.value.raw_value elif report_item.is_button(): report_item.value = value_item.value.on else: pass # HID API should give us either, at least one of 'em #get values of array items for item in self.__value_array_items: #ask hid API to parse HidStatus( hid_dll.HidP_GetUsageValueArray(self.__report_kind, item.page_id, 0, #link collection item.usage_id, #short usage byref(item.value_array), #output data (c_ubyte storage) len(item.value_array), self.__hid_object.ptr_preparsed_data, byref(self.__raw_data), len(self.__raw_data)) )
[ "def", "set_raw_data", "(", "self", ",", "raw_data", ")", ":", "#pre-parsed data should exist\r", "assert", "(", "self", ".", "__hid_object", ".", "is_opened", "(", ")", ")", "#valid length\r", "if", "len", "(", "raw_data", ")", "!=", "self", ".", "__raw_report_size", ":", "raise", "HIDError", "(", "\"Report size has to be %d elements (bytes)\"", "%", "self", ".", "__raw_report_size", ")", "# copy to internal storage\r", "self", ".", "__alloc_raw_data", "(", "raw_data", ")", "if", "not", "self", ".", "__usage_data_list", ":", "# create HIDP_DATA buffer\r", "max_items", "=", "hid_dll", ".", "HidP_MaxDataListLength", "(", "self", ".", "__report_kind", ",", "self", ".", "__hid_object", ".", "ptr_preparsed_data", ")", "data_list_type", "=", "winapi", ".", "HIDP_DATA", "*", "max_items", "self", ".", "__usage_data_list", "=", "data_list_type", "(", ")", "#reference HIDP_DATA buffer\r", "data_list", "=", "self", ".", "__usage_data_list", "data_len", "=", "c_ulong", "(", "len", "(", "data_list", ")", ")", "#reset old values\r", "for", "item", "in", "self", ".", "values", "(", ")", ":", "if", "item", ".", "is_value_array", "(", ")", ":", "item", ".", "value", "=", "[", "0", ",", "]", "*", "len", "(", "item", ")", "else", ":", "item", ".", "value", "=", "0", "#ready, parse raw data\r", "HidStatus", "(", "hid_dll", ".", "HidP_GetData", "(", "self", ".", "__report_kind", ",", "byref", "(", "data_list", ")", ",", "byref", "(", "data_len", ")", ",", "self", ".", "__hid_object", ".", "ptr_preparsed_data", ",", "byref", "(", "self", ".", "__raw_data", ")", ",", "len", "(", "self", ".", "__raw_data", ")", ")", ")", "#set values on internal report item objects\r", "for", "idx", "in", "range", "(", "data_len", ".", "value", ")", ":", "value_item", "=", "data_list", "[", "idx", "]", "report_item", "=", "self", ".", "__idx_items", ".", "get", "(", "value_item", ".", "data_index", ")", "if", "not", "report_item", ":", "# This is not expected to happen\r", "continue", "if", "report_item", ".", "is_value", "(", ")", ":", "report_item", ".", "value", "=", "value_item", ".", "value", ".", "raw_value", "elif", "report_item", ".", "is_button", "(", ")", ":", "report_item", ".", "value", "=", "value_item", ".", "value", ".", "on", "else", ":", "pass", "# HID API should give us either, at least one of 'em\r", "#get values of array items\r", "for", "item", "in", "self", ".", "__value_array_items", ":", "#ask hid API to parse\r", "HidStatus", "(", "hid_dll", ".", "HidP_GetUsageValueArray", "(", "self", ".", "__report_kind", ",", "item", ".", "page_id", ",", "0", ",", "#link collection\r", "item", ".", "usage_id", ",", "#short usage\r", "byref", "(", "item", ".", "value_array", ")", ",", "#output data (c_ubyte storage)\r", "len", "(", "item", ".", "value_array", ")", ",", "self", ".", "__hid_object", ".", "ptr_preparsed_data", ",", "byref", "(", "self", ".", "__raw_data", ")", ",", "len", "(", "self", ".", "__raw_data", ")", ")", ")" ]
Set usage values based on given raw data, item[0] is report_id, length should match 'raw_data_length' value, best performance if raw_data is c_ubyte ctypes array object type
[ "Set", "usage", "values", "based", "on", "given", "raw", "data", "item", "[", "0", "]", "is", "report_id", "length", "should", "match", "raw_data_length", "value", "best", "performance", "if", "raw_data", "is", "c_ubyte", "ctypes", "array", "object", "type" ]
python
train
jtambasco/modesolverpy
modesolverpy/design.py
https://github.com/jtambasco/modesolverpy/blob/85254a13b5aed2404187c52ac93b9b3ce99ee3a3/modesolverpy/design.py#L29-L60
def grating_coupler_period(wavelength, n_eff, n_clad, incidence_angle_deg, diffration_order=1): ''' Calculate the period needed for a grating coupler. Args: wavelength (float): The target wavelength for the grating coupler. n_eff (float): The effective index of the mode of a waveguide with the width of the grating coupler. n_clad (float): The refractive index of the cladding. incidence_angle_deg (float): The incidence angle the grating coupler should operate at [degrees]. diffration_order (int): The grating order the coupler should work at. Default is 1st order (1). Returns: float: The period needed for the grating coupler in the same units as the wavelength was given at. ''' k0 = 2. * np.pi / wavelength beta = n_eff.real * k0 n_inc = n_clad grating_period = (2.*np.pi*diffration_order) \ / (beta - k0*n_inc*np.sin(np.radians(incidence_angle_deg))) return grating_period
[ "def", "grating_coupler_period", "(", "wavelength", ",", "n_eff", ",", "n_clad", ",", "incidence_angle_deg", ",", "diffration_order", "=", "1", ")", ":", "k0", "=", "2.", "*", "np", ".", "pi", "/", "wavelength", "beta", "=", "n_eff", ".", "real", "*", "k0", "n_inc", "=", "n_clad", "grating_period", "=", "(", "2.", "*", "np", ".", "pi", "*", "diffration_order", ")", "/", "(", "beta", "-", "k0", "*", "n_inc", "*", "np", ".", "sin", "(", "np", ".", "radians", "(", "incidence_angle_deg", ")", ")", ")", "return", "grating_period" ]
Calculate the period needed for a grating coupler. Args: wavelength (float): The target wavelength for the grating coupler. n_eff (float): The effective index of the mode of a waveguide with the width of the grating coupler. n_clad (float): The refractive index of the cladding. incidence_angle_deg (float): The incidence angle the grating coupler should operate at [degrees]. diffration_order (int): The grating order the coupler should work at. Default is 1st order (1). Returns: float: The period needed for the grating coupler in the same units as the wavelength was given at.
[ "Calculate", "the", "period", "needed", "for", "a", "grating", "coupler", "." ]
python
train
kivy/python-for-android
pythonforandroid/python.py
https://github.com/kivy/python-for-android/blob/8e0e8056bc22e4d5bd3398a6b0301f38ff167933/pythonforandroid/python.py#L276-L290
def compile_python_files(self, dir): ''' Compile the python files (recursively) inside a given folder. .. note:: python2 compiles the files into extension .pyo, but in python3, and as of Python 3.5, the .pyo filename extension is no longer used; .pyc is used instead (https://www.python.org/dev/peps/pep-0488) ''' args = [self.ctx.hostpython] if self.ctx.python_recipe.name == 'python3': args += ['-OO', '-m', 'compileall', '-b', '-f', dir] else: args += ['-OO', '-m', 'compileall', '-f', dir] subprocess.call(args)
[ "def", "compile_python_files", "(", "self", ",", "dir", ")", ":", "args", "=", "[", "self", ".", "ctx", ".", "hostpython", "]", "if", "self", ".", "ctx", ".", "python_recipe", ".", "name", "==", "'python3'", ":", "args", "+=", "[", "'-OO'", ",", "'-m'", ",", "'compileall'", ",", "'-b'", ",", "'-f'", ",", "dir", "]", "else", ":", "args", "+=", "[", "'-OO'", ",", "'-m'", ",", "'compileall'", ",", "'-f'", ",", "dir", "]", "subprocess", ".", "call", "(", "args", ")" ]
Compile the python files (recursively) inside a given folder. .. note:: python2 compiles the files into extension .pyo, but in python3, and as of Python 3.5, the .pyo filename extension is no longer used; .pyc is used instead (https://www.python.org/dev/peps/pep-0488)
[ "Compile", "the", "python", "files", "(", "recursively", ")", "for", "the", "python", "files", "inside", "a", "given", "folder", "." ]
python
train
rosenbrockc/fortpy
fortpy/interop/converter.py
https://github.com/rosenbrockc/fortpy/blob/1ed0757c52d549e41d9d44bdea68cb89529293a5/fortpy/interop/converter.py#L398-L416
def convert(self, path, version, target = None): """Converts the specified file using the relevant template. :arg path: the full path to the file to convert. :arg version: the new version of the file. :arg target: the optional path to save the file under. If not specified, the file is saved based on the template file name. """ #Get the template and values out of the XML input file and #write them in the format of the keywordless file. values, template = self.parse(path) lines = template.write(values, version) #Finally, write the lines to the correct path. if target is None: target = os.path.join(os.path.dirname(path), template.name) with open(os.path.expanduser(target), 'w') as f: f.write("\n".join(lines))
[ "def", "convert", "(", "self", ",", "path", ",", "version", ",", "target", "=", "None", ")", ":", "#Get the template and values out of the XML input file and", "#write them in the format of the keywordless file.", "values", ",", "template", "=", "self", ".", "parse", "(", "path", ")", "lines", "=", "template", ".", "write", "(", "values", ",", "version", ")", "#Finally, write the lines to the correct path.", "if", "target", "is", "None", ":", "target", "=", "os", ".", "path", ".", "join", "(", "os", ".", "path", ".", "dirname", "(", "path", ")", ",", "template", ".", "name", ")", "with", "open", "(", "os", ".", "path", ".", "expanduser", "(", "target", ")", ",", "'w'", ")", "as", "f", ":", "f", ".", "write", "(", "\"\\n\"", ".", "join", "(", "lines", ")", ")" ]
Converts the specified file using the relevant template. :arg path: the full path to the file to convert. :arg version: the new version of the file. :arg target: the optional path to save the file under. If not specified, the file is saved based on the template file name.
[ "Converts", "the", "specified", "file", "using", "the", "relevant", "template", "." ]
python
train
MrYsLab/pymata-aio
examples/sparkfun_redbot/basics/simple_drive.py
https://github.com/MrYsLab/pymata-aio/blob/015081a4628b9d47dfe3f8d6c698ff903f107810/examples/sparkfun_redbot/basics/simple_drive.py#L21-L29
def setup(): """Setup pins""" print("Simple drive") board.set_pin_mode(L_CTRL_1, Constants.OUTPUT) board.set_pin_mode(L_CTRL_2, Constants.OUTPUT) board.set_pin_mode(PWM_L, Constants.PWM) board.set_pin_mode(R_CTRL_1, Constants.OUTPUT) board.set_pin_mode(R_CTRL_2, Constants.OUTPUT) board.set_pin_mode(PWM_R, Constants.PWM)
[ "def", "setup", "(", ")", ":", "print", "(", "\"Simple drive\"", ")", "board", ".", "set_pin_mode", "(", "L_CTRL_1", ",", "Constants", ".", "OUTPUT", ")", "board", ".", "set_pin_mode", "(", "L_CTRL_2", ",", "Constants", ".", "OUTPUT", ")", "board", ".", "set_pin_mode", "(", "PWM_L", ",", "Constants", ".", "PWM", ")", "board", ".", "set_pin_mode", "(", "R_CTRL_1", ",", "Constants", ".", "OUTPUT", ")", "board", ".", "set_pin_mode", "(", "R_CTRL_2", ",", "Constants", ".", "OUTPUT", ")", "board", ".", "set_pin_mode", "(", "PWM_R", ",", "Constants", ".", "PWM", ")" ]
Setup pins
[ "Setup", "pins" ]
python
train
wonambi-python/wonambi
wonambi/ioeeg/ktlx.py
https://github.com/wonambi-python/wonambi/blob/1d8e3d7e53df8017c199f703bcab582914676e76/wonambi/ioeeg/ktlx.py#L109-L237
def _calculate_conversion(hdr): """Calculate the conversion factor. Returns ------- conv_factor : numpy.ndarray channel-long vector with the channel-specific conversion factor Notes ----- Final units are microvolts It should include all the headbox versions apart from 5 because it depends on subversion. """ discardbits = hdr['discardbits'] n_chan = hdr['num_channels'] if hdr['headbox_type'][0] in (1, 3): # all channels factor = ones((n_chan)) * (8711. / (2 ** 21 - 0.5)) * 2 ** discardbits elif hdr['headbox_type'][0] == 4: # 0 - 23 ch1 = ones((24)) * (8711. / (2 ** 21 - 0.5)) * 2 ** discardbits # 24 - 27 ch2 = ones((4)) * ((5000000. / (2 ** 10 - 0.5)) / (2 ** 6)) * 2 ** discardbits factor = concatenate((ch1, ch2)) elif hdr['headbox_type'][0] == 6: # 0 - 31 ch1 = ones((32)) * (8711. / (2 ** 21 - 0.5)) * 2 ** discardbits # 32 - 35 ch2 = ones((4)) * ((5000000. / (2 ** 10 - 0.5)) / (2 ** 6)) * 2 ** discardbits factor = concatenate((ch1, ch2)) elif hdr['headbox_type'][0] == 8: # 0 - 24 ch1 = ones((25)) * (8711. / ((2 ** 21) - 0.5)) * 2 ** discardbits # 25 - 26 ch2 = ones((2)) * (1 / (2 ** 6)) * 2 ** discardbits factor = concatenate((ch1, ch2)) elif hdr['headbox_type'][0] == 9: # 0 - 32 ch1 = ones((33)) * (8711. / ((2 ** 21) - 0.5)) * 2 ** discardbits # 33 - 34 ch2 = ones((2)) * (1 / (2 ** 6)) * 2 ** discardbits factor = concatenate((ch1, ch2)) elif hdr['headbox_type'][0] == 14: # 0 - 37 ch1 = ones((38)) * (8711. / ((2 ** 21) - 0.5)) * 2 ** discardbits # 38 - 47 ch2 = ones((10)) * ((10800000 / 65536) / (2 ** 6)) * 2 ** discardbits # 48-49 ch3 = ones((2)) * (1 / (2 ** 6)) * 2 ** discardbits factor = concatenate((ch1, ch2, ch3)) elif hdr['headbox_type'][0] == 15: # 0 - 23 ch1 = ones((24)) * (8711. / ((2 ** 21) - 0.5)) * 2 ** discardbits # 24 - 27 (as above) ch2 = ones((4)) * (8711. / ((2 ** 21) - 0.5)) * 2 ** discardbits # 28 - 31 (note 10000000 instead of 10800000) ch3 = ones((4)) * ((10000000 / 65536) / (2 ** 6)) * 2 ** discardbits # 32-33 ch4 = ones((2)) * (1 / (2 ** 6)) * 2 ** discardbits factor = concatenate((ch1, ch2, ch3, ch4)) elif hdr['headbox_type'][0] == 17: # 0 - 39 ch1 = ones((40)) * (8711. / ((2 ** 21) - 0.5)) * 2 ** discardbits # 40 - 43 ch2 = ones((4)) * ((10800000 / 65536) / (2 ** 6)) * 2 ** discardbits # 44 - 45 ch3 = ones((2)) * (1 / (2 ** 6)) * 2 ** discardbits factor = concatenate((ch1, ch2, ch3)) elif hdr['headbox_type'][0] == 19: # all channels factor = ones((n_chan)) * (8711. / ((2 ** 21) - 0.5)) * 2 ** discardbits elif hdr['headbox_type'][0] == 21: # 0 - 127 ch1 = ones((128)) * (8711. / ((2 ** 21) - 0.5)) * 2 ** discardbits # 128 - 129 ch2 = ones((2)) * (1 / (2 ** 6)) * 2 ** discardbits # 130 - 255 ch3 = ones((126)) * (8711. / ((2 ** 21) - 0.5)) * 2 ** discardbits factor = concatenate((ch1, ch2, ch3)) elif hdr['headbox_type'][0] == 22: # 0 - 31 ch1 = ones((32)) * (8711. / ((2 ** 21) - 0.5)) * 2 ** discardbits # 32 - 39 ch2 = ones((8)) * ((10800000. / 65536.) / (2 ** 6)) * 2 ** discardbits # 40 - 41 ch3 = ones((2)) * (1 / (2 ** 6)) * 2 ** discardbits # 42 ch4 = ones((1)) * ((10800000. / 65536.) / (2 ** 6)) * 2 ** discardbits factor = concatenate((ch1, ch2, ch3, ch4)) elif hdr['headbox_type'][0] == 23: # 0 - 31 ch1 = ones((32)) * (8711. / ((2 ** 21) - 0.5)) * 2 ** discardbits # 32 - 35 ch2 = ones((4)) * ((10800000. / 65536.) / (2 ** 6)) * 2 ** discardbits # 36 - 37 ch3 = ones((2)) * (1 / (2 ** 6)) * 2 ** discardbits # 38 ch4 = ones((1)) * ((10800000. / 65536.) / (2 ** 6)) * 2 ** discardbits factor = concatenate((ch1, ch2, ch3, ch4)) else: raise NotImplementedError('Implement conversion factor for headbox ' + str(hdr['headbox_type'][0])) return factor[:n_chan]
[ "def", "_calculate_conversion", "(", "hdr", ")", ":", "discardbits", "=", "hdr", "[", "'discardbits'", "]", "n_chan", "=", "hdr", "[", "'num_channels'", "]", "if", "hdr", "[", "'headbox_type'", "]", "[", "0", "]", "in", "(", "1", ",", "3", ")", ":", "# all channels", "factor", "=", "ones", "(", "(", "n_chan", ")", ")", "*", "(", "8711.", "/", "(", "2", "**", "21", "-", "0.5", ")", ")", "*", "2", "**", "discardbits", "elif", "hdr", "[", "'headbox_type'", "]", "[", "0", "]", "==", "4", ":", "# 0 - 23", "ch1", "=", "ones", "(", "(", "24", ")", ")", "*", "(", "8711.", "/", "(", "2", "**", "21", "-", "0.5", ")", ")", "*", "2", "**", "discardbits", "# 24 - 27", "ch2", "=", "ones", "(", "(", "4", ")", ")", "*", "(", "(", "5000000.", "/", "(", "2", "**", "10", "-", "0.5", ")", ")", "/", "(", "2", "**", "6", ")", ")", "*", "2", "**", "discardbits", "factor", "=", "concatenate", "(", "(", "ch1", ",", "ch2", ")", ")", "elif", "hdr", "[", "'headbox_type'", "]", "[", "0", "]", "==", "6", ":", "# 0 - 31", "ch1", "=", "ones", "(", "(", "32", ")", ")", "*", "(", "8711.", "/", "(", "2", "**", "21", "-", "0.5", ")", ")", "*", "2", "**", "discardbits", "# 32 - 35", "ch2", "=", "ones", "(", "(", "4", ")", ")", "*", "(", "(", "5000000.", "/", "(", "2", "**", "10", "-", "0.5", ")", ")", "/", "(", "2", "**", "6", ")", ")", "*", "2", "**", "discardbits", "factor", "=", "concatenate", "(", "(", "ch1", ",", "ch2", ")", ")", "elif", "hdr", "[", "'headbox_type'", "]", "[", "0", "]", "==", "8", ":", "# 0 - 24", "ch1", "=", "ones", "(", "(", "25", ")", ")", "*", "(", "8711.", "/", "(", "(", "2", "**", "21", ")", "-", "0.5", ")", ")", "*", "2", "**", "discardbits", "# 25 - 26", "ch2", "=", "ones", "(", "(", "2", ")", ")", "*", "(", "1", "/", "(", "2", "**", "6", ")", ")", "*", "2", "**", "discardbits", "factor", "=", "concatenate", "(", "(", "ch1", ",", "ch2", ")", ")", "elif", "hdr", "[", "'headbox_type'", "]", "[", "0", "]", "==", "9", ":", "# 0 - 32", "ch1", "=", "ones", "(", "(", "33", ")", ")", "*", "(", "8711.", "/", "(", "(", "2", "**", "21", ")", "-", "0.5", ")", ")", "*", "2", "**", "discardbits", "# 33 - 34", "ch2", "=", "ones", "(", "(", "2", ")", ")", "*", "(", "1", "/", "(", "2", "**", "6", ")", ")", "*", "2", "**", "discardbits", "factor", "=", "concatenate", "(", "(", "ch1", ",", "ch2", ")", ")", "elif", "hdr", "[", "'headbox_type'", "]", "[", "0", "]", "==", "14", ":", "# 0 - 37", "ch1", "=", "ones", "(", "(", "38", ")", ")", "*", "(", "8711.", "/", "(", "(", "2", "**", "21", ")", "-", "0.5", ")", ")", "*", "2", "**", "discardbits", "# 38 - 47", "ch2", "=", "ones", "(", "(", "10", ")", ")", "*", "(", "(", "10800000", "/", "65536", ")", "/", "(", "2", "**", "6", ")", ")", "*", "2", "**", "discardbits", "# 48-49", "ch3", "=", "ones", "(", "(", "2", ")", ")", "*", "(", "1", "/", "(", "2", "**", "6", ")", ")", "*", "2", "**", "discardbits", "factor", "=", "concatenate", "(", "(", "ch1", ",", "ch2", ",", "ch3", ")", ")", "elif", "hdr", "[", "'headbox_type'", "]", "[", "0", "]", "==", "15", ":", "# 0 - 23", "ch1", "=", "ones", "(", "(", "24", ")", ")", "*", "(", "8711.", "/", "(", "(", "2", "**", "21", ")", "-", "0.5", ")", ")", "*", "2", "**", "discardbits", "# 24 - 27 (as above)", "ch2", "=", "ones", "(", "(", "4", ")", ")", "*", "(", "8711.", "/", "(", "(", "2", "**", "21", ")", "-", "0.5", ")", ")", "*", "2", "**", "discardbits", "# 28 - 31 (note 10000000 instead of 10800000)", "ch3", "=", "ones", "(", "(", "4", ")", ")", "*", "(", "(", "10000000", "/", "65536", ")", "/", "(", "2", "**", "6", ")", ")", "*", "2", "**", "discardbits", "# 32-33", "ch4", "=", "ones", "(", "(", "2", ")", ")", "*", "(", "1", "/", "(", "2", "**", "6", ")", ")", "*", "2", "**", "discardbits", "factor", "=", "concatenate", "(", "(", "ch1", ",", "ch2", ",", "ch3", ",", "ch4", ")", ")", "elif", "hdr", "[", "'headbox_type'", "]", "[", "0", "]", "==", "17", ":", "# 0 - 39", "ch1", "=", "ones", "(", "(", "40", ")", ")", "*", "(", "8711.", "/", "(", "(", "2", "**", "21", ")", "-", "0.5", ")", ")", "*", "2", "**", "discardbits", "# 40 - 43", "ch2", "=", "ones", "(", "(", "4", ")", ")", "*", "(", "(", "10800000", "/", "65536", ")", "/", "(", "2", "**", "6", ")", ")", "*", "2", "**", "discardbits", "# 44 - 45", "ch3", "=", "ones", "(", "(", "2", ")", ")", "*", "(", "1", "/", "(", "2", "**", "6", ")", ")", "*", "2", "**", "discardbits", "factor", "=", "concatenate", "(", "(", "ch1", ",", "ch2", ",", "ch3", ")", ")", "elif", "hdr", "[", "'headbox_type'", "]", "[", "0", "]", "==", "19", ":", "# all channels", "factor", "=", "ones", "(", "(", "n_chan", ")", ")", "*", "(", "8711.", "/", "(", "(", "2", "**", "21", ")", "-", "0.5", ")", ")", "*", "2", "**", "discardbits", "elif", "hdr", "[", "'headbox_type'", "]", "[", "0", "]", "==", "21", ":", "# 0 - 127", "ch1", "=", "ones", "(", "(", "128", ")", ")", "*", "(", "8711.", "/", "(", "(", "2", "**", "21", ")", "-", "0.5", ")", ")", "*", "2", "**", "discardbits", "# 128 - 129", "ch2", "=", "ones", "(", "(", "2", ")", ")", "*", "(", "1", "/", "(", "2", "**", "6", ")", ")", "*", "2", "**", "discardbits", "# 130 - 255", "ch3", "=", "ones", "(", "(", "126", ")", ")", "*", "(", "8711.", "/", "(", "(", "2", "**", "21", ")", "-", "0.5", ")", ")", "*", "2", "**", "discardbits", "factor", "=", "concatenate", "(", "(", "ch1", ",", "ch2", ",", "ch3", ")", ")", "elif", "hdr", "[", "'headbox_type'", "]", "[", "0", "]", "==", "22", ":", "# 0 - 31", "ch1", "=", "ones", "(", "(", "32", ")", ")", "*", "(", "8711.", "/", "(", "(", "2", "**", "21", ")", "-", "0.5", ")", ")", "*", "2", "**", "discardbits", "# 32 - 39", "ch2", "=", "ones", "(", "(", "8", ")", ")", "*", "(", "(", "10800000.", "/", "65536.", ")", "/", "(", "2", "**", "6", ")", ")", "*", "2", "**", "discardbits", "# 40 - 41", "ch3", "=", "ones", "(", "(", "2", ")", ")", "*", "(", "1", "/", "(", "2", "**", "6", ")", ")", "*", "2", "**", "discardbits", "# 42", "ch4", "=", "ones", "(", "(", "1", ")", ")", "*", "(", "(", "10800000.", "/", "65536.", ")", "/", "(", "2", "**", "6", ")", ")", "*", "2", "**", "discardbits", "factor", "=", "concatenate", "(", "(", "ch1", ",", "ch2", ",", "ch3", ",", "ch4", ")", ")", "elif", "hdr", "[", "'headbox_type'", "]", "[", "0", "]", "==", "23", ":", "# 0 - 31", "ch1", "=", "ones", "(", "(", "32", ")", ")", "*", "(", "8711.", "/", "(", "(", "2", "**", "21", ")", "-", "0.5", ")", ")", "*", "2", "**", "discardbits", "# 32 - 35", "ch2", "=", "ones", "(", "(", "4", ")", ")", "*", "(", "(", "10800000.", "/", "65536.", ")", "/", "(", "2", "**", "6", ")", ")", "*", "2", "**", "discardbits", "# 36 - 37", "ch3", "=", "ones", "(", "(", "2", ")", ")", "*", "(", "1", "/", "(", "2", "**", "6", ")", ")", "*", "2", "**", "discardbits", "# 38", "ch4", "=", "ones", "(", "(", "1", ")", ")", "*", "(", "(", "10800000.", "/", "65536.", ")", "/", "(", "2", "**", "6", ")", ")", "*", "2", "**", "discardbits", "factor", "=", "concatenate", "(", "(", "ch1", ",", "ch2", ",", "ch3", ",", "ch4", ")", ")", "else", ":", "raise", "NotImplementedError", "(", "'Implement conversion factor for headbox '", "+", "str", "(", "hdr", "[", "'headbox_type'", "]", "[", "0", "]", ")", ")", "return", "factor", "[", ":", "n_chan", "]" ]
Calculate the conversion factor. Returns ------- conv_factor : numpy.ndarray channel-long vector with the channel-specific conversion factor Notes ----- Final units are microvolts It should include all the headbox versions apart from 5 because it depends on subversion.
[ "Calculate", "the", "conversion", "factor", "." ]
python
train
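One branch of the factor table above (headbox type 4) computed stand-alone; the discardbits value is invented.

import numpy as np

discardbits = 2  # hypothetical
ch1 = np.ones(24) * (8711. / (2 ** 21 - 0.5)) * 2 ** discardbits                   # channels 0-23
ch2 = np.ones(4) * ((5000000. / (2 ** 10 - 0.5)) / (2 ** 6)) * 2 ** discardbits    # channels 24-27
factor = np.concatenate((ch1, ch2))  # microvolts per count, one entry per channel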
Skype4Py/Skype4Py
Skype4Py/skype.py
https://github.com/Skype4Py/Skype4Py/blob/c48d83f7034109fe46315d45a066126002c6e0d4/Skype4Py/skype.py#L731-L747
def Property(self, ObjectType, ObjectId, PropName, Set=None): """Queries/sets the properties of an object. :Parameters: ObjectType : str Object type ('USER', 'CALL', 'CHAT', 'CHATMESSAGE', ...). ObjectId : str Object Id, depends on the object type. PropName : str Name of the property to access. Set : unicode or None Value the property should be set to or None if the value should be queried. :return: Property value if Set=None, None otherwise. :rtype: unicode or None """ return self._Property(ObjectType, ObjectId, PropName, Set)
[ "def", "Property", "(", "self", ",", "ObjectType", ",", "ObjectId", ",", "PropName", ",", "Set", "=", "None", ")", ":", "return", "self", ".", "_Property", "(", "ObjectType", ",", "ObjectId", ",", "PropName", ",", "Set", ")" ]
Queries/sets the properties of an object. :Parameters: ObjectType : str Object type ('USER', 'CALL', 'CHAT', 'CHATMESSAGE', ...). ObjectId : str Object Id, depends on the object type. PropName : str Name of the property to access. Set : unicode or None Value the property should be set to or None if the value should be queried. :return: Property value if Set=None, None otherwise. :rtype: unicode or None
[ "Queries", "/", "sets", "the", "properties", "of", "an", "object", "." ]
python
train