Column schema (feature type and observed value-length range): nwo string[5–86]; sha string[40]; path string[4–189]; language string (1 distinct value); identifier string[1–94]; parameters string[2–4.03k]; argument_list string (1 distinct value); return_statement string[0–11.5k]; docstring string[1–33.2k]; docstring_summary string[0–5.15k]; docstring_tokens sequence; function string[34–151k]; function_tokens sequence; url string[90–278]. A hedged loading sketch appears after the sample rows at the end of this excerpt.

nwo | sha | path | language | identifier | parameters | argument_list | return_statement | docstring | docstring_summary | docstring_tokens | function | function_tokens | url
---|---|---|---|---|---|---|---|---|---|---|---|---|---
eventql/eventql | 7ca0dbb2e683b525620ea30dc40540a22d5eb227 | deps/3rdparty/spidermonkey/mozjs/media/webrtc/trunk/tools/gyp/pylib/gyp/generator/make.py | python | MakefileWriter.ComputeDeps | (self, spec) | return (gyp.common.uniquer(deps), gyp.common.uniquer(link_deps)) | Compute the dependencies of a gyp spec.
Returns a tuple (deps, link_deps), where each is a list of
filenames that will need to be put in front of make for either
building (deps) or linking (link_deps). | Compute the dependencies of a gyp spec. | [
"Compute",
"the",
"dependencies",
"of",
"a",
"gyp",
"spec",
"."
] | def ComputeDeps(self, spec):
"""Compute the dependencies of a gyp spec.
Returns a tuple (deps, link_deps), where each is a list of
filenames that will need to be put in front of make for either
building (deps) or linking (link_deps).
"""
deps = []
link_deps = []
if 'dependencies' in spec:
deps.extend([target_outputs[dep] for dep in spec['dependencies']
if target_outputs[dep]])
for dep in spec['dependencies']:
if dep in target_link_deps:
link_deps.append(target_link_deps[dep])
deps.extend(link_deps)
# TODO: It seems we need to transitively link in libraries (e.g. -lfoo)?
# This hack makes it work:
# link_deps.extend(spec.get('libraries', []))
return (gyp.common.uniquer(deps), gyp.common.uniquer(link_deps)) | [
"def",
"ComputeDeps",
"(",
"self",
",",
"spec",
")",
":",
"deps",
"=",
"[",
"]",
"link_deps",
"=",
"[",
"]",
"if",
"'dependencies'",
"in",
"spec",
":",
"deps",
".",
"extend",
"(",
"[",
"target_outputs",
"[",
"dep",
"]",
"for",
"dep",
"in",
"spec",
"[",
"'dependencies'",
"]",
"if",
"target_outputs",
"[",
"dep",
"]",
"]",
")",
"for",
"dep",
"in",
"spec",
"[",
"'dependencies'",
"]",
":",
"if",
"dep",
"in",
"target_link_deps",
":",
"link_deps",
".",
"append",
"(",
"target_link_deps",
"[",
"dep",
"]",
")",
"deps",
".",
"extend",
"(",
"link_deps",
")",
"# TODO: It seems we need to transitively link in libraries (e.g. -lfoo)?",
"# This hack makes it work:",
"# link_deps.extend(spec.get('libraries', []))",
"return",
"(",
"gyp",
".",
"common",
".",
"uniquer",
"(",
"deps",
")",
",",
"gyp",
".",
"common",
".",
"uniquer",
"(",
"link_deps",
")",
")"
] | https://github.com/eventql/eventql/blob/7ca0dbb2e683b525620ea30dc40540a22d5eb227/deps/3rdparty/spidermonkey/mozjs/media/webrtc/trunk/tools/gyp/pylib/gyp/generator/make.py#L1363-L1382 |
|
root-project/root | fcd3583bb14852bf2e8cd2415717cbaac0e75896 | bindings/pyroot/pythonizations/python/ROOT/_pythonization/_roofit/_rooabsdata.py | python | RooAbsData.reduce | (self, *args, **kwargs) | return self._reduce(*args, **kwargs) | r"""The RooAbsData::reduce() function is pythonized with the command argument pythonization.
The keywords must correspond to the CmdArgs of the function. | r"""The RooAbsData::reduce() function is pythonized with the command argument pythonization.
The keywords must correspond to the CmdArgs of the function. | [
"r",
"The",
"RooAbsData",
"::",
"reduce",
"()",
"function",
"is",
"pythonized",
"with",
"the",
"command",
"argument",
"pythonization",
".",
"The",
"keywords",
"must",
"correspond",
"to",
"the",
"CmdArgs",
"of",
"the",
"function",
"."
] | def reduce(self, *args, **kwargs):
r"""The RooAbsData::reduce() function is pythonized with the command argument pythonization.
The keywords must correspond to the CmdArgs of the function.
"""
# Redefinition of `RooAbsData.reduce` for keyword arguments.
args, kwargs = _kwargs_to_roocmdargs(*args, **kwargs)
return self._reduce(*args, **kwargs) | [
"def",
"reduce",
"(",
"self",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"# Redefinition of `RooAbsData.reduce` for keyword arguments.",
"args",
",",
"kwargs",
"=",
"_kwargs_to_roocmdargs",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
"return",
"self",
".",
"_reduce",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")"
] | https://github.com/root-project/root/blob/fcd3583bb14852bf2e8cd2415717cbaac0e75896/bindings/pyroot/pythonizations/python/ROOT/_pythonization/_roofit/_rooabsdata.py#L67-L73 |
|
wxWidgets/wxPython-Classic | 19571e1ae65f1ac445f5491474121998c97a1bf0 | src/msw/dataview.py | python | DataViewRenderer.GetAlignment | (*args, **kwargs) | return _dataview.DataViewRenderer_GetAlignment(*args, **kwargs) | GetAlignment(self) -> int | GetAlignment(self) -> int | [
"GetAlignment",
"(",
"self",
")",
"-",
">",
"int"
] | def GetAlignment(*args, **kwargs):
"""GetAlignment(self) -> int"""
return _dataview.DataViewRenderer_GetAlignment(*args, **kwargs) | [
"def",
"GetAlignment",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"return",
"_dataview",
".",
"DataViewRenderer_GetAlignment",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")"
] | https://github.com/wxWidgets/wxPython-Classic/blob/19571e1ae65f1ac445f5491474121998c97a1bf0/src/msw/dataview.py#L1180-L1182 |
|
catboost/catboost | 167f64f237114a4d10b2b4ee42adb4569137debe | contrib/tools/python/src/Lib/logging/__init__.py | python | LogRecord.getMessage | (self) | return msg | Return the message for this LogRecord.
Return the message for this LogRecord after merging any user-supplied
arguments with the message. | Return the message for this LogRecord. | [
"Return",
"the",
"message",
"for",
"this",
"LogRecord",
"."
] | def getMessage(self):
"""
Return the message for this LogRecord.
Return the message for this LogRecord after merging any user-supplied
arguments with the message.
"""
if not _unicode: #if no unicode support...
msg = str(self.msg)
else:
msg = self.msg
if not isinstance(msg, basestring):
try:
msg = str(self.msg)
except UnicodeError:
msg = self.msg #Defer encoding till later
if self.args:
msg = msg % self.args
return msg | [
"def",
"getMessage",
"(",
"self",
")",
":",
"if",
"not",
"_unicode",
":",
"#if no unicode support...",
"msg",
"=",
"str",
"(",
"self",
".",
"msg",
")",
"else",
":",
"msg",
"=",
"self",
".",
"msg",
"if",
"not",
"isinstance",
"(",
"msg",
",",
"basestring",
")",
":",
"try",
":",
"msg",
"=",
"str",
"(",
"self",
".",
"msg",
")",
"except",
"UnicodeError",
":",
"msg",
"=",
"self",
".",
"msg",
"#Defer encoding till later",
"if",
"self",
".",
"args",
":",
"msg",
"=",
"msg",
"%",
"self",
".",
"args",
"return",
"msg"
] | https://github.com/catboost/catboost/blob/167f64f237114a4d10b2b4ee42adb4569137debe/contrib/tools/python/src/Lib/logging/__init__.py#L312-L330 |
|
baidu-research/tensorflow-allreduce | 66d5b855e90b0949e9fa5cca5599fd729a70e874 | tensorflow/contrib/layers/python/layers/initializers.py | python | xavier_initializer | (uniform=True, seed=None, dtype=dtypes.float32) | return variance_scaling_initializer(factor=1.0, mode='FAN_AVG',
uniform=uniform, seed=seed, dtype=dtype) | Returns an initializer performing "Xavier" initialization for weights.
This function implements the weight initialization from:
Xavier Glorot and Yoshua Bengio (2010):
[Understanding the difficulty of training deep feedforward neural
networks. International conference on artificial intelligence and
statistics.](
http://www.jmlr.org/proceedings/papers/v9/glorot10a/glorot10a.pdf)
This initializer is designed to keep the scale of the gradients roughly the
same in all layers. In uniform distribution this ends up being the range:
`x = sqrt(6. / (in + out)); [-x, x]` and for normal distribution a standard
deviation of `sqrt(2. / (in + out))` is used.
Args:
uniform: Whether to use uniform or normal distributed random initialization.
seed: A Python integer. Used to create random seeds. See
@{tf.set_random_seed} for behavior.
dtype: The data type. Only floating point types are supported.
Returns:
An initializer for a weight matrix. | Returns an initializer performing "Xavier" initialization for weights. | [
"Returns",
"an",
"initializer",
"performing",
"Xavier",
"initialization",
"for",
"weights",
"."
] | def xavier_initializer(uniform=True, seed=None, dtype=dtypes.float32):
"""Returns an initializer performing "Xavier" initialization for weights.
This function implements the weight initialization from:
Xavier Glorot and Yoshua Bengio (2010):
[Understanding the difficulty of training deep feedforward neural
networks. International conference on artificial intelligence and
statistics.](
http://www.jmlr.org/proceedings/papers/v9/glorot10a/glorot10a.pdf)
This initializer is designed to keep the scale of the gradients roughly the
same in all layers. In uniform distribution this ends up being the range:
`x = sqrt(6. / (in + out)); [-x, x]` and for normal distribution a standard
deviation of `sqrt(2. / (in + out))` is used.
Args:
uniform: Whether to use uniform or normal distributed random initialization.
seed: A Python integer. Used to create random seeds. See
@{tf.set_random_seed} for behavior.
dtype: The data type. Only floating point types are supported.
Returns:
An initializer for a weight matrix.
"""
return variance_scaling_initializer(factor=1.0, mode='FAN_AVG',
uniform=uniform, seed=seed, dtype=dtype) | [
"def",
"xavier_initializer",
"(",
"uniform",
"=",
"True",
",",
"seed",
"=",
"None",
",",
"dtype",
"=",
"dtypes",
".",
"float32",
")",
":",
"return",
"variance_scaling_initializer",
"(",
"factor",
"=",
"1.0",
",",
"mode",
"=",
"'FAN_AVG'",
",",
"uniform",
"=",
"uniform",
",",
"seed",
"=",
"seed",
",",
"dtype",
"=",
"dtype",
")"
] | https://github.com/baidu-research/tensorflow-allreduce/blob/66d5b855e90b0949e9fa5cca5599fd729a70e874/tensorflow/contrib/layers/python/layers/initializers.py#L31-L57 |
|
idaholab/moose | 9eeebc65e098b4c30f8205fb41591fd5b61eb6ff | python/MooseDocs/base/components.py | python | ReaderComponent.__init__ | (self) | Constructs the object and sets the default settings of the object. | Constructs the object and sets the default settings of the object. | [
"Constructs",
"the",
"object",
"and",
"sets",
"the",
"default",
"settings",
"of",
"the",
"object",
"."
] | def __init__(self):
"""
Constructs the object and sets the default settings of the object.
"""
Component.__init__(self)
mixins.ReaderObject.__init__(self)
# Check return type of default settings
defaults = self.defaultSettings()
if not isinstance(defaults, dict):
msg = "The component '{}' must return a dict from the defaultSettings static method."
raise exceptions.MooseDocsException(msg, self) | [
"def",
"__init__",
"(",
"self",
")",
":",
"Component",
".",
"__init__",
"(",
"self",
")",
"mixins",
".",
"ReaderObject",
".",
"__init__",
"(",
"self",
")",
"# Check return type of default settings",
"defaults",
"=",
"self",
".",
"defaultSettings",
"(",
")",
"if",
"not",
"isinstance",
"(",
"defaults",
",",
"dict",
")",
":",
"msg",
"=",
"\"The component '{}' must return a dict from the defaultSettings static method.\"",
"raise",
"exceptions",
".",
"MooseDocsException",
"(",
"msg",
",",
"self",
")"
] | https://github.com/idaholab/moose/blob/9eeebc65e098b4c30f8205fb41591fd5b61eb6ff/python/MooseDocs/base/components.py#L85-L96 |
||
wxWidgets/wxPython-Classic | 19571e1ae65f1ac445f5491474121998c97a1bf0 | src/msw/_gdi.py | python | Cursor.GetHandle | (*args, **kwargs) | return _gdi_.Cursor_GetHandle(*args, **kwargs) | GetHandle(self) -> long
Get the MS Windows handle for the cursor | GetHandle(self) -> long | [
"GetHandle",
"(",
"self",
")",
"-",
">",
"long"
] | def GetHandle(*args, **kwargs):
"""
GetHandle(self) -> long
Get the MS Windows handle for the cursor
"""
return _gdi_.Cursor_GetHandle(*args, **kwargs) | [
"def",
"GetHandle",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"return",
"_gdi_",
".",
"Cursor_GetHandle",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")"
] | https://github.com/wxWidgets/wxPython-Classic/blob/19571e1ae65f1ac445f5491474121998c97a1bf0/src/msw/_gdi.py#L1550-L1556 |
|
miyosuda/TensorFlowAndroidDemo | 35903e0221aa5f109ea2dbef27f20b52e317f42d | jni-build/jni/include/tensorflow/python/ops/control_flow_ops.py | python | WhileContext.grad_state | (self) | return self._grad_state | The gradient loop state. | The gradient loop state. | [
"The",
"gradient",
"loop",
"state",
"."
] | def grad_state(self):
"""The gradient loop state."""
return self._grad_state | [
"def",
"grad_state",
"(",
"self",
")",
":",
"return",
"self",
".",
"_grad_state"
] | https://github.com/miyosuda/TensorFlowAndroidDemo/blob/35903e0221aa5f109ea2dbef27f20b52e317f42d/jni-build/jni/include/tensorflow/python/ops/control_flow_ops.py#L1425-L1427 |
|
pytorch/pytorch | 7176c92687d3cc847cc046bf002269c6949a21c2 | caffe2/python/schema.py | python | Field._child_base_id | (self, child_index=None) | return pos | Get the base id of the given child | Get the base id of the given child | [
"Get",
"the",
"base",
"id",
"of",
"the",
"given",
"child"
] | def _child_base_id(self, child_index=None):
"""Get the base id of the given child"""
p, i = self._parent
pos = 0 if child_index is None else self._field_offsets[child_index]
if p:
pos += p._child_base_id(i)
return pos | [
"def",
"_child_base_id",
"(",
"self",
",",
"child_index",
"=",
"None",
")",
":",
"p",
",",
"i",
"=",
"self",
".",
"_parent",
"pos",
"=",
"0",
"if",
"child_index",
"is",
"None",
"else",
"self",
".",
"_field_offsets",
"[",
"child_index",
"]",
"if",
"p",
":",
"pos",
"+=",
"p",
".",
"_child_base_id",
"(",
"i",
")",
"return",
"pos"
] | https://github.com/pytorch/pytorch/blob/7176c92687d3cc847cc046bf002269c6949a21c2/caffe2/python/schema.py#L175-L181 |
|
windystrife/UnrealEngine_NVIDIAGameWorks | b50e6338a7c5b26374d66306ebc7807541ff815e | Engine/Extras/ThirdPartyNotUE/emsdk/Win64/python/2.7.5.3_64bit/Lib/shutil.py | python | copytree | (src, dst, symlinks=False, ignore=None) | Recursively copy a directory tree using copy2().
The destination directory must not already exist.
If exception(s) occur, an Error is raised with a list of reasons.
If the optional symlinks flag is true, symbolic links in the
source tree result in symbolic links in the destination tree; if
it is false, the contents of the files pointed to by symbolic
links are copied.
The optional ignore argument is a callable. If given, it
is called with the `src` parameter, which is the directory
being visited by copytree(), and `names` which is the list of
`src` contents, as returned by os.listdir():
callable(src, names) -> ignored_names
Since copytree() is called recursively, the callable will be
called once for each directory that is copied. It returns a
list of names relative to the `src` directory that should
not be copied.
XXX Consider this example code rather than the ultimate tool. | Recursively copy a directory tree using copy2(). | [
"Recursively",
"copy",
"a",
"directory",
"tree",
"using",
"copy2",
"()",
"."
] | def copytree(src, dst, symlinks=False, ignore=None):
"""Recursively copy a directory tree using copy2().
The destination directory must not already exist.
If exception(s) occur, an Error is raised with a list of reasons.
If the optional symlinks flag is true, symbolic links in the
source tree result in symbolic links in the destination tree; if
it is false, the contents of the files pointed to by symbolic
links are copied.
The optional ignore argument is a callable. If given, it
is called with the `src` parameter, which is the directory
being visited by copytree(), and `names` which is the list of
`src` contents, as returned by os.listdir():
callable(src, names) -> ignored_names
Since copytree() is called recursively, the callable will be
called once for each directory that is copied. It returns a
list of names relative to the `src` directory that should
not be copied.
XXX Consider this example code rather than the ultimate tool.
"""
names = os.listdir(src)
if ignore is not None:
ignored_names = ignore(src, names)
else:
ignored_names = set()
os.makedirs(dst)
errors = []
for name in names:
if name in ignored_names:
continue
srcname = os.path.join(src, name)
dstname = os.path.join(dst, name)
try:
if symlinks and os.path.islink(srcname):
linkto = os.readlink(srcname)
os.symlink(linkto, dstname)
elif os.path.isdir(srcname):
copytree(srcname, dstname, symlinks, ignore)
else:
# Will raise a SpecialFileError for unsupported file types
copy2(srcname, dstname)
# catch the Error from the recursive copytree so that we can
# continue with other files
except Error, err:
errors.extend(err.args[0])
except EnvironmentError, why:
errors.append((srcname, dstname, str(why)))
try:
copystat(src, dst)
except OSError, why:
if WindowsError is not None and isinstance(why, WindowsError):
# Copying file access times may fail on Windows
pass
else:
errors.append((src, dst, str(why)))
if errors:
raise Error, errors | [
"def",
"copytree",
"(",
"src",
",",
"dst",
",",
"symlinks",
"=",
"False",
",",
"ignore",
"=",
"None",
")",
":",
"names",
"=",
"os",
".",
"listdir",
"(",
"src",
")",
"if",
"ignore",
"is",
"not",
"None",
":",
"ignored_names",
"=",
"ignore",
"(",
"src",
",",
"names",
")",
"else",
":",
"ignored_names",
"=",
"set",
"(",
")",
"os",
".",
"makedirs",
"(",
"dst",
")",
"errors",
"=",
"[",
"]",
"for",
"name",
"in",
"names",
":",
"if",
"name",
"in",
"ignored_names",
":",
"continue",
"srcname",
"=",
"os",
".",
"path",
".",
"join",
"(",
"src",
",",
"name",
")",
"dstname",
"=",
"os",
".",
"path",
".",
"join",
"(",
"dst",
",",
"name",
")",
"try",
":",
"if",
"symlinks",
"and",
"os",
".",
"path",
".",
"islink",
"(",
"srcname",
")",
":",
"linkto",
"=",
"os",
".",
"readlink",
"(",
"srcname",
")",
"os",
".",
"symlink",
"(",
"linkto",
",",
"dstname",
")",
"elif",
"os",
".",
"path",
".",
"isdir",
"(",
"srcname",
")",
":",
"copytree",
"(",
"srcname",
",",
"dstname",
",",
"symlinks",
",",
"ignore",
")",
"else",
":",
"# Will raise a SpecialFileError for unsupported file types",
"copy2",
"(",
"srcname",
",",
"dstname",
")",
"# catch the Error from the recursive copytree so that we can",
"# continue with other files",
"except",
"Error",
",",
"err",
":",
"errors",
".",
"extend",
"(",
"err",
".",
"args",
"[",
"0",
"]",
")",
"except",
"EnvironmentError",
",",
"why",
":",
"errors",
".",
"append",
"(",
"(",
"srcname",
",",
"dstname",
",",
"str",
"(",
"why",
")",
")",
")",
"try",
":",
"copystat",
"(",
"src",
",",
"dst",
")",
"except",
"OSError",
",",
"why",
":",
"if",
"WindowsError",
"is",
"not",
"None",
"and",
"isinstance",
"(",
"why",
",",
"WindowsError",
")",
":",
"# Copying file access times may fail on Windows",
"pass",
"else",
":",
"errors",
".",
"append",
"(",
"(",
"src",
",",
"dst",
",",
"str",
"(",
"why",
")",
")",
")",
"if",
"errors",
":",
"raise",
"Error",
",",
"errors"
] | https://github.com/windystrife/UnrealEngine_NVIDIAGameWorks/blob/b50e6338a7c5b26374d66306ebc7807541ff815e/Engine/Extras/ThirdPartyNotUE/emsdk/Win64/python/2.7.5.3_64bit/Lib/shutil.py#L145-L208 |
||
aws/lumberyard | f85344403c1c2e77ec8c75deb2c116e97b713217 | dev/Tools/AWSPythonSDK/1.5.8/botocore/vendored/requests/cookies.py | python | remove_cookie_by_name | (cookiejar, name, domain=None, path=None) | Unsets a cookie by name, by default over all domains and paths.
Wraps CookieJar.clear(), is O(n). | Unsets a cookie by name, by default over all domains and paths. | [
"Unsets",
"a",
"cookie",
"by",
"name",
"by",
"default",
"over",
"all",
"domains",
"and",
"paths",
"."
] | def remove_cookie_by_name(cookiejar, name, domain=None, path=None):
"""Unsets a cookie by name, by default over all domains and paths.
Wraps CookieJar.clear(), is O(n).
"""
clearables = []
for cookie in cookiejar:
if cookie.name == name:
if domain is None or domain == cookie.domain:
if path is None or path == cookie.path:
clearables.append((cookie.domain, cookie.path, cookie.name))
for domain, path, name in clearables:
cookiejar.clear(domain, path, name) | [
"def",
"remove_cookie_by_name",
"(",
"cookiejar",
",",
"name",
",",
"domain",
"=",
"None",
",",
"path",
"=",
"None",
")",
":",
"clearables",
"=",
"[",
"]",
"for",
"cookie",
"in",
"cookiejar",
":",
"if",
"cookie",
".",
"name",
"==",
"name",
":",
"if",
"domain",
"is",
"None",
"or",
"domain",
"==",
"cookie",
".",
"domain",
":",
"if",
"path",
"is",
"None",
"or",
"path",
"==",
"cookie",
".",
"path",
":",
"clearables",
".",
"append",
"(",
"(",
"cookie",
".",
"domain",
",",
"cookie",
".",
"path",
",",
"cookie",
".",
"name",
")",
")",
"for",
"domain",
",",
"path",
",",
"name",
"in",
"clearables",
":",
"cookiejar",
".",
"clear",
"(",
"domain",
",",
"path",
",",
"name",
")"
] | https://github.com/aws/lumberyard/blob/f85344403c1c2e77ec8c75deb2c116e97b713217/dev/Tools/AWSPythonSDK/1.5.8/botocore/vendored/requests/cookies.py#L139-L152 |
||
windystrife/UnrealEngine_NVIDIAGameWorks | b50e6338a7c5b26374d66306ebc7807541ff815e | Engine/Extras/ThirdPartyNotUE/emsdk/Win64/python/2.7.5.3_64bit/Lib/site.py | python | check_enableusersite | () | return True | Check if user site directory is safe for inclusion
The function tests for the command line flag (including environment var),
process uid/gid equal to effective uid/gid.
None: Disabled for security reasons
False: Disabled by user (command line option)
True: Safe and enabled | Check if user site directory is safe for inclusion | [
"Check",
"if",
"user",
"site",
"directory",
"is",
"safe",
"for",
"inclusion"
] | def check_enableusersite():
"""Check if user site directory is safe for inclusion
The function tests for the command line flag (including environment var),
process uid/gid equal to effective uid/gid.
None: Disabled for security reasons
False: Disabled by user (command line option)
True: Safe and enabled
"""
if sys.flags.no_user_site:
return False
if hasattr(os, "getuid") and hasattr(os, "geteuid"):
# check process uid == effective uid
if os.geteuid() != os.getuid():
return None
if hasattr(os, "getgid") and hasattr(os, "getegid"):
# check process gid == effective gid
if os.getegid() != os.getgid():
return None
return True | [
"def",
"check_enableusersite",
"(",
")",
":",
"if",
"sys",
".",
"flags",
".",
"no_user_site",
":",
"return",
"False",
"if",
"hasattr",
"(",
"os",
",",
"\"getuid\"",
")",
"and",
"hasattr",
"(",
"os",
",",
"\"geteuid\"",
")",
":",
"# check process uid == effective uid",
"if",
"os",
".",
"geteuid",
"(",
")",
"!=",
"os",
".",
"getuid",
"(",
")",
":",
"return",
"None",
"if",
"hasattr",
"(",
"os",
",",
"\"getgid\"",
")",
"and",
"hasattr",
"(",
"os",
",",
"\"getegid\"",
")",
":",
"# check process gid == effective gid",
"if",
"os",
".",
"getegid",
"(",
")",
"!=",
"os",
".",
"getgid",
"(",
")",
":",
"return",
"None",
"return",
"True"
] | https://github.com/windystrife/UnrealEngine_NVIDIAGameWorks/blob/b50e6338a7c5b26374d66306ebc7807541ff815e/Engine/Extras/ThirdPartyNotUE/emsdk/Win64/python/2.7.5.3_64bit/Lib/site.py#L196-L218 |
|
vgough/encfs | c444f9b9176beea1ad41a7b2e29ca26e709b57f7 | vendor/github.com/google/benchmark/tools/gbench/util.py | python | check_input_file | (filename) | return ftype | Classify the file named by 'filename' and return the classification.
If the file is classified as 'IT_Invalid' print an error message and exit
the program. | Classify the file named by 'filename' and return the classification.
If the file is classified as 'IT_Invalid' print an error message and exit
the program. | [
"Classify",
"the",
"file",
"named",
"by",
"filename",
"and",
"return",
"the",
"classification",
".",
"If",
"the",
"file",
"is",
"classified",
"as",
"IT_Invalid",
"print",
"an",
"error",
"message",
"and",
"exit",
"the",
"program",
"."
] | def check_input_file(filename):
"""
Classify the file named by 'filename' and return the classification.
If the file is classified as 'IT_Invalid' print an error message and exit
the program.
"""
ftype, msg = classify_input_file(filename)
if ftype == IT_Invalid:
print("Invalid input file: %s" % msg)
sys.exit(1)
return ftype | [
"def",
"check_input_file",
"(",
"filename",
")",
":",
"ftype",
",",
"msg",
"=",
"classify_input_file",
"(",
"filename",
")",
"if",
"ftype",
"==",
"IT_Invalid",
":",
"print",
"(",
"\"Invalid input file: %s\"",
"%",
"msg",
")",
"sys",
".",
"exit",
"(",
"1",
")",
"return",
"ftype"
] | https://github.com/vgough/encfs/blob/c444f9b9176beea1ad41a7b2e29ca26e709b57f7/vendor/github.com/google/benchmark/tools/gbench/util.py#L75-L85 |
|
tensorflow/tensorflow | 419e3a6b650ea4bd1b0cba23c4348f8a69f3272e | tensorflow/python/keras/engine/data_adapter.py | python | train_validation_split | (arrays, validation_split) | return train_arrays, val_arrays | Split arrays into train and validation subsets in deterministic order.
The last part of data will become validation data.
Args:
arrays: Tensors to split. Allowed inputs are arbitrarily nested structures
of Tensors and NumPy arrays.
validation_split: Float between 0 and 1. The proportion of the dataset to
include in the validation split. The rest of the dataset will be included
in the training split.
Returns:
`(train_arrays, validation_arrays)` | Split arrays into train and validation subsets in deterministic order. | [
"Split",
"arrays",
"into",
"train",
"and",
"validation",
"subsets",
"in",
"deterministic",
"order",
"."
] | def train_validation_split(arrays, validation_split):
"""Split arrays into train and validation subsets in deterministic order.
The last part of data will become validation data.
Args:
arrays: Tensors to split. Allowed inputs are arbitrarily nested structures
of Tensors and NumPy arrays.
validation_split: Float between 0 and 1. The proportion of the dataset to
include in the validation split. The rest of the dataset will be included
in the training split.
Returns:
`(train_arrays, validation_arrays)`
"""
def _can_split(t):
tensor_types = _get_tensor_types()
return isinstance(t, tensor_types) or t is None
flat_arrays = nest.flatten(arrays)
unsplitable = [type(t) for t in flat_arrays if not _can_split(t)]
if unsplitable:
raise ValueError(
"`validation_split` is only supported for Tensors or NumPy "
"arrays, found following types in the input: {}".format(unsplitable))
if all(t is None for t in flat_arrays):
return arrays, arrays
first_non_none = None
for t in flat_arrays:
if t is not None:
first_non_none = t
break
# Assumes all arrays have the same batch shape or are `None`.
batch_dim = int(first_non_none.shape[0])
split_at = int(math.floor(batch_dim * (1. - validation_split)))
if split_at == 0 or split_at == batch_dim:
raise ValueError(
"Training data contains {batch_dim} samples, which is not sufficient "
"to split it into a validation and training set as specified by "
"`validation_split={validation_split}`. Either provide more data, or a "
"different value for the `validation_split` argument." .format(
batch_dim=batch_dim, validation_split=validation_split))
def _split(t, start, end):
if t is None:
return t
return t[start:end]
train_arrays = nest.map_structure(
functools.partial(_split, start=0, end=split_at), arrays)
val_arrays = nest.map_structure(
functools.partial(_split, start=split_at, end=batch_dim), arrays)
return train_arrays, val_arrays | [
"def",
"train_validation_split",
"(",
"arrays",
",",
"validation_split",
")",
":",
"def",
"_can_split",
"(",
"t",
")",
":",
"tensor_types",
"=",
"_get_tensor_types",
"(",
")",
"return",
"isinstance",
"(",
"t",
",",
"tensor_types",
")",
"or",
"t",
"is",
"None",
"flat_arrays",
"=",
"nest",
".",
"flatten",
"(",
"arrays",
")",
"unsplitable",
"=",
"[",
"type",
"(",
"t",
")",
"for",
"t",
"in",
"flat_arrays",
"if",
"not",
"_can_split",
"(",
"t",
")",
"]",
"if",
"unsplitable",
":",
"raise",
"ValueError",
"(",
"\"`validation_split` is only supported for Tensors or NumPy \"",
"\"arrays, found following types in the input: {}\"",
".",
"format",
"(",
"unsplitable",
")",
")",
"if",
"all",
"(",
"t",
"is",
"None",
"for",
"t",
"in",
"flat_arrays",
")",
":",
"return",
"arrays",
",",
"arrays",
"first_non_none",
"=",
"None",
"for",
"t",
"in",
"flat_arrays",
":",
"if",
"t",
"is",
"not",
"None",
":",
"first_non_none",
"=",
"t",
"break",
"# Assumes all arrays have the same batch shape or are `None`.",
"batch_dim",
"=",
"int",
"(",
"first_non_none",
".",
"shape",
"[",
"0",
"]",
")",
"split_at",
"=",
"int",
"(",
"math",
".",
"floor",
"(",
"batch_dim",
"*",
"(",
"1.",
"-",
"validation_split",
")",
")",
")",
"if",
"split_at",
"==",
"0",
"or",
"split_at",
"==",
"batch_dim",
":",
"raise",
"ValueError",
"(",
"\"Training data contains {batch_dim} samples, which is not sufficient \"",
"\"to split it into a validation and training set as specified by \"",
"\"`validation_split={validation_split}`. Either provide more data, or a \"",
"\"different value for the `validation_split` argument.\"",
".",
"format",
"(",
"batch_dim",
"=",
"batch_dim",
",",
"validation_split",
"=",
"validation_split",
")",
")",
"def",
"_split",
"(",
"t",
",",
"start",
",",
"end",
")",
":",
"if",
"t",
"is",
"None",
":",
"return",
"t",
"return",
"t",
"[",
"start",
":",
"end",
"]",
"train_arrays",
"=",
"nest",
".",
"map_structure",
"(",
"functools",
".",
"partial",
"(",
"_split",
",",
"start",
"=",
"0",
",",
"end",
"=",
"split_at",
")",
",",
"arrays",
")",
"val_arrays",
"=",
"nest",
".",
"map_structure",
"(",
"functools",
".",
"partial",
"(",
"_split",
",",
"start",
"=",
"split_at",
",",
"end",
"=",
"batch_dim",
")",
",",
"arrays",
")",
"return",
"train_arrays",
",",
"val_arrays"
] | https://github.com/tensorflow/tensorflow/blob/419e3a6b650ea4bd1b0cba23c4348f8a69f3272e/tensorflow/python/keras/engine/data_adapter.py#L1466-L1523 |
|
mindspore-ai/mindspore | fb8fd3338605bb34fa5cea054e535a8b1d753fab | mindspore/python/mindspore/ops/composite/multitype_ops/logic_not_impl.py | python | _logical_not_tensor | (x) | return F.logical_not(x.__bool__()) | Return logical not operation result of x.
Args:
x(Tensor): Tensor.
Returns:
Tensor, Return logical not operation result of x. | Return logical not operation result of x.
Args:
x(Tensor): Tensor.
Returns:
Tensor, Return logical not operation result of x. | [
"Return",
"logical",
"not",
"operation",
"result",
"of",
"x",
".",
"Args",
":",
"x",
"(",
"Tensor",
")",
":",
"Tensor",
".",
"Returns",
":",
"Tensor",
"Return",
"logical",
"not",
"operation",
"result",
"of",
"x",
"."
] | def _logical_not_tensor(x):
"""
Return logical not operation result of x.
Args:
x(Tensor): Tensor.
Returns:
Tensor, Return logical not operation result of x.
"""
if F.isconstant(x):
return F.bool_not(x.__bool__())
return F.logical_not(x.__bool__()) | [
"def",
"_logical_not_tensor",
"(",
"x",
")",
":",
"if",
"F",
".",
"isconstant",
"(",
"x",
")",
":",
"return",
"F",
".",
"bool_not",
"(",
"x",
".",
"__bool__",
"(",
")",
")",
"return",
"F",
".",
"logical_not",
"(",
"x",
".",
"__bool__",
"(",
")",
")"
] | https://github.com/mindspore-ai/mindspore/blob/fb8fd3338605bb34fa5cea054e535a8b1d753fab/mindspore/python/mindspore/ops/composite/multitype_ops/logic_not_impl.py#L40-L50 |
|
tensorflow/deepmath | b5b721f54de1d5d6a02d78f5da5995237f9995f9 | deepmath/holstep_baselines/conditioned_classification_models.py | python | cnn_2x_siamese | (voc_size, max_len, dropout=0.5) | return model | Two siamese branches, each embedding a statement.
Binary classifier on top.
Args:
voc_size: size of the vocabulary for the input statements.
max_len: maximum length for the input statements.
dropout: Fraction of units to drop.
Returns:
A Keras model instance. | Two siamese branches, each embedding a statement. | [
"Two",
"siamese",
"branches",
"each",
"embedding",
"a",
"statement",
"."
] | def cnn_2x_siamese(voc_size, max_len, dropout=0.5):
"""Two siamese branches, each embedding a statement.
Binary classifier on top.
Args:
voc_size: size of the vocabulary for the input statements.
max_len: maximum length for the input statements.
dropout: Fraction of units to drop.
Returns:
A Keras model instance.
"""
pivot_input = layers.Input(shape=(max_len,), dtype='int32')
statement_input = layers.Input(shape=(max_len,), dtype='int32')
x = layers.Embedding(
output_dim=256,
input_dim=voc_size,
input_length=max_len)(pivot_input)
x = layers.Convolution1D(256, 7, activation='relu')(x)
x = layers.MaxPooling1D(3)(x)
x = layers.Convolution1D(256, 7, activation='relu')(x)
embedded_pivot = layers.GlobalMaxPooling1D()(x)
encoder_model = Model(pivot_input, embedded_pivot)
embedded_statement = encoder_model(statement_input)
concat = layers.merge([embedded_pivot, embedded_statement], mode='concat')
x = layers.Dense(256, activation='relu')(concat)
x = layers.Dropout(dropout)(x)
prediction = layers.Dense(1, activation='sigmoid')(x)
model = Model([pivot_input, statement_input], prediction)
return model | [
"def",
"cnn_2x_siamese",
"(",
"voc_size",
",",
"max_len",
",",
"dropout",
"=",
"0.5",
")",
":",
"pivot_input",
"=",
"layers",
".",
"Input",
"(",
"shape",
"=",
"(",
"max_len",
",",
")",
",",
"dtype",
"=",
"'int32'",
")",
"statement_input",
"=",
"layers",
".",
"Input",
"(",
"shape",
"=",
"(",
"max_len",
",",
")",
",",
"dtype",
"=",
"'int32'",
")",
"x",
"=",
"layers",
".",
"Embedding",
"(",
"output_dim",
"=",
"256",
",",
"input_dim",
"=",
"voc_size",
",",
"input_length",
"=",
"max_len",
")",
"(",
"pivot_input",
")",
"x",
"=",
"layers",
".",
"Convolution1D",
"(",
"256",
",",
"7",
",",
"activation",
"=",
"'relu'",
")",
"(",
"x",
")",
"x",
"=",
"layers",
".",
"MaxPooling1D",
"(",
"3",
")",
"(",
"x",
")",
"x",
"=",
"layers",
".",
"Convolution1D",
"(",
"256",
",",
"7",
",",
"activation",
"=",
"'relu'",
")",
"(",
"x",
")",
"embedded_pivot",
"=",
"layers",
".",
"GlobalMaxPooling1D",
"(",
")",
"(",
"x",
")",
"encoder_model",
"=",
"Model",
"(",
"pivot_input",
",",
"embedded_pivot",
")",
"embedded_statement",
"=",
"encoder_model",
"(",
"statement_input",
")",
"concat",
"=",
"layers",
".",
"merge",
"(",
"[",
"embedded_pivot",
",",
"embedded_statement",
"]",
",",
"mode",
"=",
"'concat'",
")",
"x",
"=",
"layers",
".",
"Dense",
"(",
"256",
",",
"activation",
"=",
"'relu'",
")",
"(",
"concat",
")",
"x",
"=",
"layers",
".",
"Dropout",
"(",
"dropout",
")",
"(",
"x",
")",
"prediction",
"=",
"layers",
".",
"Dense",
"(",
"1",
",",
"activation",
"=",
"'sigmoid'",
")",
"(",
"x",
")",
"model",
"=",
"Model",
"(",
"[",
"pivot_input",
",",
"statement_input",
"]",
",",
"prediction",
")",
"return",
"model"
] | https://github.com/tensorflow/deepmath/blob/b5b721f54de1d5d6a02d78f5da5995237f9995f9/deepmath/holstep_baselines/conditioned_classification_models.py#L25-L58 |
|
apiaryio/snowcrash | b5b39faa85f88ee17459edf39fdc6fe4fc70d2e3 | tools/gyp/pylib/gyp/xcode_emulation.py | python | XcodeSettings.GetInstallName | (self) | return install_name | Return LD_DYLIB_INSTALL_NAME for this target. | Return LD_DYLIB_INSTALL_NAME for this target. | [
"Return",
"LD_DYLIB_INSTALL_NAME",
"for",
"this",
"target",
"."
] | def GetInstallName(self):
"""Return LD_DYLIB_INSTALL_NAME for this target."""
# Xcode sets this for shared_libraries, and for nonbundled loadable_modules.
if (self.spec['type'] != 'shared_library' and
(self.spec['type'] != 'loadable_module' or self._IsBundle())):
return None
default_install_name = \
'$(DYLIB_INSTALL_NAME_BASE:standardizepath)/$(EXECUTABLE_PATH)'
install_name = self.GetPerTargetSetting(
'LD_DYLIB_INSTALL_NAME', default=default_install_name)
# Hardcode support for the variables used in chromium for now, to
# unblock people using the make build.
if '$' in install_name:
assert install_name in ('$(DYLIB_INSTALL_NAME_BASE:standardizepath)/'
'$(WRAPPER_NAME)/$(PRODUCT_NAME)', default_install_name), (
'Variables in LD_DYLIB_INSTALL_NAME are not generally supported '
'yet in target \'%s\' (got \'%s\')' %
(self.spec['target_name'], install_name))
install_name = install_name.replace(
'$(DYLIB_INSTALL_NAME_BASE:standardizepath)',
self._StandardizePath(self.GetInstallNameBase()))
if self._IsBundle():
# These are only valid for bundles, hence the |if|.
install_name = install_name.replace(
'$(WRAPPER_NAME)', self.GetWrapperName())
install_name = install_name.replace(
'$(PRODUCT_NAME)', self.GetProductName())
else:
assert '$(WRAPPER_NAME)' not in install_name
assert '$(PRODUCT_NAME)' not in install_name
install_name = install_name.replace(
'$(EXECUTABLE_PATH)', self.GetExecutablePath())
return install_name | [
"def",
"GetInstallName",
"(",
"self",
")",
":",
"# Xcode sets this for shared_libraries, and for nonbundled loadable_modules.",
"if",
"(",
"self",
".",
"spec",
"[",
"'type'",
"]",
"!=",
"'shared_library'",
"and",
"(",
"self",
".",
"spec",
"[",
"'type'",
"]",
"!=",
"'loadable_module'",
"or",
"self",
".",
"_IsBundle",
"(",
")",
")",
")",
":",
"return",
"None",
"default_install_name",
"=",
"'$(DYLIB_INSTALL_NAME_BASE:standardizepath)/$(EXECUTABLE_PATH)'",
"install_name",
"=",
"self",
".",
"GetPerTargetSetting",
"(",
"'LD_DYLIB_INSTALL_NAME'",
",",
"default",
"=",
"default_install_name",
")",
"# Hardcode support for the variables used in chromium for now, to",
"# unblock people using the make build.",
"if",
"'$'",
"in",
"install_name",
":",
"assert",
"install_name",
"in",
"(",
"'$(DYLIB_INSTALL_NAME_BASE:standardizepath)/'",
"'$(WRAPPER_NAME)/$(PRODUCT_NAME)'",
",",
"default_install_name",
")",
",",
"(",
"'Variables in LD_DYLIB_INSTALL_NAME are not generally supported '",
"'yet in target \\'%s\\' (got \\'%s\\')'",
"%",
"(",
"self",
".",
"spec",
"[",
"'target_name'",
"]",
",",
"install_name",
")",
")",
"install_name",
"=",
"install_name",
".",
"replace",
"(",
"'$(DYLIB_INSTALL_NAME_BASE:standardizepath)'",
",",
"self",
".",
"_StandardizePath",
"(",
"self",
".",
"GetInstallNameBase",
"(",
")",
")",
")",
"if",
"self",
".",
"_IsBundle",
"(",
")",
":",
"# These are only valid for bundles, hence the |if|.",
"install_name",
"=",
"install_name",
".",
"replace",
"(",
"'$(WRAPPER_NAME)'",
",",
"self",
".",
"GetWrapperName",
"(",
")",
")",
"install_name",
"=",
"install_name",
".",
"replace",
"(",
"'$(PRODUCT_NAME)'",
",",
"self",
".",
"GetProductName",
"(",
")",
")",
"else",
":",
"assert",
"'$(WRAPPER_NAME)'",
"not",
"in",
"install_name",
"assert",
"'$(PRODUCT_NAME)'",
"not",
"in",
"install_name",
"install_name",
"=",
"install_name",
".",
"replace",
"(",
"'$(EXECUTABLE_PATH)'",
",",
"self",
".",
"GetExecutablePath",
"(",
")",
")",
"return",
"install_name"
] | https://github.com/apiaryio/snowcrash/blob/b5b39faa85f88ee17459edf39fdc6fe4fc70d2e3/tools/gyp/pylib/gyp/xcode_emulation.py#L713-L749 |
|
cms-sw/cmssw | fd9de012d503d3405420bcbeec0ec879baa57cf2 | RecoEgamma/PhotonIdentification/python/Identification/cutBasedPhotonID_tools.py | python | configureVIDCutBasedPhoID_V2 | ( wpEB, wpEE, isoInputs ) | return parameterSet | This function configures the full cms.PSet for a VID ID and returns it.
The inputs: first object is of the type WorkingPoint_V2, second object
is of the type WorkingPoint_V1, containing the cuts for the Barrel (EB)
and the other one for the Endcap (EE).
The third argument contains data for isolation calculation.
The V2 with respect to V1 has one change: the neutral hadron isolation
cut has an exponential pt scaling for the barrel. | This function configures the full cms.PSet for a VID ID and returns it.
The inputs: first object is of the type WorkingPoint_V2, second object
is of the type WorkingPoint_V1, containing the cuts for the Barrel (EB)
and the other one for the Endcap (EE).
The third argument contains data for isolation calculation. | [
"This",
"function",
"configures",
"the",
"full",
"cms",
".",
"PSet",
"for",
"a",
"VID",
"ID",
"and",
"returns",
"it",
".",
"The",
"inputs",
":",
"first",
"object",
"is",
"of",
"the",
"type",
"WorkingPoint_V2",
"second",
"object",
"is",
"of",
"the",
"type",
"WorkingPoint_V1",
"containing",
"the",
"cuts",
"for",
"the",
"Barrel",
"(",
"EB",
")",
"and",
"the",
"other",
"one",
"for",
"the",
"Endcap",
"(",
"EE",
")",
".",
"The",
"third",
"argument",
"contains",
"data",
"for",
"isolation",
"calculation",
"."
] | def configureVIDCutBasedPhoID_V2( wpEB, wpEE, isoInputs ):
"""
This function configures the full cms.PSet for a VID ID and returns it.
The inputs: first object is of the type WorkingPoint_V2, second object
is of the type WorkingPoint_V1, containing the cuts for the Barrel (EB)
and the other one for the Endcap (EE).
The third argument contains data for isolation calculation.
The V2 with respect to V1 has one change: the neutral hadron isolation
cut has an exponential pt scaling for the barrel.
"""
# print "VID: Configuring cut set %s" % wpEB.idName
parameterSet = cms.PSet(
#
idName = cms.string( wpEB.idName ), # same name stored in the _EB and _EE objects
cutFlow = cms.VPSet(
psetMinPtCut(), # pt cut
psetPhoSCEtaMultiRangeCut(), # eta cut
psetPhoHcalOverEcalBcCut(wpEB,wpEE), # H/E cut
psetPhoFull5x5SigmaIEtaIEtaValueMapCut(wpEB,wpEE), # full 5x5 sigmaIEtaIEta cut
psetChHadIsoWithEALinScalingCut(wpEB,wpEE,isoInputs), # charged hadron isolation cut
psetNeuHadIsoWithEAExpoScalingEBCut(wpEB,wpEE,isoInputs), # neutral hadron isolation cut
psetPhoIsoWithEALinScalingCut(wpEB,wpEE,isoInputs) # photon isolation cut
)
)
#
return parameterSet | [
"def",
"configureVIDCutBasedPhoID_V2",
"(",
"wpEB",
",",
"wpEE",
",",
"isoInputs",
")",
":",
"# print \"VID: Configuring cut set %s\" % wpEB.idName",
"parameterSet",
"=",
"cms",
".",
"PSet",
"(",
"#",
"idName",
"=",
"cms",
".",
"string",
"(",
"wpEB",
".",
"idName",
")",
",",
"# same name stored in the _EB and _EE objects",
"cutFlow",
"=",
"cms",
".",
"VPSet",
"(",
"psetMinPtCut",
"(",
")",
",",
"# pt cut",
"psetPhoSCEtaMultiRangeCut",
"(",
")",
",",
"# eta cut",
"psetPhoHcalOverEcalBcCut",
"(",
"wpEB",
",",
"wpEE",
")",
",",
"# H/E cut",
"psetPhoFull5x5SigmaIEtaIEtaValueMapCut",
"(",
"wpEB",
",",
"wpEE",
")",
",",
"# full 5x5 sigmaIEtaIEta cut",
"psetChHadIsoWithEALinScalingCut",
"(",
"wpEB",
",",
"wpEE",
",",
"isoInputs",
")",
",",
"# charged hadron isolation cut",
"psetNeuHadIsoWithEAExpoScalingEBCut",
"(",
"wpEB",
",",
"wpEE",
",",
"isoInputs",
")",
",",
"# neutral hadron isolation cut",
"psetPhoIsoWithEALinScalingCut",
"(",
"wpEB",
",",
"wpEE",
",",
"isoInputs",
")",
"# photon isolation cut",
")",
")",
"#",
"return",
"parameterSet"
] | https://github.com/cms-sw/cmssw/blob/fd9de012d503d3405420bcbeec0ec879baa57cf2/RecoEgamma/PhotonIdentification/python/Identification/cutBasedPhotonID_tools.py#L345-L371 |
|
SequoiaDB/SequoiaDB | 2894ed7e5bd6fe57330afc900cf76d0ff0df9f64 | driver/python/pysequoiadb/client.py | python | client.create_collection_space | (self, cs_name, options=0) | return cs | Create collection space with specified pagesize.
Parameters:
Name Type Info:
cs_name str The name of collection space to be created.
options int/dict The options to create collection space.
When type is int, means setting PageSize.
-PageSize int The page size of collection space. See Info
as below.
-Domain str The domain of collection space to belongs
-LobPageSize int The page size when stored lob, see Info as below
Return values:
collection space object created.
Exceptions:
pysequoiadb.error.SDBBaseError
Info:
valid page size value:
0 : 64k default page size
4096 : 4k
8192 : 8k
16384 : 16k
32768 : 32k
65536 : 64k
valid LOB page size value:
0 : 256k default Lob page size
4096 : 4k
8192 : 8k
16384 : 16k
32768 : 32k
65536 : 64k
131072 : 128k
262144 : 256k
524288 : 512k | Create collection space with specified pagesize. | [
"Create",
"collection",
"space",
"with",
"specified",
"pagesize",
"."
] | def create_collection_space(self, cs_name, options=0):
"""Create collection space with specified pagesize.
Parameters:
Name Type Info:
cs_name str The name of collection space to be created.
options int/dict The options to create collection space.
When type is int, means setting PageSize.
-PageSize int The page size of collection space. See Info
as below.
-Domain str The domain of collection space to belongs
-LobPageSize int The page size when stored lob, see Info as below
Return values:
collection space object created.
Exceptions:
pysequoiadb.error.SDBBaseError
Info:
valid page size value:
0 : 64k default page size
4096 : 4k
8192 : 8k
16384 : 16k
32768 : 32k
65536 : 64k
valid LOB page size value:
0 : 256k default Lob page size
4096 : 4k
8192 : 8k
16384 : 16k
32768 : 32k
65536 : 64k
131072 : 128k
262144 : 256k
524288 : 512k
"""
ops = {}
if not isinstance(cs_name, str_type):
raise SDBTypeError("name of collection space must be an instance of str_type")
if isinstance(options, int):
if options not in [0, 4096, 8192, 16384, 32768, 65536]:
raise SDBTypeError("page size is invalid")
ops["PageSize"] = options
elif isinstance(options, dict):
ops = options
else:
raise SDBTypeError("options must be an instance of int")
bson_options = bson.BSON.encode(ops)
cs = collectionspace()
try:
rc = sdb.sdb_create_collection_space(self._client, cs_name,
bson_options, cs._cs)
raise_if_error(rc, "Failed to create collection space: %s" % cs_name)
except SDBBaseError:
del cs
raise
return cs | [
"def",
"create_collection_space",
"(",
"self",
",",
"cs_name",
",",
"options",
"=",
"0",
")",
":",
"ops",
"=",
"{",
"}",
"if",
"not",
"isinstance",
"(",
"cs_name",
",",
"str_type",
")",
":",
"raise",
"SDBTypeError",
"(",
"\"name of collection space must be an instance of str_type\"",
")",
"if",
"isinstance",
"(",
"options",
",",
"int",
")",
":",
"if",
"options",
"not",
"in",
"[",
"0",
",",
"4096",
",",
"8192",
",",
"16384",
",",
"32768",
",",
"65536",
"]",
":",
"raise",
"SDBTypeError",
"(",
"\"page size is invalid\"",
")",
"ops",
"[",
"\"PageSize\"",
"]",
"=",
"options",
"elif",
"isinstance",
"(",
"options",
",",
"dict",
")",
":",
"ops",
"=",
"options",
"else",
":",
"raise",
"SDBTypeError",
"(",
"\"options must be an instance of int\"",
")",
"bson_options",
"=",
"bson",
".",
"BSON",
".",
"encode",
"(",
"ops",
")",
"cs",
"=",
"collectionspace",
"(",
")",
"try",
":",
"rc",
"=",
"sdb",
".",
"sdb_create_collection_space",
"(",
"self",
".",
"_client",
",",
"cs_name",
",",
"bson_options",
",",
"cs",
".",
"_cs",
")",
"raise_if_error",
"(",
"rc",
",",
"\"Failed to create collection space: %s\"",
"%",
"cs_name",
")",
"except",
"SDBBaseError",
":",
"del",
"cs",
"raise",
"return",
"cs"
] | https://github.com/SequoiaDB/SequoiaDB/blob/2894ed7e5bd6fe57330afc900cf76d0ff0df9f64/driver/python/pysequoiadb/client.py#L666-L724 |
|
netket/netket | 0d534e54ecbf25b677ea72af6b85947979420652 | netket/operator/_pauli_strings.py | python | PauliStrings.__init__ | (
self,
hilbert: AbstractHilbert,
operators: List[str] = None,
weights: List[Union[float, complex]] = None,
*,
cutoff: float = 1.0e-10,
dtype: DType = complex,
) | Constructs a new ``PauliStrings`` operator given a set of Pauli operators.
This class has two possible forms for initialization: ``PauliStrings(hilbert, operators, ...)`` or ``PauliStrings(operators, ...)``.
When no hilbert argument is passed, the hilbert defaults to Qubit, where the number of qubits is automatically deduced from the operators.
Args:
hilbert: A hilbert space, optional (is no ``AbstractHilbert`` is passed, default is Qubit)
operators (list(string)): A list of Pauli operators in string format, e.g. ['IXX', 'XZI'].
weights: A list of amplitudes of the corresponding Pauli operator.
cutoff (float): a cutoff to remove small matrix elements
Examples:
Constructs a new ``PauliStrings`` operator X_0*X_1 + 3.*Z_0*Z_1 with both construction schemes.
>>> import netket as nk
>>> operators, weights = ['XX','ZZ'], [1,3]
>>> op = nk.operator.PauliStrings(operators, weights)
>>> op.hilbert
Qubit(N=2)
>>> op.hilbert.size
2
>>> hilbert = nk.hilbert.Spin(1/2, 2)
>>> op = nk.operator.PauliStrings(hilbert, operators, weights)
>>> op.hilbert
Spin(s=1/2, N=2) | Constructs a new ``PauliStrings`` operator given a set of Pauli operators.
This class has two possible forms for initialization: ``PauliStrings(hilbert, operators, ...)`` or ``PauliStrings(operators, ...)``.
When no hilbert argument is passed, the hilbert defaults to Qubit, where the number of qubits is automatically deduced from the operators. | [
"Constructs",
"a",
"new",
"PauliStrings",
"operator",
"given",
"a",
"set",
"of",
"Pauli",
"operators",
".",
"This",
"class",
"has",
"two",
"possible",
"forms",
"for",
"initialization",
":",
"PauliStrings",
"(",
"hilbert",
"operators",
"...",
")",
"or",
"PauliStrings",
"(",
"operators",
"...",
")",
".",
"When",
"no",
"hilbert",
"argument",
"is",
"passed",
"the",
"hilbert",
"defaults",
"to",
"Qubit",
"where",
"the",
"number",
"of",
"qubits",
"is",
"automatically",
"deduced",
"from",
"the",
"operators",
"."
] | def __init__(
self,
hilbert: AbstractHilbert,
operators: List[str] = None,
weights: List[Union[float, complex]] = None,
*,
cutoff: float = 1.0e-10,
dtype: DType = complex,
):
"""
Constructs a new ``PauliStrings`` operator given a set of Pauli operators.
This class has two possible forms for initialization: ``PauliStrings(hilbert, operators, ...)`` or ``PauliStrings(operators, ...)``.
When no hilbert argument is passed, the hilbert defaults to Qubit, where the number of qubits is automatically deduced from the operators.
Args:
hilbert: A hilbert space, optional (is no ``AbstractHilbert`` is passed, default is Qubit)
operators (list(string)): A list of Pauli operators in string format, e.g. ['IXX', 'XZI'].
weights: A list of amplitudes of the corresponding Pauli operator.
cutoff (float): a cutoff to remove small matrix elements
Examples:
Constructs a new ``PauliStrings`` operator X_0*X_1 + 3.*Z_0*Z_1 with both construction schemes.
>>> import netket as nk
>>> operators, weights = ['XX','ZZ'], [1,3]
>>> op = nk.operator.PauliStrings(operators, weights)
>>> op.hilbert
Qubit(N=2)
>>> op.hilbert.size
2
>>> hilbert = nk.hilbert.Spin(1/2, 2)
>>> op = nk.operator.PauliStrings(hilbert, operators, weights)
>>> op.hilbert
Spin(s=1/2, N=2)
"""
if hilbert is None:
raise ValueError("None-valued hilbert passed.")
if not isinstance(hilbert, AbstractHilbert):
# if first argument is not Hilbert, then shift all arguments by one
hilbert, operators, weights = None, hilbert, operators
if operators is None:
raise ValueError(
"None valued operators passed. (Might arised when passing None valued hilbert explicitly)"
)
if len(operators) == 0:
raise ValueError("No Pauli operators passed.")
if weights is None:
# default weight is 1
weights = [True for i in operators]
if len(weights) != len(operators):
raise ValueError("weights should have the same length as operators.")
if not np.isscalar(cutoff) or cutoff < 0:
raise ValueError("invalid cutoff in PauliStrings.")
_hilb_size = len(operators[0])
consistent = all(len(op) == _hilb_size for op in operators)
if not consistent:
raise ValueError("Pauli strings have inhomogeneous lengths.")
consistent = all(bool(valid_pauli_regex.search(op)) for op in operators)
if not consistent:
raise ValueError(
"""Operators in string must be one of
the Pauli operators X,Y,Z, or the identity I"""
)
if hilbert is None:
hilbert = Qubit(_hilb_size)
super().__init__(hilbert)
if self.hilbert.local_size != 2:
raise ValueError(
"PauliStrings only work for local hilbert size 2 where PauliMatrices are defined"
)
self._cutoff = cutoff
b_weights = np.asarray(weights, dtype=dtype)
self._is_hermitian = np.allclose(b_weights.imag, 0.0)
self._orig_operators = np.array(operators, dtype=str)
self._orig_weights = np.array(weights, dtype=dtype)
self._dtype = dtype
self._initialized = False | [
"def",
"__init__",
"(",
"self",
",",
"hilbert",
":",
"AbstractHilbert",
",",
"operators",
":",
"List",
"[",
"str",
"]",
"=",
"None",
",",
"weights",
":",
"List",
"[",
"Union",
"[",
"float",
",",
"complex",
"]",
"]",
"=",
"None",
",",
"*",
",",
"cutoff",
":",
"float",
"=",
"1.0e-10",
",",
"dtype",
":",
"DType",
"=",
"complex",
",",
")",
":",
"if",
"hilbert",
"is",
"None",
":",
"raise",
"ValueError",
"(",
"\"None-valued hilbert passed.\"",
")",
"if",
"not",
"isinstance",
"(",
"hilbert",
",",
"AbstractHilbert",
")",
":",
"# if first argument is not Hilbert, then shift all arguments by one",
"hilbert",
",",
"operators",
",",
"weights",
"=",
"None",
",",
"hilbert",
",",
"operators",
"if",
"operators",
"is",
"None",
":",
"raise",
"ValueError",
"(",
"\"None valued operators passed. (Might arised when passing None valued hilbert explicitly)\"",
")",
"if",
"len",
"(",
"operators",
")",
"==",
"0",
":",
"raise",
"ValueError",
"(",
"\"No Pauli operators passed.\"",
")",
"if",
"weights",
"is",
"None",
":",
"# default weight is 1",
"weights",
"=",
"[",
"True",
"for",
"i",
"in",
"operators",
"]",
"if",
"len",
"(",
"weights",
")",
"!=",
"len",
"(",
"operators",
")",
":",
"raise",
"ValueError",
"(",
"\"weights should have the same length as operators.\"",
")",
"if",
"not",
"np",
".",
"isscalar",
"(",
"cutoff",
")",
"or",
"cutoff",
"<",
"0",
":",
"raise",
"ValueError",
"(",
"\"invalid cutoff in PauliStrings.\"",
")",
"_hilb_size",
"=",
"len",
"(",
"operators",
"[",
"0",
"]",
")",
"consistent",
"=",
"all",
"(",
"len",
"(",
"op",
")",
"==",
"_hilb_size",
"for",
"op",
"in",
"operators",
")",
"if",
"not",
"consistent",
":",
"raise",
"ValueError",
"(",
"\"Pauli strings have inhomogeneous lengths.\"",
")",
"consistent",
"=",
"all",
"(",
"bool",
"(",
"valid_pauli_regex",
".",
"search",
"(",
"op",
")",
")",
"for",
"op",
"in",
"operators",
")",
"if",
"not",
"consistent",
":",
"raise",
"ValueError",
"(",
"\"\"\"Operators in string must be one of\n the Pauli operators X,Y,Z, or the identity I\"\"\"",
")",
"if",
"hilbert",
"is",
"None",
":",
"hilbert",
"=",
"Qubit",
"(",
"_hilb_size",
")",
"super",
"(",
")",
".",
"__init__",
"(",
"hilbert",
")",
"if",
"self",
".",
"hilbert",
".",
"local_size",
"!=",
"2",
":",
"raise",
"ValueError",
"(",
"\"PauliStrings only work for local hilbert size 2 where PauliMatrices are defined\"",
")",
"self",
".",
"_cutoff",
"=",
"cutoff",
"b_weights",
"=",
"np",
".",
"asarray",
"(",
"weights",
",",
"dtype",
"=",
"dtype",
")",
"self",
".",
"_is_hermitian",
"=",
"np",
".",
"allclose",
"(",
"b_weights",
".",
"imag",
",",
"0.0",
")",
"self",
".",
"_orig_operators",
"=",
"np",
".",
"array",
"(",
"operators",
",",
"dtype",
"=",
"str",
")",
"self",
".",
"_orig_weights",
"=",
"np",
".",
"array",
"(",
"weights",
",",
"dtype",
"=",
"dtype",
")",
"self",
".",
"_dtype",
"=",
"dtype",
"self",
".",
"_initialized",
"=",
"False"
] | https://github.com/netket/netket/blob/0d534e54ecbf25b677ea72af6b85947979420652/netket/operator/_pauli_strings.py#L33-L122 |
||
Xilinx/Vitis-AI | fc74d404563d9951b57245443c73bef389f3657f | tools/Vitis-AI-Quantizer/vai_q_tensorflow1.x/tensorflow/python/training/saver.py | python | BaseSaverBuilder.sharded_filename | (self, filename_tensor, shard, num_shards) | return gen_io_ops.sharded_filename(filename_tensor, shard, num_shards) | Append sharding information to a filename.
Args:
filename_tensor: A string tensor.
shard: Integer. The shard for the filename.
num_shards: An int Tensor for the number of shards.
Returns:
A string tensor. | Append sharding information to a filename. | [
"Append",
"sharding",
"information",
"to",
"a",
"filename",
"."
] | def sharded_filename(self, filename_tensor, shard, num_shards):
"""Append sharding information to a filename.
Args:
filename_tensor: A string tensor.
shard: Integer. The shard for the filename.
num_shards: An int Tensor for the number of shards.
Returns:
A string tensor.
"""
return gen_io_ops.sharded_filename(filename_tensor, shard, num_shards) | [
"def",
"sharded_filename",
"(",
"self",
",",
"filename_tensor",
",",
"shard",
",",
"num_shards",
")",
":",
"return",
"gen_io_ops",
".",
"sharded_filename",
"(",
"filename_tensor",
",",
"shard",
",",
"num_shards",
")"
] | https://github.com/Xilinx/Vitis-AI/blob/fc74d404563d9951b57245443c73bef389f3657f/tools/Vitis-AI-Quantizer/vai_q_tensorflow1.x/tensorflow/python/training/saver.py#L183-L194 |
|
aws/lumberyard | f85344403c1c2e77ec8c75deb2c116e97b713217 | dev/Tools/Python/3.7.10/mac/Python.framework/Versions/3.7/lib/python3.7/threading.py | python | _RLock.acquire | (self, blocking=True, timeout=-1) | return rc | Acquire a lock, blocking or non-blocking.
When invoked without arguments: if this thread already owns the lock,
increment the recursion level by one, and return immediately. Otherwise,
if another thread owns the lock, block until the lock is unlocked. Once
the lock is unlocked (not owned by any thread), then grab ownership, set
the recursion level to one, and return. If more than one thread is
blocked waiting until the lock is unlocked, only one at a time will be
able to grab ownership of the lock. There is no return value in this
case.
When invoked with the blocking argument set to true, do the same thing
as when called without arguments, and return true.
When invoked with the blocking argument set to false, do not block. If a
call without an argument would block, return false immediately;
otherwise, do the same thing as when called without arguments, and
return true.
When invoked with the floating-point timeout argument set to a positive
value, block for at most the number of seconds specified by timeout
and as long as the lock cannot be acquired. Return true if the lock has
been acquired, false if the timeout has elapsed. | Acquire a lock, blocking or non-blocking. | [
"Acquire",
"a",
"lock",
"blocking",
"or",
"non",
"-",
"blocking",
"."
] | def acquire(self, blocking=True, timeout=-1):
"""Acquire a lock, blocking or non-blocking.
When invoked without arguments: if this thread already owns the lock,
increment the recursion level by one, and return immediately. Otherwise,
if another thread owns the lock, block until the lock is unlocked. Once
the lock is unlocked (not owned by any thread), then grab ownership, set
the recursion level to one, and return. If more than one thread is
blocked waiting until the lock is unlocked, only one at a time will be
able to grab ownership of the lock. There is no return value in this
case.
When invoked with the blocking argument set to true, do the same thing
as when called without arguments, and return true.
When invoked with the blocking argument set to false, do not block. If a
call without an argument would block, return false immediately;
otherwise, do the same thing as when called without arguments, and
return true.
When invoked with the floating-point timeout argument set to a positive
value, block for at most the number of seconds specified by timeout
and as long as the lock cannot be acquired. Return true if the lock has
been acquired, false if the timeout has elapsed.
"""
me = get_ident()
if self._owner == me:
self._count += 1
return 1
rc = self._block.acquire(blocking, timeout)
if rc:
self._owner = me
self._count = 1
return rc | [
"def",
"acquire",
"(",
"self",
",",
"blocking",
"=",
"True",
",",
"timeout",
"=",
"-",
"1",
")",
":",
"me",
"=",
"get_ident",
"(",
")",
"if",
"self",
".",
"_owner",
"==",
"me",
":",
"self",
".",
"_count",
"+=",
"1",
"return",
"1",
"rc",
"=",
"self",
".",
"_block",
".",
"acquire",
"(",
"blocking",
",",
"timeout",
")",
"if",
"rc",
":",
"self",
".",
"_owner",
"=",
"me",
"self",
".",
"_count",
"=",
"1",
"return",
"rc"
] | https://github.com/aws/lumberyard/blob/f85344403c1c2e77ec8c75deb2c116e97b713217/dev/Tools/Python/3.7.10/mac/Python.framework/Versions/3.7/lib/python3.7/threading.py#L118-L152 |
|
wxWidgets/wxPython-Classic | 19571e1ae65f1ac445f5491474121998c97a1bf0 | src/osx_cocoa/dataview.py | python | DataViewTreeCtrl.IsContainer | (*args, **kwargs) | return _dataview.DataViewTreeCtrl_IsContainer(*args, **kwargs) | IsContainer(self, DataViewItem item) -> bool | IsContainer(self, DataViewItem item) -> bool | [
"IsContainer",
"(",
"self",
"DataViewItem",
"item",
")",
"-",
">",
"bool"
] | def IsContainer(*args, **kwargs):
"""IsContainer(self, DataViewItem item) -> bool"""
return _dataview.DataViewTreeCtrl_IsContainer(*args, **kwargs) | [
"def",
"IsContainer",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"return",
"_dataview",
".",
"DataViewTreeCtrl_IsContainer",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")"
] | https://github.com/wxWidgets/wxPython-Classic/blob/19571e1ae65f1ac445f5491474121998c97a1bf0/src/osx_cocoa/dataview.py#L2485-L2487 |
|
neoml-lib/neoml | a0d370fba05269a1b2258cef126f77bbd2054a3e | NeoML/Python/neoml/Dnn/Accuracy.py | python | ConfusionMatrix.reset | (self, reset) | Specifies if the calculations should be reset on each run. | Specifies if the calculations should be reset on each run. | [
"Specifies",
"if",
"the",
"calculations",
"should",
"be",
"reset",
"on",
"each",
"run",
"."
] | def reset(self, reset):
"""Specifies if the calculations should be reset on each run.
"""
self._internal.set_reset(bool(reset)) | [
"def",
"reset",
"(",
"self",
",",
"reset",
")",
":",
"self",
".",
"_internal",
".",
"set_reset",
"(",
"bool",
"(",
"reset",
")",
")"
] | https://github.com/neoml-lib/neoml/blob/a0d370fba05269a1b2258cef126f77bbd2054a3e/NeoML/Python/neoml/Dnn/Accuracy.py#L140-L143 |
||
catboost/catboost | 167f64f237114a4d10b2b4ee42adb4569137debe | contrib/python/scipy/py2/scipy/stats/_binned_statistic.py | python | binned_statistic_dd | (sample, values, statistic='mean',
bins=10, range=None, expand_binnumbers=False) | return BinnedStatisticddResult(result, edges, binnumbers) | Compute a multidimensional binned statistic for a set of data.
This is a generalization of a histogramdd function. A histogram divides
the space into bins, and returns the count of the number of points in
each bin. This function allows the computation of the sum, mean, median,
or other statistic of the values within each bin.
Parameters
----------
sample : array_like
Data to histogram passed as a sequence of D arrays of length N, or
as an (N,D) array.
values : (N,) array_like or list of (N,) array_like
The data on which the statistic will be computed. This must be
the same shape as `sample`, or a list of sequences - each with the
same shape as `sample`. If `values` is such a list, the statistic
will be computed on each independently.
statistic : string or callable, optional
The statistic to compute (default is 'mean').
The following statistics are available:
* 'mean' : compute the mean of values for points within each bin.
Empty bins will be represented by NaN.
* 'median' : compute the median of values for points within each
bin. Empty bins will be represented by NaN.
* 'count' : compute the count of points within each bin. This is
identical to an unweighted histogram. `values` array is not
referenced.
* 'sum' : compute the sum of values for points within each bin.
This is identical to a weighted histogram.
* 'min' : compute the minimum of values for points within each bin.
Empty bins will be represented by NaN.
* 'max' : compute the maximum of values for point within each bin.
Empty bins will be represented by NaN.
* function : a user-defined function which takes a 1D array of
values, and outputs a single numerical statistic. This function
will be called on the values in each bin. Empty bins will be
represented by function([]), or NaN if this returns an error.
bins : sequence or int, optional
The bin specification must be in one of the following forms:
* A sequence of arrays describing the bin edges along each dimension.
* The number of bins for each dimension (nx, ny, ... = bins).
* The number of bins for all dimensions (nx = ny = ... = bins).
range : sequence, optional
A sequence of lower and upper bin edges to be used if the edges are
not given explicitly in `bins`. Defaults to the minimum and maximum
values along each dimension.
expand_binnumbers : bool, optional
'False' (default): the returned `binnumber` is a shape (N,) array of
linearized bin indices.
'True': the returned `binnumber` is 'unraveled' into a shape (D,N)
ndarray, where each row gives the bin numbers in the corresponding
dimension.
See the `binnumber` returned value, and the `Examples` section of
`binned_statistic_2d`.
.. versionadded:: 0.17.0
Returns
-------
statistic : ndarray, shape(nx1, nx2, nx3,...)
The values of the selected statistic in each two-dimensional bin.
bin_edges : list of ndarrays
A list of D arrays describing the (nxi + 1) bin edges for each
dimension.
binnumber : (N,) array of ints or (D,N) ndarray of ints
This assigns to each element of `sample` an integer that represents the
bin in which this observation falls. The representation depends on the
`expand_binnumbers` argument. See `Notes` for details.
See Also
--------
numpy.digitize, numpy.histogramdd, binned_statistic, binned_statistic_2d
Notes
-----
Binedges:
All but the last (righthand-most) bin is half-open in each dimension. In
other words, if `bins` is ``[1, 2, 3, 4]``, then the first bin is
``[1, 2)`` (including 1, but excluding 2) and the second ``[2, 3)``. The
last bin, however, is ``[3, 4]``, which *includes* 4.
`binnumber`:
This returned argument assigns to each element of `sample` an integer that
represents the bin in which it belongs. The representation depends on the
`expand_binnumbers` argument. If 'False' (default): The returned
`binnumber` is a shape (N,) array of linearized indices mapping each
element of `sample` to its corresponding bin (using row-major ordering).
If 'True': The returned `binnumber` is a shape (D,N) ndarray where
each row indicates bin placements for each dimension respectively. In each
dimension, a binnumber of `i` means the corresponding value is between
(bin_edges[D][i-1], bin_edges[D][i]), for each dimension 'D'.
.. versionadded:: 0.11.0 | Compute a multidimensional binned statistic for a set of data. | [
"Compute",
"a",
"multidimensional",
"binned",
"statistic",
"for",
"a",
"set",
"of",
"data",
"."
] | def binned_statistic_dd(sample, values, statistic='mean',
bins=10, range=None, expand_binnumbers=False):
"""
Compute a multidimensional binned statistic for a set of data.
This is a generalization of a histogramdd function. A histogram divides
the space into bins, and returns the count of the number of points in
each bin. This function allows the computation of the sum, mean, median,
or other statistic of the values within each bin.
Parameters
----------
sample : array_like
Data to histogram passed as a sequence of D arrays of length N, or
as an (N,D) array.
values : (N,) array_like or list of (N,) array_like
The data on which the statistic will be computed. This must be
the same shape as `sample`, or a list of sequences - each with the
same shape as `sample`. If `values` is such a list, the statistic
will be computed on each independently.
statistic : string or callable, optional
The statistic to compute (default is 'mean').
The following statistics are available:
* 'mean' : compute the mean of values for points within each bin.
Empty bins will be represented by NaN.
* 'median' : compute the median of values for points within each
bin. Empty bins will be represented by NaN.
* 'count' : compute the count of points within each bin. This is
identical to an unweighted histogram. `values` array is not
referenced.
* 'sum' : compute the sum of values for points within each bin.
This is identical to a weighted histogram.
* 'min' : compute the minimum of values for points within each bin.
Empty bins will be represented by NaN.
* 'max' : compute the maximum of values for point within each bin.
Empty bins will be represented by NaN.
* function : a user-defined function which takes a 1D array of
values, and outputs a single numerical statistic. This function
will be called on the values in each bin. Empty bins will be
represented by function([]), or NaN if this returns an error.
bins : sequence or int, optional
The bin specification must be in one of the following forms:
* A sequence of arrays describing the bin edges along each dimension.
* The number of bins for each dimension (nx, ny, ... = bins).
* The number of bins for all dimensions (nx = ny = ... = bins).
range : sequence, optional
A sequence of lower and upper bin edges to be used if the edges are
not given explicitly in `bins`. Defaults to the minimum and maximum
values along each dimension.
expand_binnumbers : bool, optional
'False' (default): the returned `binnumber` is a shape (N,) array of
linearized bin indices.
'True': the returned `binnumber` is 'unraveled' into a shape (D,N)
ndarray, where each row gives the bin numbers in the corresponding
dimension.
See the `binnumber` returned value, and the `Examples` section of
`binned_statistic_2d`.
.. versionadded:: 0.17.0
Returns
-------
statistic : ndarray, shape(nx1, nx2, nx3,...)
The values of the selected statistic in each two-dimensional bin.
bin_edges : list of ndarrays
A list of D arrays describing the (nxi + 1) bin edges for each
dimension.
binnumber : (N,) array of ints or (D,N) ndarray of ints
This assigns to each element of `sample` an integer that represents the
bin in which this observation falls. The representation depends on the
`expand_binnumbers` argument. See `Notes` for details.
See Also
--------
numpy.digitize, numpy.histogramdd, binned_statistic, binned_statistic_2d
Notes
-----
Binedges:
All but the last (righthand-most) bin is half-open in each dimension. In
other words, if `bins` is ``[1, 2, 3, 4]``, then the first bin is
``[1, 2)`` (including 1, but excluding 2) and the second ``[2, 3)``. The
last bin, however, is ``[3, 4]``, which *includes* 4.
`binnumber`:
This returned argument assigns to each element of `sample` an integer that
represents the bin in which it belongs. The representation depends on the
`expand_binnumbers` argument. If 'False' (default): The returned
`binnumber` is a shape (N,) array of linearized indices mapping each
element of `sample` to its corresponding bin (using row-major ordering).
If 'True': The returned `binnumber` is a shape (D,N) ndarray where
each row indicates bin placements for each dimension respectively. In each
dimension, a binnumber of `i` means the corresponding value is between
(bin_edges[D][i-1], bin_edges[D][i]), for each dimension 'D'.
.. versionadded:: 0.11.0
"""
known_stats = ['mean', 'median', 'count', 'sum', 'std','min','max']
if not callable(statistic) and statistic not in known_stats:
raise ValueError('invalid statistic %r' % (statistic,))
# `Ndim` is the number of dimensions (e.g. `2` for `binned_statistic_2d`)
# `Dlen` is the length of elements along each dimension.
# This code is based on np.histogramdd
try:
# `sample` is an ND-array.
Dlen, Ndim = sample.shape
except (AttributeError, ValueError):
# `sample` is a sequence of 1D arrays.
sample = np.atleast_2d(sample).T
Dlen, Ndim = sample.shape
# Store initial shape of `values` to preserve it in the output
values = np.asarray(values)
input_shape = list(values.shape)
# Make sure that `values` is 2D to iterate over rows
values = np.atleast_2d(values)
Vdim, Vlen = values.shape
# Make sure `values` match `sample`
if(statistic != 'count' and Vlen != Dlen):
raise AttributeError('The number of `values` elements must match the '
'length of each `sample` dimension.')
nbin = np.empty(Ndim, int) # Number of bins in each dimension
edges = Ndim * [None] # Bin edges for each dim (will be 2D array)
dedges = Ndim * [None] # Spacing between edges (will be 2D array)
try:
M = len(bins)
if M != Ndim:
raise AttributeError('The dimension of bins must be equal '
'to the dimension of the sample x.')
except TypeError:
bins = Ndim * [bins]
# Select range for each dimension
# Used only if number of bins is given.
if range is None:
smin = np.atleast_1d(np.array(sample.min(axis=0), float))
smax = np.atleast_1d(np.array(sample.max(axis=0), float))
else:
smin = np.zeros(Ndim)
smax = np.zeros(Ndim)
for i in xrange(Ndim):
smin[i], smax[i] = range[i]
# Make sure the bins have a finite width.
for i in xrange(len(smin)):
if smin[i] == smax[i]:
smin[i] = smin[i] - .5
smax[i] = smax[i] + .5
# Create edge arrays
for i in xrange(Ndim):
if np.isscalar(bins[i]):
nbin[i] = bins[i] + 2 # +2 for outlier bins
edges[i] = np.linspace(smin[i], smax[i], nbin[i] - 1)
else:
edges[i] = np.asarray(bins[i], float)
nbin[i] = len(edges[i]) + 1 # +1 for outlier bins
dedges[i] = np.diff(edges[i])
nbin = np.asarray(nbin)
# Compute the bin number each sample falls into, in each dimension
sampBin = [
np.digitize(sample[:, i], edges[i])
for i in xrange(Ndim)
]
# Using `digitize`, values that fall on an edge are put in the right bin.
# For the rightmost bin, we want values equal to the right
# edge to be counted in the last bin, and not as an outlier.
for i in xrange(Ndim):
# Find the rounding precision
decimal = int(-np.log10(dedges[i].min())) + 6
# Find which points are on the rightmost edge.
on_edge = np.where(np.around(sample[:, i], decimal) ==
np.around(edges[i][-1], decimal))[0]
# Shift these points one bin to the left.
sampBin[i][on_edge] -= 1
# Compute the sample indices in the flattened statistic matrix.
binnumbers = np.ravel_multi_index(sampBin, nbin)
result = np.empty([Vdim, nbin.prod()], float)
if statistic == 'mean':
result.fill(np.nan)
flatcount = np.bincount(binnumbers, None)
a = flatcount.nonzero()
for vv in xrange(Vdim):
flatsum = np.bincount(binnumbers, values[vv])
result[vv, a] = flatsum[a] / flatcount[a]
elif statistic == 'std':
result.fill(0)
flatcount = np.bincount(binnumbers, None)
a = flatcount.nonzero()
for vv in xrange(Vdim):
flatsum = np.bincount(binnumbers, values[vv])
flatsum2 = np.bincount(binnumbers, values[vv] ** 2)
result[vv, a] = np.sqrt(flatsum2[a] / flatcount[a] -
(flatsum[a] / flatcount[a]) ** 2)
elif statistic == 'count':
result.fill(0)
flatcount = np.bincount(binnumbers, None)
a = np.arange(len(flatcount))
result[:, a] = flatcount[np.newaxis, :]
elif statistic == 'sum':
result.fill(0)
for vv in xrange(Vdim):
flatsum = np.bincount(binnumbers, values[vv])
a = np.arange(len(flatsum))
result[vv, a] = flatsum
elif statistic == 'median':
result.fill(np.nan)
for i in np.unique(binnumbers):
for vv in xrange(Vdim):
result[vv, i] = np.median(values[vv, binnumbers == i])
elif statistic == 'min':
result.fill(np.nan)
for i in np.unique(binnumbers):
for vv in xrange(Vdim):
result[vv, i] = np.min(values[vv, binnumbers == i])
elif statistic == 'max':
result.fill(np.nan)
for i in np.unique(binnumbers):
for vv in xrange(Vdim):
result[vv, i] = np.max(values[vv, binnumbers == i])
elif callable(statistic):
with np.errstate(invalid='ignore'), suppress_warnings() as sup:
sup.filter(RuntimeWarning)
try:
null = statistic([])
except Exception:
null = np.nan
result.fill(null)
for i in np.unique(binnumbers):
for vv in xrange(Vdim):
result[vv, i] = statistic(values[vv, binnumbers == i])
# Shape into a proper matrix
result = result.reshape(np.append(Vdim, nbin))
# Remove outliers (indices 0 and -1 for each bin-dimension).
core = tuple([slice(None)] + Ndim * [slice(1, -1)])
result = result[core]
# Unravel binnumbers into an ndarray, each row the bins for each dimension
if(expand_binnumbers and Ndim > 1):
binnumbers = np.asarray(np.unravel_index(binnumbers, nbin))
if np.any(result.shape[1:] != nbin - 2):
raise RuntimeError('Internal Shape Error')
# Reshape to have output (`result`) match input (`values`) shape
result = result.reshape(input_shape[:-1] + list(nbin-2))
return BinnedStatisticddResult(result, edges, binnumbers) | [
"def",
"binned_statistic_dd",
"(",
"sample",
",",
"values",
",",
"statistic",
"=",
"'mean'",
",",
"bins",
"=",
"10",
",",
"range",
"=",
"None",
",",
"expand_binnumbers",
"=",
"False",
")",
":",
"known_stats",
"=",
"[",
"'mean'",
",",
"'median'",
",",
"'count'",
",",
"'sum'",
",",
"'std'",
",",
"'min'",
",",
"'max'",
"]",
"if",
"not",
"callable",
"(",
"statistic",
")",
"and",
"statistic",
"not",
"in",
"known_stats",
":",
"raise",
"ValueError",
"(",
"'invalid statistic %r'",
"%",
"(",
"statistic",
",",
")",
")",
"# `Ndim` is the number of dimensions (e.g. `2` for `binned_statistic_2d`)",
"# `Dlen` is the length of elements along each dimension.",
"# This code is based on np.histogramdd",
"try",
":",
"# `sample` is an ND-array.",
"Dlen",
",",
"Ndim",
"=",
"sample",
".",
"shape",
"except",
"(",
"AttributeError",
",",
"ValueError",
")",
":",
"# `sample` is a sequence of 1D arrays.",
"sample",
"=",
"np",
".",
"atleast_2d",
"(",
"sample",
")",
".",
"T",
"Dlen",
",",
"Ndim",
"=",
"sample",
".",
"shape",
"# Store initial shape of `values` to preserve it in the output",
"values",
"=",
"np",
".",
"asarray",
"(",
"values",
")",
"input_shape",
"=",
"list",
"(",
"values",
".",
"shape",
")",
"# Make sure that `values` is 2D to iterate over rows",
"values",
"=",
"np",
".",
"atleast_2d",
"(",
"values",
")",
"Vdim",
",",
"Vlen",
"=",
"values",
".",
"shape",
"# Make sure `values` match `sample`",
"if",
"(",
"statistic",
"!=",
"'count'",
"and",
"Vlen",
"!=",
"Dlen",
")",
":",
"raise",
"AttributeError",
"(",
"'The number of `values` elements must match the '",
"'length of each `sample` dimension.'",
")",
"nbin",
"=",
"np",
".",
"empty",
"(",
"Ndim",
",",
"int",
")",
"# Number of bins in each dimension",
"edges",
"=",
"Ndim",
"*",
"[",
"None",
"]",
"# Bin edges for each dim (will be 2D array)",
"dedges",
"=",
"Ndim",
"*",
"[",
"None",
"]",
"# Spacing between edges (will be 2D array)",
"try",
":",
"M",
"=",
"len",
"(",
"bins",
")",
"if",
"M",
"!=",
"Ndim",
":",
"raise",
"AttributeError",
"(",
"'The dimension of bins must be equal '",
"'to the dimension of the sample x.'",
")",
"except",
"TypeError",
":",
"bins",
"=",
"Ndim",
"*",
"[",
"bins",
"]",
"# Select range for each dimension",
"# Used only if number of bins is given.",
"if",
"range",
"is",
"None",
":",
"smin",
"=",
"np",
".",
"atleast_1d",
"(",
"np",
".",
"array",
"(",
"sample",
".",
"min",
"(",
"axis",
"=",
"0",
")",
",",
"float",
")",
")",
"smax",
"=",
"np",
".",
"atleast_1d",
"(",
"np",
".",
"array",
"(",
"sample",
".",
"max",
"(",
"axis",
"=",
"0",
")",
",",
"float",
")",
")",
"else",
":",
"smin",
"=",
"np",
".",
"zeros",
"(",
"Ndim",
")",
"smax",
"=",
"np",
".",
"zeros",
"(",
"Ndim",
")",
"for",
"i",
"in",
"xrange",
"(",
"Ndim",
")",
":",
"smin",
"[",
"i",
"]",
",",
"smax",
"[",
"i",
"]",
"=",
"range",
"[",
"i",
"]",
"# Make sure the bins have a finite width.",
"for",
"i",
"in",
"xrange",
"(",
"len",
"(",
"smin",
")",
")",
":",
"if",
"smin",
"[",
"i",
"]",
"==",
"smax",
"[",
"i",
"]",
":",
"smin",
"[",
"i",
"]",
"=",
"smin",
"[",
"i",
"]",
"-",
".5",
"smax",
"[",
"i",
"]",
"=",
"smax",
"[",
"i",
"]",
"+",
".5",
"# Create edge arrays",
"for",
"i",
"in",
"xrange",
"(",
"Ndim",
")",
":",
"if",
"np",
".",
"isscalar",
"(",
"bins",
"[",
"i",
"]",
")",
":",
"nbin",
"[",
"i",
"]",
"=",
"bins",
"[",
"i",
"]",
"+",
"2",
"# +2 for outlier bins",
"edges",
"[",
"i",
"]",
"=",
"np",
".",
"linspace",
"(",
"smin",
"[",
"i",
"]",
",",
"smax",
"[",
"i",
"]",
",",
"nbin",
"[",
"i",
"]",
"-",
"1",
")",
"else",
":",
"edges",
"[",
"i",
"]",
"=",
"np",
".",
"asarray",
"(",
"bins",
"[",
"i",
"]",
",",
"float",
")",
"nbin",
"[",
"i",
"]",
"=",
"len",
"(",
"edges",
"[",
"i",
"]",
")",
"+",
"1",
"# +1 for outlier bins",
"dedges",
"[",
"i",
"]",
"=",
"np",
".",
"diff",
"(",
"edges",
"[",
"i",
"]",
")",
"nbin",
"=",
"np",
".",
"asarray",
"(",
"nbin",
")",
"# Compute the bin number each sample falls into, in each dimension",
"sampBin",
"=",
"[",
"np",
".",
"digitize",
"(",
"sample",
"[",
":",
",",
"i",
"]",
",",
"edges",
"[",
"i",
"]",
")",
"for",
"i",
"in",
"xrange",
"(",
"Ndim",
")",
"]",
"# Using `digitize`, values that fall on an edge are put in the right bin.",
"# For the rightmost bin, we want values equal to the right",
"# edge to be counted in the last bin, and not as an outlier.",
"for",
"i",
"in",
"xrange",
"(",
"Ndim",
")",
":",
"# Find the rounding precision",
"decimal",
"=",
"int",
"(",
"-",
"np",
".",
"log10",
"(",
"dedges",
"[",
"i",
"]",
".",
"min",
"(",
")",
")",
")",
"+",
"6",
"# Find which points are on the rightmost edge.",
"on_edge",
"=",
"np",
".",
"where",
"(",
"np",
".",
"around",
"(",
"sample",
"[",
":",
",",
"i",
"]",
",",
"decimal",
")",
"==",
"np",
".",
"around",
"(",
"edges",
"[",
"i",
"]",
"[",
"-",
"1",
"]",
",",
"decimal",
")",
")",
"[",
"0",
"]",
"# Shift these points one bin to the left.",
"sampBin",
"[",
"i",
"]",
"[",
"on_edge",
"]",
"-=",
"1",
"# Compute the sample indices in the flattened statistic matrix.",
"binnumbers",
"=",
"np",
".",
"ravel_multi_index",
"(",
"sampBin",
",",
"nbin",
")",
"result",
"=",
"np",
".",
"empty",
"(",
"[",
"Vdim",
",",
"nbin",
".",
"prod",
"(",
")",
"]",
",",
"float",
")",
"if",
"statistic",
"==",
"'mean'",
":",
"result",
".",
"fill",
"(",
"np",
".",
"nan",
")",
"flatcount",
"=",
"np",
".",
"bincount",
"(",
"binnumbers",
",",
"None",
")",
"a",
"=",
"flatcount",
".",
"nonzero",
"(",
")",
"for",
"vv",
"in",
"xrange",
"(",
"Vdim",
")",
":",
"flatsum",
"=",
"np",
".",
"bincount",
"(",
"binnumbers",
",",
"values",
"[",
"vv",
"]",
")",
"result",
"[",
"vv",
",",
"a",
"]",
"=",
"flatsum",
"[",
"a",
"]",
"/",
"flatcount",
"[",
"a",
"]",
"elif",
"statistic",
"==",
"'std'",
":",
"result",
".",
"fill",
"(",
"0",
")",
"flatcount",
"=",
"np",
".",
"bincount",
"(",
"binnumbers",
",",
"None",
")",
"a",
"=",
"flatcount",
".",
"nonzero",
"(",
")",
"for",
"vv",
"in",
"xrange",
"(",
"Vdim",
")",
":",
"flatsum",
"=",
"np",
".",
"bincount",
"(",
"binnumbers",
",",
"values",
"[",
"vv",
"]",
")",
"flatsum2",
"=",
"np",
".",
"bincount",
"(",
"binnumbers",
",",
"values",
"[",
"vv",
"]",
"**",
"2",
")",
"result",
"[",
"vv",
",",
"a",
"]",
"=",
"np",
".",
"sqrt",
"(",
"flatsum2",
"[",
"a",
"]",
"/",
"flatcount",
"[",
"a",
"]",
"-",
"(",
"flatsum",
"[",
"a",
"]",
"/",
"flatcount",
"[",
"a",
"]",
")",
"**",
"2",
")",
"elif",
"statistic",
"==",
"'count'",
":",
"result",
".",
"fill",
"(",
"0",
")",
"flatcount",
"=",
"np",
".",
"bincount",
"(",
"binnumbers",
",",
"None",
")",
"a",
"=",
"np",
".",
"arange",
"(",
"len",
"(",
"flatcount",
")",
")",
"result",
"[",
":",
",",
"a",
"]",
"=",
"flatcount",
"[",
"np",
".",
"newaxis",
",",
":",
"]",
"elif",
"statistic",
"==",
"'sum'",
":",
"result",
".",
"fill",
"(",
"0",
")",
"for",
"vv",
"in",
"xrange",
"(",
"Vdim",
")",
":",
"flatsum",
"=",
"np",
".",
"bincount",
"(",
"binnumbers",
",",
"values",
"[",
"vv",
"]",
")",
"a",
"=",
"np",
".",
"arange",
"(",
"len",
"(",
"flatsum",
")",
")",
"result",
"[",
"vv",
",",
"a",
"]",
"=",
"flatsum",
"elif",
"statistic",
"==",
"'median'",
":",
"result",
".",
"fill",
"(",
"np",
".",
"nan",
")",
"for",
"i",
"in",
"np",
".",
"unique",
"(",
"binnumbers",
")",
":",
"for",
"vv",
"in",
"xrange",
"(",
"Vdim",
")",
":",
"result",
"[",
"vv",
",",
"i",
"]",
"=",
"np",
".",
"median",
"(",
"values",
"[",
"vv",
",",
"binnumbers",
"==",
"i",
"]",
")",
"elif",
"statistic",
"==",
"'min'",
":",
"result",
".",
"fill",
"(",
"np",
".",
"nan",
")",
"for",
"i",
"in",
"np",
".",
"unique",
"(",
"binnumbers",
")",
":",
"for",
"vv",
"in",
"xrange",
"(",
"Vdim",
")",
":",
"result",
"[",
"vv",
",",
"i",
"]",
"=",
"np",
".",
"min",
"(",
"values",
"[",
"vv",
",",
"binnumbers",
"==",
"i",
"]",
")",
"elif",
"statistic",
"==",
"'max'",
":",
"result",
".",
"fill",
"(",
"np",
".",
"nan",
")",
"for",
"i",
"in",
"np",
".",
"unique",
"(",
"binnumbers",
")",
":",
"for",
"vv",
"in",
"xrange",
"(",
"Vdim",
")",
":",
"result",
"[",
"vv",
",",
"i",
"]",
"=",
"np",
".",
"max",
"(",
"values",
"[",
"vv",
",",
"binnumbers",
"==",
"i",
"]",
")",
"elif",
"callable",
"(",
"statistic",
")",
":",
"with",
"np",
".",
"errstate",
"(",
"invalid",
"=",
"'ignore'",
")",
",",
"suppress_warnings",
"(",
")",
"as",
"sup",
":",
"sup",
".",
"filter",
"(",
"RuntimeWarning",
")",
"try",
":",
"null",
"=",
"statistic",
"(",
"[",
"]",
")",
"except",
"Exception",
":",
"null",
"=",
"np",
".",
"nan",
"result",
".",
"fill",
"(",
"null",
")",
"for",
"i",
"in",
"np",
".",
"unique",
"(",
"binnumbers",
")",
":",
"for",
"vv",
"in",
"xrange",
"(",
"Vdim",
")",
":",
"result",
"[",
"vv",
",",
"i",
"]",
"=",
"statistic",
"(",
"values",
"[",
"vv",
",",
"binnumbers",
"==",
"i",
"]",
")",
"# Shape into a proper matrix",
"result",
"=",
"result",
".",
"reshape",
"(",
"np",
".",
"append",
"(",
"Vdim",
",",
"nbin",
")",
")",
"# Remove outliers (indices 0 and -1 for each bin-dimension).",
"core",
"=",
"tuple",
"(",
"[",
"slice",
"(",
"None",
")",
"]",
"+",
"Ndim",
"*",
"[",
"slice",
"(",
"1",
",",
"-",
"1",
")",
"]",
")",
"result",
"=",
"result",
"[",
"core",
"]",
"# Unravel binnumbers into an ndarray, each row the bins for each dimension",
"if",
"(",
"expand_binnumbers",
"and",
"Ndim",
">",
"1",
")",
":",
"binnumbers",
"=",
"np",
".",
"asarray",
"(",
"np",
".",
"unravel_index",
"(",
"binnumbers",
",",
"nbin",
")",
")",
"if",
"np",
".",
"any",
"(",
"result",
".",
"shape",
"[",
"1",
":",
"]",
"!=",
"nbin",
"-",
"2",
")",
":",
"raise",
"RuntimeError",
"(",
"'Internal Shape Error'",
")",
"# Reshape to have output (`reulst`) match input (`values`) shape",
"result",
"=",
"result",
".",
"reshape",
"(",
"input_shape",
"[",
":",
"-",
"1",
"]",
"+",
"list",
"(",
"nbin",
"-",
"2",
")",
")",
"return",
"BinnedStatisticddResult",
"(",
"result",
",",
"edges",
",",
"binnumbers",
")"
] | https://github.com/catboost/catboost/blob/167f64f237114a4d10b2b4ee42adb4569137debe/contrib/python/scipy/py2/scipy/stats/_binned_statistic.py#L354-L619 |
|
wlanjie/AndroidFFmpeg | 7baf9122f4b8e1c74e7baf4be5c422c7a5ba5aaf | tools/fdk-aac-build/armeabi/toolchain/lib/python2.7/trace.py | python | find_lines_from_code | (code, strs) | return linenos | Return dict where keys are lines in the line number table. | Return dict where keys are lines in the line number table. | [
"Return",
"dict",
"where",
"keys",
"are",
"lines",
"in",
"the",
"line",
"number",
"table",
"."
] | def find_lines_from_code(code, strs):
"""Return dict where keys are lines in the line number table."""
linenos = {}
for _, lineno in dis.findlinestarts(code):
if lineno not in strs:
linenos[lineno] = 1
return linenos | [
"def",
"find_lines_from_code",
"(",
"code",
",",
"strs",
")",
":",
"linenos",
"=",
"{",
"}",
"for",
"_",
",",
"lineno",
"in",
"dis",
".",
"findlinestarts",
"(",
"code",
")",
":",
"if",
"lineno",
"not",
"in",
"strs",
":",
"linenos",
"[",
"lineno",
"]",
"=",
"1",
"return",
"linenos"
] | https://github.com/wlanjie/AndroidFFmpeg/blob/7baf9122f4b8e1c74e7baf4be5c422c7a5ba5aaf/tools/fdk-aac-build/armeabi/toolchain/lib/python2.7/trace.py#L393-L401 |
|
hughperkins/tf-coriander | 970d3df6c11400ad68405f22b0c42a52374e94ca | tensorflow/python/framework/function.py | python | Defun.__init__ | (self, *input_types, **kwargs) | Create a `Defun` decorator.
Args:
*input_types: A list of `tf.DType`
**kwargs: Optional keyword arguments, including
func_name - (optional). A python string, the name to use to
declare this `Function` in the graph.
grad_func - (optional). A function implementing the gradient
of the function-to-register. This is either a
`_DefinedFunction` or a `Declare` object. The gradient
function must satisfy the criterion defined in
function.proto:GradientDef.
python_grad_func - (optional). A function implementing the
gradient of the function python-side. This function must
take the current op and the gradients w.r.t. its outputs,
and return the gradients w.r.t. the inputs. That is it must
implement the interface expected by `tf.RegisterGradient`).
This will be called by tf.gradients to add the gradient ops
to the graph. At most one of grad_func and python_grad_func
can be specified. | Create a `Defun` decorator. | [
"Create",
"a",
"Defun",
"decorator",
"."
] | def __init__(self, *input_types, **kwargs):
"""Create a `Defun` decorator.
Args:
*input_types: A list of `tf.DType`
**kwargs: Optional keyword arguments, including
func_name - (optional). A python string, the name to use to
declare this `Function` in the graph.
grad_func - (optional). A function implementing the gradient
of the function-to-register. This is either a
`_DefinedFunction` or a `Declare` object. The gradient
function must satisfy the criterion defined in
function.proto:GradientDef.
python_grad_func - (optional). A function implementing the
gradient of the function python-side. This function must
take the current op and the gradients w.r.t. its outputs,
and return the gradients w.r.t. the inputs. That is it must
implement the interface expected by `tf.RegisterGradient`).
This will be called by tf.gradients to add the gradient ops
to the graph. At most one of grad_func and python_grad_func
can be specified.
"""
self._input_types = input_types
self._func_name = kwargs.pop("func_name", None)
self._grad_func = kwargs.pop("grad_func", None)
self._python_grad_func = kwargs.pop("python_grad_func", None)
self._extra_kwargs = kwargs | [
"def",
"__init__",
"(",
"self",
",",
"*",
"input_types",
",",
"*",
"*",
"kwargs",
")",
":",
"self",
".",
"_input_types",
"=",
"input_types",
"self",
".",
"_func_name",
"=",
"kwargs",
".",
"pop",
"(",
"\"func_name\"",
",",
"None",
")",
"self",
".",
"_grad_func",
"=",
"kwargs",
".",
"pop",
"(",
"\"grad_func\"",
",",
"None",
")",
"self",
".",
"_python_grad_func",
"=",
"kwargs",
".",
"pop",
"(",
"\"python_grad_func\"",
",",
"None",
")",
"self",
".",
"_extra_kwargs",
"=",
"kwargs"
] | https://github.com/hughperkins/tf-coriander/blob/970d3df6c11400ad68405f22b0c42a52374e94ca/tensorflow/python/framework/function.py#L715-L743 |
||
catboost/catboost | 167f64f237114a4d10b2b4ee42adb4569137debe | contrib/python/scipy/scipy/sparse/compressed.py | python | _cs_matrix._scalar_binopt | (self, other, op) | return res | Scalar version of self._binopt, for cases in which no new nonzeros
are added. Produces a new spmatrix in canonical form. | Scalar version of self._binopt, for cases in which no new nonzeros
are added. Produces a new spmatrix in canonical form. | [
"Scalar",
"version",
"of",
"self",
".",
"_binopt",
"for",
"cases",
"in",
"which",
"no",
"new",
"nonzeros",
"are",
"added",
".",
"Produces",
"a",
"new",
"spmatrix",
"in",
"canonical",
"form",
"."
] | def _scalar_binopt(self, other, op):
"""Scalar version of self._binopt, for cases in which no new nonzeros
are added. Produces a new spmatrix in canonical form.
"""
self.sum_duplicates()
res = self._with_data(op(self.data, other), copy=True)
res.eliminate_zeros()
return res | [
"def",
"_scalar_binopt",
"(",
"self",
",",
"other",
",",
"op",
")",
":",
"self",
".",
"sum_duplicates",
"(",
")",
"res",
"=",
"self",
".",
"_with_data",
"(",
"op",
"(",
"self",
".",
"data",
",",
"other",
")",
",",
"copy",
"=",
"True",
")",
"res",
".",
"eliminate_zeros",
"(",
")",
"return",
"res"
] | https://github.com/catboost/catboost/blob/167f64f237114a4d10b2b4ee42adb4569137debe/contrib/python/scipy/scipy/sparse/compressed.py#L196-L203 |
|
pmq20/node-packer | 12c46c6e44fbc14d9ee645ebd17d5296b324f7e0 | lts/deps/npm/node_modules/node-gyp/gyp/pylib/gyp/MSVSUtil.py | python | ShardTargets | (target_list, target_dicts) | return (new_target_list, new_target_dicts) | Shard some targets apart to work around the linkers limits.
Arguments:
target_list: List of target pairs: 'base/base.gyp:base'.
target_dicts: Dict of target properties keyed on target pair.
Returns:
Tuple of the new sharded versions of the inputs. | Shard some targets apart to work around the linkers limits. | [
"Shard",
"some",
"targets",
"apart",
"to",
"work",
"around",
"the",
"linkers",
"limits",
"."
] | def ShardTargets(target_list, target_dicts):
"""Shard some targets apart to work around the linkers limits.
Arguments:
target_list: List of target pairs: 'base/base.gyp:base'.
target_dicts: Dict of target properties keyed on target pair.
Returns:
Tuple of the new sharded versions of the inputs.
"""
# Gather the targets to shard, and how many pieces.
targets_to_shard = {}
for t in target_dicts:
shards = int(target_dicts[t].get('msvs_shard', 0))
if shards:
targets_to_shard[t] = shards
# Shard target_list.
new_target_list = []
for t in target_list:
if t in targets_to_shard:
for i in range(targets_to_shard[t]):
new_target_list.append(_ShardName(t, i))
else:
new_target_list.append(t)
# Shard target_dict.
new_target_dicts = {}
for t in target_dicts:
if t in targets_to_shard:
for i in range(targets_to_shard[t]):
name = _ShardName(t, i)
new_target_dicts[name] = copy.copy(target_dicts[t])
new_target_dicts[name]['target_name'] = _ShardName(
new_target_dicts[name]['target_name'], i)
sources = new_target_dicts[name].get('sources', [])
new_sources = []
for pos in range(i, len(sources), targets_to_shard[t]):
new_sources.append(sources[pos])
new_target_dicts[name]['sources'] = new_sources
else:
new_target_dicts[t] = target_dicts[t]
# Shard dependencies.
for t in new_target_dicts:
for deptype in ('dependencies', 'dependencies_original'):
dependencies = copy.copy(new_target_dicts[t].get(deptype, []))
new_dependencies = []
for d in dependencies:
if d in targets_to_shard:
for i in range(targets_to_shard[d]):
new_dependencies.append(_ShardName(d, i))
else:
new_dependencies.append(d)
new_target_dicts[t][deptype] = new_dependencies
return (new_target_list, new_target_dicts) | [
"def",
"ShardTargets",
"(",
"target_list",
",",
"target_dicts",
")",
":",
"# Gather the targets to shard, and how many pieces.",
"targets_to_shard",
"=",
"{",
"}",
"for",
"t",
"in",
"target_dicts",
":",
"shards",
"=",
"int",
"(",
"target_dicts",
"[",
"t",
"]",
".",
"get",
"(",
"'msvs_shard'",
",",
"0",
")",
")",
"if",
"shards",
":",
"targets_to_shard",
"[",
"t",
"]",
"=",
"shards",
"# Shard target_list.",
"new_target_list",
"=",
"[",
"]",
"for",
"t",
"in",
"target_list",
":",
"if",
"t",
"in",
"targets_to_shard",
":",
"for",
"i",
"in",
"range",
"(",
"targets_to_shard",
"[",
"t",
"]",
")",
":",
"new_target_list",
".",
"append",
"(",
"_ShardName",
"(",
"t",
",",
"i",
")",
")",
"else",
":",
"new_target_list",
".",
"append",
"(",
"t",
")",
"# Shard target_dict.",
"new_target_dicts",
"=",
"{",
"}",
"for",
"t",
"in",
"target_dicts",
":",
"if",
"t",
"in",
"targets_to_shard",
":",
"for",
"i",
"in",
"range",
"(",
"targets_to_shard",
"[",
"t",
"]",
")",
":",
"name",
"=",
"_ShardName",
"(",
"t",
",",
"i",
")",
"new_target_dicts",
"[",
"name",
"]",
"=",
"copy",
".",
"copy",
"(",
"target_dicts",
"[",
"t",
"]",
")",
"new_target_dicts",
"[",
"name",
"]",
"[",
"'target_name'",
"]",
"=",
"_ShardName",
"(",
"new_target_dicts",
"[",
"name",
"]",
"[",
"'target_name'",
"]",
",",
"i",
")",
"sources",
"=",
"new_target_dicts",
"[",
"name",
"]",
".",
"get",
"(",
"'sources'",
",",
"[",
"]",
")",
"new_sources",
"=",
"[",
"]",
"for",
"pos",
"in",
"range",
"(",
"i",
",",
"len",
"(",
"sources",
")",
",",
"targets_to_shard",
"[",
"t",
"]",
")",
":",
"new_sources",
".",
"append",
"(",
"sources",
"[",
"pos",
"]",
")",
"new_target_dicts",
"[",
"name",
"]",
"[",
"'sources'",
"]",
"=",
"new_sources",
"else",
":",
"new_target_dicts",
"[",
"t",
"]",
"=",
"target_dicts",
"[",
"t",
"]",
"# Shard dependencies.",
"for",
"t",
"in",
"new_target_dicts",
":",
"for",
"deptype",
"in",
"(",
"'dependencies'",
",",
"'dependencies_original'",
")",
":",
"dependencies",
"=",
"copy",
".",
"copy",
"(",
"new_target_dicts",
"[",
"t",
"]",
".",
"get",
"(",
"deptype",
",",
"[",
"]",
")",
")",
"new_dependencies",
"=",
"[",
"]",
"for",
"d",
"in",
"dependencies",
":",
"if",
"d",
"in",
"targets_to_shard",
":",
"for",
"i",
"in",
"range",
"(",
"targets_to_shard",
"[",
"d",
"]",
")",
":",
"new_dependencies",
".",
"append",
"(",
"_ShardName",
"(",
"d",
",",
"i",
")",
")",
"else",
":",
"new_dependencies",
".",
"append",
"(",
"d",
")",
"new_target_dicts",
"[",
"t",
"]",
"[",
"deptype",
"]",
"=",
"new_dependencies",
"return",
"(",
"new_target_list",
",",
"new_target_dicts",
")"
] | https://github.com/pmq20/node-packer/blob/12c46c6e44fbc14d9ee645ebd17d5296b324f7e0/lts/deps/npm/node_modules/node-gyp/gyp/pylib/gyp/MSVSUtil.py#L73-L125 |
|
wyrover/book-code | 7f4883d9030d553bc6bcfa3da685e34789839900 | 3rdparty/protobuf/python/google/protobuf/descriptor.py | python | FileDescriptor.__init__ | (self, name, package, options=None, serialized_pb=None,
dependencies=None, public_dependencies=None,
syntax=None, pool=None) | Constructor. | Constructor. | [
"Constructor",
"."
] | def __init__(self, name, package, options=None, serialized_pb=None,
dependencies=None, public_dependencies=None,
syntax=None, pool=None):
"""Constructor."""
super(FileDescriptor, self).__init__(options, 'FileOptions')
if pool is None:
from google.protobuf import descriptor_pool
pool = descriptor_pool.Default()
self.pool = pool
self.message_types_by_name = {}
self.name = name
self.package = package
self.syntax = syntax or "proto2"
self.serialized_pb = serialized_pb
self.enum_types_by_name = {}
self.extensions_by_name = {}
self.services_by_name = {}
self.dependencies = (dependencies or [])
self.public_dependencies = (public_dependencies or [])
if (api_implementation.Type() == 'cpp' and
self.serialized_pb is not None):
_message.default_pool.AddSerializedFile(self.serialized_pb) | [
"def",
"__init__",
"(",
"self",
",",
"name",
",",
"package",
",",
"options",
"=",
"None",
",",
"serialized_pb",
"=",
"None",
",",
"dependencies",
"=",
"None",
",",
"public_dependencies",
"=",
"None",
",",
"syntax",
"=",
"None",
",",
"pool",
"=",
"None",
")",
":",
"super",
"(",
"FileDescriptor",
",",
"self",
")",
".",
"__init__",
"(",
"options",
",",
"'FileOptions'",
")",
"if",
"pool",
"is",
"None",
":",
"from",
"google",
".",
"protobuf",
"import",
"descriptor_pool",
"pool",
"=",
"descriptor_pool",
".",
"Default",
"(",
")",
"self",
".",
"pool",
"=",
"pool",
"self",
".",
"message_types_by_name",
"=",
"{",
"}",
"self",
".",
"name",
"=",
"name",
"self",
".",
"package",
"=",
"package",
"self",
".",
"syntax",
"=",
"syntax",
"or",
"\"proto2\"",
"self",
".",
"serialized_pb",
"=",
"serialized_pb",
"self",
".",
"enum_types_by_name",
"=",
"{",
"}",
"self",
".",
"extensions_by_name",
"=",
"{",
"}",
"self",
".",
"services_by_name",
"=",
"{",
"}",
"self",
".",
"dependencies",
"=",
"(",
"dependencies",
"or",
"[",
"]",
")",
"self",
".",
"public_dependencies",
"=",
"(",
"public_dependencies",
"or",
"[",
"]",
")",
"if",
"(",
"api_implementation",
".",
"Type",
"(",
")",
"==",
"'cpp'",
"and",
"self",
".",
"serialized_pb",
"is",
"not",
"None",
")",
":",
"_message",
".",
"default_pool",
".",
"AddSerializedFile",
"(",
"self",
".",
"serialized_pb",
")"
] | https://github.com/wyrover/book-code/blob/7f4883d9030d553bc6bcfa3da685e34789839900/3rdparty/protobuf/python/google/protobuf/descriptor.py#L831-L855 |
||
apache/mesos | 97d9a4063332aae3825d78de71611657e05cf5e2 | support/apply-reviews.py | python | review_api_url | (review_id) | return '{base}/{review}/'.format(
base=REVIEWBOARD_API_URL,
review=review_id) | Returns a Review Board API URL given a review ID. | Returns a Review Board API URL given a review ID. | [
"Returns",
"a",
"Review",
"Board",
"API",
"URL",
"given",
"a",
"review",
"ID",
"."
] | def review_api_url(review_id):
"""Returns a Review Board API URL given a review ID."""
# Reviewboard REST API expects '/' at the end of the URL.
return '{base}/{review}/'.format(
base=REVIEWBOARD_API_URL,
review=review_id) | [
"def",
"review_api_url",
"(",
"review_id",
")",
":",
"# Reviewboard REST API expects '/' at the end of the URL.",
"return",
"'{base}/{review}/'",
".",
"format",
"(",
"base",
"=",
"REVIEWBOARD_API_URL",
",",
"review",
"=",
"review_id",
")"
] | https://github.com/apache/mesos/blob/97d9a4063332aae3825d78de71611657e05cf5e2/support/apply-reviews.py#L51-L56 |
|
domino-team/openwrt-cc | 8b181297c34d14d3ca521cc9f31430d561dbc688 | package/gli-pub/openwrt-node-packages-master/node/node-v6.9.1/deps/v8_inspector/third_party/jinja2/jinja2/debug.py | python | translate_syntax_error | (error, source=None) | return fake_exc_info(exc_info, filename, error.lineno) | Rewrites a syntax error to please traceback systems. | Rewrites a syntax error to please traceback systems. | [
"Rewrites",
"a",
"syntax",
"error",
"to",
"please",
"traceback",
"systems",
"."
] | def translate_syntax_error(error, source=None):
"""Rewrites a syntax error to please traceback systems."""
error.source = source
error.translated = True
exc_info = (error.__class__, error, None)
filename = error.filename
if filename is None:
filename = '<unknown>'
return fake_exc_info(exc_info, filename, error.lineno) | [
"def",
"translate_syntax_error",
"(",
"error",
",",
"source",
"=",
"None",
")",
":",
"error",
".",
"source",
"=",
"source",
"error",
".",
"translated",
"=",
"True",
"exc_info",
"=",
"(",
"error",
".",
"__class__",
",",
"error",
",",
"None",
")",
"filename",
"=",
"error",
".",
"filename",
"if",
"filename",
"is",
"None",
":",
"filename",
"=",
"'<unknown>'",
"return",
"fake_exc_info",
"(",
"exc_info",
",",
"filename",
",",
"error",
".",
"lineno",
")"
] | https://github.com/domino-team/openwrt-cc/blob/8b181297c34d14d3ca521cc9f31430d561dbc688/package/gli-pub/openwrt-node-packages-master/node/node-v6.9.1/deps/v8_inspector/third_party/jinja2/jinja2/debug.py#L143-L151 |
|
ideawu/ssdb | f229ba277c7f7d0ca5a441c0c6fb3d1209af68e4 | deps/cpy/antlr3/tree.py | python | TreeNodeStream.get | (self, i) | Get a tree node at an absolute index i; 0..n-1.
If you don't want to buffer up nodes, then this method makes no
sense for you. | Get a tree node at an absolute index i; 0..n-1.
If you don't want to buffer up nodes, then this method makes no
sense for you. | [
"Get",
"a",
"tree",
"node",
"at",
"an",
"absolute",
"index",
"i",
";",
"0",
"..",
"n",
"-",
"1",
".",
"If",
"you",
"don",
"t",
"want",
"to",
"buffer",
"up",
"nodes",
"then",
"this",
"method",
"makes",
"no",
"sense",
"for",
"you",
"."
] | def get(self, i):
"""Get a tree node at an absolute index i; 0..n-1.
If you don't want to buffer up nodes, then this method makes no
sense for you.
"""
raise NotImplementedError | [
"def",
"get",
"(",
"self",
",",
"i",
")",
":",
"raise",
"NotImplementedError"
] | https://github.com/ideawu/ssdb/blob/f229ba277c7f7d0ca5a441c0c6fb3d1209af68e4/deps/cpy/antlr3/tree.py#L1558-L1564 |
||
Ewenwan/MVision | 97b394dfa48cb21c82cd003b1a952745e413a17f | CNN/MobileNet/MobileNet_v2_ssd_caffe/ssd_detect.py | python | main | (args) | main | main | [
"main"
] | def main(args):
'''main '''
# 定义一个检测器类
detection = CaffeDetection(args.gpu_id,
args.model_def, args.model_weights,
args.image_resize, args.labelmap_file)
# 检测并获取结果
result = detection.detect(args.image_file)
# 打印结果
print result
#结果显示
img = Image.open(args.image_file)#打开图像
draw = ImageDraw.Draw(img)#显示
width, height = img.size#原来图像大小
print width, height
for item in result:
# 获取坐标实际整数值
xmin = int(round(item[0] * width))
ymin = int(round(item[1] * height))
xmax = int(round(item[2] * width))
ymax = int(round(item[3] * height))
draw.rectangle([xmin, ymin, xmax, ymax], outline=(255, 0, 0))#红色框
# [6] label_name [5] score
draw.text([xmin, ymin], item[-1] + str(item[-2]), (0, 0, 255))#显示文本标签 绿色
print item
print [xmin, ymin, xmax, ymax]
print [xmin, ymin], item[-1]
img.save('detect_result.jpg') | [
"def",
"main",
"(",
"args",
")",
":",
"# 定义一个检测器类",
"detection",
"=",
"CaffeDetection",
"(",
"args",
".",
"gpu_id",
",",
"args",
".",
"model_def",
",",
"args",
".",
"model_weights",
",",
"args",
".",
"image_resize",
",",
"args",
".",
"labelmap_file",
")",
"# 检测并获取结果",
"result",
"=",
"detection",
".",
"detect",
"(",
"args",
".",
"image_file",
")",
"# 打印结果",
"print",
"result",
"#结果显示",
"img",
"=",
"Image",
".",
"open",
"(",
"args",
".",
"image_file",
")",
"#打开图像",
"draw",
"=",
"ImageDraw",
".",
"Draw",
"(",
"img",
")",
"#显示",
"width",
",",
"height",
"=",
"img",
".",
"size",
"#原来图像大小",
"print",
"width",
",",
"height",
"for",
"item",
"in",
"result",
":",
"# 获取坐标实际整数值",
"xmin",
"=",
"int",
"(",
"round",
"(",
"item",
"[",
"0",
"]",
"*",
"width",
")",
")",
"ymin",
"=",
"int",
"(",
"round",
"(",
"item",
"[",
"1",
"]",
"*",
"height",
")",
")",
"xmax",
"=",
"int",
"(",
"round",
"(",
"item",
"[",
"2",
"]",
"*",
"width",
")",
")",
"ymax",
"=",
"int",
"(",
"round",
"(",
"item",
"[",
"3",
"]",
"*",
"height",
")",
")",
"draw",
".",
"rectangle",
"(",
"[",
"xmin",
",",
"ymin",
",",
"xmax",
",",
"ymax",
"]",
",",
"outline",
"=",
"(",
"255",
",",
"0",
",",
"0",
")",
")",
"#红色框",
"# [6] label_name [5] score ",
"draw",
".",
"text",
"(",
"[",
"xmin",
",",
"ymin",
"]",
",",
"item",
"[",
"-",
"1",
"]",
"+",
"str",
"(",
"item",
"[",
"-",
"2",
"]",
")",
",",
"(",
"0",
",",
"0",
",",
"255",
")",
")",
"#显示文本标签 绿色",
"print",
"item",
"print",
"[",
"xmin",
",",
"ymin",
",",
"xmax",
",",
"ymax",
"]",
"print",
"[",
"xmin",
",",
"ymin",
"]",
",",
"item",
"[",
"-",
"1",
"]",
"img",
".",
"save",
"(",
"'detect_result.jpg'",
")"
] | https://github.com/Ewenwan/MVision/blob/97b394dfa48cb21c82cd003b1a952745e413a17f/CNN/MobileNet/MobileNet_v2_ssd_caffe/ssd_detect.py#L122-L149 |
||
adobe/chromium | cfe5bf0b51b1f6b9fe239c2a3c2f2364da9967d7 | third_party/closure_linter/closure_linter/ecmametadatapass.py | python | ParseError.__init__ | (self, token, message=None) | Initialize a parse error at the given token with an optional message.
Args:
token: The token where the parse error occurred.
message: A message describing the parse error. | Initialize a parse error at the given token with an optional message. | [
"Initialize",
"a",
"parse",
"error",
"at",
"the",
"given",
"token",
"with",
"an",
"optional",
"message",
"."
] | def __init__(self, token, message=None):
"""Initialize a parse error at the given token with an optional message.
Args:
token: The token where the parse error occurred.
message: A message describing the parse error.
"""
Exception.__init__(self, message)
self.token = token | [
"def",
"__init__",
"(",
"self",
",",
"token",
",",
"message",
"=",
"None",
")",
":",
"Exception",
".",
"__init__",
"(",
"self",
",",
"message",
")",
"self",
".",
"token",
"=",
"token"
] | https://github.com/adobe/chromium/blob/cfe5bf0b51b1f6b9fe239c2a3c2f2364da9967d7/third_party/closure_linter/closure_linter/ecmametadatapass.py#L35-L43 |
||
weolar/miniblink49 | 1c4678db0594a4abde23d3ebbcc7cd13c3170777 | third_party/jinja2/optimizer.py | python | Optimizer.fold | (self, node) | Do constant folding. | Do constant folding. | [
"Do",
"constant",
"folding",
"."
] | def fold(self, node):
"""Do constant folding."""
node = self.generic_visit(node)
try:
return nodes.Const.from_untrusted(node.as_const(),
lineno=node.lineno,
environment=self.environment)
except nodes.Impossible:
return node | [
"def",
"fold",
"(",
"self",
",",
"node",
")",
":",
"node",
"=",
"self",
".",
"generic_visit",
"(",
"node",
")",
"try",
":",
"return",
"nodes",
".",
"Const",
".",
"from_untrusted",
"(",
"node",
".",
"as_const",
"(",
")",
",",
"lineno",
"=",
"node",
".",
"lineno",
",",
"environment",
"=",
"self",
".",
"environment",
")",
"except",
"nodes",
".",
"Impossible",
":",
"return",
"node"
] | https://github.com/weolar/miniblink49/blob/1c4678db0594a4abde23d3ebbcc7cd13c3170777/third_party/jinja2/optimizer.py#L54-L62 |
||
wxWidgets/wxPython-Classic | 19571e1ae65f1ac445f5491474121998c97a1bf0 | src/gtk/dataview.py | python | DataViewIconText.GetIcon | (*args, **kwargs) | return _dataview.DataViewIconText_GetIcon(*args, **kwargs) | GetIcon(self) -> Icon | GetIcon(self) -> Icon | [
"GetIcon",
"(",
"self",
")",
"-",
">",
"Icon"
] | def GetIcon(*args, **kwargs):
"""GetIcon(self) -> Icon"""
return _dataview.DataViewIconText_GetIcon(*args, **kwargs) | [
"def",
"GetIcon",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"return",
"_dataview",
".",
"DataViewIconText_GetIcon",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")"
] | https://github.com/wxWidgets/wxPython-Classic/blob/19571e1ae65f1ac445f5491474121998c97a1bf0/src/gtk/dataview.py#L1315-L1317 |
|
catboost/catboost | 167f64f237114a4d10b2b4ee42adb4569137debe | contrib/python/traitlets/py3/traitlets/config/application.py | python | Application.print_description | (self) | Print the application description. | Print the application description. | [
"Print",
"the",
"application",
"description",
"."
] | def print_description(self):
"""Print the application description."""
print('\n'.join(self.emit_description())) | [
"def",
"print_description",
"(",
"self",
")",
":",
"print",
"(",
"'\\n'",
".",
"join",
"(",
"self",
".",
"emit_description",
"(",
")",
")",
")"
] | https://github.com/catboost/catboost/blob/167f64f237114a4d10b2b4ee42adb4569137debe/contrib/python/traitlets/py3/traitlets/config/application.py#L552-L554 |
||
aws/lumberyard | f85344403c1c2e77ec8c75deb2c116e97b713217 | dev/Tools/Python/3.7.10/windows/Lib/mailbox.py | python | _mboxMMDFMessage.remove_flag | (self, flag) | Unset the given string flag(s) without changing others. | Unset the given string flag(s) without changing others. | [
"Unset",
"the",
"given",
"string",
"flag",
"(",
"s",
")",
"without",
"changing",
"others",
"."
] | def remove_flag(self, flag):
"""Unset the given string flag(s) without changing others."""
if 'Status' in self or 'X-Status' in self:
self.set_flags(''.join(set(self.get_flags()) - set(flag))) | [
"def",
"remove_flag",
"(",
"self",
",",
"flag",
")",
":",
"if",
"'Status'",
"in",
"self",
"or",
"'X-Status'",
"in",
"self",
":",
"self",
".",
"set_flags",
"(",
"''",
".",
"join",
"(",
"set",
"(",
"self",
".",
"get_flags",
"(",
")",
")",
"-",
"set",
"(",
"flag",
")",
")",
")"
] | https://github.com/aws/lumberyard/blob/f85344403c1c2e77ec8c75deb2c116e97b713217/dev/Tools/Python/3.7.10/windows/Lib/mailbox.py#L1688-L1691 |
||
miyosuda/TensorFlowAndroidMNIST | 7b5a4603d2780a8a2834575706e9001977524007 | jni-build/jni/include/tensorflow/python/framework/function.py | python | _add_output_list | (op, start, limit, dtype_lst, func) | return ret_name | Adds a _ArrayToList node in the func for op.outputs[start:limit]. | Adds a _ArrayToList node in the func for op.outputs[start:limit]. | [
"Adds",
"a",
"_ArrayToList",
"node",
"in",
"the",
"func",
"for",
"op",
".",
"outputs",
"[",
"start",
":",
"limit",
"]",
"."
] | def _add_output_list(op, start, limit, dtype_lst, func):
"""Adds a _ArrayToList node in the func for op.outputs[start:limit]."""
ret_name = op.name + "_Lst_" + str(start) + "_" + str(limit)
num = limit - start
assert len(dtype_lst) == num
# Adds an identity node for each element in the array N*T so that
# uses of each element can be added easily later. These Identity
# will be eliminated before graph execution.
for i in xrange(num):
node = function_pb2.FunctionDef.Node()
node.op = "Identity"
node.arg.append(ret_name + ":" + str(i))
node.ret.append(_make_argname_from_tensor_name(op.outputs[i].name))
node.attr["T"].CopyFrom(attr_value_pb2.AttrValue(type=dtype_lst[i]))
func.node.extend([node])
return ret_name | [
"def",
"_add_output_list",
"(",
"op",
",",
"start",
",",
"limit",
",",
"dtype_lst",
",",
"func",
")",
":",
"ret_name",
"=",
"op",
".",
"name",
"+",
"\"_Lst_\"",
"+",
"str",
"(",
"start",
")",
"+",
"\"_\"",
"+",
"str",
"(",
"limit",
")",
"num",
"=",
"limit",
"-",
"start",
"assert",
"len",
"(",
"dtype_lst",
")",
"==",
"num",
"# Adds an identity node for each element in the array N*T so that",
"# uses of each element can be added easily later. These Identity",
"# will be eliminated before graph execution.",
"for",
"i",
"in",
"xrange",
"(",
"num",
")",
":",
"node",
"=",
"function_pb2",
".",
"FunctionDef",
".",
"Node",
"(",
")",
"node",
".",
"op",
"=",
"\"Identity\"",
"node",
".",
"arg",
".",
"append",
"(",
"ret_name",
"+",
"\":\"",
"+",
"str",
"(",
"i",
")",
")",
"node",
".",
"ret",
".",
"append",
"(",
"_make_argname_from_tensor_name",
"(",
"op",
".",
"outputs",
"[",
"i",
"]",
".",
"name",
")",
")",
"node",
".",
"attr",
"[",
"\"T\"",
"]",
".",
"CopyFrom",
"(",
"attr_value_pb2",
".",
"AttrValue",
"(",
"type",
"=",
"dtype_lst",
"[",
"i",
"]",
")",
")",
"func",
".",
"node",
".",
"extend",
"(",
"[",
"node",
"]",
")",
"return",
"ret_name"
] | https://github.com/miyosuda/TensorFlowAndroidMNIST/blob/7b5a4603d2780a8a2834575706e9001977524007/jni-build/jni/include/tensorflow/python/framework/function.py#L99-L114 |
|
aws/lumberyard | f85344403c1c2e77ec8c75deb2c116e97b713217 | dev/Tools/Python/3.7.10/linux_x64/lib/python3.7/enum.py | python | Flag._generate_next_value_ | (name, start, count, last_values) | return 2 ** (high_bit+1) | Generate the next value when not given.
name: the name of the member
start: the initial start value or None
count: the number of existing members
last_value: the last value assigned or None | Generate the next value when not given. | [
"Generate",
"the",
"next",
"value",
"when",
"not",
"given",
"."
] | def _generate_next_value_(name, start, count, last_values):
"""
Generate the next value when not given.
name: the name of the member
start: the initial start value or None
count: the number of existing members
last_value: the last value assigned or None
"""
if not count:
return start if start is not None else 1
for last_value in reversed(last_values):
try:
high_bit = _high_bit(last_value)
break
except Exception:
raise TypeError('Invalid Flag value: %r' % last_value) from None
return 2 ** (high_bit+1) | [
"def",
"_generate_next_value_",
"(",
"name",
",",
"start",
",",
"count",
",",
"last_values",
")",
":",
"if",
"not",
"count",
":",
"return",
"start",
"if",
"start",
"is",
"not",
"None",
"else",
"1",
"for",
"last_value",
"in",
"reversed",
"(",
"last_values",
")",
":",
"try",
":",
"high_bit",
"=",
"_high_bit",
"(",
"last_value",
")",
"break",
"except",
"Exception",
":",
"raise",
"TypeError",
"(",
"'Invalid Flag value: %r'",
"%",
"last_value",
")",
"from",
"None",
"return",
"2",
"**",
"(",
"high_bit",
"+",
"1",
")"
] | https://github.com/aws/lumberyard/blob/f85344403c1c2e77ec8c75deb2c116e97b713217/dev/Tools/Python/3.7.10/linux_x64/lib/python3.7/enum.py#L685-L702 |
|
idaholab/moose | 9eeebc65e098b4c30f8205fb41591fd5b61eb6ff | python/MooseDocs/common/get_content.py | python | get_content | (items, in_ext) | return list(nodes.values()) | Create a tree of files for processing.
Inputs:
items: [list[dict(),...] A list of dict items, each dict entry must contain the 'root_dir'
and 'content' fields that are passed to the doc_import function.
in_ext[tuple]: Set of extensions to be converted (e.g., ('.md', )).
out_ext[str]: The extension of rendered result (e.g., '.html'). | Create a tree of files for processing. | [
"Create",
"a",
"tree",
"of",
"files",
"for",
"processing",
"."
] | def get_content(items, in_ext):
"""
Create a tree of files for processing.
Inputs:
items: [list[dict(),...] A list of dict items, each dict entry must contain the 'root_dir'
and 'content' fields that are passed to the doc_import function.
in_ext[tuple]: Set of extensions to be converted (e.g., ('.md', )).
out_ext[str]: The extension of rendered result (e.g., '.html').
"""
if not isinstance(items, list) or any(not isinstance(x, dict) for x in items):
LOG.error('The supplied items must be a list of dict items, each with a "root_dir" and '
'optionally a "content" entry.')
return None
roots = set()
nodes = dict()
for root, filename, external in get_files(items, in_ext):
roots.add(root)
key = filename.replace(root, '').strip('/')
parts = key.split('/')
# Create directory nodes if they don't exist
for i in range(1, len(parts)):
dir_key = os.path.join(*parts[:i])
if dir_key not in nodes:
nodes[dir_key] = pages.Directory(dir_key, external=external,
source=os.path.join(root, dir_key))
# Create the file node, if it doesn't already exist. This enforces that the first
# item in the supplied content lists is the page that is rendered.
if key not in nodes:
nodes[key] = create_file_page(key, filename, in_ext)
nodes[key].external = external
# Update the project files
for root in roots:
if mooseutils.git_is_repo(root):
MooseDocs.PROJECT_FILES.update(mooseutils.git_ls_files(mooseutils.git_root_dir(root)))
else:
MooseDocs.PROJECT_FILES.update(mooseutils.list_files(root))
return list(nodes.values()) | [
"def",
"get_content",
"(",
"items",
",",
"in_ext",
")",
":",
"if",
"not",
"isinstance",
"(",
"items",
",",
"list",
")",
"or",
"any",
"(",
"not",
"isinstance",
"(",
"x",
",",
"dict",
")",
"for",
"x",
"in",
"items",
")",
":",
"LOG",
".",
"error",
"(",
"'The supplied items must be a list of dict items, each with a \"root_dir\" and '",
"'optionally a \"content\" entry.'",
")",
"return",
"None",
"roots",
"=",
"set",
"(",
")",
"nodes",
"=",
"dict",
"(",
")",
"for",
"root",
",",
"filename",
",",
"external",
"in",
"get_files",
"(",
"items",
",",
"in_ext",
")",
":",
"roots",
".",
"add",
"(",
"root",
")",
"key",
"=",
"filename",
".",
"replace",
"(",
"root",
",",
"''",
")",
".",
"strip",
"(",
"'/'",
")",
"parts",
"=",
"key",
".",
"split",
"(",
"'/'",
")",
"# Create directory nodes if they don't exist",
"for",
"i",
"in",
"range",
"(",
"1",
",",
"len",
"(",
"parts",
")",
")",
":",
"dir_key",
"=",
"os",
".",
"path",
".",
"join",
"(",
"*",
"parts",
"[",
":",
"i",
"]",
")",
"if",
"dir_key",
"not",
"in",
"nodes",
":",
"nodes",
"[",
"dir_key",
"]",
"=",
"pages",
".",
"Directory",
"(",
"dir_key",
",",
"external",
"=",
"external",
",",
"source",
"=",
"os",
".",
"path",
".",
"join",
"(",
"root",
",",
"dir_key",
")",
")",
"# Create the file node, if it doesn't already exist. This enforces that the first",
"# item in the supplied content lists is the page that is rendered.",
"if",
"key",
"not",
"in",
"nodes",
":",
"nodes",
"[",
"key",
"]",
"=",
"create_file_page",
"(",
"key",
",",
"filename",
",",
"in_ext",
")",
"nodes",
"[",
"key",
"]",
".",
"external",
"=",
"external",
"# Update the project files",
"for",
"root",
"in",
"roots",
":",
"if",
"mooseutils",
".",
"git_is_repo",
"(",
"root",
")",
":",
"MooseDocs",
".",
"PROJECT_FILES",
".",
"update",
"(",
"mooseutils",
".",
"git_ls_files",
"(",
"mooseutils",
".",
"git_root_dir",
"(",
"root",
")",
")",
")",
"else",
":",
"MooseDocs",
".",
"PROJECT_FILES",
".",
"update",
"(",
"mooseutils",
".",
"list_files",
"(",
"root",
")",
")",
"return",
"list",
"(",
"nodes",
".",
"values",
"(",
")",
")"
] | https://github.com/idaholab/moose/blob/9eeebc65e098b4c30f8205fb41591fd5b61eb6ff/python/MooseDocs/common/get_content.py#L169-L212 |
|
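A hypothetical call sketch based on the docstring above; the import path, directories, and extension tuple are assumptions, not values from the dataset:

# from MooseDocs.common.get_content import get_content   # assumed import location
items = [dict(root_dir='doc/content'),                    # each item needs 'root_dir'
         dict(root_dir='framework/doc/content')]          # 'content' is optional
nodes = get_content(items, in_ext=('.md',))               # flat list of Directory/File pages,
                                                          # or None if items are malformed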
root-project/root | fcd3583bb14852bf2e8cd2415717cbaac0e75896 | interpreter/llvm/src/tools/clang/bindings/python/clang/cindex.py | python | register_functions | (lib, ignore_errors) | Register function prototypes with a libclang library instance.
This must be called as part of library instantiation so Python knows how
to call out to the shared library. | Register function prototypes with a libclang library instance. | [
"Register",
"function",
"prototypes",
"with",
"a",
"libclang",
"library",
"instance",
"."
] | def register_functions(lib, ignore_errors):
"""Register function prototypes with a libclang library instance.
This must be called as part of library instantiation so Python knows how
to call out to the shared library.
"""
def register(item):
return register_function(lib, item, ignore_errors)
for f in functionList:
register(f) | [
"def",
"register_functions",
"(",
"lib",
",",
"ignore_errors",
")",
":",
"def",
"register",
"(",
"item",
")",
":",
"return",
"register_function",
"(",
"lib",
",",
"item",
",",
"ignore_errors",
")",
"for",
"f",
"in",
"functionList",
":",
"register",
"(",
"f",
")"
] | https://github.com/root-project/root/blob/fcd3583bb14852bf2e8cd2415717cbaac0e75896/interpreter/llvm/src/tools/clang/bindings/python/clang/cindex.py#L4083-L4094 |
||
mantidproject/mantid | 03deeb89254ec4289edb8771e0188c2090a02f32 | qt/python/mantidqtinterfaces/mantidqtinterfaces/reduction_gui/instruments/interface.py | python | InstrumentInterface.reduce | (self) | Pass the interface data to the scripter and reduce | Pass the interface data to the scripter and reduce | [
"Pass",
"the",
"interface",
"data",
"to",
"the",
"scripter",
"and",
"reduce"
] | def reduce(self):
"""
Pass the interface data to the scripter and reduce
"""
try:
self.scripter.update()
except:
print("Error in the user interface\n %s" % str(traceback.format_exc()))
self.scripter.push_state()
return
# Save the last reduction for later
try:
red_path = os.path.join(self.ERROR_REPORT_DIR, self.LAST_REDUCTION_NAME)
self.save_file(red_path)
except:
print("Could not save last reduction\n %s" % str(traceback.format_exc()))
try:
self.set_running(True)
if self.live_button_is_checked():
# Intercept and redirect if live data requested
self.scripter.apply_live()
else:
# Otherwise take the 'normal' path
self.scripter.apply()
self.set_running(False)
except RuntimeError:
if self._settings.debug:
msg = "Reduction could not be executed:\n\n%s" % unicode(traceback.format_exc())
else:
msg = "Reduction could not be executed:\n\n%s" % sys.exc_info()[1]
log_path = os.path.join(self.ERROR_REPORT_DIR, self.ERROR_REPORT_NAME)
msg += "\n\nWhen contacting the Mantid Team, please send this file:\n%s\n" % log_path
self._warning("Reduction failed", msg)
self._error_report(traceback.format_exc())
except:
msg = "Reduction could not be executed:\n\n%s" % sys.exc_info()[1]
msg += "\n\nPlease check your reduction parameters\n"
log_path = os.path.join(self.ERROR_REPORT_DIR, self.ERROR_REPORT_NAME)
msg += "\n\nWhen contacting the Mantid Team, please send this file:\n%s\n" % log_path
self._warning("Reduction failed", msg)
self._error_report(traceback.format_exc())
# Update widgets
self.scripter.push_state() | [
"def",
"reduce",
"(",
"self",
")",
":",
"try",
":",
"self",
".",
"scripter",
".",
"update",
"(",
")",
"except",
":",
"print",
"(",
"\"Error in the user interface\\n %s\"",
"%",
"str",
"(",
"traceback",
".",
"format_exc",
"(",
")",
")",
")",
"self",
".",
"scripter",
".",
"push_state",
"(",
")",
"return",
"# Save the last reduction for later",
"try",
":",
"red_path",
"=",
"os",
".",
"path",
".",
"join",
"(",
"self",
".",
"ERROR_REPORT_DIR",
",",
"self",
".",
"LAST_REDUCTION_NAME",
")",
"self",
".",
"save_file",
"(",
"red_path",
")",
"except",
":",
"print",
"(",
"\"Could not save last reduction\\n %s\"",
"%",
"str",
"(",
"traceback",
".",
"format_exc",
"(",
")",
")",
")",
"try",
":",
"self",
".",
"set_running",
"(",
"True",
")",
"if",
"self",
".",
"live_button_is_checked",
"(",
")",
":",
"# Intercept and redirect if live data requested",
"self",
".",
"scripter",
".",
"apply_live",
"(",
")",
"else",
":",
"# Otherwise take the 'normal' path",
"self",
".",
"scripter",
".",
"apply",
"(",
")",
"self",
".",
"set_running",
"(",
"False",
")",
"except",
"RuntimeError",
":",
"if",
"self",
".",
"_settings",
".",
"debug",
":",
"msg",
"=",
"\"Reduction could not be executed:\\n\\n%s\"",
"%",
"unicode",
"(",
"traceback",
".",
"format_exc",
"(",
")",
")",
"else",
":",
"msg",
"=",
"\"Reduction could not be executed:\\n\\n%s\"",
"%",
"sys",
".",
"exc_info",
"(",
")",
"[",
"1",
"]",
"log_path",
"=",
"os",
".",
"path",
".",
"join",
"(",
"self",
".",
"ERROR_REPORT_DIR",
",",
"self",
".",
"ERROR_REPORT_NAME",
")",
"msg",
"+=",
"\"\\n\\nWhen contacting the Mantid Team, please send this file:\\n%s\\n\"",
"%",
"log_path",
"self",
".",
"_warning",
"(",
"\"Reduction failed\"",
",",
"msg",
")",
"self",
".",
"_error_report",
"(",
"traceback",
".",
"format_exc",
"(",
")",
")",
"except",
":",
"msg",
"=",
"\"Reduction could not be executed:\\n\\n%s\"",
"%",
"sys",
".",
"exc_info",
"(",
")",
"[",
"1",
"]",
"msg",
"+=",
"\"\\n\\nPlease check your reduction parameters\\n\"",
"log_path",
"=",
"os",
".",
"path",
".",
"join",
"(",
"self",
".",
"ERROR_REPORT_DIR",
",",
"self",
".",
"ERROR_REPORT_NAME",
")",
"msg",
"+=",
"\"\\n\\nWhen contacting the Mantid Team, please send this file:\\n%s\\n\"",
"%",
"log_path",
"self",
".",
"_warning",
"(",
"\"Reduction failed\"",
",",
"msg",
")",
"self",
".",
"_error_report",
"(",
"traceback",
".",
"format_exc",
"(",
")",
")",
"# Update widgets",
"self",
".",
"scripter",
".",
"push_state",
"(",
")"
] | https://github.com/mantidproject/mantid/blob/03deeb89254ec4289edb8771e0188c2090a02f32/qt/python/mantidqtinterfaces/mantidqtinterfaces/reduction_gui/instruments/interface.py#L166-L210 |
||
benoitsteiner/tensorflow-opencl | cb7cb40a57fde5cfd4731bc551e82a1e2fef43a5 | tensorflow/python/training/monitored_session.py | python | _MonitoredSession.run_step_fn | (self, step_fn) | Run ops using a step function.
Args:
step_fn: A function or a method with a single argument of type
`StepContext`. The function may use methods of the argument to
perform computations with access to a raw session.
The returned value of the `step_fn` will be returned from `run_step_fn`,
unless a stop is requested. In that case, the next `should_stop` call
will return True.
Example usage:
```python
with tf.Graph().as_default():
c = tf.placeholder(dtypes.float32)
v = tf.add(c, 4.0)
w = tf.add(c, 0.5)
def step_fn(step_context):
a = step_context.session.run(fetches=v, feed_dict={c: 0.5})
if a <= 4.5:
step_context.request_stop()
return step_context.run_with_hooks(fetches=w, feed_dict={c: 0.1})
with tf.MonitoredSession() as session:
while not session.should_stop():
a = session.run_step_fn(step_fn)
```
Hooks interact with the `run_with_hooks()` call inside the `step_fn`
as they do with a `MonitoredSession.run` call.
Returns:
Returns the returned value of `step_fn`.
Raises:
StopIteration: if `step_fn` has called `request_stop()`. It may be
caught by `with tf.MonitoredSession()` to close the session.
ValueError: if `step_fn` doesn't have a single argument called
`step_context`. It may also optionally have `self` for cases when it
belongs to an object. | Run ops using a step function. | [
"Run",
"ops",
"using",
"a",
"step",
"function",
"."
] | def run_step_fn(self, step_fn):
"""Run ops using a step function.
Args:
step_fn: A function or a method with a single argument of type
`StepContext`. The function may use methods of the argument to
perform computations with access to a raw session.
The returned value of the `step_fn` will be returned from `run_step_fn`,
unless a stop is requested. In that case, the next `should_stop` call
will return True.
Example usage:
```python
with tf.Graph().as_default():
c = tf.placeholder(dtypes.float32)
v = tf.add(c, 4.0)
w = tf.add(c, 0.5)
def step_fn(step_context):
a = step_context.session.run(fetches=v, feed_dict={c: 0.5})
if a <= 4.5:
step_context.request_stop()
return step_context.run_with_hooks(fetches=w, feed_dict={c: 0.1})
with tf.MonitoredSession() as session:
while not session.should_stop():
a = session.run_step_fn(step_fn)
```
Hooks interact with the `run_with_hooks()` call inside the `step_fn`
as they do with a `MonitoredSession.run` call.
Returns:
Returns the returned value of `step_fn`.
Raises:
StopIteration: if `step_fn` has called `request_stop()`. It may be
caught by `with tf.MonitoredSession()` to close the session.
ValueError: if `step_fn` doesn't have a single argument called
`step_context`. It may also optionally have `self` for cases when it
belongs to an object.
"""
step_fn_arguments = util.fn_args(step_fn)
if step_fn_arguments != ('step_context',) and step_fn_arguments != (
'self',
'step_context',
):
raise ValueError(
'`step_fn` may either have one `step_context` argument, or'
' `self` and `step_context` arguments if it\'s an instance'
' method. Got {} instead.'.format(step_fn_arguments))
try:
return step_fn(_MonitoredSession.StepContext(self._tf_sess(), self.run))
except StopIteration:
self._stop_requested_in_step_fn = True
raise | [
"def",
"run_step_fn",
"(",
"self",
",",
"step_fn",
")",
":",
"step_fn_arguments",
"=",
"util",
".",
"fn_args",
"(",
"step_fn",
")",
"if",
"step_fn_arguments",
"!=",
"(",
"'step_context'",
",",
")",
"and",
"step_fn_arguments",
"!=",
"(",
"'self'",
",",
"'step_context'",
",",
")",
":",
"raise",
"ValueError",
"(",
"'`step_fn` may either have one `step_context` argument, or'",
"' `self` and `step_context` arguments if it\\'s an instance'",
"' method. Got {} instead.'",
".",
"format",
"(",
"step_fn_arguments",
")",
")",
"try",
":",
"return",
"step_fn",
"(",
"_MonitoredSession",
".",
"StepContext",
"(",
"self",
".",
"_tf_sess",
"(",
")",
",",
"self",
".",
"run",
")",
")",
"except",
"StopIteration",
":",
"self",
".",
"_stop_requested_in_step_fn",
"=",
"True",
"raise"
] | https://github.com/benoitsteiner/tensorflow-opencl/blob/cb7cb40a57fde5cfd4731bc551e82a1e2fef43a5/tensorflow/python/training/monitored_session.py#L525-L581 |
||
ricardoquesada/Spidermonkey | 4a75ea2543408bd1b2c515aa95901523eeef7858 | python/mozbuild/mozpack/manifests.py | python | InstallManifest.write | (self, path=None, fileobj=None) | Serialize this manifest to a file or file object.
If path is specified, that file will be written to. If fileobj is specified,
the serialized content will be written to that file object.
It is an error if both are specified. | Serialize this manifest to a file or file object. | [
"Serialize",
"this",
"manifest",
"to",
"a",
"file",
"or",
"file",
"object",
"."
] | def write(self, path=None, fileobj=None):
"""Serialize this manifest to a file or file object.
If path is specified, that file will be written to. If fileobj is specified,
the serialized content will be written to that file object.
It is an error if both are specified.
"""
with _auto_fileobj(path, fileobj, 'wb') as fh:
fh.write('%d\n' % self.CURRENT_VERSION)
for dest in sorted(self._dests):
entry = self._dests[dest]
parts = ['%d' % entry[0], dest]
parts.extend(entry[1:])
fh.write('%s\n' % self.FIELD_SEPARATOR.join(
p.encode('utf-8') for p in parts)) | [
"def",
"write",
"(",
"self",
",",
"path",
"=",
"None",
",",
"fileobj",
"=",
"None",
")",
":",
"with",
"_auto_fileobj",
"(",
"path",
",",
"fileobj",
",",
"'wb'",
")",
"as",
"fh",
":",
"fh",
".",
"write",
"(",
"'%d\\n'",
"%",
"self",
".",
"CURRENT_VERSION",
")",
"for",
"dest",
"in",
"sorted",
"(",
"self",
".",
"_dests",
")",
":",
"entry",
"=",
"self",
".",
"_dests",
"[",
"dest",
"]",
"parts",
"=",
"[",
"'%d'",
"%",
"entry",
"[",
"0",
"]",
",",
"dest",
"]",
"parts",
".",
"extend",
"(",
"entry",
"[",
"1",
":",
"]",
")",
"fh",
".",
"write",
"(",
"'%s\\n'",
"%",
"self",
".",
"FIELD_SEPARATOR",
".",
"join",
"(",
"p",
".",
"encode",
"(",
"'utf-8'",
")",
"for",
"p",
"in",
"parts",
")",
")"
] | https://github.com/ricardoquesada/Spidermonkey/blob/4a75ea2543408bd1b2c515aa95901523eeef7858/python/mozbuild/mozpack/manifests.py#L212-L229 |
||
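A small usage sketch of the two call styles the docstring allows; the manifest instance and file name here are assumptions:

manifest = InstallManifest()                  # entries added elsewhere
manifest.write(path='_build_manifest')        # serialize to a file path...
with open('_build_manifest', 'wb') as fh:     # ...or to an already-open file object
    manifest.write(fileobj=fh)                # passing both at once is an error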
LisaAnne/lisa-caffe-public | 49b8643ddef23a4f6120017968de30c45e693f59 | python/caffe/io.py | python | Transformer.set_transpose | (self, in_, order) | Set the input channel order for e.g. RGB to BGR conversion
as needed for the reference ImageNet model.
Take
in_: which input to assign this channel order
order: the order to transpose the dimensions | Set the input channel order for e.g. RGB to BGR conversion
as needed for the reference ImageNet model. | [
"Set",
"the",
"input",
"channel",
"order",
"for",
"e",
".",
"g",
".",
"RGB",
"to",
"BGR",
"conversion",
"as",
"needed",
"for",
"the",
"reference",
"ImageNet",
"model",
"."
] | def set_transpose(self, in_, order):
"""
Set the input channel order for e.g. RGB to BGR conversion
as needed for the reference ImageNet model.
Take
in_: which input to assign this channel order
order: the order to transpose the dimensions
"""
self.__check_input(in_)
if len(order) != len(self.inputs[in_]) - 1:
raise Exception('Transpose order needs to have the same number of '
'dimensions as the input.')
self.transpose[in_] = order | [
"def",
"set_transpose",
"(",
"self",
",",
"in_",
",",
"order",
")",
":",
"self",
".",
"__check_input",
"(",
"in_",
")",
"if",
"len",
"(",
"order",
")",
"!=",
"len",
"(",
"self",
".",
"inputs",
"[",
"in_",
"]",
")",
"-",
"1",
":",
"raise",
"Exception",
"(",
"'Transpose order needs to have the same number of '",
"'dimensions as the input.'",
")",
"self",
".",
"transpose",
"[",
"in_",
"]",
"=",
"order"
] | https://github.com/LisaAnne/lisa-caffe-public/blob/49b8643ddef23a4f6120017968de30c45e693f59/python/caffe/io.py#L183-L196 |
||
catboost/catboost | 167f64f237114a4d10b2b4ee42adb4569137debe | contrib/python/traitlets/py2/traitlets/traitlets.py | python | MetaHasDescriptors.__new__ | (mcls, name, bases, classdict) | return super(MetaHasDescriptors, mcls).__new__(mcls, name, bases, classdict) | Create the HasDescriptors class. | Create the HasDescriptors class. | [
"Create",
"the",
"HasDescriptors",
"class",
"."
] | def __new__(mcls, name, bases, classdict):
"""Create the HasDescriptors class."""
for k, v in classdict.items():
# ----------------------------------------------------------------
# Support of deprecated behavior allowing for TraitType types
# to be used instead of TraitType instances.
if inspect.isclass(v) and issubclass(v, TraitType):
warn("Traits should be given as instances, not types (for example, `Int()`, not `Int`)."
" Passing types is deprecated in traitlets 4.1.",
DeprecationWarning, stacklevel=2)
classdict[k] = v()
# ----------------------------------------------------------------
return super(MetaHasDescriptors, mcls).__new__(mcls, name, bases, classdict) | [
"def",
"__new__",
"(",
"mcls",
",",
"name",
",",
"bases",
",",
"classdict",
")",
":",
"for",
"k",
",",
"v",
"in",
"classdict",
".",
"items",
"(",
")",
":",
"# ----------------------------------------------------------------",
"# Support of deprecated behavior allowing for TraitType types",
"# to be used instead of TraitType instances.",
"if",
"inspect",
".",
"isclass",
"(",
"v",
")",
"and",
"issubclass",
"(",
"v",
",",
"TraitType",
")",
":",
"warn",
"(",
"\"Traits should be given as instances, not types (for example, `Int()`, not `Int`).\"",
"\" Passing types is deprecated in traitlets 4.1.\"",
",",
"DeprecationWarning",
",",
"stacklevel",
"=",
"2",
")",
"classdict",
"[",
"k",
"]",
"=",
"v",
"(",
")",
"# ----------------------------------------------------------------",
"return",
"super",
"(",
"MetaHasDescriptors",
",",
"mcls",
")",
".",
"__new__",
"(",
"mcls",
",",
"name",
",",
"bases",
",",
"classdict",
")"
] | https://github.com/catboost/catboost/blob/167f64f237114a4d10b2b4ee42adb4569137debe/contrib/python/traitlets/py2/traitlets/traitlets.py#L722-L735 |
|
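A sketch of the deprecated spelling this metaclass still accepts; both classes below are hypothetical examples:

from traitlets import HasTraits, Int

class Legacy(HasTraits):
    count = Int       # trait given as a type: rewritten to Int() with a DeprecationWarning

class Preferred(HasTraits):
    count = Int(0)    # trait given as an instance: used as-is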
krishauser/Klampt | 972cc83ea5befac3f653c1ba20f80155768ad519 | Python/python2_version/klampt/math/so3.py | python | quaternion | (R) | Given a Klamp't rotation representation, produces the corresponding
unit quaternion (w,x,y,z). | Given a Klamp't rotation representation, produces the corresponding
unit quaternion (w,x,y,z). | [
"Given",
"a",
"Klamp",
"t",
"rotation",
"representation",
"produces",
"the",
"corresponding",
"unit",
"quaternion",
"(",
"w",
"x",
"y",
"z",
")",
"."
] | def quaternion(R):
"""Given a Klamp't rotation representation, produces the corresponding
unit quaternion (w,x,y,z)."""
tr = trace(R) + 1.0;
a11,a21,a31,a12,a22,a32,a13,a23,a33 = R
#If the trace is nonzero, it's a nondegenerate rotation
if tr > 1e-5:
s = math.sqrt(tr)
w = s * 0.5
s = 0.5 / s
x = (a32 - a23) * s
y = (a13 - a31) * s
z = (a21 - a12) * s
return vectorops.unit((w,x,y,z))
else:
#degenerate it's a rotation of 180 degrees
nxt = [1, 2, 0]
#check for largest diagonal entry
i = 0
if a22 > a11: i = 1
if a33 > max(a11,a22): i = 2
j = nxt[i]
k = nxt[j]
M = matrix(R)
q = [0.0]*4
s = math.sqrt((M[i][i] - (M[j][j] + M[k][k])) + 1.0);
q[i] = s * 0.5
if abs(s)<1e-7:
raise ValueError("Could not solve for quaternion... Invalid rotation matrix?")
else:
s = 0.5 / s;
q[3] = (M[k][j] - M[j][k]) * s;
q[j] = (M[i][j] + M[j][i]) * s;
q[k] = (M[i][k] + M[i][k]) * s;
w,x,y,z = q[3],q[0],q[1],q[2]
return vectorops.unit([w,x,y,z]) | [
"def",
"quaternion",
"(",
"R",
")",
":",
"tr",
"=",
"trace",
"(",
"R",
")",
"+",
"1.0",
"a11",
",",
"a21",
",",
"a31",
",",
"a12",
",",
"a22",
",",
"a32",
",",
"a13",
",",
"a23",
",",
"a33",
"=",
"R",
"#If the trace is nonzero, it's a nondegenerate rotation",
"if",
"tr",
">",
"1e-5",
":",
"s",
"=",
"math",
".",
"sqrt",
"(",
"tr",
")",
"w",
"=",
"s",
"*",
"0.5",
"s",
"=",
"0.5",
"/",
"s",
"x",
"=",
"(",
"a32",
"-",
"a23",
")",
"*",
"s",
"y",
"=",
"(",
"a13",
"-",
"a31",
")",
"*",
"s",
"z",
"=",
"(",
"a21",
"-",
"a12",
")",
"*",
"s",
"return",
"vectorops",
".",
"unit",
"(",
"(",
"w",
",",
"x",
",",
"y",
",",
"z",
")",
")",
"else",
":",
"#degenerate it's a rotation of 180 degrees",
"nxt",
"=",
"[",
"1",
",",
"2",
",",
"0",
"]",
"#check for largest diagonal entry",
"i",
"=",
"0",
"if",
"a22",
">",
"a11",
":",
"i",
"=",
"1",
"if",
"a33",
">",
"max",
"(",
"a11",
",",
"a22",
")",
":",
"i",
"=",
"2",
"j",
"=",
"nxt",
"[",
"i",
"]",
"k",
"=",
"nxt",
"[",
"j",
"]",
"M",
"=",
"matrix",
"(",
"R",
")",
"q",
"=",
"[",
"0.0",
"]",
"*",
"4",
"s",
"=",
"math",
".",
"sqrt",
"(",
"(",
"M",
"[",
"i",
"]",
"[",
"i",
"]",
"-",
"(",
"M",
"[",
"j",
"]",
"[",
"j",
"]",
"+",
"M",
"[",
"k",
"]",
"[",
"k",
"]",
")",
")",
"+",
"1.0",
")",
"q",
"[",
"i",
"]",
"=",
"s",
"*",
"0.5",
"if",
"abs",
"(",
"s",
")",
"<",
"1e-7",
":",
"raise",
"ValueError",
"(",
"\"Could not solve for quaternion... Invalid rotation matrix?\"",
")",
"else",
":",
"s",
"=",
"0.5",
"/",
"s",
"q",
"[",
"3",
"]",
"=",
"(",
"M",
"[",
"k",
"]",
"[",
"j",
"]",
"-",
"M",
"[",
"j",
"]",
"[",
"k",
"]",
")",
"*",
"s",
"q",
"[",
"j",
"]",
"=",
"(",
"M",
"[",
"i",
"]",
"[",
"j",
"]",
"+",
"M",
"[",
"j",
"]",
"[",
"i",
"]",
")",
"*",
"s",
"q",
"[",
"k",
"]",
"=",
"(",
"M",
"[",
"i",
"]",
"[",
"k",
"]",
"+",
"M",
"[",
"i",
"]",
"[",
"k",
"]",
")",
"*",
"s",
"w",
",",
"x",
",",
"y",
",",
"z",
"=",
"q",
"[",
"3",
"]",
",",
"q",
"[",
"0",
"]",
",",
"q",
"[",
"1",
"]",
",",
"q",
"[",
"2",
"]",
"return",
"vectorops",
".",
"unit",
"(",
"[",
"w",
",",
"x",
",",
"y",
",",
"z",
"]",
")"
] | https://github.com/krishauser/Klampt/blob/972cc83ea5befac3f653c1ba20f80155768ad519/Python/python2_version/klampt/math/so3.py#L226-L264 |
||
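A quick sanity-check sketch: Klamp't stores a rotation as a 9-element column-major list, and the identity rotation should map to the unit quaternion (w, x, y, z) = (1, 0, 0, 0). The import path follows the file shown above.

from klampt.math import so3, vectorops

R_identity = [1, 0, 0, 0, 1, 0, 0, 0, 1]     # columns of the 3x3 identity matrix
q = so3.quaternion(R_identity)               # approximately [1.0, 0.0, 0.0, 0.0]
assert abs(vectorops.norm(q) - 1.0) < 1e-9   # the result is returned normalized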
edvardHua/PoseEstimationForMobile | e31fb850c92ba7e220f861e9484b9cd1bdd5696f | training/docker/cocoapi/PythonAPI/pycocotools/coco.py | python | COCO.info | (self) | Print information about the annotation file.
:return: | Print information about the annotation file.
:return: | [
"Print",
"information",
"about",
"the",
"annotation",
"file",
".",
":",
"return",
":"
] | def info(self):
"""
Print information about the annotation file.
:return:
"""
for key, value in self.dataset['info'].items():
print('{}: {}'.format(key, value)) | [
"def",
"info",
"(",
"self",
")",
":",
"for",
"key",
",",
"value",
"in",
"self",
".",
"dataset",
"[",
"'info'",
"]",
".",
"items",
"(",
")",
":",
"print",
"(",
"'{}: {}'",
".",
"format",
"(",
"key",
",",
"value",
")",
")"
] | https://github.com/edvardHua/PoseEstimationForMobile/blob/e31fb850c92ba7e220f861e9484b9cd1bdd5696f/training/docker/cocoapi/PythonAPI/pycocotools/coco.py#L121-L127 |
||
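Typical pycocotools usage sketch; the annotation file path is an assumption:

from pycocotools.coco import COCO

coco = COCO('annotations/instances_val2017.json')  # loads and indexes the annotation file
coco.info()                                        # prints each key/value of dataset['info']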
mantidproject/mantid | 03deeb89254ec4289edb8771e0188c2090a02f32 | qt/python/mantidqt/mantidqt/widgets/superplot/presenter.py | python | SuperplotPresenter.on_drop | (self, name) | Triggered when a drop event is received in the list widget. Here, name
is assumed to be a workspace name.
Args:
name (str): workspace name | Triggered when a drop event is received in the list widget. Here, name
is assumed to be a workspace name. | [
"Triggered",
"when",
"a",
"drop",
"event",
"is",
"received",
"in",
"the",
"list",
"widget",
".",
"Here",
"name",
"is",
"assumed",
"to",
"be",
"a",
"workspace",
"name",
"."
] | def on_drop(self, name):
"""
Triggered when a drop event is received in the list widget. Here, name
is assumed to be a workspace name.
Args:
name (str): workspace name
"""
selection = self._view.get_selection()
self._model.add_workspace(name)
self._update_list()
self._view.set_selection(selection)
self._update_plot() | [
"def",
"on_drop",
"(",
"self",
",",
"name",
")",
":",
"selection",
"=",
"self",
".",
"_view",
".",
"get_selection",
"(",
")",
"self",
".",
"_model",
".",
"add_workspace",
"(",
"name",
")",
"self",
".",
"_update_list",
"(",
")",
"self",
".",
"_view",
".",
"set_selection",
"(",
"selection",
")",
"self",
".",
"_update_plot",
"(",
")"
] | https://github.com/mantidproject/mantid/blob/03deeb89254ec4289edb8771e0188c2090a02f32/qt/python/mantidqt/mantidqt/widgets/superplot/presenter.py#L240-L252 |
||
benoitsteiner/tensorflow-opencl | cb7cb40a57fde5cfd4731bc551e82a1e2fef43a5 | tensorflow/contrib/slim/python/slim/data/dataset_data_provider.py | python | DatasetDataProvider.__init__ | (self,
dataset,
num_readers=1,
reader_kwargs=None,
shuffle=True,
num_epochs=None,
common_queue_capacity=256,
common_queue_min=128,
record_key='record_key',
seed=None,
scope=None) | Creates a DatasetDataProvider.
Args:
dataset: An instance of the Dataset class.
num_readers: The number of parallel readers to use.
reader_kwargs: An optional dict of kwargs for the reader.
shuffle: Whether to shuffle the data sources and common queue when
reading.
num_epochs: The number of times each data source is read. If left as None,
the data will be cycled through indefinitely.
common_queue_capacity: The capacity of the common queue.
common_queue_min: The minimum number of elements in the common queue after
a dequeue.
record_key: The item name to use for the dataset record keys in the
provided tensors.
seed: The seed to use if shuffling.
scope: Optional name scope for the ops.
Raises:
ValueError: If `record_key` matches one of the items in the dataset. | Creates a DatasetDataProvider. | [
"Creates",
"a",
"DatasetDataProvider",
"."
] | def __init__(self,
dataset,
num_readers=1,
reader_kwargs=None,
shuffle=True,
num_epochs=None,
common_queue_capacity=256,
common_queue_min=128,
record_key='record_key',
seed=None,
scope=None):
"""Creates a DatasetDataProvider.
Args:
dataset: An instance of the Dataset class.
num_readers: The number of parallel readers to use.
reader_kwargs: An optional dict of kwargs for the reader.
shuffle: Whether to shuffle the data sources and common queue when
reading.
num_epochs: The number of times each data source is read. If left as None,
the data will be cycled through indefinitely.
common_queue_capacity: The capacity of the common queue.
common_queue_min: The minimum number of elements in the common queue after
a dequeue.
record_key: The item name to use for the dataset record keys in the
provided tensors.
seed: The seed to use if shuffling.
scope: Optional name scope for the ops.
Raises:
ValueError: If `record_key` matches one of the items in the dataset.
"""
key, data = parallel_reader.parallel_read(
dataset.data_sources,
reader_class=dataset.reader,
num_epochs=num_epochs,
num_readers=num_readers,
reader_kwargs=reader_kwargs,
shuffle=shuffle,
capacity=common_queue_capacity,
min_after_dequeue=common_queue_min,
seed=seed,
scope=scope)
items = dataset.decoder.list_items()
tensors = dataset.decoder.decode(data, items)
if record_key in items:
raise ValueError('The item name used for `record_key` cannot also be '
'used for a dataset item: %s', record_key)
items.append(record_key)
tensors.append(key)
super(DatasetDataProvider, self).__init__(
items_to_tensors=dict(zip(items, tensors)),
num_samples=dataset.num_samples) | [
"def",
"__init__",
"(",
"self",
",",
"dataset",
",",
"num_readers",
"=",
"1",
",",
"reader_kwargs",
"=",
"None",
",",
"shuffle",
"=",
"True",
",",
"num_epochs",
"=",
"None",
",",
"common_queue_capacity",
"=",
"256",
",",
"common_queue_min",
"=",
"128",
",",
"record_key",
"=",
"'record_key'",
",",
"seed",
"=",
"None",
",",
"scope",
"=",
"None",
")",
":",
"key",
",",
"data",
"=",
"parallel_reader",
".",
"parallel_read",
"(",
"dataset",
".",
"data_sources",
",",
"reader_class",
"=",
"dataset",
".",
"reader",
",",
"num_epochs",
"=",
"num_epochs",
",",
"num_readers",
"=",
"num_readers",
",",
"reader_kwargs",
"=",
"reader_kwargs",
",",
"shuffle",
"=",
"shuffle",
",",
"capacity",
"=",
"common_queue_capacity",
",",
"min_after_dequeue",
"=",
"common_queue_min",
",",
"seed",
"=",
"seed",
",",
"scope",
"=",
"scope",
")",
"items",
"=",
"dataset",
".",
"decoder",
".",
"list_items",
"(",
")",
"tensors",
"=",
"dataset",
".",
"decoder",
".",
"decode",
"(",
"data",
",",
"items",
")",
"if",
"record_key",
"in",
"items",
":",
"raise",
"ValueError",
"(",
"'The item name used for `record_key` cannot also be '",
"'used for a dataset item: %s'",
",",
"record_key",
")",
"items",
".",
"append",
"(",
"record_key",
")",
"tensors",
".",
"append",
"(",
"key",
")",
"super",
"(",
"DatasetDataProvider",
",",
"self",
")",
".",
"__init__",
"(",
"items_to_tensors",
"=",
"dict",
"(",
"zip",
"(",
"items",
",",
"tensors",
")",
")",
",",
"num_samples",
"=",
"dataset",
".",
"num_samples",
")"
] | https://github.com/benoitsteiner/tensorflow-opencl/blob/cb7cb40a57fde5cfd4731bc551e82a1e2fef43a5/tensorflow/contrib/slim/python/slim/data/dataset_data_provider.py#L53-L107 |
||
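A typical tf-slim read-pipeline sketch; the dataset object and the item names 'image'/'label' are assumptions taken from common slim datasets:

import tensorflow.contrib.slim as slim   # TF 1.x contrib package

provider = slim.dataset_data_provider.DatasetDataProvider(
    dataset,                     # a slim Dataset built elsewhere
    num_readers=4,
    shuffle=True,
    common_queue_capacity=512,
    common_queue_min=256)
image, label = provider.get(['image', 'label'])   # tensors decoded by dataset.decoder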
wxWidgets/wxPython-Classic | 19571e1ae65f1ac445f5491474121998c97a1bf0 | wx/lib/masked/numctrl.py | python | NumCtrl.SetAllowNone | (self, allow_none) | Change the behavior of the validation code, allowing control
to have a value of None or not, as appropriate. If the value
of the control is currently None, and allow_none is False, the
value of the control will be set to the minimum value of the
control, or 0 if no lower bound is set. | Change the behavior of the validation code, allowing control
to have a value of None or not, as appropriate. If the value
of the control is currently None, and allow_none is False, the
value of the control will be set to the minimum value of the
control, or 0 if no lower bound is set. | [
"Change",
"the",
"behavior",
"of",
"the",
"validation",
"code",
"allowing",
"control",
"to",
"have",
"a",
"value",
"of",
"None",
"or",
"not",
"as",
"appropriate",
".",
"If",
"the",
"value",
"of",
"the",
"control",
"is",
"currently",
"None",
"and",
"allow_none",
"is",
"False",
"the",
"value",
"of",
"the",
"control",
"will",
"be",
"set",
"to",
"the",
"minimum",
"value",
"of",
"the",
"control",
"or",
"0",
"if",
"no",
"lower",
"bound",
"is",
"set",
"."
] | def SetAllowNone(self, allow_none):
"""
Change the behavior of the validation code, allowing control
to have a value of None or not, as appropriate. If the value
of the control is currently None, and allow_none is False, the
value of the control will be set to the minimum value of the
control, or 0 if no lower bound is set.
"""
self._allowNone = allow_none
if not allow_none and self.GetValue() is None:
min = self.GetMin()
if min is not None: self.SetValue(min)
else: self.SetValue(0) | [
"def",
"SetAllowNone",
"(",
"self",
",",
"allow_none",
")",
":",
"self",
".",
"_allowNone",
"=",
"allow_none",
"if",
"not",
"allow_none",
"and",
"self",
".",
"GetValue",
"(",
")",
"is",
"None",
":",
"min",
"=",
"self",
".",
"GetMin",
"(",
")",
"if",
"min",
"is",
"not",
"None",
":",
"self",
".",
"SetValue",
"(",
"min",
")",
"else",
":",
"self",
".",
"SetValue",
"(",
"0",
")"
] | https://github.com/wxWidgets/wxPython-Classic/blob/19571e1ae65f1ac445f5491474121998c97a1bf0/wx/lib/masked/numctrl.py#L1497-L1509 |
||
domino-team/openwrt-cc | 8b181297c34d14d3ca521cc9f31430d561dbc688 | package/gli-pub/openwrt-node-packages-master/node/node-v6.9.1/deps/v8_inspector/third_party/jinja2/jinja2/sandbox.py | python | modifies_known_mutable | (obj, attr) | return False | This function checks if an attribute on a builtin mutable object
(list, dict, set or deque) would modify it if called. It also supports
the "user"-versions of the objects (`sets.Set`, `UserDict.*` etc.) and
with Python 2.6 onwards the abstract base classes `MutableSet`,
`MutableMapping`, and `MutableSequence`.
>>> modifies_known_mutable({}, "clear")
True
>>> modifies_known_mutable({}, "keys")
False
>>> modifies_known_mutable([], "append")
True
>>> modifies_known_mutable([], "index")
False
If called with an unsupported object (such as unicode) `False` is
returned.
>>> modifies_known_mutable("foo", "upper")
False | This function checks if an attribute on a builtin mutable object
(list, dict, set or deque) would modify it if called. It also supports
the "user"-versions of the objects (`sets.Set`, `UserDict.*` etc.) and
with Python 2.6 onwards the abstract base classes `MutableSet`,
`MutableMapping`, and `MutableSequence`. | [
"This",
"function",
"checks",
"if",
"an",
"attribute",
"on",
"a",
"builtin",
"mutable",
"object",
"(",
"list",
"dict",
"set",
"or",
"deque",
")",
"would",
"modify",
"it",
"if",
"called",
".",
"It",
"also",
"supports",
"the",
"user",
"-",
"versions",
"of",
"the",
"objects",
"(",
"sets",
".",
"Set",
"UserDict",
".",
"*",
"etc",
".",
")",
"and",
"with",
"Python",
"2",
".",
"6",
"onwards",
"the",
"abstract",
"base",
"classes",
"MutableSet",
"MutableMapping",
"and",
"MutableSequence",
"."
] | def modifies_known_mutable(obj, attr):
"""This function checks if an attribute on a builtin mutable object
(list, dict, set or deque) would modify it if called. It also supports
the "user"-versions of the objects (`sets.Set`, `UserDict.*` etc.) and
with Python 2.6 onwards the abstract base classes `MutableSet`,
`MutableMapping`, and `MutableSequence`.
>>> modifies_known_mutable({}, "clear")
True
>>> modifies_known_mutable({}, "keys")
False
>>> modifies_known_mutable([], "append")
True
>>> modifies_known_mutable([], "index")
False
If called with an unsupported object (such as unicode) `False` is
returned.
>>> modifies_known_mutable("foo", "upper")
False
"""
for typespec, unsafe in _mutable_spec:
if isinstance(obj, typespec):
return attr in unsafe
return False | [
"def",
"modifies_known_mutable",
"(",
"obj",
",",
"attr",
")",
":",
"for",
"typespec",
",",
"unsafe",
"in",
"_mutable_spec",
":",
"if",
"isinstance",
"(",
"obj",
",",
"typespec",
")",
":",
"return",
"attr",
"in",
"unsafe",
"return",
"False"
] | https://github.com/domino-team/openwrt-cc/blob/8b181297c34d14d3ca521cc9f31430d561dbc688/package/gli-pub/openwrt-node-packages-master/node/node-v6.9.1/deps/v8_inspector/third_party/jinja2/jinja2/sandbox.py#L151-L176 |
|
smilehao/xlua-framework | a03801538be2b0e92d39332d445b22caca1ef61f | ConfigData/trunk/tools/protobuf-2.5.0/protobuf-2.5.0/python/google/protobuf/descriptor_pool.py | python | DescriptorPool._MakeEnumValueDescriptor | (self, value_proto, index) | return descriptor.EnumValueDescriptor(
name=value_proto.name,
index=index,
number=value_proto.number,
options=value_proto.options,
type=None) | Creates a enum value descriptor object from a enum value proto.
Args:
value_proto: The proto describing the enum value.
index: The index of the enum value.
Returns:
An initialized EnumValueDescriptor object. | Creates a enum value descriptor object from a enum value proto. | [
"Creates",
"a",
"enum",
"value",
"descriptor",
"object",
"from",
"a",
"enum",
"value",
"proto",
"."
] | def _MakeEnumValueDescriptor(self, value_proto, index):
"""Creates a enum value descriptor object from a enum value proto.
Args:
value_proto: The proto describing the enum value.
index: The index of the enum value.
Returns:
An initialized EnumValueDescriptor object.
"""
return descriptor.EnumValueDescriptor(
name=value_proto.name,
index=index,
number=value_proto.number,
options=value_proto.options,
type=None) | [
"def",
"_MakeEnumValueDescriptor",
"(",
"self",
",",
"value_proto",
",",
"index",
")",
":",
"return",
"descriptor",
".",
"EnumValueDescriptor",
"(",
"name",
"=",
"value_proto",
".",
"name",
",",
"index",
"=",
"index",
",",
"number",
"=",
"value_proto",
".",
"number",
",",
"options",
"=",
"value_proto",
".",
"options",
",",
"type",
"=",
"None",
")"
] | https://github.com/smilehao/xlua-framework/blob/a03801538be2b0e92d39332d445b22caca1ef61f/ConfigData/trunk/tools/protobuf-2.5.0/protobuf-2.5.0/python/google/protobuf/descriptor_pool.py#L437-L453 |
|
ricardoquesada/Spidermonkey | 4a75ea2543408bd1b2c515aa95901523eeef7858 | config/configobj.py | python | Section.merge | (self, indict) | A recursive update - useful for merging config files.
>>> a = '''[section1]
... option1 = True
... [[subsection]]
... more_options = False
... # end of file'''.splitlines()
>>> b = '''# File is user.ini
... [section1]
... option1 = False
... # end of file'''.splitlines()
>>> c1 = ConfigObj(b)
>>> c2 = ConfigObj(a)
>>> c2.merge(c1)
>>> c2
{'section1': {'option1': 'False', 'subsection': {'more_options': 'False'}}} | A recursive update - useful for merging config files.
>>> a = '''[section1]
... option1 = True
... [[subsection]]
... more_options = False
... # end of file'''.splitlines()
>>> b = '''# File is user.ini
... [section1]
... option1 = False
... # end of file'''.splitlines()
>>> c1 = ConfigObj(b)
>>> c2 = ConfigObj(a)
>>> c2.merge(c1)
>>> c2
{'section1': {'option1': 'False', 'subsection': {'more_options': 'False'}}} | [
"A",
"recursive",
"update",
"-",
"useful",
"for",
"merging",
"config",
"files",
".",
">>>",
"a",
"=",
"[",
"section1",
"]",
"...",
"option1",
"=",
"True",
"...",
"[[",
"subsection",
"]]",
"...",
"more_options",
"=",
"False",
"...",
"#",
"end",
"of",
"file",
".",
"splitlines",
"()",
">>>",
"b",
"=",
"#",
"File",
"is",
"user",
".",
"ini",
"...",
"[",
"section1",
"]",
"...",
"option1",
"=",
"False",
"...",
"#",
"end",
"of",
"file",
".",
"splitlines",
"()",
">>>",
"c1",
"=",
"ConfigObj",
"(",
"b",
")",
">>>",
"c2",
"=",
"ConfigObj",
"(",
"a",
")",
">>>",
"c2",
".",
"merge",
"(",
"c1",
")",
">>>",
"c2",
"{",
"section1",
":",
"{",
"option1",
":",
"False",
"subsection",
":",
"{",
"more_options",
":",
"False",
"}}}"
] | def merge(self, indict):
"""
A recursive update - useful for merging config files.
>>> a = '''[section1]
... option1 = True
... [[subsection]]
... more_options = False
... # end of file'''.splitlines()
>>> b = '''# File is user.ini
... [section1]
... option1 = False
... # end of file'''.splitlines()
>>> c1 = ConfigObj(b)
>>> c2 = ConfigObj(a)
>>> c2.merge(c1)
>>> c2
{'section1': {'option1': 'False', 'subsection': {'more_options': 'False'}}}
"""
for key, val in indict.items():
if (key in self and isinstance(self[key], dict) and
isinstance(val, dict)):
self[key].merge(val)
else:
self[key] = val | [
"def",
"merge",
"(",
"self",
",",
"indict",
")",
":",
"for",
"key",
",",
"val",
"in",
"indict",
".",
"items",
"(",
")",
":",
"if",
"(",
"key",
"in",
"self",
"and",
"isinstance",
"(",
"self",
"[",
"key",
"]",
",",
"dict",
")",
"and",
"isinstance",
"(",
"val",
",",
"dict",
")",
")",
":",
"self",
"[",
"key",
"]",
".",
"merge",
"(",
"val",
")",
"else",
":",
"self",
"[",
"key",
"]",
"=",
"val"
] | https://github.com/ricardoquesada/Spidermonkey/blob/4a75ea2543408bd1b2c515aa95901523eeef7858/config/configobj.py#L728-L752 |
||
hpi-xnor/BMXNet-v2 | af2b1859eafc5c721b1397cef02f946aaf2ce20d | example/rnn/large_word_lm/run_utils.py | python | evaluate | (mod, data_iter, epoch, log_interval) | return loss | Run evaluation on cpu. | Run evaluation on cpu. | [
"Run",
"evaluation",
"on",
"cpu",
"."
] | def evaluate(mod, data_iter, epoch, log_interval):
""" Run evaluation on cpu. """
start = time.time()
total_L = 0.0
nbatch = 0
density = 0
mod.set_states(value=0)
for batch in data_iter:
mod.forward(batch, is_train=False)
outputs = mod.get_outputs(merge_multi_context=False)
states = outputs[:-1]
total_L += outputs[-1][0]
mod.set_states(states=states)
nbatch += 1
# don't include padding data in the test perplexity
density += batch.data[1].mean()
if (nbatch + 1) % log_interval == 0:
logging.info("Eval batch %d loss : %.7f" % (nbatch, (total_L / density).asscalar()))
data_iter.reset()
loss = (total_L / density).asscalar()
ppl = math.exp(loss) if loss < 100 else 1e37
end = time.time()
logging.info('Iter[%d]\t\t CE loss %.7f, ppl %.7f. Eval duration = %.2f seconds ' % \
(epoch, loss, ppl, end - start))
return loss | [
"def",
"evaluate",
"(",
"mod",
",",
"data_iter",
",",
"epoch",
",",
"log_interval",
")",
":",
"start",
"=",
"time",
".",
"time",
"(",
")",
"total_L",
"=",
"0.0",
"nbatch",
"=",
"0",
"density",
"=",
"0",
"mod",
".",
"set_states",
"(",
"value",
"=",
"0",
")",
"for",
"batch",
"in",
"data_iter",
":",
"mod",
".",
"forward",
"(",
"batch",
",",
"is_train",
"=",
"False",
")",
"outputs",
"=",
"mod",
".",
"get_outputs",
"(",
"merge_multi_context",
"=",
"False",
")",
"states",
"=",
"outputs",
"[",
":",
"-",
"1",
"]",
"total_L",
"+=",
"outputs",
"[",
"-",
"1",
"]",
"[",
"0",
"]",
"mod",
".",
"set_states",
"(",
"states",
"=",
"states",
")",
"nbatch",
"+=",
"1",
"# don't include padding data in the test perplexity",
"density",
"+=",
"batch",
".",
"data",
"[",
"1",
"]",
".",
"mean",
"(",
")",
"if",
"(",
"nbatch",
"+",
"1",
")",
"%",
"log_interval",
"==",
"0",
":",
"logging",
".",
"info",
"(",
"\"Eval batch %d loss : %.7f\"",
"%",
"(",
"nbatch",
",",
"(",
"total_L",
"/",
"density",
")",
".",
"asscalar",
"(",
")",
")",
")",
"data_iter",
".",
"reset",
"(",
")",
"loss",
"=",
"(",
"total_L",
"/",
"density",
")",
".",
"asscalar",
"(",
")",
"ppl",
"=",
"math",
".",
"exp",
"(",
"loss",
")",
"if",
"loss",
"<",
"100",
"else",
"1e37",
"end",
"=",
"time",
".",
"time",
"(",
")",
"logging",
".",
"info",
"(",
"'Iter[%d]\\t\\t CE loss %.7f, ppl %.7f. Eval duration = %.2f seconds '",
"%",
"(",
"epoch",
",",
"loss",
",",
"ppl",
",",
"end",
"-",
"start",
")",
")",
"return",
"loss"
] | https://github.com/hpi-xnor/BMXNet-v2/blob/af2b1859eafc5c721b1397cef02f946aaf2ce20d/example/rnn/large_word_lm/run_utils.py#L66-L90 |
|
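The perplexity reported above is just the exponential of the average cross-entropy, with the same overflow guard the code uses:

import math

loss = 4.61                                    # example average cross-entropy in nats
ppl = math.exp(loss) if loss < 100 else 1e37   # about 100.5 for this loss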
leela-zero/leela-zero | e3ed6310d33d75078ba74c3adf887d18439fc2e3 | training/tf/chunkparser.py | python | ChunkParser.parse | (self) | Read data from child workers and yield batches
of raw tensors | Read data from child workers and yield batches
of raw tensors | [
"Read",
"data",
"from",
"child",
"workers",
"and",
"yield",
"batches",
"of",
"raw",
"tensors"
] | def parse(self):
"""
Read data from child workers and yield batches
of raw tensors
"""
gen = self.v2_gen() # read from workers
gen = self.tuple_gen(gen) # convert v2->tuple
gen = self.batch_gen(gen) # assemble into batches
for b in gen:
yield b | [
"def",
"parse",
"(",
"self",
")",
":",
"gen",
"=",
"self",
".",
"v2_gen",
"(",
")",
"# read from workers",
"gen",
"=",
"self",
".",
"tuple_gen",
"(",
"gen",
")",
"# convert v2->tuple",
"gen",
"=",
"self",
".",
"batch_gen",
"(",
"gen",
")",
"# assemble into batches",
"for",
"b",
"in",
"gen",
":",
"yield",
"b"
] | https://github.com/leela-zero/leela-zero/blob/e3ed6310d33d75078ba74c3adf887d18439fc2e3/training/tf/chunkparser.py#L377-L386 |
||
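A consumption sketch; the ChunkParser constructor arguments and the training-step call are assumptions:

parser = ChunkParser(chunk_source)   # constructor arguments elided / assumed
for batch in parser.parse():         # v2 records -> tuples -> batched raw tensors
    run_training_step(batch)         # hypothetical consumer of each batch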
nvdla/sw | 79538ba1b52b040a4a4645f630e457fa01839e90 | umd/external/protobuf-2.6/python/mox.py | python | And.equals | (self, rhs) | return True | Checks whether all Comparators are equal to rhs.
Args:
# rhs: can be anything
Returns:
bool | Checks whether all Comparators are equal to rhs. | [
"Checks",
"whether",
"all",
"Comparators",
"are",
"equal",
"to",
"rhs",
"."
] | def equals(self, rhs):
"""Checks whether all Comparators are equal to rhs.
Args:
# rhs: can be anything
Returns:
bool
"""
for comparator in self._comparators:
if not comparator.equals(rhs):
return False
return True | [
"def",
"equals",
"(",
"self",
",",
"rhs",
")",
":",
"for",
"comparator",
"in",
"self",
".",
"_comparators",
":",
"if",
"not",
"comparator",
".",
"equals",
"(",
"rhs",
")",
":",
"return",
"False",
"return",
"True"
] | https://github.com/nvdla/sw/blob/79538ba1b52b040a4a4645f630e457fa01839e90/umd/external/protobuf-2.6/python/mox.py#L1059-L1073 |
|
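A behaviour sketch: And(...) matches only when every wrapped comparator accepts the value. IsA and In are other mox comparators; the concrete values are assumptions:

import mox

comparator = mox.And(mox.IsA(dict), mox.In('user'))
comparator.equals({'user': 'alice'})   # True: a dict that contains the key 'user'
comparator.equals({'id': 7})           # False: 'user' is missing, so In('user') fails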
aws/lumberyard | f85344403c1c2e77ec8c75deb2c116e97b713217 | dev/Tools/Python/3.7.10/windows/Lib/pydoc.py | python | render_doc | (thing, title='Python Library Documentation: %s', forceload=0,
renderer=None) | return title % desc + '\n\n' + renderer.document(object, name) | Render text documentation, given an object or a path to an object. | Render text documentation, given an object or a path to an object. | [
"Render",
"text",
"documentation",
"given",
"an",
"object",
"or",
"a",
"path",
"to",
"an",
"object",
"."
] | def render_doc(thing, title='Python Library Documentation: %s', forceload=0,
renderer=None):
"""Render text documentation, given an object or a path to an object."""
if renderer is None:
renderer = text
object, name = resolve(thing, forceload)
desc = describe(object)
module = inspect.getmodule(object)
if name and '.' in name:
desc += ' in ' + name[:name.rfind('.')]
elif module and module is not object:
desc += ' in module ' + module.__name__
if not (inspect.ismodule(object) or
inspect.isclass(object) or
inspect.isroutine(object) or
inspect.isgetsetdescriptor(object) or
inspect.ismemberdescriptor(object) or
isinstance(object, property)):
# If the passed object is a piece of data or an instance,
# document its available methods instead of its value.
object = type(object)
desc += ' object'
return title % desc + '\n\n' + renderer.document(object, name) | [
"def",
"render_doc",
"(",
"thing",
",",
"title",
"=",
"'Python Library Documentation: %s'",
",",
"forceload",
"=",
"0",
",",
"renderer",
"=",
"None",
")",
":",
"if",
"renderer",
"is",
"None",
":",
"renderer",
"=",
"text",
"object",
",",
"name",
"=",
"resolve",
"(",
"thing",
",",
"forceload",
")",
"desc",
"=",
"describe",
"(",
"object",
")",
"module",
"=",
"inspect",
".",
"getmodule",
"(",
"object",
")",
"if",
"name",
"and",
"'.'",
"in",
"name",
":",
"desc",
"+=",
"' in '",
"+",
"name",
"[",
":",
"name",
".",
"rfind",
"(",
"'.'",
")",
"]",
"elif",
"module",
"and",
"module",
"is",
"not",
"object",
":",
"desc",
"+=",
"' in module '",
"+",
"module",
".",
"__name__",
"if",
"not",
"(",
"inspect",
".",
"ismodule",
"(",
"object",
")",
"or",
"inspect",
".",
"isclass",
"(",
"object",
")",
"or",
"inspect",
".",
"isroutine",
"(",
"object",
")",
"or",
"inspect",
".",
"isgetsetdescriptor",
"(",
"object",
")",
"or",
"inspect",
".",
"ismemberdescriptor",
"(",
"object",
")",
"or",
"isinstance",
"(",
"object",
",",
"property",
")",
")",
":",
"# If the passed object is a piece of data or an instance,",
"# document its available methods instead of its value.",
"object",
"=",
"type",
"(",
"object",
")",
"desc",
"+=",
"' object'",
"return",
"title",
"%",
"desc",
"+",
"'\\n\\n'",
"+",
"renderer",
".",
"document",
"(",
"object",
",",
"name",
")"
] | https://github.com/aws/lumberyard/blob/f85344403c1c2e77ec8c75deb2c116e97b713217/dev/Tools/Python/3.7.10/windows/Lib/pydoc.py#L1641-L1664 |
|
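A usage sketch with the standard library:

import pydoc

print(pydoc.render_doc(len))            # document an object directly
print(pydoc.render_doc('json.dumps'))   # or resolve a dotted path to an object first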
catboost/catboost | 167f64f237114a4d10b2b4ee42adb4569137debe | contrib/python/numpy/py2/numpy/polynomial/laguerre.py | python | laggrid3d | (x, y, z, c) | return c | Evaluate a 3-D Laguerre series on the Cartesian product of x, y, and z.
This function returns the values:
.. math:: p(a,b,c) = \\sum_{i,j,k} c_{i,j,k} * L_i(a) * L_j(b) * L_k(c)
where the points `(a, b, c)` consist of all triples formed by taking
`a` from `x`, `b` from `y`, and `c` from `z`. The resulting points form
a grid with `x` in the first dimension, `y` in the second, and `z` in
the third.
The parameters `x`, `y`, and `z` are converted to arrays only if they
are tuples or a lists, otherwise they are treated as a scalars. In
either case, either `x`, `y`, and `z` or their elements must support
multiplication and addition both with themselves and with the elements
of `c`.
If `c` has fewer than three dimensions, ones are implicitly appended to
its shape to make it 3-D. The shape of the result will be c.shape[3:] +
x.shape + y.shape + z.shape.
Parameters
----------
x, y, z : array_like, compatible objects
The three dimensional series is evaluated at the points in the
Cartesian product of `x`, `y`, and `z`. If `x`,`y`, or `z` is a
list or tuple, it is first converted to an ndarray, otherwise it is
left unchanged and, if it isn't an ndarray, it is treated as a
scalar.
c : array_like
Array of coefficients ordered so that the coefficients for terms of
degree i,j are contained in ``c[i,j]``. If `c` has dimension
greater than two the remaining indices enumerate multiple sets of
coefficients.
Returns
-------
values : ndarray, compatible object
The values of the two dimensional polynomial at points in the Cartesian
product of `x` and `y`.
See Also
--------
lagval, lagval2d, laggrid2d, lagval3d
Notes
-----
.. versionadded:: 1.7.0 | Evaluate a 3-D Laguerre series on the Cartesian product of x, y, and z. | [
"Evaluate",
"a",
"3",
"-",
"D",
"Laguerre",
"series",
"on",
"the",
"Cartesian",
"product",
"of",
"x",
"y",
"and",
"z",
"."
] | def laggrid3d(x, y, z, c):
"""
Evaluate a 3-D Laguerre series on the Cartesian product of x, y, and z.
This function returns the values:
.. math:: p(a,b,c) = \\sum_{i,j,k} c_{i,j,k} * L_i(a) * L_j(b) * L_k(c)
where the points `(a, b, c)` consist of all triples formed by taking
`a` from `x`, `b` from `y`, and `c` from `z`. The resulting points form
a grid with `x` in the first dimension, `y` in the second, and `z` in
the third.
The parameters `x`, `y`, and `z` are converted to arrays only if they
are tuples or a lists, otherwise they are treated as a scalars. In
either case, either `x`, `y`, and `z` or their elements must support
multiplication and addition both with themselves and with the elements
of `c`.
If `c` has fewer than three dimensions, ones are implicitly appended to
its shape to make it 3-D. The shape of the result will be c.shape[3:] +
x.shape + y.shape + z.shape.
Parameters
----------
x, y, z : array_like, compatible objects
The three dimensional series is evaluated at the points in the
Cartesian product of `x`, `y`, and `z`. If `x`,`y`, or `z` is a
list or tuple, it is first converted to an ndarray, otherwise it is
left unchanged and, if it isn't an ndarray, it is treated as a
scalar.
c : array_like
Array of coefficients ordered so that the coefficients for terms of
degree i,j are contained in ``c[i,j]``. If `c` has dimension
greater than two the remaining indices enumerate multiple sets of
coefficients.
Returns
-------
values : ndarray, compatible object
The values of the two dimensional polynomial at points in the Cartesian
product of `x` and `y`.
See Also
--------
lagval, lagval2d, laggrid2d, lagval3d
Notes
-----
.. versionadded:: 1.7.0
"""
c = lagval(x, c)
c = lagval(y, c)
c = lagval(z, c)
return c | [
"def",
"laggrid3d",
"(",
"x",
",",
"y",
",",
"z",
",",
"c",
")",
":",
"c",
"=",
"lagval",
"(",
"x",
",",
"c",
")",
"c",
"=",
"lagval",
"(",
"y",
",",
"c",
")",
"c",
"=",
"lagval",
"(",
"z",
",",
"c",
")",
"return",
"c"
] | https://github.com/catboost/catboost/blob/167f64f237114a4d10b2b4ee42adb4569137debe/contrib/python/numpy/py2/numpy/polynomial/laguerre.py#L1122-L1178 |
|
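A small numerical sketch; the coefficient array and sample points are arbitrary:

import numpy as np
from numpy.polynomial.laguerre import laggrid3d

c = np.ones((2, 2, 2))                         # coefficients c[i, j, k]
x, y, z = [0.0, 1.0], [0.5], [0.0, 2.0, 4.0]
vals = laggrid3d(x, y, z, c)
print(vals.shape)                              # (2, 1, 3) == x.shape + y.shape + z.shape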
aws/lumberyard | f85344403c1c2e77ec8c75deb2c116e97b713217 | dev/Gems/CloudGemFramework/v1/ResourceManager/resource_manager/view.py | python | ViewContext.__output_table | (self, items, specs, sort_column_count=1, indent=False, first_sort_column=0) | Displays a table containing data from items formatted as defined by specs.
items is an array of dict. The properties shown are determined by specs.
specs is an array of dict with the following properties:
Field -- Identifies a property in an item. Required.
Heading -- The heading that is displayed. Required.
Default -- A default value for the property. Defaults to ''
Formatter -- A function that is called to format the property value or the default value.
Hidden -- If present and True, the column is not displayed.
HideWhenEmpty -- If present and True, the column is not displayed if there are no values.
The columns are arranged in the order of the specs. The column widths are automatically determined.
The items are sorted in ascending order by the formatted value of the first n columns, where n
is specified by the sort_column_count parameter (which defaults to 1, causing the table to
be sorted by the first column only). | Displays a table containing data from items formatted as defined by specs. | [
"Displays",
"a",
"table",
"containing",
"data",
"from",
"items",
"formatted",
"as",
"defined",
"by",
"specs",
"."
] | def __output_table(self, items, specs, sort_column_count=1, indent=False, first_sort_column=0):
""" Displays a table containing data from items formatted as defined by specs.
items is an array of dict. The properties shown are determined by specs.
specs is an array of dict with the following properties:
Field -- Identifies a property in an item. Required.
Heading -- The heading that is displayed. Required.
Default -- A default value for the property. Defaults to ''
Formatter -- A function that is called to format the property value or the default value.
Hidden -- If present and True, the column is not displayed.
HideWhenEmpty -- If present and True, the column is not displayed if there are no values.
The columns are arranged in the order of the specs. The column widths are automatically determined.
The items are sorted in ascending order by the formatted value of the first n columns, where n
is specified by the sort_column_count parameter (which defaults to 1, causing the table to
be sorted by the first column only).
"""
def default_formatter(v):
return str(v) if v is not None else ''
def get_formatted_value(item, spec):
field = spec['Field']
formatter = spec.get('Formatter', default_formatter)
default = spec.get('Default', None)
return formatter(item.get(field, default))
# For simplicity we generate the formatted value multiple times. If this
# ends up being used to display large tables this may need to be changed.
# We sort working up to the first column and python guarantees that a
# stable sort is used, so things work out how we want.
for sort_column in range((sort_column_count + first_sort_column) - 1, first_sort_column - 1, -1):
items = sorted(items, key=lambda item: get_formatted_value(item, specs[sort_column]))
# determine width of each column
lengths = {}
for item in items:
for spec in specs:
field = spec['Field']
lengths[field] = max(lengths.get(field, 0), len(get_formatted_value(item, spec)))
def is_hidden(spec):
return spec.get('Hidden', False) or (spec.get('HideWhenEmpty', False) and lengths.get(spec['Field'], 0) == 0)
specs = [spec for spec in specs if not is_hidden(spec)]
for spec in specs:
field = spec['Field']
lengths[field] = max(lengths.get(field, 0), len(spec['Heading']))
# determine the prefix for each line
if indent:
prefix = ' '
else:
prefix = ''
# show the headings
heading = '\n'
heading += prefix
for spec in specs:
heading += '{0:{1}} '.format(spec['Heading'], lengths[spec['Field']])
self._output_message(heading)
# show a dividing line under the headings
divider = prefix
for spec in specs:
divider += ('-' * lengths[spec['Field']]) + ' '
self._output_message(divider)
# show the items
for item in items:
line = prefix
for spec in specs:
formatted_value = get_formatted_value(item, spec)
line += '{0:{1}} '.format(formatted_value, lengths[spec['Field']])
self._output_message(line) | [
"def",
"__output_table",
"(",
"self",
",",
"items",
",",
"specs",
",",
"sort_column_count",
"=",
"1",
",",
"indent",
"=",
"False",
",",
"first_sort_column",
"=",
"0",
")",
":",
"def",
"default_formatter",
"(",
"v",
")",
":",
"return",
"str",
"(",
"v",
")",
"if",
"v",
"is",
"not",
"None",
"else",
"''",
"def",
"get_formatted_value",
"(",
"item",
",",
"spec",
")",
":",
"field",
"=",
"spec",
"[",
"'Field'",
"]",
"formatter",
"=",
"spec",
".",
"get",
"(",
"'Formatter'",
",",
"default_formatter",
")",
"default",
"=",
"spec",
".",
"get",
"(",
"'Default'",
",",
"None",
")",
"return",
"formatter",
"(",
"item",
".",
"get",
"(",
"field",
",",
"default",
")",
")",
"# For simplicity we generate the formatted value multiple times. If this",
"# ends up being used to display large tables this may need to be changed.",
"# We sort working up to the first column and python guarantees that a",
"# stable sort is used, so things work out how we want.",
"for",
"sort_column",
"in",
"range",
"(",
"(",
"sort_column_count",
"+",
"first_sort_column",
")",
"-",
"1",
",",
"first_sort_column",
"-",
"1",
",",
"-",
"1",
")",
":",
"items",
"=",
"sorted",
"(",
"items",
",",
"key",
"=",
"lambda",
"item",
":",
"get_formatted_value",
"(",
"item",
",",
"specs",
"[",
"sort_column",
"]",
")",
")",
"# determine width of each column",
"lengths",
"=",
"{",
"}",
"for",
"item",
"in",
"items",
":",
"for",
"spec",
"in",
"specs",
":",
"field",
"=",
"spec",
"[",
"'Field'",
"]",
"lengths",
"[",
"field",
"]",
"=",
"max",
"(",
"lengths",
".",
"get",
"(",
"field",
",",
"0",
")",
",",
"len",
"(",
"get_formatted_value",
"(",
"item",
",",
"spec",
")",
")",
")",
"def",
"is_hidden",
"(",
"spec",
")",
":",
"return",
"spec",
".",
"get",
"(",
"'Hidden'",
",",
"False",
")",
"or",
"(",
"spec",
".",
"get",
"(",
"'HideWhenEmpty'",
",",
"False",
")",
"and",
"lengths",
".",
"get",
"(",
"spec",
"[",
"'Field'",
"]",
",",
"0",
")",
"==",
"0",
")",
"specs",
"=",
"[",
"spec",
"for",
"spec",
"in",
"specs",
"if",
"not",
"is_hidden",
"(",
"spec",
")",
"]",
"for",
"spec",
"in",
"specs",
":",
"field",
"=",
"spec",
"[",
"'Field'",
"]",
"lengths",
"[",
"field",
"]",
"=",
"max",
"(",
"lengths",
".",
"get",
"(",
"field",
",",
"0",
")",
",",
"len",
"(",
"spec",
"[",
"'Heading'",
"]",
")",
")",
"# determine the prefix for each line",
"if",
"indent",
":",
"prefix",
"=",
"' '",
"else",
":",
"prefix",
"=",
"''",
"# show the headings",
"heading",
"=",
"'\\n'",
"heading",
"+=",
"prefix",
"for",
"spec",
"in",
"specs",
":",
"heading",
"+=",
"'{0:{1}} '",
".",
"format",
"(",
"spec",
"[",
"'Heading'",
"]",
",",
"lengths",
"[",
"spec",
"[",
"'Field'",
"]",
"]",
")",
"self",
".",
"_output_message",
"(",
"heading",
")",
"# show a dividing line under the headings",
"divider",
"=",
"prefix",
"for",
"spec",
"in",
"specs",
":",
"divider",
"+=",
"(",
"'-'",
"*",
"lengths",
"[",
"spec",
"[",
"'Field'",
"]",
"]",
")",
"+",
"' '",
"self",
".",
"_output_message",
"(",
"divider",
")",
"# show the items",
"for",
"item",
"in",
"items",
":",
"line",
"=",
"prefix",
"for",
"spec",
"in",
"specs",
":",
"formatted_value",
"=",
"get_formatted_value",
"(",
"item",
",",
"spec",
")",
"line",
"+=",
"'{0:{1}} '",
".",
"format",
"(",
"formatted_value",
",",
"lengths",
"[",
"spec",
"[",
"'Field'",
"]",
"]",
")",
"self",
".",
"_output_message",
"(",
"line",
")"
] | https://github.com/aws/lumberyard/blob/f85344403c1c2e77ec8c75deb2c116e97b713217/dev/Gems/CloudGemFramework/v1/ResourceManager/resource_manager/view.py#L766-L851 |
||
ArduPilot/ardupilot | 6e684b3496122b8158ac412b609d00004b7ac306 | libraries/AP_HAL_ChibiOS/hwdef/scripts/chibios_hwdef.py | python | get_extra_bylabel | (label, name, default=None) | return p.extra_value(name, type=str, default=default) | get extra setting for a label by name | get extra setting for a label by name | [
"get",
"extra",
"setting",
"for",
"a",
"label",
"by",
"name"
] | def get_extra_bylabel(label, name, default=None):
'''get extra setting for a label by name'''
p = bylabel.get(label)
if p is None:
return default
return p.extra_value(name, type=str, default=default) | [
"def",
"get_extra_bylabel",
"(",
"label",
",",
"name",
",",
"default",
"=",
"None",
")",
":",
"p",
"=",
"bylabel",
".",
"get",
"(",
"label",
")",
"if",
"p",
"is",
"None",
":",
"return",
"default",
"return",
"p",
".",
"extra_value",
"(",
"name",
",",
"type",
"=",
"str",
",",
"default",
"=",
"default",
")"
] | https://github.com/ArduPilot/ardupilot/blob/6e684b3496122b8158ac412b609d00004b7ac306/libraries/AP_HAL_ChibiOS/hwdef/scripts/chibios_hwdef.py#L1510-L1515 |
|
klzgrad/naiveproxy | ed2c513637c77b18721fe428d7ed395b4d284c83 | src/build/android/method_count.py | python | DexStatsCollector.GetTotalCounts | (self) | return ret | Returns dict of {metric -> count}, where |count| is sum(metric). | Returns dict of {metric -> count}, where |count| is sum(metric). | [
"Returns",
"dict",
"of",
"{",
"metric",
"-",
">",
"count",
"}",
"where",
"|count|",
"is",
"sum",
"(",
"metric",
")",
"."
] | def GetTotalCounts(self):
"""Returns dict of {metric -> count}, where |count| is sum(metric)."""
ret = {}
for metric in ('fields', 'methods', 'strings', 'types'):
ret[metric] = sum(x[metric] for x in self._counts_by_label.values())
return ret | [
"def",
"GetTotalCounts",
"(",
"self",
")",
":",
"ret",
"=",
"{",
"}",
"for",
"metric",
"in",
"(",
"'fields'",
",",
"'methods'",
",",
"'strings'",
",",
"'types'",
")",
":",
"ret",
"[",
"metric",
"]",
"=",
"sum",
"(",
"x",
"[",
"metric",
"]",
"for",
"x",
"in",
"self",
".",
"_counts_by_label",
".",
"values",
"(",
")",
")",
"return",
"ret"
] | https://github.com/klzgrad/naiveproxy/blob/ed2c513637c77b18721fe428d7ed395b4d284c83/src/build/android/method_count.py#L67-L72 |
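The GetTotalCounts record above just sums each metric across per-label count dictionaries; a self-contained sketch of that aggregation pattern, using made-up labels and numbers rather than the DexStatsCollector API, is:

# Aggregation pattern behind GetTotalCounts, shown with hypothetical data.
counts_by_label = {
    'base.dex':    {'fields': 10, 'methods': 25, 'strings': 40, 'types': 8},
    'feature.dex': {'fields': 3,  'methods': 7,  'strings': 12, 'types': 2},
}
totals = {metric: sum(c[metric] for c in counts_by_label.values())
          for metric in ('fields', 'methods', 'strings', 'types')}
print(totals)  # {'fields': 13, 'methods': 32, 'strings': 52, 'types': 10}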
|
cms-sw/cmssw | fd9de012d503d3405420bcbeec0ec879baa57cf2 | Validation/RecoTrack/python/plotting/ntupleDataFormat.py | python | _HitObject.ntracks | (self) | return getattr(self._tree, self._prefix+"_trkIdx")[self._index].size() | Returns number of tracks containing this hit. | Returns number of tracks containing this hit. | [
"Returns",
"number",
"of",
"tracks",
"containing",
"this",
"hit",
"."
] | def ntracks(self):
"""Returns number of tracks containing this hit."""
self._checkIsValid()
return getattr(self._tree, self._prefix+"_trkIdx")[self._index].size() | [
"def",
"ntracks",
"(",
"self",
")",
":",
"self",
".",
"_checkIsValid",
"(",
")",
"return",
"getattr",
"(",
"self",
".",
"_tree",
",",
"self",
".",
"_prefix",
"+",
"\"_trkIdx\"",
")",
"[",
"self",
".",
"_index",
"]",
".",
"size",
"(",
")"
] | https://github.com/cms-sw/cmssw/blob/fd9de012d503d3405420bcbeec0ec879baa57cf2/Validation/RecoTrack/python/plotting/ntupleDataFormat.py#L169-L172 |
|
facebook/ThreatExchange | 31914a51820c73c8a0daffe62ccca29a6e3d359e | api-reference-examples/python/pytx/pytx/malware.py | python | Malware.zfh | (self) | return zfh | Return a file handle of the base64-decoded sample in a zip file. | Return a file handle of the base64-decoded sample in a zip file. | [
"Return",
"a",
"file",
"handle",
"of",
"the",
"base64",
"-",
"decoded",
"sample",
"in",
"a",
"zip",
"file",
"."
] | def zfh(self):
"""
Return a file handle of the base64-decoded sample in a zip file.
"""
if self.get(m.SAMPLE) is None:
self.details()
zfh = io.BytesIO()
zfh.write(base64.b64decode(self.get(m.SAMPLE)))
zfh.seek(0)
return zfh | [
"def",
"zfh",
"(",
"self",
")",
":",
"if",
"self",
".",
"get",
"(",
"m",
".",
"SAMPLE",
")",
"is",
"None",
":",
"self",
".",
"details",
"(",
")",
"zfh",
"=",
"io",
".",
"BytesIO",
"(",
")",
"zfh",
".",
"write",
"(",
"base64",
".",
"b64decode",
"(",
"self",
".",
"get",
"(",
"m",
".",
"SAMPLE",
")",
")",
")",
"zfh",
".",
"seek",
"(",
"0",
")",
"return",
"zfh"
] | https://github.com/facebook/ThreatExchange/blob/31914a51820c73c8a0daffe62ccca29a6e3d359e/api-reference-examples/python/pytx/pytx/malware.py#L84-L94 |
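Malware.zfh above returns an in-memory zip handle built from a base64-encoded sample, which callers would normally hand straight to zipfile; the sketch below reproduces that round trip with stand-in data instead of the pytx API:

import base64
import io
import zipfile

# Producer side: build an in-memory zip and base64-encode it, standing in for m.SAMPLE.
buf = io.BytesIO()
with zipfile.ZipFile(buf, 'w') as zf:
    zf.writestr('sample.bin', b'example bytes')
encoded_sample = base64.b64encode(buf.getvalue())

# Consumer side, mirroring zfh(): decode into a BytesIO handle and read it back.
zfh = io.BytesIO(base64.b64decode(encoded_sample))
with zipfile.ZipFile(zfh) as zf:
    print(zf.namelist())  # ['sample.bin']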
|
miyosuda/TensorFlowAndroidDemo | 35903e0221aa5f109ea2dbef27f20b52e317f42d | jni-build/jni/include/tensorflow/contrib/learn/python/learn/graph_actions.py | python | train | (graph,
output_dir,
train_op,
loss_op,
global_step_tensor=None,
init_op=None,
init_feed_dict=None,
init_fn=None,
log_every_steps=10,
supervisor_is_chief=True,
supervisor_master='',
supervisor_save_model_secs=600,
keep_checkpoint_max=5,
supervisor_save_summaries_steps=100,
feed_fn=None,
steps=None,
fail_on_nan_loss=True,
monitors=None,
max_steps=None) | Train a model.
Given `graph`, a directory to write outputs to (`output_dir`), and some ops,
run a training loop. The given `train_op` performs one step of training on the
model. The `loss_op` represents the objective function of the training. It is
expected to increment the `global_step_tensor`, a scalar integer tensor
counting training steps. This function uses `Supervisor` to initialize the
graph (from a checkpoint if one is available in `output_dir`), write summaries
defined in the graph, and write regular checkpoints as defined by
`supervisor_save_model_secs`.
Training continues until `global_step_tensor` evaluates to `max_steps`, or, if
`fail_on_nan_loss`, until `loss_op` evaluates to `NaN`. In that case the
program is terminated with exit code 1.
Args:
graph: A graph to train. It is expected that this graph is not in use
elsewhere.
output_dir: A directory to write outputs to.
train_op: An op that performs one training step when run.
loss_op: A scalar loss tensor.
global_step_tensor: A tensor representing the global step. If none is given,
one is extracted from the graph using the same logic as in `Supervisor`.
init_op: An op that initializes the graph. If `None`, use `Supervisor`'s
default.
init_feed_dict: A dictionary that maps `Tensor` objects to feed values.
This feed dictionary will be used when `init_op` is evaluated.
init_fn: Optional callable passed to Supervisor to initialize the model.
log_every_steps: Output logs regularly. The logs contain timing data and the
current loss.
supervisor_is_chief: Whether the current process is the chief supervisor in
charge of restoring the model and running standard services.
supervisor_master: The master string to use when preparing the session.
supervisor_save_model_secs: Save a checkpoint every
`supervisor_save_model_secs` seconds when training.
keep_checkpoint_max: The maximum number of recent checkpoint files to
keep. As new files are created, older files are deleted. If None or 0,
all checkpoint files are kept. This is simply passed as the max_to_keep
arg to tf.Saver constructor.
supervisor_save_summaries_steps: Save summaries every
`supervisor_save_summaries_steps` steps when training.
feed_fn: A function that is called every iteration to produce a `feed_dict`
passed to `session.run` calls. Optional.
steps: Trains for this many steps (e.g. current global step + `steps`).
fail_on_nan_loss: If true, raise `NanLossDuringTrainingError` if `loss_op`
evaluates to `NaN`. If false, continue training as if nothing happened.
monitors: List of `BaseMonitor` subclass instances. Used for callbacks
inside the training loop.
max_steps: Number of total steps for which to train model. If `None`,
train forever. Two calls of fit(steps=100) mean 200 training iterations.
On the other hand, two calls of fit(max_steps=100) mean that the second call
will not do any iterations, since the first call already did all 100 steps.
Returns:
The final loss value.
Raises:
ValueError: If `output_dir`, `train_op`, `loss_op`, or `global_step_tensor`
is not provided. See `tf.contrib.framework.get_global_step` for how we
look up the latter if not provided explicitly.
NanLossDuringTrainingError: If `fail_on_nan_loss` is `True`, and loss ever
evaluates to `NaN`.
ValueError: If both `steps` and `max_steps` are not `None`. | Train a model. | [
"Train",
"a",
"model",
"."
] | def train(graph,
output_dir,
train_op,
loss_op,
global_step_tensor=None,
init_op=None,
init_feed_dict=None,
init_fn=None,
log_every_steps=10,
supervisor_is_chief=True,
supervisor_master='',
supervisor_save_model_secs=600,
keep_checkpoint_max=5,
supervisor_save_summaries_steps=100,
feed_fn=None,
steps=None,
fail_on_nan_loss=True,
monitors=None,
max_steps=None):
"""Train a model.
Given `graph`, a directory to write outputs to (`output_dir`), and some ops,
run a training loop. The given `train_op` performs one step of training on the
model. The `loss_op` represents the objective function of the training. It is
expected to increment the `global_step_tensor`, a scalar integer tensor
counting training steps. This function uses `Supervisor` to initialize the
graph (from a checkpoint if one is available in `output_dir`), write summaries
defined in the graph, and write regular checkpoints as defined by
`supervisor_save_model_secs`.
Training continues until `global_step_tensor` evaluates to `max_steps`, or, if
`fail_on_nan_loss`, until `loss_op` evaluates to `NaN`. In that case the
program is terminated with exit code 1.
Args:
graph: A graph to train. It is expected that this graph is not in use
elsewhere.
output_dir: A directory to write outputs to.
train_op: An op that performs one training step when run.
loss_op: A scalar loss tensor.
global_step_tensor: A tensor representing the global step. If none is given,
one is extracted from the graph using the same logic as in `Supervisor`.
init_op: An op that initializes the graph. If `None`, use `Supervisor`'s
default.
init_feed_dict: A dictionary that maps `Tensor` objects to feed values.
This feed dictionary will be used when `init_op` is evaluated.
init_fn: Optional callable passed to Supervisor to initialize the model.
log_every_steps: Output logs regularly. The logs contain timing data and the
current loss.
supervisor_is_chief: Whether the current process is the chief supervisor in
charge of restoring the model and running standard services.
supervisor_master: The master string to use when preparing the session.
supervisor_save_model_secs: Save a checkpoint every
`supervisor_save_model_secs` seconds when training.
keep_checkpoint_max: The maximum number of recent checkpoint files to
keep. As new files are created, older files are deleted. If None or 0,
all checkpoint files are kept. This is simply passed as the max_to_keep
arg to tf.Saver constructor.
supervisor_save_summaries_steps: Save summaries every
`supervisor_save_summaries_steps` steps when training.
feed_fn: A function that is called every iteration to produce a `feed_dict`
passed to `session.run` calls. Optional.
steps: Trains for this many steps (e.g. current global step + `steps`).
fail_on_nan_loss: If true, raise `NanLossDuringTrainingError` if `loss_op`
evaluates to `NaN`. If false, continue training as if nothing happened.
monitors: List of `BaseMonitor` subclass instances. Used for callbacks
inside the training loop.
max_steps: Number of total steps for which to train model. If `None`,
train forever. Two calls of fit(steps=100) mean 200 training iterations.
On the other hand, two calls of fit(max_steps=100) mean that the second call
will not do any iterations, since the first call already did all 100 steps.
Returns:
The final loss value.
Raises:
ValueError: If `output_dir`, `train_op`, `loss_op`, or `global_step_tensor`
is not provided. See `tf.contrib.framework.get_global_step` for how we
look up the latter if not provided explicitly.
NanLossDuringTrainingError: If `fail_on_nan_loss` is `True`, and loss ever
evaluates to `NaN`.
ValueError: If both `steps` and `max_steps` are not `None`.
"""
while True:
try:
return _train_internal(graph,
output_dir,
train_op,
loss_op,
global_step_tensor,
init_op,
init_feed_dict,
init_fn,
log_every_steps,
supervisor_is_chief,
supervisor_master,
supervisor_save_model_secs,
keep_checkpoint_max,
supervisor_save_summaries_steps,
feed_fn,
steps,
fail_on_nan_loss,
monitors,
max_steps)
except errors.AbortedError:
# Happens when PS restarts, keep training.
logging.warning('Training got Aborted error. Keep training.') | [
"def",
"train",
"(",
"graph",
",",
"output_dir",
",",
"train_op",
",",
"loss_op",
",",
"global_step_tensor",
"=",
"None",
",",
"init_op",
"=",
"None",
",",
"init_feed_dict",
"=",
"None",
",",
"init_fn",
"=",
"None",
",",
"log_every_steps",
"=",
"10",
",",
"supervisor_is_chief",
"=",
"True",
",",
"supervisor_master",
"=",
"''",
",",
"supervisor_save_model_secs",
"=",
"600",
",",
"keep_checkpoint_max",
"=",
"5",
",",
"supervisor_save_summaries_steps",
"=",
"100",
",",
"feed_fn",
"=",
"None",
",",
"steps",
"=",
"None",
",",
"fail_on_nan_loss",
"=",
"True",
",",
"monitors",
"=",
"None",
",",
"max_steps",
"=",
"None",
")",
":",
"while",
"True",
":",
"try",
":",
"return",
"_train_internal",
"(",
"graph",
",",
"output_dir",
",",
"train_op",
",",
"loss_op",
",",
"global_step_tensor",
",",
"init_op",
",",
"init_feed_dict",
",",
"init_fn",
",",
"log_every_steps",
",",
"supervisor_is_chief",
",",
"supervisor_master",
",",
"supervisor_save_model_secs",
",",
"keep_checkpoint_max",
",",
"supervisor_save_summaries_steps",
",",
"feed_fn",
",",
"steps",
",",
"fail_on_nan_loss",
",",
"monitors",
",",
"max_steps",
")",
"except",
"errors",
".",
"AbortedError",
":",
"# Happens when PS restarts, keep training.",
"logging",
".",
"warning",
"(",
"'Training got Aborted error. Keep training.'",
")"
] | https://github.com/miyosuda/TensorFlowAndroidDemo/blob/35903e0221aa5f109ea2dbef27f20b52e317f42d/jni-build/jni/include/tensorflow/contrib/learn/python/learn/graph_actions.py#L286-L392 |
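Aside from the long argument list, the body of train() above is essentially a retry loop: it keeps calling the real implementation and swallows errors.AbortedError, which appears when a parameter server restarts mid-run. The retry shape, with placeholder names standing in for the TensorFlow-specific ones, looks like this:

# Generic sketch of the retry-on-abort loop; AbortedError and _train_internal
# are stand-ins for the TensorFlow names used in the record above.
class AbortedError(Exception):
    pass

def _train_internal():
    print('running one training attempt')
    return 0.42  # pretend final loss

def train_with_retry():
    while True:
        try:
            return _train_internal()
        except AbortedError:
            # A restarted parameter server aborts the step; log and go again.
            print('Training got Aborted error. Keep training.')

print(train_with_retry())  # 0.42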
||
aws/lumberyard | f85344403c1c2e77ec8c75deb2c116e97b713217 | dev/Tools/Python/3.7.10/windows/Lib/site-packages/pkg_resources/__init__.py | python | _by_version_descending | (names) | return sorted(names, key=_by_version, reverse=True) | Given a list of filenames, return them in descending order
by version number.
>>> names = 'bar', 'foo', 'Python-2.7.10.egg', 'Python-2.7.2.egg'
>>> _by_version_descending(names)
['Python-2.7.10.egg', 'Python-2.7.2.egg', 'foo', 'bar']
>>> names = 'Setuptools-1.2.3b1.egg', 'Setuptools-1.2.3.egg'
>>> _by_version_descending(names)
['Setuptools-1.2.3.egg', 'Setuptools-1.2.3b1.egg']
>>> names = 'Setuptools-1.2.3b1.egg', 'Setuptools-1.2.3.post1.egg'
>>> _by_version_descending(names)
['Setuptools-1.2.3.post1.egg', 'Setuptools-1.2.3b1.egg'] | Given a list of filenames, return them in descending order
by version number. | [
"Given",
"a",
"list",
"of",
"filenames",
"return",
"them",
"in",
"descending",
"order",
"by",
"version",
"number",
"."
] | def _by_version_descending(names):
"""
Given a list of filenames, return them in descending order
by version number.
>>> names = 'bar', 'foo', 'Python-2.7.10.egg', 'Python-2.7.2.egg'
>>> _by_version_descending(names)
['Python-2.7.10.egg', 'Python-2.7.2.egg', 'foo', 'bar']
>>> names = 'Setuptools-1.2.3b1.egg', 'Setuptools-1.2.3.egg'
>>> _by_version_descending(names)
['Setuptools-1.2.3.egg', 'Setuptools-1.2.3b1.egg']
>>> names = 'Setuptools-1.2.3b1.egg', 'Setuptools-1.2.3.post1.egg'
>>> _by_version_descending(names)
['Setuptools-1.2.3.post1.egg', 'Setuptools-1.2.3b1.egg']
"""
def _by_version(name):
"""
Parse each component of the filename
"""
name, ext = os.path.splitext(name)
parts = itertools.chain(name.split('-'), [ext])
return [packaging.version.parse(part) for part in parts]
return sorted(names, key=_by_version, reverse=True) | [
"def",
"_by_version_descending",
"(",
"names",
")",
":",
"def",
"_by_version",
"(",
"name",
")",
":",
"\"\"\"\n Parse each component of the filename\n \"\"\"",
"name",
",",
"ext",
"=",
"os",
".",
"path",
".",
"splitext",
"(",
"name",
")",
"parts",
"=",
"itertools",
".",
"chain",
"(",
"name",
".",
"split",
"(",
"'-'",
")",
",",
"[",
"ext",
"]",
")",
"return",
"[",
"packaging",
".",
"version",
".",
"parse",
"(",
"part",
")",
"for",
"part",
"in",
"parts",
"]",
"return",
"sorted",
"(",
"names",
",",
"key",
"=",
"_by_version",
",",
"reverse",
"=",
"True",
")"
] | https://github.com/aws/lumberyard/blob/f85344403c1c2e77ec8c75deb2c116e97b713217/dev/Tools/Python/3.7.10/windows/Lib/site-packages/pkg_resources/__init__.py#L2022-L2045 |
|
windystrife/UnrealEngine_NVIDIAGameWorks | b50e6338a7c5b26374d66306ebc7807541ff815e | Engine/Extras/ThirdPartyNotUE/emsdk/Win64/python/2.7.5.3_64bit/Tools/pynche/pyColorChooser.py | python | askcolor | (color = None, **options) | return _chooser.show(color, options) | Ask for a color | Ask for a color | [
"Ask",
"for",
"a",
"color"
] | def askcolor(color = None, **options):
"""Ask for a color"""
global _chooser
if not _chooser:
_chooser = apply(Chooser, (), options)
return _chooser.show(color, options) | [
"def",
"askcolor",
"(",
"color",
"=",
"None",
",",
"*",
"*",
"options",
")",
":",
"global",
"_chooser",
"if",
"not",
"_chooser",
":",
"_chooser",
"=",
"apply",
"(",
"Chooser",
",",
"(",
")",
",",
"options",
")",
"return",
"_chooser",
".",
"show",
"(",
"color",
",",
"options",
")"
] | https://github.com/windystrife/UnrealEngine_NVIDIAGameWorks/blob/b50e6338a7c5b26374d66306ebc7807541ff815e/Engine/Extras/ThirdPartyNotUE/emsdk/Win64/python/2.7.5.3_64bit/Tools/pynche/pyColorChooser.py#L80-L85 |
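askcolor() above is a thin lazy-singleton wrapper: the first call builds a module-level Chooser and later calls reuse it, so constructor options are only honoured once. The idiom, stripped of the Tkinter specifics and using hypothetical names, is:

# Lazy module-level singleton, the idiom behind pyColorChooser.askcolor.
_chooser = None

class Chooser:
    def __init__(self, **options):
        self.options = options
    def show(self, color, options):
        return ('shown', color, options)

def askcolor(color=None, **options):
    global _chooser
    if _chooser is None:
        _chooser = Chooser(**options)   # options only reach the constructor here
    return _chooser.show(color, options)

print(askcolor('#ff0000'))
print(askcolor('#00ff00'))  # the same Chooser instance is reused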
|
stellar-deprecated/stellard | 67eabb2217bdfa9a6ea317f62338fb6bca458c90 | src/protobuf/python/google/protobuf/service.py | python | RpcController.ErrorText | (self) | If Failed is true, returns a human-readable description of the error. | If Failed is true, returns a human-readable description of the error. | [
"If",
"Failed",
"is",
"true",
"returns",
"a",
"human",
"-",
"readable",
"description",
"of",
"the",
"error",
"."
] | def ErrorText(self):
"""If Failed is true, returns a human-readable description of the error."""
raise NotImplementedError | [
"def",
"ErrorText",
"(",
"self",
")",
":",
"raise",
"NotImplementedError"
] | https://github.com/stellar-deprecated/stellard/blob/67eabb2217bdfa9a6ea317f62338fb6bca458c90/src/protobuf/python/google/protobuf/service.py#L150-L152 |
||
trilinos/Trilinos | 6168be6dd51e35e1cd681e9c4b24433e709df140 | packages/seacas/scripts/exodus2.in.py | python | exodus.put_node_set_dist_fact | (self, id, nodeSetDistFact) | exo.put_node_set_dist_fact(node_set_id, ns_dist_facts)
-> store the list of distribution factors for nodes in a node set
input value(s):
<int> node_set_id node set *ID* (not *INDEX*)
<list<float>> ns_dist_facts a list of distribution factors,
e.g. nodal 'weights' | exo.put_node_set_dist_fact(node_set_id, ns_dist_facts) | [
"exo",
".",
"put_node_set_dist_fact",
"(",
"node_set_id",
"ns_dist_facts",
")"
] | def put_node_set_dist_fact(self, id, nodeSetDistFact):
"""
exo.put_node_set_dist_fact(node_set_id, ns_dist_facts)
-> store the list of distribution factors for nodes in a node set
input value(s):
<int> node_set_id node set *ID* (not *INDEX*)
<list<float>> ns_dist_facts a list of distribution factors,
e.g. nodal 'weights'
"""
self.__ex_put_node_set_dist_fact(id, nodeSetDistFact) | [
"def",
"put_node_set_dist_fact",
"(",
"self",
",",
"id",
",",
"nodeSetDistFact",
")",
":",
"self",
".",
"__ex_put_node_set_dist_fact",
"(",
"id",
",",
"nodeSetDistFact",
")"
] | https://github.com/trilinos/Trilinos/blob/6168be6dd51e35e1cd681e9c4b24433e709df140/packages/seacas/scripts/exodus2.in.py#L2133-L2144 |
||
adobe/chromium | cfe5bf0b51b1f6b9fe239c2a3c2f2364da9967d7 | third_party/closure_linter/closure_linter/checkerbase.py | python | CheckerBase._ExecutePass | (self, token, pass_function, parse_error=None,
debug_tokens=False) | return True | Calls the given function for every token in the given token stream.
As each token is passed to the given function, state is kept up to date and,
depending on the error_trace flag, errors are either caught and reported, or
allowed to bubble up so developers can see the full stack trace. If a parse
error is specified, the pass will proceed as normal until the token causing
the parse error is reached.
Args:
token: The first token in the token stream.
pass_function: The function to call for each token in the token stream.
parse_error: A ParseError if any errors occurred.
debug_tokens: Whether every token should be printed as it is encountered
during the pass.
Returns:
A boolean indicating whether the full token stream could be checked or if
checking failed prematurely.
Raises:
Exception: If any error occurred while calling the given function. | Calls the given function for every token in the given token stream. | [
"Calls",
"the",
"given",
"function",
"for",
"every",
"token",
"in",
"the",
"given",
"token",
"stream",
"."
] | def _ExecutePass(self, token, pass_function, parse_error=None,
debug_tokens=False):
"""Calls the given function for every token in the given token stream.
As each token is passed to the given function, state is kept up to date and,
depending on the error_trace flag, errors are either caught and reported, or
allowed to bubble up so developers can see the full stack trace. If a parse
error is specified, the pass will proceed as normal until the token causing
the parse error is reached.
Args:
token: The first token in the token stream.
pass_function: The function to call for each token in the token stream.
parse_error: A ParseError if any errors occurred.
debug_tokens: Whether every token should be printed as it is encountered
during the pass.
Returns:
A boolean indicating whether the full token stream could be checked or if
checking failed prematurely.
Raises:
Exception: If any error occurred while calling the given function.
"""
self._state_tracker.Reset()
while token:
if debug_tokens:
print token
if parse_error and parse_error.token == token:
message = ('Error parsing file at token "%s". Unable to '
'check the rest of file.' % token.string)
self.HandleError(errors.FILE_DOES_NOT_PARSE, message, token)
self._error_handler.FinishFile()
return
try:
self._state_tracker.HandleToken(
token, self._state_tracker.GetLastNonSpaceToken())
pass_function(token)
self._state_tracker.HandleAfterToken(token)
except:
if FLAGS.error_trace:
raise
else:
self.HandleError(errors.FILE_DOES_NOT_PARSE,
('Error parsing file at token "%s". Unable to '
'check the rest of file.' % token.string),
token)
self._error_handler.FinishFile()
return False
token = token.next
return True | [
"def",
"_ExecutePass",
"(",
"self",
",",
"token",
",",
"pass_function",
",",
"parse_error",
"=",
"None",
",",
"debug_tokens",
"=",
"False",
")",
":",
"self",
".",
"_state_tracker",
".",
"Reset",
"(",
")",
"while",
"token",
":",
"if",
"debug_tokens",
":",
"print",
"token",
"if",
"parse_error",
"and",
"parse_error",
".",
"token",
"==",
"token",
":",
"message",
"=",
"(",
"'Error parsing file at token \"%s\". Unable to '",
"'check the rest of file.'",
"%",
"token",
".",
"string",
")",
"self",
".",
"HandleError",
"(",
"errors",
".",
"FILE_DOES_NOT_PARSE",
",",
"message",
",",
"token",
")",
"self",
".",
"_error_handler",
".",
"FinishFile",
"(",
")",
"return",
"try",
":",
"self",
".",
"_state_tracker",
".",
"HandleToken",
"(",
"token",
",",
"self",
".",
"_state_tracker",
".",
"GetLastNonSpaceToken",
"(",
")",
")",
"pass_function",
"(",
"token",
")",
"self",
".",
"_state_tracker",
".",
"HandleAfterToken",
"(",
"token",
")",
"except",
":",
"if",
"FLAGS",
".",
"error_trace",
":",
"raise",
"else",
":",
"self",
".",
"HandleError",
"(",
"errors",
".",
"FILE_DOES_NOT_PARSE",
",",
"(",
"'Error parsing file at token \"%s\". Unable to '",
"'check the rest of file.'",
"%",
"token",
".",
"string",
")",
",",
"token",
")",
"self",
".",
"_error_handler",
".",
"FinishFile",
"(",
")",
"return",
"False",
"token",
"=",
"token",
".",
"next",
"return",
"True"
] | https://github.com/adobe/chromium/blob/cfe5bf0b51b1f6b9fe239c2a3c2f2364da9967d7/third_party/closure_linter/closure_linter/checkerbase.py#L260-L312 |
|
rsummers11/CADLab | 976ed959a0b5208bb4173127a7ef732ac73a9b6f | MULAN_universal_lesion_analysis/maskrcnn/modeling/roi_heads/box_head/loss.py | python | FastRCNNLossComputation.__init__ | (self, proposal_matcher, fg_bg_sampler, box_coder) | Arguments:
proposal_matcher (Matcher)
fg_bg_sampler (BalancedPositiveNegativeSampler)
box_coder (BoxCoder) | Arguments:
proposal_matcher (Matcher)
fg_bg_sampler (BalancedPositiveNegativeSampler)
box_coder (BoxCoder) | [
"Arguments",
":",
"proposal_matcher",
"(",
"Matcher",
")",
"fg_bg_sampler",
"(",
"BalancedPositiveNegativeSampler",
")",
"box_coder",
"(",
"BoxCoder",
")"
] | def __init__(self, proposal_matcher, fg_bg_sampler, box_coder):
"""
Arguments:
proposal_matcher (Matcher)
fg_bg_sampler (BalancedPositiveNegativeSampler)
box_coder (BoxCoder)
"""
self.proposal_matcher = proposal_matcher
self.fg_bg_sampler = fg_bg_sampler
self.box_coder = box_coder | [
"def",
"__init__",
"(",
"self",
",",
"proposal_matcher",
",",
"fg_bg_sampler",
",",
"box_coder",
")",
":",
"self",
".",
"proposal_matcher",
"=",
"proposal_matcher",
"self",
".",
"fg_bg_sampler",
"=",
"fg_bg_sampler",
"self",
".",
"box_coder",
"=",
"box_coder"
] | https://github.com/rsummers11/CADLab/blob/976ed959a0b5208bb4173127a7ef732ac73a9b6f/MULAN_universal_lesion_analysis/maskrcnn/modeling/roi_heads/box_head/loss.py#L22-L31 |
||
Tom94/practical-path-guiding | fcf01afb436184e8a74bf300aa89f69b03ab25a2 | visualizer/nanogui/docs/exhale.py | python | ExhaleRoot.reparentClassLike | (self) | Helper method for :func:`exhale.ExhaleRoot.reparentAll`. Iterates over the
``self.class_like`` list and adds each object as a child to a namespace if the
class, or struct is a member of that namespace. Many classes / structs will be
reparented to a namespace node, these will remain in ``self.class_like``.
However, if a class or struct is reparented to a different class or struct (it
is a nested class / struct), it *will* be removed from ``self.class_like`` so that the class view
hierarchy is generated correctly. | Helper method for :func:`exhale.ExhaleRoot.reparentAll`. Iterates over the
``self.class_like`` list and adds each object as a child to a namespace if the
class, or struct is a member of that namespace. Many classes / structs will be
reparented to a namespace node, these will remain in ``self.class_like``.
However, if a class or struct is reparented to a different class or struct (it
is a nested class / struct), it *will* be removed from ``self.class_like`` so that the class view
hierarchy is generated correctly. | [
"Helper",
"method",
"for",
":",
"func",
":",
"exhale",
".",
"ExhaleRoot",
".",
"reparentAll",
".",
"Iterates",
"over",
"the",
"self",
".",
"class_like",
"list",
"and",
"adds",
"each",
"object",
"as",
"a",
"child",
"to",
"a",
"namespace",
"if",
"the",
"class",
"or",
"struct",
"is",
"a",
"member",
"of",
"that",
"namespace",
".",
"Many",
"classes",
"/",
"structs",
"will",
"be",
"reparented",
"to",
"a",
"namespace",
"node",
"these",
"will",
"remain",
"in",
"self",
".",
"class_like",
".",
"However",
"if",
"a",
"class",
"or",
"struct",
"is",
"reparented",
"to",
"a",
"different",
"class",
"or",
"struct",
"(",
"it",
"is",
"a",
"nested",
"class",
"/",
"struct",
")",
"it",
"*",
"will",
"*",
"be",
"removed",
"from",
"so",
"that",
"the",
"class",
"view",
"hierarchy",
"is",
"generated",
"correctly",
"."
] | def reparentClassLike(self):
'''
Helper method for :func:`exhale.ExhaleRoot.reparentAll`. Iterates over the
``self.class_like`` list and adds each object as a child to a namespace if the
class, or struct is a member of that namespace. Many classes / structs will be
reparented to a namespace node, these will remain in ``self.class_like``.
However, if a class or struct is reparented to a different class or struct (it
is a nested class / struct), it *will* be removed from ``self.class_like`` so that the class view
hierarchy is generated correctly.
'''
removals = []
for cl in self.class_like:
parts = cl.name.split("::")
if len(parts) > 1:
# first try and reparent to namespaces
namespace_name = "::".join(parts[:-1])
parent_found = False
for n in self.namespaces:
if n.name == namespace_name:
n.children.append(cl)
cl.parent = n
parent_found = True
break
# if a namespace parent was not found, try and reparent to a class
if not parent_found:
# parent class name would be namespace_name
for p_cls in self.class_like:
if p_cls.name == namespace_name:
p_cls.children.append(cl)
cl.parent = p_cls
removals.append(cl)
break
for rm in removals:
if rm in self.class_like:
self.class_like.remove(rm) | [
"def",
"reparentClassLike",
"(",
"self",
")",
":",
"removals",
"=",
"[",
"]",
"for",
"cl",
"in",
"self",
".",
"class_like",
":",
"parts",
"=",
"cl",
".",
"name",
".",
"split",
"(",
"\"::\"",
")",
"if",
"len",
"(",
"parts",
")",
">",
"1",
":",
"# first try and reparent to namespaces",
"namespace_name",
"=",
"\"::\"",
".",
"join",
"(",
"parts",
"[",
":",
"-",
"1",
"]",
")",
"parent_found",
"=",
"False",
"for",
"n",
"in",
"self",
".",
"namespaces",
":",
"if",
"n",
".",
"name",
"==",
"namespace_name",
":",
"n",
".",
"children",
".",
"append",
"(",
"cl",
")",
"cl",
".",
"parent",
"=",
"n",
"parent_found",
"=",
"True",
"break",
"# if a namespace parent wasn not found, try and reparent to a class",
"if",
"not",
"parent_found",
":",
"# parent class name would be namespace_name",
"for",
"p_cls",
"in",
"self",
".",
"class_like",
":",
"if",
"p_cls",
".",
"name",
"==",
"namespace_name",
":",
"p_cls",
".",
"children",
".",
"append",
"(",
"cl",
")",
"cl",
".",
"parent",
"=",
"p_cls",
"removals",
".",
"append",
"(",
"cl",
")",
"break",
"for",
"rm",
"in",
"removals",
":",
"if",
"rm",
"in",
"self",
".",
"class_like",
":",
"self",
".",
"class_like",
".",
"remove",
"(",
"rm",
")"
] | https://github.com/Tom94/practical-path-guiding/blob/fcf01afb436184e8a74bf300aa89f69b03ab25a2/visualizer/nanogui/docs/exhale.py#L1653-L1689 |
||
google/earthenterprise | 0fe84e29be470cd857e3a0e52e5d0afd5bb8cee9 | earth_enterprise/src/fusion/portableglobe/cutter/cgi-bin/common/utils.py | python | OutputFile | (file_name, replace_params) | Outputs a file to standard out with the globe name replaced. | Outputs a file to standard out with the globe name replaced. | [
"Outputs",
"a",
"file",
"to",
"standard",
"out",
"with",
"the",
"globe",
"name",
"replaced",
"."
] | def OutputFile(file_name, replace_params):
"""Outputs a file to standard out with the globe name replaced."""
fp = open(file_name)
text = fp.read()
fp.close()
print ReplaceParams(text, replace_params) | [
"def",
"OutputFile",
"(",
"file_name",
",",
"replace_params",
")",
":",
"fp",
"=",
"open",
"(",
"file_name",
")",
"text",
"=",
"fp",
".",
"read",
"(",
")",
"fp",
".",
"close",
"(",
")",
"print",
"ReplaceParams",
"(",
"text",
",",
"replace_params",
")"
] | https://github.com/google/earthenterprise/blob/0fe84e29be470cd857e3a0e52e5d0afd5bb8cee9/earth_enterprise/src/fusion/portableglobe/cutter/cgi-bin/common/utils.py#L180-L185 |
||
wxWidgets/wxPython-Classic | 19571e1ae65f1ac445f5491474121998c97a1bf0 | src/msw/grid.py | python | GridTableBase.AppendCols | (*args, **kwargs) | return _grid.GridTableBase_AppendCols(*args, **kwargs) | AppendCols(self, size_t numCols=1) -> bool | AppendCols(self, size_t numCols=1) -> bool | [
"AppendCols",
"(",
"self",
"size_t",
"numCols",
"=",
"1",
")",
"-",
">",
"bool"
] | def AppendCols(*args, **kwargs):
"""AppendCols(self, size_t numCols=1) -> bool"""
return _grid.GridTableBase_AppendCols(*args, **kwargs) | [
"def",
"AppendCols",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"return",
"_grid",
".",
"GridTableBase_AppendCols",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")"
] | https://github.com/wxWidgets/wxPython-Classic/blob/19571e1ae65f1ac445f5491474121998c97a1bf0/src/msw/grid.py#L878-L880 |
|
makefile/frcnn | 8d9b9ebf8be8315ba2f374d460121b0adf1df29c | scripts/cpp_lint.py | python | Search | (pattern, s) | return _regexp_compile_cache[pattern].search(s) | Searches the string for the pattern, caching the compiled regexp. | Searches the string for the pattern, caching the compiled regexp. | [
"Searches",
"the",
"string",
"for",
"the",
"pattern",
"caching",
"the",
"compiled",
"regexp",
"."
] | def Search(pattern, s):
"""Searches the string for the pattern, caching the compiled regexp."""
if pattern not in _regexp_compile_cache:
_regexp_compile_cache[pattern] = sre_compile.compile(pattern)
return _regexp_compile_cache[pattern].search(s) | [
"def",
"Search",
"(",
"pattern",
",",
"s",
")",
":",
"if",
"pattern",
"not",
"in",
"_regexp_compile_cache",
":",
"_regexp_compile_cache",
"[",
"pattern",
"]",
"=",
"sre_compile",
".",
"compile",
"(",
"pattern",
")",
"return",
"_regexp_compile_cache",
"[",
"pattern",
"]",
".",
"search",
"(",
"s",
")"
] | https://github.com/makefile/frcnn/blob/8d9b9ebf8be8315ba2f374d460121b0adf1df29c/scripts/cpp_lint.py#L543-L547 |
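Search() above is a small memoising wrapper over the regex engine; an equivalent self-contained version of the same caching idea (using the public re.compile rather than the internal sre_compile module) is:

import re

_regexp_compile_cache = {}  # pattern string -> compiled regex

def search(pattern, s):
    """Search s for pattern, compiling and caching the regex on first use."""
    if pattern not in _regexp_compile_cache:
        _regexp_compile_cache[pattern] = re.compile(pattern)
    return _regexp_compile_cache[pattern].search(s)

print(bool(search(r'\bTODO\b', 'a TODO item')))   # True
print(bool(search(r'\bTODO\b', 'nothing here')))  # False, compiled pattern reused from the cache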
|
ApolloAuto/apollo-platform | 86d9dc6743b496ead18d597748ebabd34a513289 | ros/third_party/lib_x86_64/python2.7/dist-packages/numpy/oldnumeric/ma.py | python | power | (a, b, third=None) | a**b | a**b | [
"a",
"**",
"b"
] | def power (a, b, third=None):
"a**b"
if third is not None:
raise MAError("3-argument power not supported.")
ma = getmask(a)
mb = getmask(b)
m = mask_or(ma, mb)
fa = filled(a, 1)
fb = filled(b, 1)
if fb.dtype.char in typecodes["Integer"]:
return masked_array(umath.power(fa, fb), m)
md = make_mask(umath.less(fa, 0), flag=1)
m = mask_or(m, md)
if m is nomask:
return masked_array(umath.power(fa, fb))
else:
fa = numeric.where(m, 1, fa)
return masked_array(umath.power(fa, fb), m) | [
"def",
"power",
"(",
"a",
",",
"b",
",",
"third",
"=",
"None",
")",
":",
"if",
"third",
"is",
"not",
"None",
":",
"raise",
"MAError",
"(",
"\"3-argument power not supported.\"",
")",
"ma",
"=",
"getmask",
"(",
"a",
")",
"mb",
"=",
"getmask",
"(",
"b",
")",
"m",
"=",
"mask_or",
"(",
"ma",
",",
"mb",
")",
"fa",
"=",
"filled",
"(",
"a",
",",
"1",
")",
"fb",
"=",
"filled",
"(",
"b",
",",
"1",
")",
"if",
"fb",
".",
"dtype",
".",
"char",
"in",
"typecodes",
"[",
"\"Integer\"",
"]",
":",
"return",
"masked_array",
"(",
"umath",
".",
"power",
"(",
"fa",
",",
"fb",
")",
",",
"m",
")",
"md",
"=",
"make_mask",
"(",
"umath",
".",
"less",
"(",
"fa",
",",
"0",
")",
",",
"flag",
"=",
"1",
")",
"m",
"=",
"mask_or",
"(",
"m",
",",
"md",
")",
"if",
"m",
"is",
"nomask",
":",
"return",
"masked_array",
"(",
"umath",
".",
"power",
"(",
"fa",
",",
"fb",
")",
")",
"else",
":",
"fa",
"=",
"numeric",
".",
"where",
"(",
"m",
",",
"1",
",",
"fa",
")",
"return",
"masked_array",
"(",
"umath",
".",
"power",
"(",
"fa",
",",
"fb",
")",
",",
"m",
")"
] | https://github.com/ApolloAuto/apollo-platform/blob/86d9dc6743b496ead18d597748ebabd34a513289/ros/third_party/lib_x86_64/python2.7/dist-packages/numpy/oldnumeric/ma.py#L1591-L1608 |
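The oldnumeric.ma module in this record predates today's numpy.ma, but the masking behaviour it implements (mask the result where the base is negative for a fractional exponent, or where an input was already masked) survives there; a short sketch of the modern equivalent, assuming numpy is installed, is:

import numpy as np

# numpy.ma.power masks entries whose result would be invalid, much like the
# md = make_mask(umath.less(fa, 0), ...) branch in the record above.
a = np.ma.array([4.0, -4.0, 9.0], mask=[False, False, True])
print(np.ma.power(a, 0.5))  # [2.0 -- --]: negative base and pre-masked entry are both masked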
||
tensorflow/tensorflow | 419e3a6b650ea4bd1b0cba23c4348f8a69f3272e | tensorflow/python/keras/saving/save.py | python | save_model | (model,
filepath,
overwrite=True,
include_optimizer=True,
save_format=None,
signatures=None,
options=None,
save_traces=True) | Saves a model as a TensorFlow SavedModel or HDF5 file.
See the [Serialization and Saving guide](https://keras.io/guides/serialization_and_saving/)
for details.
Usage:
>>> model = tf.keras.Sequential([
... tf.keras.layers.Dense(5, input_shape=(3,)),
... tf.keras.layers.Softmax()])
>>> model.save('/tmp/model')
>>> loaded_model = tf.keras.models.load_model('/tmp/model')
>>> x = tf.random.uniform((10, 3))
>>> assert np.allclose(model.predict(x), loaded_model.predict(x))
The SavedModel and HDF5 file contains:
- the model's configuration (topology)
- the model's weights
- the model's optimizer's state (if any)
Thus models can be reinstantiated in the exact same state, without any of the
code used for model definition or training.
Note that the model weights may have different scoped names after being
loaded. Scoped names include the model/layer names, such as
`"dense_1/kernel:0"`. It is recommended that you use the layer properties to
access specific variables, e.g. `model.get_layer("dense_1").kernel`.
__SavedModel serialization format__
Keras SavedModel uses `tf.saved_model.save` to save the model and all
trackable objects attached to the model (e.g. layers and variables). The model
config, weights, and optimizer are saved in the SavedModel. Additionally, for
every Keras layer attached to the model, the SavedModel stores:
* the config and metadata -- e.g. name, dtype, trainable status
* traced call and loss functions, which are stored as TensorFlow subgraphs.
The traced functions allow the SavedModel format to save and load custom
layers without the original class definition.
You can choose to not save the traced functions by disabling the `save_traces`
option. This will decrease the time it takes to save the model and the
amount of disk space occupied by the output SavedModel. If you enable this
option, then you _must_ provide all custom class definitions when loading
the model. See the `custom_objects` argument in `tf.keras.models.load_model`.
Args:
model: Keras model instance to be saved.
filepath: One of the following:
- String or `pathlib.Path` object, path where to save the model
- `h5py.File` object where to save the model
overwrite: Whether we should overwrite any existing model at the target
location, or instead ask the user with a manual prompt.
include_optimizer: If True, save optimizer's state together.
save_format: Either 'tf' or 'h5', indicating whether to save the model
to Tensorflow SavedModel or HDF5. Defaults to 'tf' in TF 2.X, and 'h5'
in TF 1.X.
signatures: Signatures to save with the SavedModel. Applicable to the 'tf'
format only. Please see the `signatures` argument in
`tf.saved_model.save` for details.
options: (only applies to SavedModel format) `tf.saved_model.SaveOptions`
object that specifies options for saving to SavedModel.
save_traces: (only applies to SavedModel format) When enabled, the
SavedModel will store the function traces for each layer. This
can be disabled, so that only the configs of each layer are stored.
Defaults to `True`. Disabling this will decrease serialization time and
reduce file size, but it requires that all custom layers/models
implement a `get_config()` method.
Raises:
ImportError: If save format is hdf5, and h5py is not available. | Saves a model as a TensorFlow SavedModel or HDF5 file. | [
"Saves",
"a",
"model",
"as",
"a",
"TensorFlow",
"SavedModel",
"or",
"HDF5",
"file",
"."
] | def save_model(model,
filepath,
overwrite=True,
include_optimizer=True,
save_format=None,
signatures=None,
options=None,
save_traces=True):
# pylint: disable=line-too-long
"""Saves a model as a TensorFlow SavedModel or HDF5 file.
See the [Serialization and Saving guide](https://keras.io/guides/serialization_and_saving/)
for details.
Usage:
>>> model = tf.keras.Sequential([
... tf.keras.layers.Dense(5, input_shape=(3,)),
... tf.keras.layers.Softmax()])
>>> model.save('/tmp/model')
>>> loaded_model = tf.keras.models.load_model('/tmp/model')
>>> x = tf.random.uniform((10, 3))
>>> assert np.allclose(model.predict(x), loaded_model.predict(x))
The SavedModel and HDF5 file contains:
- the model's configuration (topology)
- the model's weights
- the model's optimizer's state (if any)
Thus models can be reinstantiated in the exact same state, without any of the
code used for model definition or training.
Note that the model weights may have different scoped names after being
loaded. Scoped names include the model/layer names, such as
`"dense_1/kernel:0"`. It is recommended that you use the layer properties to
access specific variables, e.g. `model.get_layer("dense_1").kernel`.
__SavedModel serialization format__
Keras SavedModel uses `tf.saved_model.save` to save the model and all
trackable objects attached to the model (e.g. layers and variables). The model
config, weights, and optimizer are saved in the SavedModel. Additionally, for
every Keras layer attached to the model, the SavedModel stores:
* the config and metadata -- e.g. name, dtype, trainable status
* traced call and loss functions, which are stored as TensorFlow subgraphs.
The traced functions allow the SavedModel format to save and load custom
layers without the original class definition.
You can choose to not save the traced functions by disabling the `save_traces`
option. This will decrease the time it takes to save the model and the
amount of disk space occupied by the output SavedModel. If you enable this
option, then you _must_ provide all custom class definitions when loading
the model. See the `custom_objects` argument in `tf.keras.models.load_model`.
Args:
model: Keras model instance to be saved.
filepath: One of the following:
- String or `pathlib.Path` object, path where to save the model
- `h5py.File` object where to save the model
overwrite: Whether we should overwrite any existing model at the target
location, or instead ask the user with a manual prompt.
include_optimizer: If True, save optimizer's state together.
save_format: Either 'tf' or 'h5', indicating whether to save the model
to Tensorflow SavedModel or HDF5. Defaults to 'tf' in TF 2.X, and 'h5'
in TF 1.X.
signatures: Signatures to save with the SavedModel. Applicable to the 'tf'
format only. Please see the `signatures` argument in
`tf.saved_model.save` for details.
options: (only applies to SavedModel format) `tf.saved_model.SaveOptions`
object that specifies options for saving to SavedModel.
save_traces: (only applies to SavedModel format) When enabled, the
SavedModel will store the function traces for each layer. This
can be disabled, so that only the configs of each layer are stored.
Defaults to `True`. Disabling this will decrease serialization time and
reduce file size, but it requires that all custom layers/models
implement a `get_config()` method.
Raises:
ImportError: If save format is hdf5, and h5py is not available.
"""
# pylint: enable=line-too-long
from tensorflow.python.keras.engine import sequential # pylint: disable=g-import-not-at-top
default_format = 'tf' if tf2.enabled() else 'h5'
save_format = save_format or default_format
filepath = path_to_string(filepath)
# If the user has not already called fit or built the underlying metrics, we
# should do that before saving to ensure the metric names have all
# appropriate name transformations applied.
saving_utils.try_build_compiled_arguments(model)
if (save_format == 'h5' or
(h5py is not None and isinstance(filepath, h5py.File)) or
saving_utils.is_hdf5_filepath(filepath)):
# TODO(b/130258301): add utility method for detecting model type.
if (not model._is_graph_network and # pylint:disable=protected-access
not isinstance(model, sequential.Sequential)):
raise NotImplementedError(
'Saving the model to HDF5 format requires the model to be a '
'Functional model or a Sequential model. It does not work for '
'subclassed models, because such models are defined via the body of '
'a Python method, which isn\'t safely serializable. Consider saving '
'to the Tensorflow SavedModel format (by setting save_format="tf") '
'or using `save_weights`.')
hdf5_format.save_model_to_hdf5(
model, filepath, overwrite, include_optimizer)
else:
with generic_utils.SharedObjectSavingScope():
saved_model_save.save(model, filepath, overwrite, include_optimizer,
signatures, options, save_traces) | [
"def",
"save_model",
"(",
"model",
",",
"filepath",
",",
"overwrite",
"=",
"True",
",",
"include_optimizer",
"=",
"True",
",",
"save_format",
"=",
"None",
",",
"signatures",
"=",
"None",
",",
"options",
"=",
"None",
",",
"save_traces",
"=",
"True",
")",
":",
"# pylint: disable=line-too-long",
"# pylint: enable=line-too-long",
"from",
"tensorflow",
".",
"python",
".",
"keras",
".",
"engine",
"import",
"sequential",
"# pylint: disable=g-import-not-at-top",
"default_format",
"=",
"'tf'",
"if",
"tf2",
".",
"enabled",
"(",
")",
"else",
"'h5'",
"save_format",
"=",
"save_format",
"or",
"default_format",
"filepath",
"=",
"path_to_string",
"(",
"filepath",
")",
"# If the user has not already called fit or built the underlying metrics, we",
"# should do that before saving to ensure the metric names have all",
"# appropriate name transformations applied.",
"saving_utils",
".",
"try_build_compiled_arguments",
"(",
"model",
")",
"if",
"(",
"save_format",
"==",
"'h5'",
"or",
"(",
"h5py",
"is",
"not",
"None",
"and",
"isinstance",
"(",
"filepath",
",",
"h5py",
".",
"File",
")",
")",
"or",
"saving_utils",
".",
"is_hdf5_filepath",
"(",
"filepath",
")",
")",
":",
"# TODO(b/130258301): add utility method for detecting model type.",
"if",
"(",
"not",
"model",
".",
"_is_graph_network",
"and",
"# pylint:disable=protected-access",
"not",
"isinstance",
"(",
"model",
",",
"sequential",
".",
"Sequential",
")",
")",
":",
"raise",
"NotImplementedError",
"(",
"'Saving the model to HDF5 format requires the model to be a '",
"'Functional model or a Sequential model. It does not work for '",
"'subclassed models, because such models are defined via the body of '",
"'a Python method, which isn\\'t safely serializable. Consider saving '",
"'to the Tensorflow SavedModel format (by setting save_format=\"tf\") '",
"'or using `save_weights`.'",
")",
"hdf5_format",
".",
"save_model_to_hdf5",
"(",
"model",
",",
"filepath",
",",
"overwrite",
",",
"include_optimizer",
")",
"else",
":",
"with",
"generic_utils",
".",
"SharedObjectSavingScope",
"(",
")",
":",
"saved_model_save",
".",
"save",
"(",
"model",
",",
"filepath",
",",
"overwrite",
",",
"include_optimizer",
",",
"signatures",
",",
"options",
",",
"save_traces",
")"
] | https://github.com/tensorflow/tensorflow/blob/419e3a6b650ea4bd1b0cba23c4348f8a69f3272e/tensorflow/python/keras/saving/save.py#L37-L151 |
||
msftguy/ssh-rd | a5f3a79daeac5844edebf01916c9613563f1c390 | _3rd/boost_1_48_0/tools/build/v2/util/utility.py | python | get_value | (property) | return replace_grist (property, '') | Gets the value of a property, that is, the part following the grist, if any. | Gets the value of a property, that is, the part following the grist, if any. | [
"Gets",
"the",
"value",
"of",
"a",
"property",
"that",
"is",
"the",
"part",
"following",
"the",
"grist",
"if",
"any",
"."
] | def get_value (property):
""" Gets the value of a property, that is, the part following the grist, if any.
"""
return replace_grist (property, '') | [
"def",
"get_value",
"(",
"property",
")",
":",
"return",
"replace_grist",
"(",
"property",
",",
"''",
")"
] | https://github.com/msftguy/ssh-rd/blob/a5f3a79daeac5844edebf01916c9613563f1c390/_3rd/boost_1_48_0/tools/build/v2/util/utility.py#L71-L74 |
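In Boost.Build a property is written as an optional <grist> prefix followed by a value, e.g. <toolset>gcc, and get_value simply drops that prefix; a standalone re-implementation of the stripping step (an illustration, not the b2 utility module itself) is:

import re

def get_value(prop):
    """Return the part of a property after its <grist> prefix, if any."""
    return re.sub(r'^<[^>]*>', '', prop)

print(get_value('<toolset>gcc'))  # gcc
print(get_value('release'))       # release: nothing to strip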
|
francinexue/xuefu | b6ff79747a42e020588c0c0a921048e08fe4680c | api/ctpx/ctptd.py | python | CtpTd.onRspQryParkedOrderAction | (self, ParkedOrderActionField, RspInfoField, requestId, final) | | Response to the parked order action query request | Response to the parked order action query request | [
"请求查询预埋撤单响应"
] | def onRspQryParkedOrderAction(self, ParkedOrderActionField, RspInfoField, requestId, final):
"""请求查询预埋撤单响应"""
pass | [
"def",
"onRspQryParkedOrderAction",
"(",
"self",
",",
"ParkedOrderActionField",
",",
"RspInfoField",
",",
"requestId",
",",
"final",
")",
":",
"pass"
] | https://github.com/francinexue/xuefu/blob/b6ff79747a42e020588c0c0a921048e08fe4680c/api/ctpx/ctptd.py#L439-L441 |
||
adobe/chromium | cfe5bf0b51b1f6b9fe239c2a3c2f2364da9967d7 | gpu/command_buffer/build_gles2_cmd_buffer.py | python | ImmediatePointerArgument.WriteValidationCode | (self, file, func) | Overridden from Argument. | Overridden from Argument. | [
"Overridden",
"from",
"Argument",
"."
] | def WriteValidationCode(self, file, func):
"""Overridden from Argument."""
file.Write(" if (%s == NULL) {\n" % self.name)
file.Write(" return error::kOutOfBounds;\n")
file.Write(" }\n") | [
"def",
"WriteValidationCode",
"(",
"self",
",",
"file",
",",
"func",
")",
":",
"file",
".",
"Write",
"(",
"\" if (%s == NULL) {\\n\"",
"%",
"self",
".",
"name",
")",
"file",
".",
"Write",
"(",
"\" return error::kOutOfBounds;\\n\"",
")",
"file",
".",
"Write",
"(",
"\" }\\n\"",
")"
] | https://github.com/adobe/chromium/blob/cfe5bf0b51b1f6b9fe239c2a3c2f2364da9967d7/gpu/command_buffer/build_gles2_cmd_buffer.py#L4862-L4866 |
||
wlanjie/AndroidFFmpeg | 7baf9122f4b8e1c74e7baf4be5c422c7a5ba5aaf | tools/fdk-aac-build/armeabi/toolchain/lib/python2.7/lib-tk/Tkinter.py | python | Tk.readprofile | (self, baseName, className) | Internal function. It reads BASENAME.tcl and CLASSNAME.tcl into
the Tcl Interpreter and calls execfile on BASENAME.py and CLASSNAME.py if
such a file exists in the home directory. | Internal function. It reads BASENAME.tcl and CLASSNAME.tcl into
the Tcl Interpreter and calls execfile on BASENAME.py and CLASSNAME.py if
such a file exists in the home directory. | [
"Internal",
"function",
".",
"It",
"reads",
"BASENAME",
".",
"tcl",
"and",
"CLASSNAME",
".",
"tcl",
"into",
"the",
"Tcl",
"Interpreter",
"and",
"calls",
"execfile",
"on",
"BASENAME",
".",
"py",
"and",
"CLASSNAME",
".",
"py",
"if",
"such",
"a",
"file",
"exists",
"in",
"the",
"home",
"directory",
"."
] | def readprofile(self, baseName, className):
"""Internal function. It reads BASENAME.tcl and CLASSNAME.tcl into
the Tcl Interpreter and calls execfile on BASENAME.py and CLASSNAME.py if
such a file exists in the home directory."""
import os
if 'HOME' in os.environ: home = os.environ['HOME']
else: home = os.curdir
class_tcl = os.path.join(home, '.%s.tcl' % className)
class_py = os.path.join(home, '.%s.py' % className)
base_tcl = os.path.join(home, '.%s.tcl' % baseName)
base_py = os.path.join(home, '.%s.py' % baseName)
dir = {'self': self}
exec 'from Tkinter import *' in dir
if os.path.isfile(class_tcl):
self.tk.call('source', class_tcl)
if os.path.isfile(class_py):
execfile(class_py, dir)
if os.path.isfile(base_tcl):
self.tk.call('source', base_tcl)
if os.path.isfile(base_py):
execfile(base_py, dir) | [
"def",
"readprofile",
"(",
"self",
",",
"baseName",
",",
"className",
")",
":",
"import",
"os",
"if",
"'HOME'",
"in",
"os",
".",
"environ",
":",
"home",
"=",
"os",
".",
"environ",
"[",
"'HOME'",
"]",
"else",
":",
"home",
"=",
"os",
".",
"curdir",
"class_tcl",
"=",
"os",
".",
"path",
".",
"join",
"(",
"home",
",",
"'.%s.tcl'",
"%",
"className",
")",
"class_py",
"=",
"os",
".",
"path",
".",
"join",
"(",
"home",
",",
"'.%s.py'",
"%",
"className",
")",
"base_tcl",
"=",
"os",
".",
"path",
".",
"join",
"(",
"home",
",",
"'.%s.tcl'",
"%",
"baseName",
")",
"base_py",
"=",
"os",
".",
"path",
".",
"join",
"(",
"home",
",",
"'.%s.py'",
"%",
"baseName",
")",
"dir",
"=",
"{",
"'self'",
":",
"self",
"}",
"exec",
"'from Tkinter import *'",
"in",
"dir",
"if",
"os",
".",
"path",
".",
"isfile",
"(",
"class_tcl",
")",
":",
"self",
".",
"tk",
".",
"call",
"(",
"'source'",
",",
"class_tcl",
")",
"if",
"os",
".",
"path",
".",
"isfile",
"(",
"class_py",
")",
":",
"execfile",
"(",
"class_py",
",",
"dir",
")",
"if",
"os",
".",
"path",
".",
"isfile",
"(",
"base_tcl",
")",
":",
"self",
".",
"tk",
".",
"call",
"(",
"'source'",
",",
"base_tcl",
")",
"if",
"os",
".",
"path",
".",
"isfile",
"(",
"base_py",
")",
":",
"execfile",
"(",
"base_py",
",",
"dir",
")"
] | https://github.com/wlanjie/AndroidFFmpeg/blob/7baf9122f4b8e1c74e7baf4be5c422c7a5ba5aaf/tools/fdk-aac-build/armeabi/toolchain/lib/python2.7/lib-tk/Tkinter.py#L1795-L1815 |
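readprofile() above looks for per-user startup files named after the application's baseName and className under $HOME; a small runnable sketch of just the path-building convention (without the Tcl sourcing or execfile side effects) is:

import os

def profile_paths(base_name, class_name):
    """Return the candidate ~/.<className>.{tcl,py} and ~/.<baseName>.{tcl,py} paths."""
    home = os.environ.get('HOME', os.curdir)
    return [os.path.join(home, '.%s.tcl' % class_name),
            os.path.join(home, '.%s.py' % class_name),
            os.path.join(home, '.%s.tcl' % base_name),
            os.path.join(home, '.%s.py' % base_name)]

print(profile_paths('tk', 'Tk'))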
||
mantidproject/mantid | 03deeb89254ec4289edb8771e0188c2090a02f32 | qt/python/mantidqtinterfaces/mantidqtinterfaces/Muon/GUI/Common/fitting_widgets/basic_fitting/fit_function_options_view.py | python | FitFunctionOptionsView.set_slot_for_plot_guess_start_x_updated | (self, slot) | Connect the slot for the start x option. | Connect the slot for the start x option. | [
"Connect",
"the",
"slot",
"for",
"the",
"start",
"x",
"option",
"."
] | def set_slot_for_plot_guess_start_x_updated(self, slot) -> None:
"""Connect the slot for the start x option."""
self.plot_guess_start_x_line_edit.editingFinished.connect(slot) | [
"def",
"set_slot_for_plot_guess_start_x_updated",
"(",
"self",
",",
"slot",
")",
"->",
"None",
":",
"self",
".",
"plot_guess_start_x_line_edit",
".",
"editingFinished",
".",
"connect",
"(",
"slot",
")"
] | https://github.com/mantidproject/mantid/blob/03deeb89254ec4289edb8771e0188c2090a02f32/qt/python/mantidqtinterfaces/mantidqtinterfaces/Muon/GUI/Common/fitting_widgets/basic_fitting/fit_function_options_view.py#L134-L136 |
||
baidu-research/tensorflow-allreduce | 66d5b855e90b0949e9fa5cca5599fd729a70e874 | tensorflow/contrib/layers/python/layers/feature_column.py | python | _SparseColumn.weight_tensor | (self, input_tensor) | return None | Returns the weight tensor from the given transformed input_tensor. | Returns the weight tensor from the given transformed input_tensor. | [
"Returns",
"the",
"weight",
"tensor",
"from",
"the",
"given",
"transformed",
"input_tensor",
"."
] | def weight_tensor(self, input_tensor):
"""Returns the weight tensor from the given transformed input_tensor."""
return None | [
"def",
"weight_tensor",
"(",
"self",
",",
"input_tensor",
")",
":",
"return",
"None"
] | https://github.com/baidu-research/tensorflow-allreduce/blob/66d5b855e90b0949e9fa5cca5599fd729a70e874/tensorflow/contrib/layers/python/layers/feature_column.py#L414-L416 |
|
htcondor/htcondor | 4829724575176d1d6c936e4693dfd78a728569b0 | src/condor_contrib/condor_pigeon/src/condor_pigeon_client/skype_linux_tools/Skype4Py/skype.py | python | ISkypeEvents.CallTransferStatusChanged | (self, Call, Status) | This event occurs when a call transfer status changes.
@param Call: Call object.
@type Call: L{ICall}
@param Status: New status of the call transfer.
@type Status: L{Call status<enums.clsUnknown>} | This event occurs when a call transfer status changes. | [
"This",
"event",
"occurs",
"when",
"a",
"call",
"transfer",
"status",
"changes",
"."
] | def CallTransferStatusChanged(self, Call, Status):
'''This event occurs when a call transfer status changes.
@param Call: Call object.
@type Call: L{ICall}
@param Status: New status of the call transfer.
@type Status: L{Call status<enums.clsUnknown>}
''' | [
"def",
"CallTransferStatusChanged",
"(",
"self",
",",
"Call",
",",
"Status",
")",
":"
] | https://github.com/htcondor/htcondor/blob/4829724575176d1d6c936e4693dfd78a728569b0/src/condor_contrib/condor_pigeon/src/condor_pigeon_client/skype_linux_tools/Skype4Py/skype.py#L1419-L1426 |
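Event docstrings like the one above describe methods the library calls back into; the usual way to consume them is to override the method on an events object. A minimal, self-contained sketch (with a stub standing in for the real ISkypeEvents class, and the attachment to a Skype instance left out) is:

class ISkypeEvents:  # stub standing in for the Skype4Py class documented above
    pass

class MyEvents(ISkypeEvents):
    def CallTransferStatusChanged(self, Call, Status):
        # Invoked by the library whenever a call transfer's status changes.
        print('transfer status is now', Status)

MyEvents().CallTransferStatusChanged(Call=None, Status='clsUnknown')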
||
mantidproject/mantid | 03deeb89254ec4289edb8771e0188c2090a02f32 | qt/python/mantidqtinterfaces/mantidqtinterfaces/Muon/GUI/Common/fitting_widgets/basic_fitting/basic_fitting_presenter.py | python | BasicFittingPresenter.handle_exclude_range_state_changed | (self) | Handles when Exclude Range is ticked or unticked. | Handles when Exclude Range is ticked or unticked. | [
"Handles",
"when",
"Exclude",
"Range",
"is",
"ticked",
"or",
"unticked",
"."
] | def handle_exclude_range_state_changed(self) -> None:
"""Handles when Exclude Range is ticked or unticked."""
self.model.exclude_range = self.view.exclude_range
self.view.set_exclude_start_and_end_x_visible(self.model.exclude_range) | [
"def",
"handle_exclude_range_state_changed",
"(",
"self",
")",
"->",
"None",
":",
"self",
".",
"model",
".",
"exclude_range",
"=",
"self",
".",
"view",
".",
"exclude_range",
"self",
".",
"view",
".",
"set_exclude_start_and_end_x_visible",
"(",
"self",
".",
"model",
".",
"exclude_range",
")"
] | https://github.com/mantidproject/mantid/blob/03deeb89254ec4289edb8771e0188c2090a02f32/qt/python/mantidqtinterfaces/mantidqtinterfaces/Muon/GUI/Common/fitting_widgets/basic_fitting/basic_fitting_presenter.py#L322-L325 |
||
mantidproject/mantid | 03deeb89254ec4289edb8771e0188c2090a02f32 | qt/python/mantidqtinterfaces/mantidqtinterfaces/HFIR_4Circle_Reduction/hfctables.py | python | ScanSurveyTable.get_hkl | (self, row_index) | return index_h, index_k, index_l | Get peak index (HKL) from survey table (i.e., SPICE file)
:param row_index:
:return: | Get peak index (HKL) from survey table (i.e., SPICE file)
:param row_index:
:return: | [
"Get",
"peak",
"index",
"(",
"HKL",
")",
"from",
"survey",
"table",
"(",
"i",
".",
"e",
".",
"SPICE",
"file",
")",
":",
"param",
"row_index",
":",
":",
"return",
":"
] | def get_hkl(self, row_index):
"""
Get peak index (HKL) from survey table (i.e., SPICE file)
:param row_index:
:return:
"""
index_h = self.get_cell_value(row_index, self._colIndexH)
index_k = self.get_cell_value(row_index, self._colIndexK)
index_l = self.get_cell_value(row_index, self._colIndexL)
return index_h, index_k, index_l | [
"def",
"get_hkl",
"(",
"self",
",",
"row_index",
")",
":",
"index_h",
"=",
"self",
".",
"get_cell_value",
"(",
"row_index",
",",
"self",
".",
"_colIndexH",
")",
"index_k",
"=",
"self",
".",
"get_cell_value",
"(",
"row_index",
",",
"self",
".",
"_colIndexK",
")",
"index_l",
"=",
"self",
".",
"get_cell_value",
"(",
"row_index",
",",
"self",
".",
"_colIndexL",
")",
"return",
"index_h",
",",
"index_k",
",",
"index_l"
] | https://github.com/mantidproject/mantid/blob/03deeb89254ec4289edb8771e0188c2090a02f32/qt/python/mantidqtinterfaces/mantidqtinterfaces/HFIR_4Circle_Reduction/hfctables.py#L1229-L1239 |
|
aws/lumberyard | f85344403c1c2e77ec8c75deb2c116e97b713217 | dev/Gems/CloudGemDefectReporter/v1/AWS/common-code/Lib/requests_oauthlib/oauth2_session.py | python | OAuth2Session.register_compliance_hook | (self, hook_type, hook) | Register a hook for request/response tweaking.
Available hooks are:
access_token_response invoked before token parsing.
refresh_token_response invoked before refresh token parsing.
protected_request invoked before making a request.
If you find a new hook is needed please send a GitHub PR request
or open an issue. | Register a hook for request/response tweaking. | [
"Register",
"a",
"hook",
"for",
"request",
"/",
"response",
"tweaking",
"."
] | def register_compliance_hook(self, hook_type, hook):
"""Register a hook for request/response tweaking.
Available hooks are:
access_token_response invoked before token parsing.
refresh_token_response invoked before refresh token parsing.
protected_request invoked before making a request.
If you find a new hook is needed please send a GitHub PR request
or open an issue.
"""
if hook_type not in self.compliance_hook:
raise ValueError('Hook type %s is not in %s.',
hook_type, self.compliance_hook)
self.compliance_hook[hook_type].add(hook) | [
"def",
"register_compliance_hook",
"(",
"self",
",",
"hook_type",
",",
"hook",
")",
":",
"if",
"hook_type",
"not",
"in",
"self",
".",
"compliance_hook",
":",
"raise",
"ValueError",
"(",
"'Hook type %s is not in %s.'",
",",
"hook_type",
",",
"self",
".",
"compliance_hook",
")",
"self",
".",
"compliance_hook",
"[",
"hook_type",
"]",
".",
"add",
"(",
"hook",
")"
] | https://github.com/aws/lumberyard/blob/f85344403c1c2e77ec8c75deb2c116e97b713217/dev/Gems/CloudGemDefectReporter/v1/AWS/common-code/Lib/requests_oauthlib/oauth2_session.py#L362-L376 |
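register_compliance_hook() above is how requests-oauthlib lets callers patch up providers that bend the OAuth2 spec. A short sketch of registering an access_token_response hook; the client id is a placeholder, and the assumed hook contract is that it receives the raw token response and returns it (possibly modified) before parsing.

from requests_oauthlib import OAuth2Session

def log_token_response(response):
    # Inspect (or rewrite) the token endpoint response before it is parsed.
    print('token endpoint returned HTTP', response.status_code)
    return response

session = OAuth2Session(client_id='my-client-id')  # placeholder client id
session.register_compliance_hook('access_token_response', log_token_response)
# Passing an unknown hook type raises ValueError, as the code above shows.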
||
ChromiumWebApps/chromium | c7361d39be8abd1574e6ce8957c8dbddd4c6ccf7 | third_party/markdown/extensions/abbr.py | python | AbbrExtension.extendMarkdown | (self, md, md_globals) | Insert AbbrPreprocessor before ReferencePreprocessor. | Insert AbbrPreprocessor before ReferencePreprocessor. | [
"Insert",
"AbbrPreprocessor",
"before",
"ReferencePreprocessor",
"."
] | def extendMarkdown(self, md, md_globals):
""" Insert AbbrPreprocessor before ReferencePreprocessor. """
md.preprocessors.add('abbr', AbbrPreprocessor(md), '<reference') | [
"def",
"extendMarkdown",
"(",
"self",
",",
"md",
",",
"md_globals",
")",
":",
"md",
".",
"preprocessors",
".",
"add",
"(",
"'abbr'",
",",
"AbbrPreprocessor",
"(",
"md",
")",
",",
"'<reference'",
")"
] | https://github.com/ChromiumWebApps/chromium/blob/c7361d39be8abd1574e6ce8957c8dbddd4c6ccf7/third_party/markdown/extensions/abbr.py#L72-L74 |
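extendMarkdown() above just wires the abbreviation preprocessor into Python-Markdown; end users normally enable it by name. A small sketch of that usage (the exact <abbr> markup in the output can vary between Markdown releases):

import markdown

text = """The HTML spec is maintained by the W3C.

*[HTML]: Hyper Text Markup Language
*[W3C]: World Wide Web Consortium
"""

# The 'abbr' extension registers the preprocessor documented above ahead of
# reference processing, so known abbreviations are wrapped in <abbr> tags.
print(markdown.markdown(text, extensions=['abbr']))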
||
krishauser/Klampt | 972cc83ea5befac3f653c1ba20f80155768ad519 | Python/klampt/src/robotsim.py | python | IKObjective.setAxialRotConstraint | (self, alocal: "double const [3]", aworld: "double const [3]") | return _robotsim.IKObjective_setAxialRotConstraint(self, alocal, aworld) | r"""
setAxialRotConstraint(IKObjective self, double const [3] alocal, double const [3] aworld)
Manual: Sets an axial rotation constraint. | r"""
setAxialRotConstraint(IKObjective self, double const [3] alocal, double const [3] aworld) | [
"r",
"setAxialRotConstraint",
"(",
"IKObjective",
"self",
"double",
"const",
"[",
"3",
"]",
"alocal",
"double",
"const",
"[",
"3",
"]",
"aworld",
")"
] | def setAxialRotConstraint(self, alocal: "double const [3]", aworld: "double const [3]") -> "void":
r"""
setAxialRotConstraint(IKObjective self, double const [3] alocal, double const [3] aworld)
Manual: Sets an axial rotation constraint.
"""
return _robotsim.IKObjective_setAxialRotConstraint(self, alocal, aworld) | [
"def",
"setAxialRotConstraint",
"(",
"self",
",",
"alocal",
":",
"\"double const [3]\"",
",",
"aworld",
":",
"\"double const [3]\"",
")",
"->",
"\"void\"",
":",
"return",
"_robotsim",
".",
"IKObjective_setAxialRotConstraint",
"(",
"self",
",",
"alocal",
",",
"aworld",
")"
] | https://github.com/krishauser/Klampt/blob/972cc83ea5befac3f653c1ba20f80155768ad519/Python/klampt/src/robotsim.py#L6477-L6485 |
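The SWIG wrapper above exposes Klampt's axial rotation constraint: the local axis alocal must map onto the world axis aworld while rotation about that axis stays free. A minimal sketch on a standalone objective (the top-level import is assumed from Klampt's Python packaging):

from klampt import IKObjective

obj = IKObjective()
# Keep the link's local +z axis aligned with the world +z axis, leaving
# the rotation about that axis unconstrained.
obj.setAxialRotConstraint([0.0, 0.0, 1.0], [0.0, 0.0, 1.0])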
|
hanpfei/chromium-net | 392cc1fa3a8f92f42e4071ab6e674d8e0482f83f | tools/perf/metrics/network.py | python | NetworkMetric.Stop | (self, _, tab) | Prepare the results for this page.
The results are the differences between the current values
and the values when Start() was called. | Prepare the results for this page. | [
"Prepare",
"the",
"results",
"for",
"this",
"page",
"."
] | def Stop(self, _, tab):
"""Prepare the results for this page.
The results are the differences between the current values
and the values when Start() was called.
"""
if not self._platform.CanMonitorNetworkData():
return
data = self._platform.GetNetworkData(self._browser)
if data is not None:
snd, rcv = data
if self._network_snd is not None:
self._network_snd = snd - self._network_snd
if self._network_rcv is not None:
self._network_rcv = rcv - self._network_rcv
else: # If end data cannot be found, report none.
self._network_snd = None
self._network_rcv = None | [
"def",
"Stop",
"(",
"self",
",",
"_",
",",
"tab",
")",
":",
"if",
"not",
"self",
".",
"_platform",
".",
"CanMonitorNetworkData",
"(",
")",
":",
"return",
"data",
"=",
"self",
".",
"_platform",
".",
"GetNetworkData",
"(",
"self",
".",
"_browser",
")",
"if",
"data",
"is",
"not",
"None",
":",
"snd",
",",
"rcv",
"=",
"data",
"if",
"self",
".",
"_network_snd",
"is",
"not",
"None",
":",
"self",
".",
"_network_snd",
"=",
"snd",
"-",
"self",
".",
"_network_snd",
"if",
"self",
".",
"_network_rcv",
"is",
"not",
"None",
":",
"self",
".",
"_network_rcv",
"=",
"rcv",
"-",
"self",
".",
"_network_rcv",
"else",
":",
"# If end data cannot be found, report none.",
"self",
".",
"_network_snd",
"=",
"None",
"self",
".",
"_network_rcv",
"=",
"None"
] | https://github.com/hanpfei/chromium-net/blob/392cc1fa3a8f92f42e4071ab6e674d8e0482f83f/tools/perf/metrics/network.py#L38-L56 |
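Stop() above reports network usage as the difference between counters sampled at Start() and at Stop(), and reports nothing when either sample is unavailable. A stripped-down, framework-free sketch of that before/after delta pattern (the counter callable stands in for the platform-specific byte counters; none of the Telemetry classes are used here):

class DeltaCounter(object):
    """Report how much a monotonically increasing counter grew between start() and stop()."""

    def __init__(self, read_counter):
        self._read_counter = read_counter  # returns the current total, or None
        self._start = None
        self.delta = None

    def start(self):
        self._start = self._read_counter()

    def stop(self):
        current = self._read_counter()
        if current is None or self._start is None:
            self.delta = None  # counter unavailable: report nothing
        else:
            self.delta = current - self._start

readings = iter([1000, 2500])                  # fake counter grows by 1500 bytes
counter = DeltaCounter(lambda: next(readings))
counter.start()
counter.stop()
print(counter.delta)                           # 1500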
||
aws/lumberyard | f85344403c1c2e77ec8c75deb2c116e97b713217 | dev/Tools/Python/3.7.10/linux_x64/lib/python3.7/compileall.py | python | compile_path | (skip_curdir=1, maxlevels=0, force=False, quiet=0,
legacy=False, optimize=-1,
invalidation_mode=None) | return success | Byte-compile all module on sys.path.
Arguments (all optional):
skip_curdir: if true, skip current directory (default True)
maxlevels: max recursion level (default 0)
force: as for compile_dir() (default False)
quiet: as for compile_dir() (default 0)
legacy: as for compile_dir() (default False)
optimize: as for compile_dir() (default -1)
invalidation_mode: as for compiler_dir() | Byte-compile all module on sys.path. | [
"Byte",
"-",
"compile",
"all",
"module",
"on",
"sys",
".",
"path",
"."
] | def compile_path(skip_curdir=1, maxlevels=0, force=False, quiet=0,
legacy=False, optimize=-1,
invalidation_mode=None):
"""Byte-compile all module on sys.path.
Arguments (all optional):
skip_curdir: if true, skip current directory (default True)
maxlevels: max recursion level (default 0)
force: as for compile_dir() (default False)
quiet: as for compile_dir() (default 0)
legacy: as for compile_dir() (default False)
optimize: as for compile_dir() (default -1)
invalidation_mode: as for compiler_dir()
"""
success = True
for dir in sys.path:
if (not dir or dir == os.curdir) and skip_curdir:
if quiet < 2:
print('Skipping current directory')
else:
success = success and compile_dir(
dir,
maxlevels,
None,
force,
quiet=quiet,
legacy=legacy,
optimize=optimize,
invalidation_mode=invalidation_mode,
)
return success | [
"def",
"compile_path",
"(",
"skip_curdir",
"=",
"1",
",",
"maxlevels",
"=",
"0",
",",
"force",
"=",
"False",
",",
"quiet",
"=",
"0",
",",
"legacy",
"=",
"False",
",",
"optimize",
"=",
"-",
"1",
",",
"invalidation_mode",
"=",
"None",
")",
":",
"success",
"=",
"True",
"for",
"dir",
"in",
"sys",
".",
"path",
":",
"if",
"(",
"not",
"dir",
"or",
"dir",
"==",
"os",
".",
"curdir",
")",
"and",
"skip_curdir",
":",
"if",
"quiet",
"<",
"2",
":",
"print",
"(",
"'Skipping current directory'",
")",
"else",
":",
"success",
"=",
"success",
"and",
"compile_dir",
"(",
"dir",
",",
"maxlevels",
",",
"None",
",",
"force",
",",
"quiet",
"=",
"quiet",
",",
"legacy",
"=",
"legacy",
",",
"optimize",
"=",
"optimize",
",",
"invalidation_mode",
"=",
"invalidation_mode",
",",
")",
"return",
"success"
] | https://github.com/aws/lumberyard/blob/f85344403c1c2e77ec8c75deb2c116e97b713217/dev/Tools/Python/3.7.10/linux_x64/lib/python3.7/compileall.py#L192-L223 |
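compile_path() above is part of the standard compileall module, so it can be called directly; the flags below simply mirror the defaults described in its docstring.

import compileall

# Byte-compile every module found on sys.path, skipping the current
# directory and printing only errors; True means everything compiled.
ok = compileall.compile_path(skip_curdir=True, maxlevels=0, quiet=1)
print('all modules compiled' if ok else 'some modules failed to compile')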
|
zju3dv/clean-pvnet | 5870c509e3cc205e1bb28910a7b1a9a3c8add9a8 | lib/utils/vsd/misc.py | python | paste_im | (src, trg, pos) | Pastes src to trg with the top left corner at pos. | Pastes src to trg with the top left corner at pos. | [
"Pastes",
"src",
"to",
"trg",
"with",
"the",
"top",
"left",
"corner",
"at",
"pos",
"."
] | def paste_im(src, trg, pos):
"""
Pastes src to trg with the top left corner at pos.
"""
assert(src.ndim == trg.ndim)
# Size of the region to be pasted
w = min(src.shape[1], trg.shape[1] - pos[0])
h = min(src.shape[0], trg.shape[0] - pos[1])
if src.ndim == 3:
trg[pos[1]:(pos[1] + h), pos[0]:(pos[0] + w), :] = src[:h, :w, :]
else:
trg[pos[1]:(pos[1] + h), pos[0]:(pos[0] + w)] = src[:h, :w] | [
"def",
"paste_im",
"(",
"src",
",",
"trg",
",",
"pos",
")",
":",
"assert",
"(",
"src",
".",
"ndim",
"==",
"trg",
".",
"ndim",
")",
"# Size of the region to be pasted",
"w",
"=",
"min",
"(",
"src",
".",
"shape",
"[",
"1",
"]",
",",
"trg",
".",
"shape",
"[",
"1",
"]",
"-",
"pos",
"[",
"0",
"]",
")",
"h",
"=",
"min",
"(",
"src",
".",
"shape",
"[",
"0",
"]",
",",
"trg",
".",
"shape",
"[",
"0",
"]",
"-",
"pos",
"[",
"1",
"]",
")",
"if",
"src",
".",
"ndim",
"==",
"3",
":",
"trg",
"[",
"pos",
"[",
"1",
"]",
":",
"(",
"pos",
"[",
"1",
"]",
"+",
"h",
")",
",",
"pos",
"[",
"0",
"]",
":",
"(",
"pos",
"[",
"0",
"]",
"+",
"w",
")",
",",
":",
"]",
"=",
"src",
"[",
":",
"h",
",",
":",
"w",
",",
":",
"]",
"else",
":",
"trg",
"[",
"pos",
"[",
"1",
"]",
":",
"(",
"pos",
"[",
"1",
"]",
"+",
"h",
")",
",",
"pos",
"[",
"0",
"]",
":",
"(",
"pos",
"[",
"0",
"]",
"+",
"w",
")",
"]",
"=",
"src",
"[",
":",
"h",
",",
":",
"w",
"]"
] | https://github.com/zju3dv/clean-pvnet/blob/5870c509e3cc205e1bb28910a7b1a9a3c8add9a8/lib/utils/vsd/misc.py#L103-L116 |
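paste_im() above copies one array into another at a given top-left corner, clipping whatever does not fit inside the target. A small NumPy sketch of a call; the import path matches the module path listed in this entry.

import numpy as np
from lib.utils.vsd.misc import paste_im  # module path as listed above

trg = np.zeros((5, 5), dtype=np.uint8)      # 5x5 target image
src = np.full((3, 3), 255, dtype=np.uint8)  # 3x3 patch to paste
paste_im(src, trg, pos=(3, 3))              # top-left corner at x=3, y=3

# Only the 2x2 overlap fits; the rest of src is clipped at the border.
print(trg)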
||
PaddlePaddle/Paddle | 1252f4bb3e574df80aa6d18c7ddae1b3a90bd81c | python/paddle/utils/cpp_extension/extension_utils.py | python | custom_write_stub | (resource, pyfile) | Customized write_stub function to allow us to inject generated python
api codes into egg python file. | Customized write_stub function to allow us to inject generated python
api codes into egg python file. | [
"Customized",
"write_stub",
"function",
"to",
"allow",
"us",
"to",
"inject",
"generated",
"python",
"api",
"codes",
"into",
"egg",
"python",
"file",
"."
] | def custom_write_stub(resource, pyfile):
"""
Customized write_stub function to allow us to inject generated python
api codes into egg python file.
"""
_stub_template = textwrap.dedent("""
import os
import sys
import types
import paddle
def inject_ext_module(module_name, api_names):
if module_name in sys.modules:
return sys.modules[module_name]
new_module = types.ModuleType(module_name)
for api_name in api_names:
setattr(new_module, api_name, eval(api_name))
return new_module
def __bootstrap__():
cur_dir = os.path.dirname(os.path.abspath(__file__))
so_path = os.path.join(cur_dir, "{resource}")
assert os.path.exists(so_path)
# load custom op shared library with abs path
new_custom_ops = paddle.utils.cpp_extension.load_op_meta_info_and_register_op(so_path)
m = inject_ext_module(__name__, new_custom_ops)
__bootstrap__()
{custom_api}
""").lstrip()
# Parse registerring op information
_, op_info = CustomOpInfo.instance().last()
so_path = op_info.so_path
new_custom_ops = load_op_meta_info_and_register_op(so_path)
assert len(
new_custom_ops
) > 0, "Required at least one custom operators, but received len(custom_op) = %d" % len(
new_custom_ops)
# NOTE: To avoid importing .so file instead of python file because they have same name,
# we rename .so shared library to another name, see EasyInstallCommand.
filename, ext = os.path.splitext(resource)
resource = filename + "_pd_" + ext
api_content = []
for op_name in new_custom_ops:
api_content.append(_custom_api_content(op_name))
with open(pyfile, 'w') as f:
f.write(
_stub_template.format(
resource=resource, custom_api='\n\n'.join(api_content))) | [
"def",
"custom_write_stub",
"(",
"resource",
",",
"pyfile",
")",
":",
"_stub_template",
"=",
"textwrap",
".",
"dedent",
"(",
"\"\"\"\n import os\n import sys\n import types\n import paddle\n \n def inject_ext_module(module_name, api_names):\n if module_name in sys.modules:\n return sys.modules[module_name]\n\n new_module = types.ModuleType(module_name)\n for api_name in api_names:\n setattr(new_module, api_name, eval(api_name))\n\n return new_module\n\n def __bootstrap__():\n cur_dir = os.path.dirname(os.path.abspath(__file__))\n so_path = os.path.join(cur_dir, \"{resource}\")\n\n assert os.path.exists(so_path)\n\n # load custom op shared library with abs path\n new_custom_ops = paddle.utils.cpp_extension.load_op_meta_info_and_register_op(so_path)\n m = inject_ext_module(__name__, new_custom_ops)\n \n __bootstrap__()\n\n {custom_api}\n \"\"\"",
")",
".",
"lstrip",
"(",
")",
"# Parse registerring op information",
"_",
",",
"op_info",
"=",
"CustomOpInfo",
".",
"instance",
"(",
")",
".",
"last",
"(",
")",
"so_path",
"=",
"op_info",
".",
"so_path",
"new_custom_ops",
"=",
"load_op_meta_info_and_register_op",
"(",
"so_path",
")",
"assert",
"len",
"(",
"new_custom_ops",
")",
">",
"0",
",",
"\"Required at least one custom operators, but received len(custom_op) = %d\"",
"%",
"len",
"(",
"new_custom_ops",
")",
"# NOTE: To avoid importing .so file instead of python file because they have same name,",
"# we rename .so shared library to another name, see EasyInstallCommand.",
"filename",
",",
"ext",
"=",
"os",
".",
"path",
".",
"splitext",
"(",
"resource",
")",
"resource",
"=",
"filename",
"+",
"\"_pd_\"",
"+",
"ext",
"api_content",
"=",
"[",
"]",
"for",
"op_name",
"in",
"new_custom_ops",
":",
"api_content",
".",
"append",
"(",
"_custom_api_content",
"(",
"op_name",
")",
")",
"with",
"open",
"(",
"pyfile",
",",
"'w'",
")",
"as",
"f",
":",
"f",
".",
"write",
"(",
"_stub_template",
".",
"format",
"(",
"resource",
"=",
"resource",
",",
"custom_api",
"=",
"'\\n\\n'",
".",
"join",
"(",
"api_content",
")",
")",
")"
] | https://github.com/PaddlePaddle/Paddle/blob/1252f4bb3e574df80aa6d18c7ddae1b3a90bd81c/python/paddle/utils/cpp_extension/extension_utils.py#L138-L196 |
||
benoitsteiner/tensorflow-opencl | cb7cb40a57fde5cfd4731bc551e82a1e2fef43a5 | tensorflow/python/ops/resource_variable_ops.py | python | ResourceVariable._as_graph_element | (self) | return self._graph_element | Conversion function for Graph.as_graph_element(). | Conversion function for Graph.as_graph_element(). | [
"Conversion",
"function",
"for",
"Graph",
".",
"as_graph_element",
"()",
"."
] | def _as_graph_element(self):
"""Conversion function for Graph.as_graph_element()."""
return self._graph_element | [
"def",
"_as_graph_element",
"(",
"self",
")",
":",
"return",
"self",
".",
"_graph_element"
] | https://github.com/benoitsteiner/tensorflow-opencl/blob/cb7cb40a57fde5cfd4731bc551e82a1e2fef43a5/tensorflow/python/ops/resource_variable_ops.py#L533-L535 |
|
ApolloAuto/apollo-platform | 86d9dc6743b496ead18d597748ebabd34a513289 | ros/third_party/lib_x86_64/python2.7/dist-packages/numpy/oldnumeric/ma.py | python | MaskedArray.__idiv__ | (self, other) | return self | Divide self by other in place. | Divide self by other in place. | [
"Divide",
"self",
"by",
"other",
"in",
"place",
"."
] | def __idiv__(self, other):
"Divide self by other in place."
t = self._data.dtype.char
f = filled(other, 0)
t1 = f.dtype.char
if t == t1:
pass
elif t in typecodes['Integer']:
if t1 in typecodes['Integer']:
f = f.astype(t)
else:
raise TypeError('Incorrect type for in-place operation.')
elif t in typecodes['Float']:
if t1 in typecodes['Integer']:
f = f.astype(t)
elif t1 in typecodes['Float']:
f = f.astype(t)
else:
raise TypeError('Incorrect type for in-place operation.')
elif t in typecodes['Complex']:
if t1 in typecodes['Integer']:
f = f.astype(t)
elif t1 in typecodes['Float']:
f = f.astype(t)
elif t1 in typecodes['Complex']:
f = f.astype(t)
else:
raise TypeError('Incorrect type for in-place operation.')
else:
raise TypeError('Incorrect type for in-place operation.')
mo = getmask(other)
result = divide(self, masked_array(f, mask=mo))
self._data = result.data
dm = result.raw_mask()
if dm is not self._mask:
self._mask = dm
self._shared_mask = 1
return self | [
"def",
"__idiv__",
"(",
"self",
",",
"other",
")",
":",
"t",
"=",
"self",
".",
"_data",
".",
"dtype",
".",
"char",
"f",
"=",
"filled",
"(",
"other",
",",
"0",
")",
"t1",
"=",
"f",
".",
"dtype",
".",
"char",
"if",
"t",
"==",
"t1",
":",
"pass",
"elif",
"t",
"in",
"typecodes",
"[",
"'Integer'",
"]",
":",
"if",
"t1",
"in",
"typecodes",
"[",
"'Integer'",
"]",
":",
"f",
"=",
"f",
".",
"astype",
"(",
"t",
")",
"else",
":",
"raise",
"TypeError",
"(",
"'Incorrect type for in-place operation.'",
")",
"elif",
"t",
"in",
"typecodes",
"[",
"'Float'",
"]",
":",
"if",
"t1",
"in",
"typecodes",
"[",
"'Integer'",
"]",
":",
"f",
"=",
"f",
".",
"astype",
"(",
"t",
")",
"elif",
"t1",
"in",
"typecodes",
"[",
"'Float'",
"]",
":",
"f",
"=",
"f",
".",
"astype",
"(",
"t",
")",
"else",
":",
"raise",
"TypeError",
"(",
"'Incorrect type for in-place operation.'",
")",
"elif",
"t",
"in",
"typecodes",
"[",
"'Complex'",
"]",
":",
"if",
"t1",
"in",
"typecodes",
"[",
"'Integer'",
"]",
":",
"f",
"=",
"f",
".",
"astype",
"(",
"t",
")",
"elif",
"t1",
"in",
"typecodes",
"[",
"'Float'",
"]",
":",
"f",
"=",
"f",
".",
"astype",
"(",
"t",
")",
"elif",
"t1",
"in",
"typecodes",
"[",
"'Complex'",
"]",
":",
"f",
"=",
"f",
".",
"astype",
"(",
"t",
")",
"else",
":",
"raise",
"TypeError",
"(",
"'Incorrect type for in-place operation.'",
")",
"else",
":",
"raise",
"TypeError",
"(",
"'Incorrect type for in-place operation.'",
")",
"mo",
"=",
"getmask",
"(",
"other",
")",
"result",
"=",
"divide",
"(",
"self",
",",
"masked_array",
"(",
"f",
",",
"mask",
"=",
"mo",
")",
")",
"self",
".",
"_data",
"=",
"result",
".",
"data",
"dm",
"=",
"result",
".",
"raw_mask",
"(",
")",
"if",
"dm",
"is",
"not",
"self",
".",
"_mask",
":",
"self",
".",
"_mask",
"=",
"dm",
"self",
".",
"_shared_mask",
"=",
"1",
"return",
"self"
] | https://github.com/ApolloAuto/apollo-platform/blob/86d9dc6743b496ead18d597748ebabd34a513289/ros/third_party/lib_x86_64/python2.7/dist-packages/numpy/oldnumeric/ma.py#L1120-L1157 |
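__idiv__ above is in-place division for the long-removed numpy.oldnumeric.ma masked arrays, with the operand masks combined into the result. The modern numpy.ma module behaves the same way; the sketch below uses it as a stand-in for the removed API rather than the code in this entry.

import numpy as np

a = np.ma.array([2.0, 4.0, 6.0], mask=[False, True, False])
b = np.ma.array([2.0, 2.0, 0.0])

a /= b            # in-place division; masks and the divide-by-zero propagate
print(a)          # [1.0 -- --]
print(a.mask)     # [False  True  True]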
|
aws/lumberyard | f85344403c1c2e77ec8c75deb2c116e97b713217 | dev/Tools/Python/3.7.10/mac/Python.framework/Versions/3.7/lib/python3.7/mailbox.py | python | _mboxMMDF.get_file | (self, key, from_=False) | return _PartialFile(self._file, self._file.tell(), stop) | Return a file-like representation or raise a KeyError. | Return a file-like representation or raise a KeyError. | [
"Return",
"a",
"file",
"-",
"like",
"representation",
"or",
"raise",
"a",
"KeyError",
"."
] | def get_file(self, key, from_=False):
"""Return a file-like representation or raise a KeyError."""
start, stop = self._lookup(key)
self._file.seek(start)
if not from_:
self._file.readline()
return _PartialFile(self._file, self._file.tell(), stop) | [
"def",
"get_file",
"(",
"self",
",",
"key",
",",
"from_",
"=",
"False",
")",
":",
"start",
",",
"stop",
"=",
"self",
".",
"_lookup",
"(",
"key",
")",
"self",
".",
"_file",
".",
"seek",
"(",
"start",
")",
"if",
"not",
"from_",
":",
"self",
".",
"_file",
".",
"readline",
"(",
")",
"return",
"_PartialFile",
"(",
"self",
".",
"_file",
",",
"self",
".",
"_file",
".",
"tell",
"(",
")",
",",
"stop",
")"
] | https://github.com/aws/lumberyard/blob/f85344403c1c2e77ec8c75deb2c116e97b713217/dev/Tools/Python/3.7.10/mac/Python.framework/Versions/3.7/lib/python3.7/mailbox.py#L798-L804 |
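get_file() above is the mbox/MMDF implementation behind the standard mailbox module. A short sketch of using it through the public mailbox.mbox class; the mailbox path is a placeholder.

import mailbox

mbox = mailbox.mbox('/var/mail/example.mbox')  # placeholder path
for key in mbox.iterkeys():
    fp = mbox.get_file(key)  # file-like view, positioned after the "From " line
    try:
        print(key, len(fp.read()), 'bytes')
    finally:
        fp.close()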
|
aws/lumberyard | f85344403c1c2e77ec8c75deb2c116e97b713217 | dev/Gems/CloudGemFramework/v1/AWS/resource-manager-code/lib/pkg_resources/_vendor/pyparsing.py | python | removeQuotes | (s,l,t) | return t[0][1:-1] | Helper parse action for removing quotation marks from parsed quoted strings.
Example::
# by default, quotation marks are included in parsed results
quotedString.parseString("'Now is the Winter of our Discontent'") # -> ["'Now is the Winter of our Discontent'"]
# use removeQuotes to strip quotation marks from parsed results
quotedString.setParseAction(removeQuotes)
quotedString.parseString("'Now is the Winter of our Discontent'") # -> ["Now is the Winter of our Discontent"] | Helper parse action for removing quotation marks from parsed quoted strings. | [
"Helper",
"parse",
"action",
"for",
"removing",
"quotation",
"marks",
"from",
"parsed",
"quoted",
"strings",
"."
] | def removeQuotes(s,l,t):
"""
Helper parse action for removing quotation marks from parsed quoted strings.
Example::
# by default, quotation marks are included in parsed results
quotedString.parseString("'Now is the Winter of our Discontent'") # -> ["'Now is the Winter of our Discontent'"]
# use removeQuotes to strip quotation marks from parsed results
quotedString.setParseAction(removeQuotes)
quotedString.parseString("'Now is the Winter of our Discontent'") # -> ["Now is the Winter of our Discontent"]
"""
return t[0][1:-1] | [
"def",
"removeQuotes",
"(",
"s",
",",
"l",
",",
"t",
")",
":",
"return",
"t",
"[",
"0",
"]",
"[",
"1",
":",
"-",
"1",
"]"
] | https://github.com/aws/lumberyard/blob/f85344403c1c2e77ec8c75deb2c116e97b713217/dev/Gems/CloudGemFramework/v1/AWS/resource-manager-code/lib/pkg_resources/_vendor/pyparsing.py#L4811-L4823 |
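removeQuotes is a ready-made parse action, and the docstring above already sketches its use; the runnable equivalent is only a few lines (pyparsing 2.x names, matching the vendored copy in this entry).

from pyparsing import quotedString, removeQuotes

# Without the parse action, the surrounding quotes are kept.
print(quotedString.parseString("'Now is the Winter of our Discontent'"))

quotedString.setParseAction(removeQuotes)
print(quotedString.parseString("'Now is the Winter of our Discontent'"))
# -> ['Now is the Winter of our Discontent']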
|
NeoGeographyToolkit/StereoPipeline | eedf54a919fb5cce1ab0e280bb0df4050763aa11 | src/asp/IceBridge/icebridge_common.py | python | findMatchingLidarFileFromList | (imageFile, lidarFiles) | return bestLidarFile | Find the best matching lidar file from a list. | Find the best matching lidar file from a list. | [
"Find",
"the",
"best",
"matching",
"lidar",
"file",
"from",
"a",
"list",
"."
] | def findMatchingLidarFileFromList(imageFile, lidarFiles):
'''Find the best matching lidar file from a list.'''
vals = parseTimeStamps(imageFile)
if len(vals) < 2:
raise Exception('Failed to parse the date and time from: ' + imageFile)
useTimeFix = False
returnMinAndSecOnly = False
imageDateTime = parseDateTimeStrings(vals[0], vals[1], useTimeFix, returnMinAndSecOnly)
#print 'INPUT = ' + str(imageDateTime)
# Search for the matching file in the lidar folder.
# - We are looking for the closest lidar time that starts BEFORE the image time.
# - It is possible for an image to span lidar files, we will address that if we need to!
bestTimeDelta = datetime.timedelta.max
bestLidarFile = 'NA'
zeroDelta = datetime.timedelta()
# First see if we need correction for sometimes seconds going from 1 to 60.
minMinSec = 60
maxMinSec = 0
for lidarPath in lidarFiles:
vals = parseTimeStamps(lidarPath)
if len(vals) < 2:
continue # ignore bad files
useTimeFix = False
returnMinAndSecOnly = True
(minute, second) = parseDateTimeStrings(vals[0], vals[1], useTimeFix, returnMinAndSecOnly)
if second < minMinSec: minMinSec = second
if second > maxMinSec: maxMinSec = second
if minute < minMinSec: minMinSec = minute
if minute > maxMinSec: maxMinSec = minute
if minMinSec <= 0 and maxMinSec >= 60:
raise Exception("The minute/second range goes from " + str(minMinSec) +
" to " + str(maxMinSec))
useTimeFix = False
if maxMinSec >= 60:
useTimeFix = True
#print 'Using lidar time fix!'
for lidarPath in lidarFiles:
vals = parseTimeStamps(lidarPath)
if len(vals) < 2:
continue # ignore bad files
try:
returnMinAndSecOnly = False
lidarDateTime = parseDateTimeStrings(vals[0], vals[1], useTimeFix, returnMinAndSecOnly)
#lidarDateTime = lidarDateTime + datetime.timedelta(hours=2, minutes=3, seconds=42) # Manual hack for flights with bad lidar times!
except Exception as e:
raise Exception('Failed to parse datetime for lidar file: ' + lidarPath + '\n' +
'Error is: ' + str(e))
#print 'THIS = ' + str(lidarDateTime)
# Compare time to the image time
timeDelta = abs(imageDateTime - lidarDateTime)
#print 'DELTA = ' + str(timeDelta)
# Select the closest lidar time
# - Since we are using the paired files, the file time is in the middle
# of the (large) file so being close to the middle should make sure the DEM
# is fully covered by LIDAR data.
if timeDelta < bestTimeDelta:
bestLidarFile = lidarPath
bestTimeDelta = timeDelta
# Normal spacing seems to be 6.5 minutes but this must vary by flight.
MAX_DELTA = datetime.timedelta(minutes=15)
if (bestLidarFile == 'NA') or (bestTimeDelta > MAX_DELTA):
errorMessage = 'Failed to find matching lidar file for image ' + imageFile
if bestLidarFile:
errorMessage += '\n--> Nearest lidar file was '+ bestLidarFile +' with delta ' + str(bestTimeDelta)
raise Exception(errorMessage)
#print bestLidarFile
#print bestTimeDelta
return bestLidarFile | [
"def",
"findMatchingLidarFileFromList",
"(",
"imageFile",
",",
"lidarFiles",
")",
":",
"vals",
"=",
"parseTimeStamps",
"(",
"imageFile",
")",
"if",
"len",
"(",
"vals",
")",
"<",
"2",
":",
"raise",
"Exception",
"(",
"'Failed to parse the date and time from: '",
"+",
"imageFile",
")",
"useTimeFix",
"=",
"False",
"returnMinAndSecOnly",
"=",
"False",
"imageDateTime",
"=",
"parseDateTimeStrings",
"(",
"vals",
"[",
"0",
"]",
",",
"vals",
"[",
"1",
"]",
",",
"useTimeFix",
",",
"returnMinAndSecOnly",
")",
"#print 'INPUT = ' + str(imageDateTime)",
"# Search for the matching file in the lidar folder.",
"# - We are looking for the closest lidar time that starts BEFORE the image time.",
"# - It is possible for an image to span lidar files, we will address that if we need to!",
"bestTimeDelta",
"=",
"datetime",
".",
"timedelta",
".",
"max",
"bestLidarFile",
"=",
"'NA'",
"zeroDelta",
"=",
"datetime",
".",
"timedelta",
"(",
")",
"# First see if we need correction for sometimes seconds going from 1 to 60.",
"minMinSec",
"=",
"60",
"maxMinSec",
"=",
"0",
"for",
"lidarPath",
"in",
"lidarFiles",
":",
"vals",
"=",
"parseTimeStamps",
"(",
"lidarPath",
")",
"if",
"len",
"(",
"vals",
")",
"<",
"2",
":",
"continue",
"# ignore bad files",
"useTimeFix",
"=",
"False",
"returnMinAndSecOnly",
"=",
"True",
"(",
"minute",
",",
"second",
")",
"=",
"parseDateTimeStrings",
"(",
"vals",
"[",
"0",
"]",
",",
"vals",
"[",
"1",
"]",
",",
"useTimeFix",
",",
"returnMinAndSecOnly",
")",
"if",
"second",
"<",
"minMinSec",
":",
"minMinSec",
"=",
"second",
"if",
"second",
">",
"maxMinSec",
":",
"maxMinSec",
"=",
"second",
"if",
"minute",
"<",
"minMinSec",
":",
"minMinSec",
"=",
"minute",
"if",
"minute",
">",
"maxMinSec",
":",
"maxMinSec",
"=",
"minute",
"if",
"minMinSec",
"<=",
"0",
"and",
"maxMinSec",
">=",
"60",
":",
"raise",
"Exception",
"(",
"\"The minute/second range goes from \"",
"+",
"str",
"(",
"minMinSec",
")",
"+",
"\" to \"",
"+",
"str",
"(",
"maxMinSec",
")",
")",
"useTimeFix",
"=",
"False",
"if",
"maxMinSec",
">=",
"60",
":",
"useTimeFix",
"=",
"True",
"#print 'Using lidar time fix!'",
"for",
"lidarPath",
"in",
"lidarFiles",
":",
"vals",
"=",
"parseTimeStamps",
"(",
"lidarPath",
")",
"if",
"len",
"(",
"vals",
")",
"<",
"2",
":",
"continue",
"# ignore bad files",
"try",
":",
"returnMinAndSecOnly",
"=",
"False",
"lidarDateTime",
"=",
"parseDateTimeStrings",
"(",
"vals",
"[",
"0",
"]",
",",
"vals",
"[",
"1",
"]",
",",
"useTimeFix",
",",
"returnMinAndSecOnly",
")",
"#lidarDateTime = lidarDateTime + datetime.timedelta(hours=2, minutes=3, seconds=42) # Manual hack for flights with bad lidar times!",
"except",
"Exception",
"as",
"e",
":",
"raise",
"Exception",
"(",
"'Failed to parse datetime for lidar file: '",
"+",
"lidarPath",
"+",
"'\\n'",
"+",
"'Error is: '",
"+",
"str",
"(",
"e",
")",
")",
"#print 'THIS = ' + str(lidarDateTime)",
"# Compare time to the image time",
"timeDelta",
"=",
"abs",
"(",
"imageDateTime",
"-",
"lidarDateTime",
")",
"#print 'DELTA = ' + str(timeDelta)",
"# Select the closest lidar time",
"# - Since we are using the paired files, the file time is in the middle ",
"# of the (large) file so being close to the middle should make sure the DEM",
"# is fully covered by LIDAR data.",
"if",
"timeDelta",
"<",
"bestTimeDelta",
":",
"bestLidarFile",
"=",
"lidarPath",
"bestTimeDelta",
"=",
"timeDelta",
"# Normal spacing seems to be 6.5 minutes but this must vary by flight.",
"MAX_DELTA",
"=",
"datetime",
".",
"timedelta",
"(",
"minutes",
"=",
"15",
")",
"if",
"(",
"bestLidarFile",
"==",
"'NA'",
")",
"or",
"(",
"bestTimeDelta",
">",
"MAX_DELTA",
")",
":",
"errorMessage",
"=",
"'Failed to find matching lidar file for image '",
"+",
"imageFile",
"if",
"bestLidarFile",
":",
"errorMessage",
"+=",
"'\\n--> Nearest lidar file was '",
"+",
"bestLidarFile",
"+",
"' with delta '",
"+",
"str",
"(",
"bestTimeDelta",
")",
"raise",
"Exception",
"(",
"errorMessage",
")",
"#print bestLidarFile",
"#print bestTimeDelta",
"return",
"bestLidarFile"
] | https://github.com/NeoGeographyToolkit/StereoPipeline/blob/eedf54a919fb5cce1ab0e280bb0df4050763aa11/src/asp/IceBridge/icebridge_common.py#L1225-L1308 |
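findMatchingLidarFileFromList() above parses timestamps out of file names, picks the lidar file closest in time to the image, and raises if the nearest one is more than 15 minutes away. A generic, self-contained sketch of that nearest-timestamp selection (the timestamps here are already datetime objects; the real code extracts them from file names and applies the minute/second fix first):

import datetime

def nearest_within(target, candidates, max_delta=datetime.timedelta(minutes=15)):
    # candidates is a list of (name, datetime) pairs.
    best_name, best_delta = None, datetime.timedelta.max
    for name, stamp in candidates:
        delta = abs(target - stamp)
        if delta < best_delta:
            best_name, best_delta = name, delta
    if best_name is None or best_delta > max_delta:
        raise ValueError('no candidate within %s of %s' % (max_delta, target))
    return best_name, best_delta

image_time = datetime.datetime(2017, 7, 19, 15, 30, 0)
lidar = [('a.csv', datetime.datetime(2017, 7, 19, 15, 24, 0)),
         ('b.csv', datetime.datetime(2017, 7, 19, 15, 37, 0)),
         ('c.csv', datetime.datetime(2017, 7, 19, 16, 10, 0))]
name, delta = nearest_within(image_time, lidar)
print(name, delta)   # a.csv 0:06:00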