repo | path | url | code | code_tokens | docstring | docstring_tokens | language | partition
---|---|---|---|---|---|---|---|---|
jazzband/django-queued-storage | queued_storage/backends.py | https://github.com/jazzband/django-queued-storage/blob/f8225d88a01ef5ca8001aeb3f7f80818a022a12d/queued_storage/backends.py#L162-L172 | def open(self, name, mode='rb'):
"""
Retrieves the specified file from storage.
:param name: file name
:type name: str
:param mode: mode to open the file with
:type mode: str
:rtype: :class:`~django:django.core.files.File`
"""
return self.get_storage(name).open(name, mode) | [
"def",
"open",
"(",
"self",
",",
"name",
",",
"mode",
"=",
"'rb'",
")",
":",
"return",
"self",
".",
"get_storage",
"(",
"name",
")",
".",
"open",
"(",
"name",
",",
"mode",
")"
] | Retrieves the specified file from storage.
:param name: file name
:type name: str
:param mode: mode to open the file with
:type mode: str
:rtype: :class:`~django:django.core.files.File` | [
"Retrieves",
"the",
"specified",
"file",
"from",
"storage",
"."
] | python | train |
michaelpb/omnic | omnic/web/viewer.py | https://github.com/michaelpb/omnic/blob/1111cfd73c9dc1955afe42d9cf2a468c46f83cd6/omnic/web/viewer.py#L20-L28 | def get_assets(self):
'''
Return a flat list of absolute paths to all assets required by this
viewer
'''
return sum([
[self.prefix_asset(viewer, relpath) for relpath in viewer.assets]
for viewer in self.viewers
], []) | [
"def",
"get_assets",
"(",
"self",
")",
":",
"return",
"sum",
"(",
"[",
"[",
"self",
".",
"prefix_asset",
"(",
"viewer",
",",
"relpath",
")",
"for",
"relpath",
"in",
"viewer",
".",
"assets",
"]",
"for",
"viewer",
"in",
"self",
".",
"viewers",
"]",
",",
"[",
"]",
")"
] | Return a flat list of absolute paths to all assets required by this
viewer | [
"Return",
"a",
"flat",
"list",
"of",
"absolute",
"paths",
"to",
"all",
"assets",
"required",
"by",
"this",
"viewer"
] | python | train |
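The `get_assets` record above leans on the `sum(list_of_lists, [])` idiom to flatten one level of nesting. A standalone sketch, with hypothetical asset paths standing in for real viewer assets:

```python
# sum() starts from [] and concatenates each inner list in turn.
nested = [["/js/a.js", "/js/b.js"], ["/css/site.css"]]
flat = sum(nested, [])
print(flat)  # ['/js/a.js', '/js/b.js', '/css/site.css']
```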
apple/turicreate | src/unity/python/turicreate/data_structures/sarray.py | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/unity/python/turicreate/data_structures/sarray.py#L3083-L3217 | def split_datetime(self, column_name_prefix = "X", limit=None, timezone=False):
"""
Splits an SArray of datetime type into multiple columns, returning a
new SFrame that contains expanded columns. An SArray of datetime will be
split by default into an SFrame of 6 columns, one for each
year/month/day/hour/minute/second element.
**Column Naming**
When splitting a SArray of datetime type, new columns are named:
prefix.year, prefix.month, etc. The prefix is set by the parameter
"column_name_prefix" and defaults to 'X'. If column_name_prefix is
None or empty, then no prefix is used.
**Timezone Column**
If timezone parameter is True, then timezone information is represented
as one additional float column showing the offset from
GMT (0.0), i.e. from UTC.
Parameters
----------
column_name_prefix: str, optional
If provided, expanded column names would start with the given prefix.
Defaults to "X".
limit: list[str], optional
Limits the set of datetime elements to expand.
Possible values are 'year','month','day','hour','minute','second',
'weekday', 'isoweekday', 'tmweekday', and 'us'.
If not provided, only ['year','month','day','hour','minute','second']
are expanded.
- 'year': The year number
- 'month': A value between 1 and 12 where 1 is January.
- 'day': Day of the months. Begins at 1.
- 'hour': Hours since midnight.
- 'minute': Minutes after the hour.
- 'second': Seconds after the minute.
- 'us': Microseconds after the second. Between 0 and 999,999.
- 'weekday': A value between 0 and 6 where 0 is Monday.
- 'isoweekday': A value between 1 and 7 where 1 is Monday.
- 'tmweekday': A value between 0 and 7 where 0 is Sunday
timezone: bool, optional
A boolean parameter that determines whether to show timezone column or not.
Defaults to False.
Returns
-------
out : SFrame
A new SFrame that contains all expanded columns
Examples
--------
To expand only day and year elements of a datetime SArray
>>> sa = SArray(
[datetime(2011, 1, 21, 7, 7, 21, tzinfo=GMT(0)),
datetime(2010, 2, 5, 7, 8, 21, tzinfo=GMT(4.5))])
>>> sa.split_datetime(column_name_prefix=None,limit=['day','year'])
Columns:
day int
year int
Rows: 2
Data:
+-----+------+
| day | year |
+-----+------+
| 21  | 2011 |
| 5   | 2010 |
+-----+------+
[2 rows x 2 columns]
To expand only year and timezone elements of a datetime SArray
with the timezone column represented as a float. Columns are named with prefix:
'Y.column_name'.
>>> sa.split_datetime(column_name_prefix="Y",limit=['year'],timezone=True)
Columns:
Y.year int
Y.timezone float
Rows: 2
Data:
+--------+------------+
| Y.year | Y.timezone |
+--------+------------+
| 2011   | 0.0        |
| 2010   | 4.5        |
+--------+------------+
[2 rows x 2 columns]
"""
from .sframe import SFrame as _SFrame
if self.dtype != datetime.datetime:
raise TypeError("Only column of datetime type is supported.")
if column_name_prefix is None:
column_name_prefix = ""
if six.PY2 and type(column_name_prefix) == unicode:
column_name_prefix = column_name_prefix.encode('utf-8')
if type(column_name_prefix) != str:
raise TypeError("'column_name_prefix' must be a string")
# convert limit to column_keys
if limit is not None:
if not _is_non_string_iterable(limit):
raise TypeError("'limit' must be a list")
name_types = set([type(i) for i in limit])
if (len(name_types) != 1):
raise TypeError("'limit' contains values that are different types")
if (name_types.pop() != str):
raise TypeError("'limit' must contain string values.")
if len(set(limit)) != len(limit):
raise ValueError("'limit' contains duplicate values")
column_types = []
if(limit is None):
limit = ['year','month','day','hour','minute','second']
column_types = [int] * len(limit)
if(timezone == True):
limit += ['timezone']
column_types += [float]
with cython_context():
return _SFrame(_proxy=self.__proxy__.expand(column_name_prefix, limit, column_types)) | [
"def",
"split_datetime",
"(",
"self",
",",
"column_name_prefix",
"=",
"\"X\"",
",",
"limit",
"=",
"None",
",",
"timezone",
"=",
"False",
")",
":",
"from",
".",
"sframe",
"import",
"SFrame",
"as",
"_SFrame",
"if",
"self",
".",
"dtype",
"!=",
"datetime",
".",
"datetime",
":",
"raise",
"TypeError",
"(",
"\"Only column of datetime type is supported.\"",
")",
"if",
"column_name_prefix",
"is",
"None",
":",
"column_name_prefix",
"=",
"\"\"",
"if",
"six",
".",
"PY2",
"and",
"type",
"(",
"column_name_prefix",
")",
"==",
"unicode",
":",
"column_name_prefix",
"=",
"column_name_prefix",
".",
"encode",
"(",
"'utf-8'",
")",
"if",
"type",
"(",
"column_name_prefix",
")",
"!=",
"str",
":",
"raise",
"TypeError",
"(",
"\"'column_name_prefix' must be a string\"",
")",
"# convert limit to column_keys",
"if",
"limit",
"is",
"not",
"None",
":",
"if",
"not",
"_is_non_string_iterable",
"(",
"limit",
")",
":",
"raise",
"TypeError",
"(",
"\"'limit' must be a list\"",
")",
"name_types",
"=",
"set",
"(",
"[",
"type",
"(",
"i",
")",
"for",
"i",
"in",
"limit",
"]",
")",
"if",
"(",
"len",
"(",
"name_types",
")",
"!=",
"1",
")",
":",
"raise",
"TypeError",
"(",
"\"'limit' contains values that are different types\"",
")",
"if",
"(",
"name_types",
".",
"pop",
"(",
")",
"!=",
"str",
")",
":",
"raise",
"TypeError",
"(",
"\"'limit' must contain string values.\"",
")",
"if",
"len",
"(",
"set",
"(",
"limit",
")",
")",
"!=",
"len",
"(",
"limit",
")",
":",
"raise",
"ValueError",
"(",
"\"'limit' contains duplicate values\"",
")",
"column_types",
"=",
"[",
"]",
"if",
"(",
"limit",
"is",
"None",
")",
":",
"limit",
"=",
"[",
"'year'",
",",
"'month'",
",",
"'day'",
",",
"'hour'",
",",
"'minute'",
",",
"'second'",
"]",
"column_types",
"=",
"[",
"int",
"]",
"*",
"len",
"(",
"limit",
")",
"if",
"(",
"timezone",
"==",
"True",
")",
":",
"limit",
"+=",
"[",
"'timezone'",
"]",
"column_types",
"+=",
"[",
"float",
"]",
"with",
"cython_context",
"(",
")",
":",
"return",
"_SFrame",
"(",
"_proxy",
"=",
"self",
".",
"__proxy__",
".",
"expand",
"(",
"column_name_prefix",
",",
"limit",
",",
"column_types",
")",
")"
] | Splits an SArray of datetime type into multiple columns, returning a
new SFrame that contains expanded columns. An SArray of datetime will be
split by default into an SFrame of 6 columns, one for each
year/month/day/hour/minute/second element.
**Column Naming**
When splitting a SArray of datetime type, new columns are named:
prefix.year, prefix.month, etc. The prefix is set by the parameter
"column_name_prefix" and defaults to 'X'. If column_name_prefix is
None or empty, then no prefix is used.
**Timezone Column**
If timezone parameter is True, then timezone information is represented
as one additional float column showing the offset from
GMT (0.0), i.e. from UTC.
Parameters
----------
column_name_prefix: str, optional
If provided, expanded column names would start with the given prefix.
Defaults to "X".
limit: list[str], optional
Limits the set of datetime elements to expand.
Possible values are 'year','month','day','hour','minute','second',
'weekday', 'isoweekday', 'tmweekday', and 'us'.
If not provided, only ['year','month','day','hour','minute','second']
are expanded.
- 'year': The year number
- 'month': A value between 1 and 12 where 1 is January.
- 'day': Day of the months. Begins at 1.
- 'hour': Hours since midnight.
- 'minute': Minutes after the hour.
- 'second': Seconds after the minute.
- 'us': Microseconds after the second. Between 0 and 999,999.
- 'weekday': A value between 0 and 6 where 0 is Monday.
- 'isoweekday': A value between 1 and 7 where 1 is Monday.
- 'tmweekday': A value between 0 and 7 where 0 is Sunday
timezone: bool, optional
A boolean parameter that determines whether to show timezone column or not.
Defaults to False.
Returns
-------
out : SFrame
A new SFrame that contains all expanded columns
Examples
--------
To expand only day and year elements of a datetime SArray
>>> sa = SArray(
[datetime(2011, 1, 21, 7, 7, 21, tzinfo=GMT(0)),
datetime(2010, 2, 5, 7, 8, 21, tzinfo=GMT(4.5))])
>>> sa.split_datetime(column_name_prefix=None,limit=['day','year'])
Columns:
day int
year int
Rows: 2
Data:
+-----+------+
| day | year |
+-----+------+
| 21  | 2011 |
| 5   | 2010 |
+-----+------+
[2 rows x 2 columns]
To expand only year and timezone elements of a datetime SArray
with the timezone column represented as a float. Columns are named with prefix:
'Y.column_name'.
>>> sa.split_datetime(column_name_prefix="Y",limit=['year'],timezone=True)
Columns:
Y.year int
Y.timezone float
Rows: 2
Data:
+--------+------------+
| Y.year | Y.timezone |
+--------+------------+
| 2011   | 0.0        |
| 2010   | 4.5        |
+--------+------------+
[2 rows x 2 columns] | [
"Splits",
"an",
"SArray",
"of",
"datetime",
"type",
"to",
"multiple",
"columns",
"return",
"a",
"new",
"SFrame",
"that",
"contains",
"expanded",
"columns",
".",
"A",
"SArray",
"of",
"datetime",
"will",
"be",
"split",
"by",
"default",
"into",
"an",
"SFrame",
"of",
"6",
"columns",
"one",
"for",
"each",
"year",
"/",
"month",
"/",
"day",
"/",
"hour",
"/",
"minute",
"/",
"second",
"element",
"."
] | python | train |
cloud9ers/gurumate | environment/lib/python2.7/site-packages/IPython/core/magic.py | https://github.com/cloud9ers/gurumate/blob/075dc74d1ee62a8c6b7a8bf2b271364f01629d1e/environment/lib/python2.7/site-packages/IPython/core/magic.py#L543-L610 | def parse_options(self, arg_str, opt_str, *long_opts, **kw):
"""Parse options passed to an argument string.
The interface is similar to that of getopt(), but it returns back a
Struct with the options as keys and the stripped argument string still
as a string.
arg_str is quoted as a true sys.argv vector by using shlex.split.
This allows us to easily expand variables, glob files, quote
arguments, etc.
Options:
-mode: default 'string'. If given as 'list', the argument string is
returned as a list (split on whitespace) instead of a string.
-list_all: put all option values in lists. Normally only options
appearing more than once are put in a list.
-posix (True): whether to split the input line in POSIX mode or not,
as per the conventions outlined in the shlex module from the
standard library."""
# inject default options at the beginning of the input line
caller = sys._getframe(1).f_code.co_name
arg_str = '%s %s' % (self.options_table.get(caller,''),arg_str)
mode = kw.get('mode','string')
if mode not in ['string','list']:
raise ValueError,'incorrect mode given: %s' % mode
# Get options
list_all = kw.get('list_all',0)
posix = kw.get('posix', os.name == 'posix')
strict = kw.get('strict', True)
# Check if we have more than one argument to warrant extra processing:
odict = {} # Dictionary with options
args = arg_str.split()
if len(args) >= 1:
# If the list of inputs only has 0 or 1 thing in it, there's no
# need to look for options
argv = arg_split(arg_str, posix, strict)
# Do regular option processing
try:
opts,args = getopt(argv, opt_str, long_opts)
except GetoptError,e:
raise UsageError('%s ( allowed: "%s" %s)' % (e.msg,opt_str,
" ".join(long_opts)))
for o,a in opts:
if o.startswith('--'):
o = o[2:]
else:
o = o[1:]
try:
odict[o].append(a)
except AttributeError:
odict[o] = [odict[o],a]
except KeyError:
if list_all:
odict[o] = [a]
else:
odict[o] = a
# Prepare opts,args for return
opts = Struct(odict)
if mode == 'string':
args = ' '.join(args)
return opts,args | [
"def",
"parse_options",
"(",
"self",
",",
"arg_str",
",",
"opt_str",
",",
"*",
"long_opts",
",",
"*",
"*",
"kw",
")",
":",
"# inject default options at the beginning of the input line",
"caller",
"=",
"sys",
".",
"_getframe",
"(",
"1",
")",
".",
"f_code",
".",
"co_name",
"arg_str",
"=",
"'%s %s'",
"%",
"(",
"self",
".",
"options_table",
".",
"get",
"(",
"caller",
",",
"''",
")",
",",
"arg_str",
")",
"mode",
"=",
"kw",
".",
"get",
"(",
"'mode'",
",",
"'string'",
")",
"if",
"mode",
"not",
"in",
"[",
"'string'",
",",
"'list'",
"]",
":",
"raise",
"ValueError",
",",
"'incorrect mode given: %s'",
"%",
"mode",
"# Get options",
"list_all",
"=",
"kw",
".",
"get",
"(",
"'list_all'",
",",
"0",
")",
"posix",
"=",
"kw",
".",
"get",
"(",
"'posix'",
",",
"os",
".",
"name",
"==",
"'posix'",
")",
"strict",
"=",
"kw",
".",
"get",
"(",
"'strict'",
",",
"True",
")",
"# Check if we have more than one argument to warrant extra processing:",
"odict",
"=",
"{",
"}",
"# Dictionary with options",
"args",
"=",
"arg_str",
".",
"split",
"(",
")",
"if",
"len",
"(",
"args",
")",
">=",
"1",
":",
"# If the list of inputs only has 0 or 1 thing in it, there's no",
"# need to look for options",
"argv",
"=",
"arg_split",
"(",
"arg_str",
",",
"posix",
",",
"strict",
")",
"# Do regular option processing",
"try",
":",
"opts",
",",
"args",
"=",
"getopt",
"(",
"argv",
",",
"opt_str",
",",
"long_opts",
")",
"except",
"GetoptError",
",",
"e",
":",
"raise",
"UsageError",
"(",
"'%s ( allowed: \"%s\" %s)'",
"%",
"(",
"e",
".",
"msg",
",",
"opt_str",
",",
"\" \"",
".",
"join",
"(",
"long_opts",
")",
")",
")",
"for",
"o",
",",
"a",
"in",
"opts",
":",
"if",
"o",
".",
"startswith",
"(",
"'--'",
")",
":",
"o",
"=",
"o",
"[",
"2",
":",
"]",
"else",
":",
"o",
"=",
"o",
"[",
"1",
":",
"]",
"try",
":",
"odict",
"[",
"o",
"]",
".",
"append",
"(",
"a",
")",
"except",
"AttributeError",
":",
"odict",
"[",
"o",
"]",
"=",
"[",
"odict",
"[",
"o",
"]",
",",
"a",
"]",
"except",
"KeyError",
":",
"if",
"list_all",
":",
"odict",
"[",
"o",
"]",
"=",
"[",
"a",
"]",
"else",
":",
"odict",
"[",
"o",
"]",
"=",
"a",
"# Prepare opts,args for return",
"opts",
"=",
"Struct",
"(",
"odict",
")",
"if",
"mode",
"==",
"'string'",
":",
"args",
"=",
"' '",
".",
"join",
"(",
"args",
")",
"return",
"opts",
",",
"args"
] | Parse options passed to an argument string.
The interface is similar to that of getopt(), but it returns back a
Struct with the options as keys and the stripped argument string still
as a string.
arg_str is quoted as a true sys.argv vector by using shlex.split.
This allows us to easily expand variables, glob files, quote
arguments, etc.
Options:
-mode: default 'string'. If given as 'list', the argument string is
returned as a list (split on whitespace) instead of a string.
-list_all: put all option values in lists. Normally only options
appearing more than once are put in a list.
-posix (True): whether to split the input line in POSIX mode or not,
as per the conventions outlined in the shlex module from the
standard library. | [
"Parse",
"options",
"passed",
"to",
"an",
"argument",
"string",
"."
] | python | test |
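A quick look at the stdlib `getopt` call that `parse_options` builds on; the option string and argument vector below are hypothetical, not IPython's actual options table:

```python
# getopt separates recognized options from positional arguments.
from getopt import getopt

opts, args = getopt(['-n', '10', '--log=debug', 'script.py'], 'n:', ['log='])
print(opts)  # [('-n', '10'), ('--log', 'debug')]
print(args)  # ['script.py']
```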
fmfn/BayesianOptimization | bayes_opt/target_space.py | https://github.com/fmfn/BayesianOptimization/blob/8ce2292895137477963cf1bafa4e71fa20b2ce49/bayes_opt/target_space.py#L234-L241 | def res(self):
"""Get all target values found and corresponding parametes."""
params = [dict(zip(self.keys, p)) for p in self.params]
return [
{"target": target, "params": param}
for target, param in zip(self.target, params)
] | [
"def",
"res",
"(",
"self",
")",
":",
"params",
"=",
"[",
"dict",
"(",
"zip",
"(",
"self",
".",
"keys",
",",
"p",
")",
")",
"for",
"p",
"in",
"self",
".",
"params",
"]",
"return",
"[",
"{",
"\"target\"",
":",
"target",
",",
"\"params\"",
":",
"param",
"}",
"for",
"target",
",",
"param",
"in",
"zip",
"(",
"self",
".",
"target",
",",
"params",
")",
"]"
] | Get all target values found and corresponding parameters. | [
"Get",
"all",
"target",
"values",
"found",
"and",
"corresponding",
"parametes",
"."
] | python | train |
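The pairing logic in `res` can be reproduced standalone; the keys, params, and targets below are made-up values rather than real optimizer output:

```python
# Rebuild {"target": ..., "params": {...}} records from parallel arrays.
keys = ('x', 'y')
params = [(0.1, 2.0), (0.3, 1.5)]
target = [0.7, 0.9]
res = [{"target": t, "params": dict(zip(keys, p))}
       for t, p in zip(target, params)]
print(res[0])  # {'target': 0.7, 'params': {'x': 0.1, 'y': 2.0}}
```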
danilobellini/audiolazy | audiolazy/lazy_filters.py | https://github.com/danilobellini/audiolazy/blob/dba0a278937909980ed40b976d866b8e97c35dee/audiolazy/lazy_filters.py#L1116-L1142 | def comb(delay, tau=inf):
"""
Feedback comb filter for a given time constant (and delay).
``y[n] = x[n] + alpha * y[n - delay]``
Parameters
----------
delay :
Feedback delay (lag), in number of samples.
tau :
Time decay (up to ``1/e``, or -8.686 dB), in number of samples, which
allows finding ``alpha = e ** (-delay / tau)``. Defaults to ``inf``
(infinite), which means alpha = 1.
Returns
-------
A ZFilter instance with the comb filter.
See Also
--------
freq2lag :
Frequency (in rad/sample) to delay (in samples) converter.
"""
alpha = e ** (-delay / tau)
return 1 / (1 - alpha * z ** -delay) | [
"def",
"comb",
"(",
"delay",
",",
"tau",
"=",
"inf",
")",
":",
"alpha",
"=",
"e",
"**",
"(",
"-",
"delay",
"/",
"tau",
")",
"return",
"1",
"/",
"(",
"1",
"-",
"alpha",
"*",
"z",
"**",
"-",
"delay",
")"
] | Feedback comb filter for a given time constant (and delay).
``y[n] = x[n] + alpha * y[n - delay]``
Parameters
----------
delay :
Feedback delay (lag), in number of samples.
tau :
Time decay (up to ``1/e``, or -8.686 dB), in number of samples, which
allows finding ``alpha = e ** (-delay / tau)``. Defaults to ``inf``
(infinite), which means alpha = 1.
Returns
-------
A ZFilter instance with the comb filter.
See Also
--------
freq2lag :
Frequency (in rad/sample) to delay (in samples) converter. | [
"Feedback",
"comb",
"filter",
"for",
"a",
"given",
"time",
"constant",
"(",
"and",
"delay",
")",
"."
] | python | train |
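A hedged usage sketch for `comb`; it assumes an `audiolazy` version matching the record above is installed and re-exports the function at package level:

```python
# y[n] = x[n] + alpha * y[n - 4], with alpha = e ** (-4 / 20) ~= 0.82
from audiolazy import comb

filt = comb(4, tau=20)
print(filt)  # displays the transfer function 1 / (1 - alpha * z**-4)
```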
jtauber/sebastian | sebastian/core/elements.py | https://github.com/jtauber/sebastian/blob/4e460c3aeab332b45c74fe78e65e76ec87d5cfa8/sebastian/core/elements.py#L248-L273 | def subseq(self, start_offset=0, end_offset=None):
"""
Return a subset of the sequence
starting at start_offset (defaulting to the beginning)
ending at end_offset (None representing the end, which is the default)
Raises ValueError if duration_64 is missing on any element
"""
from sebastian.core import DURATION_64
def subseq_iter(start_offset, end_offset):
cur_offset = 0
for point in self._elements:
try:
cur_offset += point[DURATION_64]
except KeyError:
raise ValueError("HSeq.subseq requires all points to have a %s attribute" % DURATION_64)
#Skip until start
if cur_offset < start_offset:
continue
#Yield points start_offset <= point < end_offset
if end_offset is None or cur_offset < end_offset:
yield point
else:
return
return HSeq(subseq_iter(start_offset, end_offset)) | [
"def",
"subseq",
"(",
"self",
",",
"start_offset",
"=",
"0",
",",
"end_offset",
"=",
"None",
")",
":",
"from",
"sebastian",
".",
"core",
"import",
"DURATION_64",
"def",
"subseq_iter",
"(",
"start_offset",
",",
"end_offset",
")",
":",
"cur_offset",
"=",
"0",
"for",
"point",
"in",
"self",
".",
"_elements",
":",
"try",
":",
"cur_offset",
"+=",
"point",
"[",
"DURATION_64",
"]",
"except",
"KeyError",
":",
"raise",
"ValueError",
"(",
"\"HSeq.subseq requires all points to have a %s attribute\"",
"%",
"DURATION_64",
")",
"#Skip until start",
"if",
"cur_offset",
"<",
"start_offset",
":",
"continue",
"#Yield points start_offset <= point < end_offset",
"if",
"end_offset",
"is",
"None",
"or",
"cur_offset",
"<",
"end_offset",
":",
"yield",
"point",
"else",
":",
"raise",
"StopIteration",
"return",
"HSeq",
"(",
"subseq_iter",
"(",
"start_offset",
",",
"end_offset",
")",
")"
] | Return a subset of the sequence
starting at start_offset (defaulting to the beginning)
ending at end_offset (None representing the end, which is the default)
Raises ValueError if duration_64 is missing on any element | [
"Return",
"a",
"subset",
"of",
"the",
"sequence",
"starting",
"at",
"start_offset",
"(",
"defaulting",
"to",
"the",
"beginning",
")",
"ending",
"at",
"end_offset",
"(",
"None",
"representing",
"the",
"end",
"whih",
"is",
"the",
"default",
")",
"Raises",
"ValueError",
"if",
"duration_64",
"is",
"missing",
"on",
"any",
"element"
] | python | train |
hotdoc/hotdoc | hotdoc/extensions/c/clang/cindex.py | https://github.com/hotdoc/hotdoc/blob/1067cdc8482b585b364a38fb52ca5d904e486280/hotdoc/extensions/c/clang/cindex.py#L2689-L2727 | def get_extent(self, filename, locations):
"""Obtain a SourceRange from this translation unit.
The bounds of the SourceRange must ultimately be defined by a start and
end SourceLocation. For the locations argument, you can pass:
- 2 SourceLocation instances in a 2-tuple or list.
- 2 int file offsets via a 2-tuple or list.
- 2 2-tuple or lists of (line, column) pairs in a 2-tuple or list.
e.g.
get_extent('foo.c', (5, 10))
get_extent('foo.c', ((1, 1), (1, 15)))
"""
f = self.get_file(filename)
if len(locations) < 2:
raise Exception('Must pass object with at least 2 elements')
start_location, end_location = locations
if hasattr(start_location, '__len__'):
start_location = SourceLocation.from_position(self, f,
start_location[0], start_location[1])
elif isinstance(start_location, int):
start_location = SourceLocation.from_offset(self, f,
start_location)
if hasattr(end_location, '__len__'):
end_location = SourceLocation.from_position(self, f,
end_location[0], end_location[1])
elif isinstance(end_location, int):
end_location = SourceLocation.from_offset(self, f, end_location)
assert isinstance(start_location, SourceLocation)
assert isinstance(end_location, SourceLocation)
return SourceRange.from_locations(start_location, end_location) | [
"def",
"get_extent",
"(",
"self",
",",
"filename",
",",
"locations",
")",
":",
"f",
"=",
"self",
".",
"get_file",
"(",
"filename",
")",
"if",
"len",
"(",
"locations",
")",
"<",
"2",
":",
"raise",
"Exception",
"(",
"'Must pass object with at least 2 elements'",
")",
"start_location",
",",
"end_location",
"=",
"locations",
"if",
"hasattr",
"(",
"start_location",
",",
"'__len__'",
")",
":",
"start_location",
"=",
"SourceLocation",
".",
"from_position",
"(",
"self",
",",
"f",
",",
"start_location",
"[",
"0",
"]",
",",
"start_location",
"[",
"1",
"]",
")",
"elif",
"isinstance",
"(",
"start_location",
",",
"int",
")",
":",
"start_location",
"=",
"SourceLocation",
".",
"from_offset",
"(",
"self",
",",
"f",
",",
"start_location",
")",
"if",
"hasattr",
"(",
"end_location",
",",
"'__len__'",
")",
":",
"end_location",
"=",
"SourceLocation",
".",
"from_position",
"(",
"self",
",",
"f",
",",
"end_location",
"[",
"0",
"]",
",",
"end_location",
"[",
"1",
"]",
")",
"elif",
"isinstance",
"(",
"end_location",
",",
"int",
")",
":",
"end_location",
"=",
"SourceLocation",
".",
"from_offset",
"(",
"self",
",",
"f",
",",
"end_location",
")",
"assert",
"isinstance",
"(",
"start_location",
",",
"SourceLocation",
")",
"assert",
"isinstance",
"(",
"end_location",
",",
"SourceLocation",
")",
"return",
"SourceRange",
".",
"from_locations",
"(",
"start_location",
",",
"end_location",
")"
] | Obtain a SourceRange from this translation unit.
The bounds of the SourceRange must ultimately be defined by a start and
end SourceLocation. For the locations argument, you can pass:
- 2 SourceLocation instances in a 2-tuple or list.
- 2 int file offsets via a 2-tuple or list.
- 2 2-tuple or lists of (line, column) pairs in a 2-tuple or list.
e.g.
get_extent('foo.c', (5, 10))
get_extent('foo.c', ((1, 1), (1, 15))) | [
"Obtain",
"a",
"SourceRange",
"from",
"this",
"translation",
"unit",
"."
] | python | train |
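A hedged sketch of calling `get_extent` on a real translation unit; it assumes libclang plus its Python bindings are installed and that a `foo.c` file exists:

```python
# Both byte offsets and (line, column) pairs are accepted, per the docstring.
import clang.cindex as ci

tu = ci.TranslationUnit.from_source('foo.c')
r1 = tu.get_extent('foo.c', (5, 10))            # two byte offsets
r2 = tu.get_extent('foo.c', ((1, 1), (1, 15)))  # two (line, column) pairs
print(r1, r2)
```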
wummel/linkchecker | third_party/dnspython/dns/message.py | https://github.com/wummel/linkchecker/blob/c2ce810c3fb00b895a841a7be6b2e78c64e7b042/third_party/dnspython/dns/message.py#L593-L615 | def _get_question(self, qcount):
"""Read the next I{qcount} records from the wire data and add them to
the question section.
@param qcount: the number of questions in the message
@type qcount: int"""
if self.updating and qcount > 1:
raise dns.exception.FormError
for i in xrange(0, qcount):
(qname, used) = dns.name.from_wire(self.wire, self.current)
if not self.message.origin is None:
qname = qname.relativize(self.message.origin)
self.current = self.current + used
(rdtype, rdclass) = \
struct.unpack('!HH',
self.wire[self.current:self.current + 4])
self.current = self.current + 4
self.message.find_rrset(self.message.question, qname,
rdclass, rdtype, create=True,
force_unique=True)
if self.updating:
self.zone_rdclass = rdclass | [
"def",
"_get_question",
"(",
"self",
",",
"qcount",
")",
":",
"if",
"self",
".",
"updating",
"and",
"qcount",
">",
"1",
":",
"raise",
"dns",
".",
"exception",
".",
"FormError",
"for",
"i",
"in",
"xrange",
"(",
"0",
",",
"qcount",
")",
":",
"(",
"qname",
",",
"used",
")",
"=",
"dns",
".",
"name",
".",
"from_wire",
"(",
"self",
".",
"wire",
",",
"self",
".",
"current",
")",
"if",
"not",
"self",
".",
"message",
".",
"origin",
"is",
"None",
":",
"qname",
"=",
"qname",
".",
"relativize",
"(",
"self",
".",
"message",
".",
"origin",
")",
"self",
".",
"current",
"=",
"self",
".",
"current",
"+",
"used",
"(",
"rdtype",
",",
"rdclass",
")",
"=",
"struct",
".",
"unpack",
"(",
"'!HH'",
",",
"self",
".",
"wire",
"[",
"self",
".",
"current",
":",
"self",
".",
"current",
"+",
"4",
"]",
")",
"self",
".",
"current",
"=",
"self",
".",
"current",
"+",
"4",
"self",
".",
"message",
".",
"find_rrset",
"(",
"self",
".",
"message",
".",
"question",
",",
"qname",
",",
"rdclass",
",",
"rdtype",
",",
"create",
"=",
"True",
",",
"force_unique",
"=",
"True",
")",
"if",
"self",
".",
"updating",
":",
"self",
".",
"zone_rdclass",
"=",
"rdclass"
] | Read the next I{qcount} records from the wire data and add them to
the question section.
@param qcount: the number of questions in the message
@type qcount: int | [
"Read",
"the",
"next",
"I",
"{",
"qcount",
"}",
"records",
"from",
"the",
"wire",
"data",
"and",
"add",
"them",
"to",
"the",
"question",
"section",
"."
] | python | train |
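The fixed-size read at the core of `_get_question` is easy to check in isolation: each DNS question ends with a 2-byte QTYPE and a 2-byte QCLASS in network byte order:

```python
# Unpack QTYPE/QCLASS exactly as the parser above does.
import struct

wire = b'\x00\x01\x00\x01'  # QTYPE=1 (A record), QCLASS=1 (IN)
rdtype, rdclass = struct.unpack('!HH', wire)
print(rdtype, rdclass)  # 1 1
```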
ray-project/ray | python/ray/serialization.py | https://github.com/ray-project/ray/blob/4eade036a0505e244c976f36aaa2d64386b5129b/python/ray/serialization.py#L16-L55 | def check_serializable(cls):
"""Throws an exception if Ray cannot serialize this class efficiently.
Args:
cls (type): The class to be serialized.
Raises:
Exception: An exception is raised if Ray cannot serialize this class
efficiently.
"""
if is_named_tuple(cls):
# This case works.
return
if not hasattr(cls, "__new__"):
print("The class {} does not have a '__new__' attribute and is "
"probably an old-stye class. Please make it a new-style class "
"by inheriting from 'object'.")
raise RayNotDictionarySerializable("The class {} does not have a "
"'__new__' attribute and is "
"probably an old-style class. We "
"do not support this. Please make "
"it a new-style class by "
"inheriting from 'object'."
.format(cls))
try:
obj = cls.__new__(cls)
except Exception:
raise RayNotDictionarySerializable("The class {} has overridden "
"'__new__', so Ray may not be able "
"to serialize it efficiently."
.format(cls))
if not hasattr(obj, "__dict__"):
raise RayNotDictionarySerializable("Objects of the class {} do not "
"have a '__dict__' attribute, so "
"Ray cannot serialize it "
"efficiently.".format(cls))
if hasattr(obj, "__slots__"):
raise RayNotDictionarySerializable("The class {} uses '__slots__', so "
"Ray may not be able to serialize "
"it efficiently.".format(cls)) | [
"def",
"check_serializable",
"(",
"cls",
")",
":",
"if",
"is_named_tuple",
"(",
"cls",
")",
":",
"# This case works.",
"return",
"if",
"not",
"hasattr",
"(",
"cls",
",",
"\"__new__\"",
")",
":",
"print",
"(",
"\"The class {} does not have a '__new__' attribute and is \"",
"\"probably an old-stye class. Please make it a new-style class \"",
"\"by inheriting from 'object'.\"",
")",
"raise",
"RayNotDictionarySerializable",
"(",
"\"The class {} does not have a \"",
"\"'__new__' attribute and is \"",
"\"probably an old-style class. We \"",
"\"do not support this. Please make \"",
"\"it a new-style class by \"",
"\"inheriting from 'object'.\"",
".",
"format",
"(",
"cls",
")",
")",
"try",
":",
"obj",
"=",
"cls",
".",
"__new__",
"(",
"cls",
")",
"except",
"Exception",
":",
"raise",
"RayNotDictionarySerializable",
"(",
"\"The class {} has overridden \"",
"\"'__new__', so Ray may not be able \"",
"\"to serialize it efficiently.\"",
".",
"format",
"(",
"cls",
")",
")",
"if",
"not",
"hasattr",
"(",
"obj",
",",
"\"__dict__\"",
")",
":",
"raise",
"RayNotDictionarySerializable",
"(",
"\"Objects of the class {} do not \"",
"\"have a '__dict__' attribute, so \"",
"\"Ray cannot serialize it \"",
"\"efficiently.\"",
".",
"format",
"(",
"cls",
")",
")",
"if",
"hasattr",
"(",
"obj",
",",
"\"__slots__\"",
")",
":",
"raise",
"RayNotDictionarySerializable",
"(",
"\"The class {} uses '__slots__', so \"",
"\"Ray may not be able to serialize \"",
"\"it efficiently.\"",
".",
"format",
"(",
"cls",
")",
")"
] | Throws an exception if Ray cannot serialize this class efficiently.
Args:
cls (type): The class to be serialized.
Raises:
Exception: An exception is raised if Ray cannot serialize this class
efficiently. | [
"Throws",
"an",
"exception",
"if",
"Ray",
"cannot",
"serialize",
"this",
"class",
"efficiently",
"."
] | python | train |
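The rejected cases are simple to reproduce; `WithSlots` below is a made-up class, not part of Ray:

```python
# A __slots__ class trips both of the final checks in check_serializable.
class WithSlots(object):
    __slots__ = ("x",)

obj = WithSlots.__new__(WithSlots)
print(hasattr(obj, "__dict__"))   # False -> would raise RayNotDictionarySerializable
print(hasattr(obj, "__slots__"))  # True  -> would raise RayNotDictionarySerializable
```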
drj11/pypng | code/iccp.py | https://github.com/drj11/pypng/blob/b8220ca9f58e4c5bc1d507e713744fcb8c049225/code/iccp.py#L316-L328 | def encode(tsig, *l):
"""Encode a Python value as an ICC type. `tsig` is the type
signature (the first 4 bytes of the encoded value; see [ICC 2004]
section 10).
"""
fun = encodefuns()
if tsig not in fun:
raise "No encoder for type %r." % tsig
v = fun[tsig](*l)
# Pad tsig out with spaces.
tsig = (tsig + ' ')[: 4]
return tsig + ('\x00' * 4) + v | [
"def",
"encode",
"(",
"tsig",
",",
"*",
"l",
")",
":",
"fun",
"=",
"encodefuns",
"(",
")",
"if",
"tsig",
"not",
"in",
"fun",
":",
"raise",
"\"No encoder for type %r.\"",
"%",
"tsig",
"v",
"=",
"fun",
"[",
"tsig",
"]",
"(",
"*",
"l",
")",
"# Padd tsig out with spaces.",
"tsig",
"=",
"(",
"tsig",
"+",
"' '",
")",
"[",
":",
"4",
"]",
"return",
"tsig",
"+",
"(",
"'\\x00'",
"*",
"4",
")",
"+",
"v"
] | Encode a Python value as an ICC type. `tsig` is the type
signature (the first 4 bytes of the encoded value; see [ICC 2004]
section 10). | [
"Encode",
"a",
"Python",
"value",
"as",
"an",
"ICC",
"type",
".",
"tsig",
"is",
"the",
"type",
"signature",
"to",
"(",
"the",
"first",
"4",
"bytes",
"of",
"the",
"encoded",
"value",
"see",
"[",
"ICC",
"2004",
"]",
"section",
"10",
"."
] | python | train |
openwisp/netjsonconfig | netjsonconfig/backends/openwisp/openwisp.py | https://github.com/openwisp/netjsonconfig/blob/c23ce9732720856e2f6dc54060db71a8182c7d4b/netjsonconfig/backends/openwisp/openwisp.py#L78-L89 | def _add_install(self, context):
"""
generates install.sh and adds it to included files
"""
contents = self._render_template('install.sh', context)
self.config.setdefault('files', []) # file list might be empty
# add install.sh to list of included files
self._add_unique_file({
"path": "/install.sh",
"contents": contents,
"mode": "755"
}) | [
"def",
"_add_install",
"(",
"self",
",",
"context",
")",
":",
"contents",
"=",
"self",
".",
"_render_template",
"(",
"'install.sh'",
",",
"context",
")",
"self",
".",
"config",
".",
"setdefault",
"(",
"'files'",
",",
"[",
"]",
")",
"# file list might be empty",
"# add install.sh to list of included files",
"self",
".",
"_add_unique_file",
"(",
"{",
"\"path\"",
":",
"\"/install.sh\"",
",",
"\"contents\"",
":",
"contents",
",",
"\"mode\"",
":",
"\"755\"",
"}",
")"
] | generates install.sh and adds it to included files | [
"generates",
"install",
".",
"sh",
"and",
"adds",
"it",
"to",
"included",
"files"
] | python | valid |
sloria/read_env | read_env.py | https://github.com/sloria/read_env/blob/90c5a7b38d70f06cd96b5d9a7e68e422bb5bd605/read_env.py#L16-L46 | def read_env(path=None, environ=None, recurse=True):
"""Reads a .env file into ``environ`` (which defaults to ``os.environ``).
If .env is not found in the directory from which this function is called, recurse
up the directory tree until a .env file is found.
"""
environ = environ if environ is not None else os.environ
# By default, start search from the same file this function is called
if path is None:
frame = inspect.currentframe().f_back
caller_dir = os.path.dirname(frame.f_code.co_filename)
path = os.path.join(os.path.abspath(caller_dir), ENV)
if recurse:
current = path
pardir = os.path.abspath(os.path.join(current, os.pardir))
while current != pardir:
target = os.path.join(current, ENV)
if os.path.exists(target):
path = os.path.abspath(target)
break
else:
current = os.path.abspath(os.path.join(current, os.pardir))
pardir = os.path.abspath(os.path.join(current, os.pardir))
if not path:
raise FileNotFoundError('Could not find a .env file')
with open(path, 'r') as fp:
content = fp.read()
parsed = parse_env(content)
for key, value in parsed.items():
environ.setdefault(key, value) | [
"def",
"read_env",
"(",
"path",
"=",
"None",
",",
"environ",
"=",
"None",
",",
"recurse",
"=",
"True",
")",
":",
"environ",
"=",
"environ",
"if",
"environ",
"is",
"not",
"None",
"else",
"os",
".",
"environ",
"# By default, start search from the same file this function is called",
"if",
"path",
"is",
"None",
":",
"frame",
"=",
"inspect",
".",
"currentframe",
"(",
")",
".",
"f_back",
"caller_dir",
"=",
"os",
".",
"path",
".",
"dirname",
"(",
"frame",
".",
"f_code",
".",
"co_filename",
")",
"path",
"=",
"os",
".",
"path",
".",
"join",
"(",
"os",
".",
"path",
".",
"abspath",
"(",
"caller_dir",
")",
",",
"ENV",
")",
"if",
"recurse",
":",
"current",
"=",
"path",
"pardir",
"=",
"os",
".",
"path",
".",
"abspath",
"(",
"os",
".",
"path",
".",
"join",
"(",
"current",
",",
"os",
".",
"pardir",
")",
")",
"while",
"current",
"!=",
"pardir",
":",
"target",
"=",
"os",
".",
"path",
".",
"join",
"(",
"current",
",",
"ENV",
")",
"if",
"os",
".",
"path",
".",
"exists",
"(",
"target",
")",
":",
"path",
"=",
"os",
".",
"path",
".",
"abspath",
"(",
"target",
")",
"break",
"else",
":",
"current",
"=",
"os",
".",
"path",
".",
"abspath",
"(",
"os",
".",
"path",
".",
"join",
"(",
"current",
",",
"os",
".",
"pardir",
")",
")",
"pardir",
"=",
"os",
".",
"path",
".",
"abspath",
"(",
"os",
".",
"path",
".",
"join",
"(",
"current",
",",
"os",
".",
"pardir",
")",
")",
"if",
"not",
"path",
":",
"raise",
"FileNotFoundError",
"(",
"'Could not find a .env file'",
")",
"with",
"open",
"(",
"path",
",",
"'r'",
")",
"as",
"fp",
":",
"content",
"=",
"fp",
".",
"read",
"(",
")",
"parsed",
"=",
"parse_env",
"(",
"content",
")",
"for",
"key",
",",
"value",
"in",
"parsed",
".",
"items",
"(",
")",
":",
"environ",
".",
"setdefault",
"(",
"key",
",",
"value",
")"
] | Reads a .env file into ``environ`` (which defaults to ``os.environ``).
If .env is not found in the directory from which this function is called, recurse
up the directory tree until a .env file is found. | [
"Reads",
"a",
".",
"env",
"file",
"into",
"environ",
"(",
"which",
"defaults",
"to",
"os",
".",
"environ",
")",
".",
"If",
".",
"env",
"is",
"not",
"found",
"in",
"the",
"directory",
"from",
"which",
"this",
"function",
"is",
"called",
"recurse",
"up",
"the",
"directory",
"tree",
"until",
"a",
".",
"env",
"file",
"is",
"found",
"."
] | python | train |
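A hedged usage sketch for `read_env`; it assumes the package is installed, that a `.env` file sits somewhere on the caller's directory path, and the `DEBUG` key is a hypothetical example:

```python
# Keys already present in os.environ win, because of the setdefault above.
import os
from read_env import read_env

read_env()  # walks up from this file's directory until a .env is found
print(os.environ.get('DEBUG'))  # whatever the .env defines, if anything
```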
RudolfCardinal/pythonlib | cardinal_pythonlib/rnc_web.py | https://github.com/RudolfCardinal/pythonlib/blob/0b84cb35f38bd7d8723958dae51b480a829b7227/cardinal_pythonlib/rnc_web.py#L349-L355 | def getenv_escaped(key: str, default: str = None) -> Optional[str]:
"""
Returns an environment variable's value, CGI-escaped, or ``None``.
"""
value = os.getenv(key, default)
# noinspection PyDeprecation
return cgi.escape(value) if value is not None else None | [
"def",
"getenv_escaped",
"(",
"key",
":",
"str",
",",
"default",
":",
"str",
"=",
"None",
")",
"->",
"Optional",
"[",
"str",
"]",
":",
"value",
"=",
"os",
".",
"getenv",
"(",
"key",
",",
"default",
")",
"# noinspection PyDeprecation",
"return",
"cgi",
".",
"escape",
"(",
"value",
")",
"if",
"value",
"is",
"not",
"None",
"else",
"None"
] | Returns an environment variable's value, CGI-escaped, or ``None``. | [
"Returns",
"an",
"environment",
"variable",
"s",
"value",
"CGI",
"-",
"escaped",
"or",
"None",
"."
] | python | train |
pyviz/holoviews | setup.py | https://github.com/pyviz/holoviews/blob/ae0dd2f3de448b0ca5e9065aabd6ef8d84c7e655/setup.py#L183-L196 | def walker(top, names):
"""
Walks a directory and records all packages and file extensions.
"""
global packages, extensions
if any(exc in top for exc in excludes):
return
package = top[top.rfind('holoviews'):].replace(os.path.sep, '.')
packages.append(package)
for name in names:
ext = '.'.join(name.split('.')[1:])
ext_str = '*.%s' % ext
if ext and ext not in excludes and ext_str not in extensions[package]:
extensions[package].append(ext_str) | [
"def",
"walker",
"(",
"top",
",",
"names",
")",
":",
"global",
"packages",
",",
"extensions",
"if",
"any",
"(",
"exc",
"in",
"top",
"for",
"exc",
"in",
"excludes",
")",
":",
"return",
"package",
"=",
"top",
"[",
"top",
".",
"rfind",
"(",
"'holoviews'",
")",
":",
"]",
".",
"replace",
"(",
"os",
".",
"path",
".",
"sep",
",",
"'.'",
")",
"packages",
".",
"append",
"(",
"package",
")",
"for",
"name",
"in",
"names",
":",
"ext",
"=",
"'.'",
".",
"join",
"(",
"name",
".",
"split",
"(",
"'.'",
")",
"[",
"1",
":",
"]",
")",
"ext_str",
"=",
"'*.%s'",
"%",
"ext",
"if",
"ext",
"and",
"ext",
"not",
"in",
"excludes",
"and",
"ext_str",
"not",
"in",
"extensions",
"[",
"package",
"]",
":",
"extensions",
"[",
"package",
"]",
".",
"append",
"(",
"ext_str",
")"
] | Walks a directory and records all packages and file extensions. | [
"Walks",
"a",
"directory",
"and",
"records",
"all",
"packages",
"and",
"file",
"extensions",
"."
] | python | train |
spyder-ide/spyder | spyder/plugins/editor/widgets/status.py | https://github.com/spyder-ide/spyder/blob/f76836ce1b924bcc4efd3f74f2960d26a4e528e0/spyder/plugins/editor/widgets/status.py#L34-L38 | def update_eol(self, os_name):
"""Update end of line status."""
os_name = to_text_string(os_name)
value = {"nt": "CRLF", "posix": "LF"}.get(os_name, "CR")
self.set_value(value) | [
"def",
"update_eol",
"(",
"self",
",",
"os_name",
")",
":",
"os_name",
"=",
"to_text_string",
"(",
"os_name",
")",
"value",
"=",
"{",
"\"nt\"",
":",
"\"CRLF\"",
",",
"\"posix\"",
":",
"\"LF\"",
"}",
".",
"get",
"(",
"os_name",
",",
"\"CR\"",
")",
"self",
".",
"set_value",
"(",
"value",
")"
] | Update end of line status. | [
"Update",
"end",
"of",
"line",
"status",
"."
] | python | train |
mlavin/django-all-access | allaccess/clients.py | https://github.com/mlavin/django-all-access/blob/4b15b6c9dedf8080a7c477e0af1142c609ec5598/allaccess/clients.py#L202-L214 | def parse_raw_token(self, raw_token):
"Parse token and secret from raw token response."
if raw_token is None:
return (None, None)
# Load as json first then parse as query string
try:
token_data = json.loads(raw_token)
except ValueError:
qs = parse_qs(raw_token)
token = qs.get('access_token', [None])[0]
else:
token = token_data.get('access_token', None)
return (token, None) | [
"def",
"parse_raw_token",
"(",
"self",
",",
"raw_token",
")",
":",
"if",
"raw_token",
"is",
"None",
":",
"return",
"(",
"None",
",",
"None",
")",
"# Load as json first then parse as query string",
"try",
":",
"token_data",
"=",
"json",
".",
"loads",
"(",
"raw_token",
")",
"except",
"ValueError",
":",
"qs",
"=",
"parse_qs",
"(",
"raw_token",
")",
"token",
"=",
"qs",
".",
"get",
"(",
"'access_token'",
",",
"[",
"None",
"]",
")",
"[",
"0",
"]",
"else",
":",
"token",
"=",
"token_data",
".",
"get",
"(",
"'access_token'",
",",
"None",
")",
"return",
"(",
"token",
",",
"None",
")"
] | Parse token and secret from raw token response. | [
"Parse",
"token",
"and",
"secret",
"from",
"raw",
"token",
"response",
"."
] | python | train |
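Both token formats that `parse_raw_token` accepts can be demonstrated with stdlib pieces; the token values are made up:

```python
# JSON body first, query-string fallback second, mirroring the method.
import json
try:
    from urllib.parse import parse_qs   # Python 3
except ImportError:
    from urlparse import parse_qs       # Python 2

print(json.loads('{"access_token": "abc123"}')['access_token'])         # abc123
print(parse_qs('access_token=abc123&expires=3600')['access_token'][0])  # abc123
```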
mensi/gittornado | gittornado/iowrapper.py | https://github.com/mensi/gittornado/blob/adf86b5537064337c806cce0e71eacaabc8bb610/gittornado/iowrapper.py#L330-L359 | def _handle_stderr_event(self, fd, events):
"""Eventhandler for stderr"""
assert fd == self.fd_stderr
if events & self.ioloop.READ:
# got data ready
if not self.headers_sent:
payload = self.process.stderr.read()
data = 'HTTP/1.1 500 Internal Server Error\r\nDate: %s\r\nContent-Length: %d\r\n\r\n' % (get_date_header(), len(payload))
self.headers_sent = True
data += payload
else:
# see stdout
logger.error("This should not happen (stderr)")
data = self.process.stderr.read()
logger.debug('Sending stderr to client: %r', data)
self.request.write(data)
if events & self.ioloop.ERROR:
logger.debug('Error on stderr')
# ensure file is closed
if not self.process.stderr.closed:
self.process.stderr.close()
# remove handler
self.ioloop.remove_handler(self.fd_stderr)
# if all fds are closed, we can finish
return self._graceful_finish() | [
"def",
"_handle_stderr_event",
"(",
"self",
",",
"fd",
",",
"events",
")",
":",
"assert",
"fd",
"==",
"self",
".",
"fd_stderr",
"if",
"events",
"&",
"self",
".",
"ioloop",
".",
"READ",
":",
"# got data ready",
"if",
"not",
"self",
".",
"headers_sent",
":",
"payload",
"=",
"self",
".",
"process",
".",
"stderr",
".",
"read",
"(",
")",
"data",
"=",
"'HTTP/1.1 500 Internal Server Error\\r\\nDate: %s\\r\\nContent-Length: %d\\r\\n\\r\\n'",
"%",
"(",
"get_date_header",
"(",
")",
",",
"len",
"(",
"payload",
")",
")",
"self",
".",
"headers_sent",
"=",
"True",
"data",
"+=",
"payload",
"else",
":",
"# see stdout",
"logger",
".",
"error",
"(",
"\"This should not happen (stderr)\"",
")",
"data",
"=",
"self",
".",
"process",
".",
"stderr",
".",
"read",
"(",
")",
"logger",
".",
"debug",
"(",
"'Sending stderr to client: %r'",
",",
"data",
")",
"self",
".",
"request",
".",
"write",
"(",
"data",
")",
"if",
"events",
"&",
"self",
".",
"ioloop",
".",
"ERROR",
":",
"logger",
".",
"debug",
"(",
"'Error on stderr'",
")",
"# ensure file is closed",
"if",
"not",
"self",
".",
"process",
".",
"stderr",
".",
"closed",
":",
"self",
".",
"process",
".",
"stderr",
".",
"close",
"(",
")",
"# remove handler",
"self",
".",
"ioloop",
".",
"remove_handler",
"(",
"self",
".",
"fd_stderr",
")",
"# if all fds are closed, we can finish",
"return",
"self",
".",
"_graceful_finish",
"(",
")"
] | Eventhandler for stderr | [
"Eventhandler",
"for",
"stderr"
] | python | train |
dask/dask-kubernetes | dask_kubernetes/core.py | https://github.com/dask/dask-kubernetes/blob/8a4883ecd902460b446bb1f43ed97efe398a135e/dask_kubernetes/core.py#L522-L535 | def _namespace_default():
"""
Get current namespace if running in a k8s cluster
If not in a k8s cluster with service accounts enabled, default to
'default'
Taken from https://github.com/jupyterhub/kubespawner/blob/master/kubespawner/spawner.py#L125
"""
ns_path = '/var/run/secrets/kubernetes.io/serviceaccount/namespace'
if os.path.exists(ns_path):
with open(ns_path) as f:
return f.read().strip()
return 'default' | [
"def",
"_namespace_default",
"(",
")",
":",
"ns_path",
"=",
"'/var/run/secrets/kubernetes.io/serviceaccount/namespace'",
"if",
"os",
".",
"path",
".",
"exists",
"(",
"ns_path",
")",
":",
"with",
"open",
"(",
"ns_path",
")",
"as",
"f",
":",
"return",
"f",
".",
"read",
"(",
")",
".",
"strip",
"(",
")",
"return",
"'default'"
] | Get current namespace if running in a k8s cluster
If not in a k8s cluster with service accounts enabled, default to
'default'
Taken from https://github.com/jupyterhub/kubespawner/blob/master/kubespawner/spawner.py#L125 | [
"Get",
"current",
"namespace",
"if",
"running",
"in",
"a",
"k8s",
"cluster"
] | python | train |
MaxStrange/AudioSegment | audiosegment.py | https://github.com/MaxStrange/AudioSegment/blob/1daefb8de626ddff3ff7016697c3ad31d262ecd6/audiosegment.py#L409-L450 | def detect_voice(self, prob_detect_voice=0.5):
"""
Returns self as a list of tuples:
[('v', voiced segment), ('u', unvoiced segment), (etc.)]
The overall order of the AudioSegment is preserved.
:param prob_detect_voice: The raw probability that any random 20ms window of the audio file
contains voice.
:returns: The described list.
"""
assert self.frame_rate in (48000, 32000, 16000, 8000), "Try resampling to one of the allowed frame rates."
assert self.sample_width == 2, "Try resampling to 16 bit."
assert self.channels == 1, "Try resampling to one channel."
class model_class:
def __init__(self, aggressiveness):
self.v = webrtcvad.Vad(int(aggressiveness))
def predict(self, vector):
if self.v.is_speech(vector.raw_data, vector.frame_rate):
return 1
else:
return 0
model = model_class(aggressiveness=2)
pyesno = 0.3 # Probability of the next 20 ms being unvoiced given that this 20 ms was voiced
pnoyes = 0.2 # Probability of the next 20 ms being voiced given that this 20 ms was unvoiced
p_realyes_outputyes = 0.4 # WebRTCVAD has a very high FP rate - just because it says yes, doesn't mean much
p_realyes_outputno = 0.05 # If it says no, we can be very certain that it really is a no
p_yes_raw = prob_detect_voice
filtered = self.detect_event(model=model,
ms_per_input=20,
transition_matrix=(pyesno, pnoyes),
model_stats=(p_realyes_outputyes, p_realyes_outputno),
event_length_s=0.25,
prob_raw_yes=p_yes_raw)
ret = []
for tup in filtered:
t = ('v', tup[1]) if tup[0] == 'y' else ('u', tup[1])
ret.append(t)
return ret | [
"def",
"detect_voice",
"(",
"self",
",",
"prob_detect_voice",
"=",
"0.5",
")",
":",
"assert",
"self",
".",
"frame_rate",
"in",
"(",
"48000",
",",
"32000",
",",
"16000",
",",
"8000",
")",
",",
"\"Try resampling to one of the allowed frame rates.\"",
"assert",
"self",
".",
"sample_width",
"==",
"2",
",",
"\"Try resampling to 16 bit.\"",
"assert",
"self",
".",
"channels",
"==",
"1",
",",
"\"Try resampling to one channel.\"",
"class",
"model_class",
":",
"def",
"__init__",
"(",
"self",
",",
"aggressiveness",
")",
":",
"self",
".",
"v",
"=",
"webrtcvad",
".",
"Vad",
"(",
"int",
"(",
"aggressiveness",
")",
")",
"def",
"predict",
"(",
"self",
",",
"vector",
")",
":",
"if",
"self",
".",
"v",
".",
"is_speech",
"(",
"vector",
".",
"raw_data",
",",
"vector",
".",
"frame_rate",
")",
":",
"return",
"1",
"else",
":",
"return",
"0",
"model",
"=",
"model_class",
"(",
"aggressiveness",
"=",
"2",
")",
"pyesno",
"=",
"0.3",
"# Probability of the next 20 ms being unvoiced given that this 20 ms was voiced",
"pnoyes",
"=",
"0.2",
"# Probability of the next 20 ms being voiced given that this 20 ms was unvoiced",
"p_realyes_outputyes",
"=",
"0.4",
"# WebRTCVAD has a very high FP rate - just because it says yes, doesn't mean much",
"p_realyes_outputno",
"=",
"0.05",
"# If it says no, we can be very certain that it really is a no",
"p_yes_raw",
"=",
"prob_detect_voice",
"filtered",
"=",
"self",
".",
"detect_event",
"(",
"model",
"=",
"model",
",",
"ms_per_input",
"=",
"20",
",",
"transition_matrix",
"=",
"(",
"pyesno",
",",
"pnoyes",
")",
",",
"model_stats",
"=",
"(",
"p_realyes_outputyes",
",",
"p_realyes_outputno",
")",
",",
"event_length_s",
"=",
"0.25",
",",
"prob_raw_yes",
"=",
"p_yes_raw",
")",
"ret",
"=",
"[",
"]",
"for",
"tup",
"in",
"filtered",
":",
"t",
"=",
"(",
"'v'",
",",
"tup",
"[",
"1",
"]",
")",
"if",
"tup",
"[",
"0",
"]",
"==",
"'y'",
"else",
"(",
"'u'",
",",
"tup",
"[",
"1",
"]",
")",
"ret",
".",
"append",
"(",
"t",
")",
"return",
"ret"
] | Returns self as a list of tuples:
[('v', voiced segment), ('u', unvoiced segment), (etc.)]
The overall order of the AudioSegment is preserved.
:param prob_detect_voice: The raw probability that any random 20ms window of the audio file
contains voice.
:returns: The described list. | [
"Returns",
"self",
"as",
"a",
"list",
"of",
"tuples",
":",
"[",
"(",
"v",
"voiced",
"segment",
")",
"(",
"u",
"unvoiced",
"segment",
")",
"(",
"etc",
".",
")",
"]"
] | python | test |
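A hedged sketch of the raw per-frame check that the inner `model_class` wraps; it assumes the `webrtcvad` package is installed:

```python
# 20 ms of 16-bit mono silence at 8 kHz: 160 samples * 2 bytes each.
import webrtcvad

vad = webrtcvad.Vad(2)             # aggressiveness 0 (lenient) to 3 (strict)
frame = b"\x00\x00" * 160
print(vad.is_speech(frame, 8000))  # expected False for pure silence
```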
sxslex/capitalize-name | capitalize_name/__init__.py | https://github.com/sxslex/capitalize-name/blob/98f288a3cffaecdb8aaee5154e783ba46849bccd/capitalize_name/__init__.py#L86-L107 | def deep_unicode(s, encodings=None):
"""decode "DEEP" S using the codec registered for encoding."""
if encodings is None:
encodings = ['utf-8', 'latin-1']
if isinstance(s, (list, tuple)):
return [deep_unicode(i) for i in s]
if isinstance(s, dict):
return dict([
(deep_unicode(key), deep_unicode(s[key]))
for key in s
])
# in_dict = {}
# for key in s:
# in_dict[to_unicode(key)] = to_unicode(s[key])
# return in_dict
elif isinstance(s, str):
for encoding in encodings:
try:
return s.decode(encoding)
except:
pass
return s | [
"def",
"deep_unicode",
"(",
"s",
",",
"encodings",
"=",
"None",
")",
":",
"if",
"encodings",
"is",
"None",
":",
"encodings",
"=",
"[",
"'utf-8'",
",",
"'latin-1'",
"]",
"if",
"isinstance",
"(",
"s",
",",
"(",
"list",
",",
"tuple",
")",
")",
":",
"return",
"[",
"deep_unicode",
"(",
"i",
")",
"for",
"i",
"in",
"s",
"]",
"if",
"isinstance",
"(",
"s",
",",
"dict",
")",
":",
"return",
"dict",
"(",
"[",
"(",
"deep_unicode",
"(",
"key",
")",
",",
"deep_unicode",
"(",
"s",
"[",
"key",
"]",
")",
")",
"for",
"key",
"in",
"s",
"]",
")",
"# in_dict = {}",
"# for key in s:",
"# in_dict[to_unicode(key)] = to_unicode(s[key])",
"# return in_dict",
"elif",
"isinstance",
"(",
"s",
",",
"str",
")",
":",
"for",
"encoding",
"in",
"encodings",
":",
"try",
":",
"return",
"s",
".",
"decode",
"(",
"encoding",
")",
"except",
":",
"pass",
"return",
"s"
] | decode "DEEP" S using the codec registered for encoding. | [
"decode",
"DEEP",
"S",
"using",
"the",
"codec",
"registered",
"for",
"encoding",
"."
] | python | train |
google/grumpy | third_party/stdlib/re.py | https://github.com/google/grumpy/blob/3ec87959189cfcdeae82eb68a47648ac25ceb10b/third_party/stdlib/re.py#L211-L221 | def escape(pattern):
"Escape all non-alphanumeric characters in pattern."
s = list(pattern)
alphanum = _alphanum
for i, c in enumerate(pattern):
if c not in alphanum:
if c == "\000":
s[i] = "\\000"
else:
s[i] = "\\" + c
return pattern[:0].join(s) | [
"def",
"escape",
"(",
"pattern",
")",
":",
"s",
"=",
"list",
"(",
"pattern",
")",
"alphanum",
"=",
"_alphanum",
"for",
"i",
",",
"c",
"in",
"enumerate",
"(",
"pattern",
")",
":",
"if",
"c",
"not",
"in",
"alphanum",
":",
"if",
"c",
"==",
"\"\\000\"",
":",
"s",
"[",
"i",
"]",
"=",
"\"\\\\000\"",
"else",
":",
"s",
"[",
"i",
"]",
"=",
"\"\\\\\"",
"+",
"c",
"return",
"pattern",
"[",
":",
"0",
"]",
".",
"join",
"(",
"s",
")"
] | Escape all non-alphanumeric characters in pattern. | [
"Escape",
"all",
"non",
"-",
"alphanumeric",
"characters",
"in",
"pattern",
"."
] | python | valid |
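Grumpy's `escape` mirrors CPython 2's `re.escape`, so the stdlib version can sanity-check it (Python 3.7+ escapes fewer characters but agrees on this input):

```python
# Non-alphanumeric characters gain a backslash.
import re

print(re.escape("file.name*"))  # file\.name\*
```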
datajoint/datajoint-python | datajoint/expression.py | https://github.com/datajoint/datajoint-python/blob/4f29bb154a7ed2b8b64b4d3a9c8be4c16b39621c/datajoint/expression.py#L291-L339 | def restrict(self, restriction):
"""
In-place restriction. Restricts the result to a specified subset of the input.
rel.restrict(restriction) is equivalent to rel = rel & restriction or rel &= restriction
rel.restrict(Not(restriction)) is equivalent to rel = rel - restriction or rel -= restriction
The primary key of the result is unaffected.
Successive restrictions are combined as logical AND: r & a & b is equivalent to r & AndList((a, b))
Any QueryExpression, collection, or sequence other than an AndList are treated as OrLists
(logical disjunction of conditions)
Inverse restriction is accomplished by either using the subtraction operator or the Not class.
The expressions in each row are equivalent:
rel & True rel
rel & False the empty entity set
rel & 'TRUE' rel
rel & 'FALSE' the empty entity set
rel - cond rel & Not(cond)
rel - 'TRUE' rel & False
rel - 'FALSE' rel
rel & AndList((cond1,cond2)) rel & cond1 & cond2
rel & AndList() rel
rel & [cond1, cond2] rel & OrList((cond1, cond2))
rel & [] rel & False
rel & None rel & False
rel & any_empty_entity_set rel & False
rel - AndList((cond1,cond2)) rel & [Not(cond1), Not(cond2)]
rel - [cond1, cond2] rel & Not(cond1) & Not(cond2)
rel - AndList() rel & False
rel - [] rel
rel - None rel
rel - any_empty_entity_set rel
When arg is another QueryExpression, the restriction rel & arg restricts rel to elements that match at least
one element in arg (hence arg is treated as an OrList).
Conversely, rel - arg restricts rel to elements that do not match any elements in arg.
Two elements match when their common attributes have equal values or when they have no common attributes.
All shared attributes must be in the primary key of either rel or arg or both or an error will be raised.
QueryExpression.restrict is the only access point that modifies restrictions. All other operators must
ultimately call restrict()
:param restriction: a sequence or an array (treated as OR list), another QueryExpression, an SQL condition
string, or an AndList.
"""
assert is_true(restriction) or not self.heading.expressions or isinstance(self, GroupBy), \
"Cannot restrict a projection with renamed attributes in place."
self.restriction.append(restriction)
return self | [
"def",
"restrict",
"(",
"self",
",",
"restriction",
")",
":",
"assert",
"is_true",
"(",
"restriction",
")",
"or",
"not",
"self",
".",
"heading",
".",
"expressions",
"or",
"isinstance",
"(",
"self",
",",
"GroupBy",
")",
",",
"\"Cannot restrict a projection with renamed attributes in place.\"",
"self",
".",
"restriction",
".",
"append",
"(",
"restriction",
")",
"return",
"self"
] | In-place restriction. Restricts the result to a specified subset of the input.
rel.restrict(restriction) is equivalent to rel = rel & restriction or rel &= restriction
rel.restrict(Not(restriction)) is equivalent to rel = rel - restriction or rel -= restriction
The primary key of the result is unaffected.
Successive restrictions are combined as logical AND: r & a & b is equivalent to r & AndList((a, b))
Any QueryExpression, collection, or sequence other than an AndList are treated as OrLists
(logical disjunction of conditions)
Inverse restriction is accomplished by either using the subtraction operator or the Not class.
The expressions in each row equivalent:
rel & True rel
rel & False the empty entity set
rel & 'TRUE' rel
rel & 'FALSE' the empty entity set
rel - cond rel & Not(cond)
rel - 'TRUE' rel & False
rel - 'FALSE' rel
rel & AndList((cond1,cond2)) rel & cond1 & cond2
rel & AndList() rel
rel & [cond1, cond2] rel & OrList((cond1, cond2))
rel & [] rel & False
rel & None rel & False
rel & any_empty_entity_set rel & False
rel - AndList((cond1,cond2)) rel & [Not(cond1), Not(cond2)]
rel - [cond1, cond2] rel & Not(cond1) & Not(cond2)
rel - AndList() rel & False
rel - [] rel
rel - None rel
rel - any_empty_entity_set rel
When arg is another QueryExpression, the restriction rel & arg restricts rel to elements that match at least
one element in arg (hence arg is treated as an OrList).
Conversely, rel - arg restricts rel to elements that do not match any elements in arg.
Two elements match when their common attributes have equal values or when they have no common attributes.
All shared attributes must be in the primary key of either rel or arg or both or an error will be raised.
QueryExpression.restrict is the only access point that modifies restrictions. All other operators must
ultimately call restrict()
:param restriction: a sequence or an array (treated as OR list), another QueryExpression, an SQL condition
string, or an AndList. | [
"In",
"-",
"place",
"restriction",
".",
"Restricts",
"the",
"result",
"to",
"a",
"specified",
"subset",
"of",
"the",
"input",
".",
"rel",
".",
"restrict",
"(",
"restriction",
")",
"is",
"equivalent",
"to",
"rel",
"=",
"rel",
"&",
"restriction",
"or",
"rel",
"&",
"=",
"restriction",
"rel",
".",
"restrict",
"(",
"Not",
"(",
"restriction",
"))",
"is",
"equivalent",
"to",
"rel",
"=",
"rel",
"-",
"restriction",
"or",
"rel",
"-",
"=",
"restriction",
"The",
"primary",
"key",
"of",
"the",
"result",
"is",
"unaffected",
".",
"Successive",
"restrictions",
"are",
"combined",
"as",
"logical",
"AND",
":",
"r",
"&",
"a",
"&",
"b",
"is",
"equivalent",
"to",
"r",
"&",
"AndList",
"((",
"a",
"b",
"))",
"Any",
"QueryExpression",
"collection",
"or",
"sequence",
"other",
"than",
"an",
"AndList",
"are",
"treated",
"as",
"OrLists",
"(",
"logical",
"disjunction",
"of",
"conditions",
")",
"Inverse",
"restriction",
"is",
"accomplished",
"by",
"either",
"using",
"the",
"subtraction",
"operator",
"or",
"the",
"Not",
"class",
"."
] | python | train |
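A pure-Python analogue of the matching rule stated above ("two elements match when their common attributes have equal values or when they have no common attributes"); the dict rows and attribute names are illustrative only:

    def match(row_a, row_b):
        # Rows are attribute -> value dicts; vacuously True when no attributes are shared.
        common = set(row_a) & set(row_b)
        return all(row_a[k] == row_b[k] for k in common)

    rel = [{'subject': 1, 'session': 1}, {'subject': 2, 'session': 1}]
    arg = [{'subject': 1}]
    print([r for r in rel if any(match(r, a) for a in arg)])      # rel & arg -> [{'subject': 1, 'session': 1}]
    print([r for r in rel if not any(match(r, a) for a in arg)])  # rel - arg -> [{'subject': 2, 'session': 1}]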
Jajcus/pyxmpp2 | pyxmpp2/ext/legacyauth.py | https://github.com/Jajcus/pyxmpp2/blob/14a40a3950910a9cd008b55f0d8905aa0186ce18/pyxmpp2/ext/legacyauth.py#L143-L166 | def auth_in_stage1(self,stanza):
"""Handle the first stage (<iq type='get'/>) of legacy ("plain" or
"digest") authentication.
[server only]"""
self.lock.acquire()
try:
if "plain" not in self.auth_methods and "digest" not in self.auth_methods:
iq=stanza.make_error_response("not-allowed")
self.send(iq)
return
iq=stanza.make_result_response()
q=iq.new_query("jabber:iq:auth")
q.newChild(None,"username",None)
q.newChild(None,"resource",None)
if "plain" in self.auth_methods:
q.newChild(None,"password",None)
if "digest" in self.auth_methods:
q.newChild(None,"digest",None)
self.send(iq)
iq.free()
finally:
self.lock.release() | [
"def",
"auth_in_stage1",
"(",
"self",
",",
"stanza",
")",
":",
"self",
".",
"lock",
".",
"acquire",
"(",
")",
"try",
":",
"if",
"\"plain\"",
"not",
"in",
"self",
".",
"auth_methods",
"and",
"\"digest\"",
"not",
"in",
"self",
".",
"auth_methods",
":",
"iq",
"=",
"stanza",
".",
"make_error_response",
"(",
"\"not-allowed\"",
")",
"self",
".",
"send",
"(",
"iq",
")",
"return",
"iq",
"=",
"stanza",
".",
"make_result_response",
"(",
")",
"q",
"=",
"iq",
".",
"new_query",
"(",
"\"jabber:iq:auth\"",
")",
"q",
".",
"newChild",
"(",
"None",
",",
"\"username\"",
",",
"None",
")",
"q",
".",
"newChild",
"(",
"None",
",",
"\"resource\"",
",",
"None",
")",
"if",
"\"plain\"",
"in",
"self",
".",
"auth_methods",
":",
"q",
".",
"newChild",
"(",
"None",
",",
"\"password\"",
",",
"None",
")",
"if",
"\"digest\"",
"in",
"self",
".",
"auth_methods",
":",
"q",
".",
"newChild",
"(",
"None",
",",
"\"digest\"",
",",
"None",
")",
"self",
".",
"send",
"(",
"iq",
")",
"iq",
".",
"free",
"(",
")",
"finally",
":",
"self",
".",
"lock",
".",
"release",
"(",
")"
] | Handle the first stage (<iq type='get'/>) of legacy ("plain" or
"digest") authentication.
[server only] | [
"Handle",
"the",
"first",
"stage",
"(",
"<iq",
"type",
"=",
"get",
"/",
">",
")",
"of",
"legacy",
"(",
"plain",
"or",
"digest",
")",
"authentication",
"."
] | python | valid |
gbowerman/azurerm | azurerm/amsrp.py | https://github.com/gbowerman/azurerm/blob/79d40431d3b13f8a36aadbff5029888383d72674/azurerm/amsrp.py#L460-L478 | def create_sas_locator(access_token, asset_id, accesspolicy_id):
'''Create Media Service SAS Locator.
Args:
access_token (str): A valid Azure authentication token.
asset_id (str): Media Service Asset ID.
accesspolicy_id (str): Media Service Access Policy ID.
Returns:
HTTP response. JSON body.
'''
path = '/Locators'
endpoint = ''.join([ams_rest_endpoint, path])
body = '{ \
"AccessPolicyId":"' + accesspolicy_id + '", \
"AssetId":"' + asset_id + '", \
"Type":1 \
}'
return do_ams_post(endpoint, path, body, access_token) | [
"def",
"create_sas_locator",
"(",
"access_token",
",",
"asset_id",
",",
"accesspolicy_id",
")",
":",
"path",
"=",
"'/Locators'",
"endpoint",
"=",
"''",
".",
"join",
"(",
"[",
"ams_rest_endpoint",
",",
"path",
"]",
")",
"body",
"=",
"'{ \\\n\t\t\"AccessPolicyId\":\"'",
"+",
"accesspolicy_id",
"+",
"'\", \\\n\t\t\"AssetId\":\"'",
"+",
"asset_id",
"+",
"'\", \\\n\t\t\"Type\":1 \\\n\t}'",
"return",
"do_ams_post",
"(",
"endpoint",
",",
"path",
",",
"body",
",",
"access_token",
")"
] | Create Media Service SAS Locator.
Args:
access_token (str): A valid Azure authentication token.
asset_id (str): Media Service Asset ID.
accesspolicy_id (str): Media Service Access Policy ID.
Returns:
HTTP response. JSON body. | [
"Create",
"Media",
"Service",
"SAS",
"Locator",
"."
] | python | train |
saltstack/salt | salt/states/file.py | https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/states/file.py#L8038-L8313 | def cached(name,
source_hash='',
source_hash_name=None,
skip_verify=False,
saltenv='base'):
'''
.. versionadded:: 2017.7.3
Ensures that a file is saved to the minion's cache. This state is primarily
invoked by other states to ensure that we do not re-download a source file
if we do not need to.
name
The URL of the file to be cached. To cache a file from an environment
other than ``base``, either use the ``saltenv`` argument or include the
saltenv in the URL (e.g. ``salt://path/to/file.conf?saltenv=dev``).
.. note::
A list of URLs is not supported, this must be a single URL. If a
local file is passed here, then the state will obviously not try to
download anything, but it will compare a hash if one is specified.
source_hash
See the documentation for this same argument in the
:py:func:`file.managed <salt.states.file.managed>` state.
.. note::
For remote files not originating from the ``salt://`` fileserver,
such as http(s) or ftp servers, this state will not re-download the
file if the locally-cached copy matches this hash. This is done to
prevent unnecessary downloading on repeated runs of this state. To
update the cached copy of a file, it is necessary to update this
hash.
source_hash_name
See the documentation for this same argument in the
:py:func:`file.managed <salt.states.file.managed>` state.
skip_verify
See the documentation for this same argument in the
:py:func:`file.managed <salt.states.file.managed>` state.
.. note::
Setting this to ``True`` will result in a copy of the file being
downloaded from a remote (http(s), ftp, etc.) source each time the
state is run.
saltenv
Used to specify the environment from which to download a file from the
Salt fileserver (i.e. those with ``salt://`` URL).
This state will in most cases not be useful in SLS files, but it is useful
when writing a state or remote-execution module that needs to make sure
that a file at a given URL has been downloaded to the cachedir. One example
of this is in the :py:func:`archive.extracted <salt.states.archive.extracted>`
state:
.. code-block:: python
result = __states__['file.cached'](source_match,
source_hash=source_hash,
source_hash_name=source_hash_name,
skip_verify=skip_verify,
saltenv=__env__)
This will return a dictionary containing the state's return data, including
a ``result`` key which will state whether or not the state was successful.
Note that this will not catch exceptions, so it is best used within a
try/except.
Once this state has been run from within another state or remote-execution
module, the actual location of the cached file can be obtained using
:py:func:`cp.is_cached <salt.modules.cp.is_cached>`:
.. code-block:: python
cached = __salt__['cp.is_cached'](source_match, saltenv=__env__)
This function will return the cached path of the file, or an empty string
if the file is not present in the minion cache.
'''
ret = {'changes': {},
'comment': '',
'name': name,
'result': False}
try:
parsed = _urlparse(name)
except Exception:
ret['comment'] = 'Only URLs or local file paths are valid input'
return ret
# This if statement will keep the state from proceeding if a remote source
# is specified and no source_hash is presented (unless we're skipping hash
# verification).
if not skip_verify \
and not source_hash \
and parsed.scheme in salt.utils.files.REMOTE_PROTOS:
ret['comment'] = (
'Unable to verify upstream hash of source file {0}, please set '
'source_hash or set skip_verify to True'.format(
salt.utils.url.redact_http_basic_auth(name))
)
return ret
if source_hash:
# Get the hash and hash type from the input. This takes care of parsing
# the hash out of a file containing checksums, if that is how the
# source_hash was specified.
try:
source_sum = __salt__['file.get_source_sum'](
source=name,
source_hash=source_hash,
source_hash_name=source_hash_name,
saltenv=saltenv)
except CommandExecutionError as exc:
ret['comment'] = exc.strerror
return ret
else:
if not source_sum:
# We shouldn't get here, problems in retrieving the hash in
# file.get_source_sum should result in a CommandExecutionError
# being raised, which we catch above. Nevertheless, we should
# provide useful information in the event that
# file.get_source_sum regresses.
ret['comment'] = (
'Failed to get source hash from {0}. This may be a bug. '
'If this error persists, please report it and set '
'skip_verify to True to work around it.'.format(source_hash)
)
return ret
else:
source_sum = {}
if parsed.scheme in salt.utils.files.LOCAL_PROTOS:
# Source is a local file path
full_path = os.path.realpath(os.path.expanduser(parsed.path))
if os.path.exists(full_path):
if not skip_verify and source_sum:
# Enforce the hash
local_hash = __salt__['file.get_hash'](
full_path,
source_sum.get('hash_type', __opts__['hash_type']))
if local_hash == source_sum['hsum']:
ret['result'] = True
ret['comment'] = (
'File {0} is present on the minion and has hash '
'{1}'.format(full_path, local_hash)
)
else:
ret['comment'] = (
'File {0} is present on the minion, but the hash ({1}) '
'does not match the specified hash ({2})'.format(
full_path, local_hash, source_sum['hsum']
)
)
return ret
else:
ret['result'] = True
ret['comment'] = 'File {0} is present on the minion'.format(
full_path
)
return ret
else:
ret['comment'] = 'File {0} is not present on the minion'.format(
full_path
)
return ret
local_copy = __salt__['cp.is_cached'](name, saltenv=saltenv)
if local_copy:
# File is already cached
pre_hash = __salt__['file.get_hash'](
local_copy,
source_sum.get('hash_type', __opts__['hash_type']))
if not skip_verify and source_sum:
# Get the local copy's hash to compare with the hash that was
# specified via source_hash. If it matches, we can exit early from
# the state without going any further, because the file is cached
# with the correct hash.
if pre_hash == source_sum['hsum']:
ret['result'] = True
ret['comment'] = (
'File is already cached to {0} with hash {1}'.format(
local_copy, pre_hash
)
)
else:
pre_hash = None
def _try_cache(path, checksum):
'''
This helper is not needed anymore in develop as the fileclient in the
develop branch now has means of skipping a download if the existing
hash matches one passed to cp.cache_file. Remove this helper and the
code that invokes it, once we have merged forward into develop.
'''
if not path or not checksum:
return True
form = salt.utils.files.HASHES_REVMAP.get(len(checksum))
if form is None:
# Shouldn't happen, an invalid checksum length should be caught
# before we get here. But in the event this gets through, don't let
# it cause any trouble, and just return True.
return True
try:
return salt.utils.hashutils.get_hash(path, form=form) != checksum
except (IOError, OSError, ValueError):
# Again, shouldn't happen, but don't let invalid input/permissions
# in the call to get_hash blow this up.
return True
# Cache the file. Note that this will not actually download the file if
# either of the following is true:
# 1. source is a salt:// URL and the fileserver determines that the hash
# of the minion's copy matches that of the fileserver.
# 2. File is remote (http(s), ftp, etc.) and the specified source_hash
# matches the cached copy.
# Remote, non salt:// sources _will_ download if a copy of the file was
# not already present in the minion cache.
if _try_cache(local_copy, source_sum.get('hsum')):
# The _try_cache helper is obsolete in the develop branch. Once merged
# forward, remove the helper as well as this if statement, and dedent
# the below block.
try:
local_copy = __salt__['cp.cache_file'](
name,
saltenv=saltenv,
source_hash=source_sum.get('hsum'))
except Exception as exc:
ret['comment'] = salt.utils.url.redact_http_basic_auth(exc.__str__())
return ret
if not local_copy:
ret['comment'] = (
'Failed to cache {0}, check minion log for more '
'information'.format(
salt.utils.url.redact_http_basic_auth(name))
)
return ret
post_hash = __salt__['file.get_hash'](
local_copy,
source_sum.get('hash_type', __opts__['hash_type']))
if pre_hash != post_hash:
ret['changes']['hash'] = {'old': pre_hash, 'new': post_hash}
# Check the hash, if we're enforcing one. Note that this will be the first
# hash check if the file was not previously cached, and the 2nd hash check
# if it was cached and the
if not skip_verify and source_sum:
if post_hash == source_sum['hsum']:
ret['result'] = True
ret['comment'] = (
'File is already cached to {0} with hash {1}'.format(
local_copy, post_hash
)
)
else:
ret['comment'] = (
'File is cached to {0}, but the hash ({1}) does not match '
'the specified hash ({2})'.format(
local_copy, post_hash, source_sum['hsum']
)
)
return ret
# We're not enforcing a hash, and we already know that the file was
# successfully cached, so we know the state was successful.
ret['result'] = True
ret['comment'] = 'File is cached to {0}'.format(local_copy)
return ret | [
"def",
"cached",
"(",
"name",
",",
"source_hash",
"=",
"''",
",",
"source_hash_name",
"=",
"None",
",",
"skip_verify",
"=",
"False",
",",
"saltenv",
"=",
"'base'",
")",
":",
"ret",
"=",
"{",
"'changes'",
":",
"{",
"}",
",",
"'comment'",
":",
"''",
",",
"'name'",
":",
"name",
",",
"'result'",
":",
"False",
"}",
"try",
":",
"parsed",
"=",
"_urlparse",
"(",
"name",
")",
"except",
"Exception",
":",
"ret",
"[",
"'comment'",
"]",
"=",
"'Only URLs or local file paths are valid input'",
"return",
"ret",
"# This if statement will keep the state from proceeding if a remote source",
"# is specified and no source_hash is presented (unless we're skipping hash",
"# verification).",
"if",
"not",
"skip_verify",
"and",
"not",
"source_hash",
"and",
"parsed",
".",
"scheme",
"in",
"salt",
".",
"utils",
".",
"files",
".",
"REMOTE_PROTOS",
":",
"ret",
"[",
"'comment'",
"]",
"=",
"(",
"'Unable to verify upstream hash of source file {0}, please set '",
"'source_hash or set skip_verify to True'",
".",
"format",
"(",
"salt",
".",
"utils",
".",
"url",
".",
"redact_http_basic_auth",
"(",
"name",
")",
")",
")",
"return",
"ret",
"if",
"source_hash",
":",
"# Get the hash and hash type from the input. This takes care of parsing",
"# the hash out of a file containing checksums, if that is how the",
"# source_hash was specified.",
"try",
":",
"source_sum",
"=",
"__salt__",
"[",
"'file.get_source_sum'",
"]",
"(",
"source",
"=",
"name",
",",
"source_hash",
"=",
"source_hash",
",",
"source_hash_name",
"=",
"source_hash_name",
",",
"saltenv",
"=",
"saltenv",
")",
"except",
"CommandExecutionError",
"as",
"exc",
":",
"ret",
"[",
"'comment'",
"]",
"=",
"exc",
".",
"strerror",
"return",
"ret",
"else",
":",
"if",
"not",
"source_sum",
":",
"# We shouldn't get here, problems in retrieving the hash in",
"# file.get_source_sum should result in a CommandExecutionError",
"# being raised, which we catch above. Nevertheless, we should",
"# provide useful information in the event that",
"# file.get_source_sum regresses.",
"ret",
"[",
"'comment'",
"]",
"=",
"(",
"'Failed to get source hash from {0}. This may be a bug. '",
"'If this error persists, please report it and set '",
"'skip_verify to True to work around it.'",
".",
"format",
"(",
"source_hash",
")",
")",
"return",
"ret",
"else",
":",
"source_sum",
"=",
"{",
"}",
"if",
"parsed",
".",
"scheme",
"in",
"salt",
".",
"utils",
".",
"files",
".",
"LOCAL_PROTOS",
":",
"# Source is a local file path",
"full_path",
"=",
"os",
".",
"path",
".",
"realpath",
"(",
"os",
".",
"path",
".",
"expanduser",
"(",
"parsed",
".",
"path",
")",
")",
"if",
"os",
".",
"path",
".",
"exists",
"(",
"full_path",
")",
":",
"if",
"not",
"skip_verify",
"and",
"source_sum",
":",
"# Enforce the hash",
"local_hash",
"=",
"__salt__",
"[",
"'file.get_hash'",
"]",
"(",
"full_path",
",",
"source_sum",
".",
"get",
"(",
"'hash_type'",
",",
"__opts__",
"[",
"'hash_type'",
"]",
")",
")",
"if",
"local_hash",
"==",
"source_sum",
"[",
"'hsum'",
"]",
":",
"ret",
"[",
"'result'",
"]",
"=",
"True",
"ret",
"[",
"'comment'",
"]",
"=",
"(",
"'File {0} is present on the minion and has hash '",
"'{1}'",
".",
"format",
"(",
"full_path",
",",
"local_hash",
")",
")",
"else",
":",
"ret",
"[",
"'comment'",
"]",
"=",
"(",
"'File {0} is present on the minion, but the hash ({1}) '",
"'does not match the specified hash ({2})'",
".",
"format",
"(",
"full_path",
",",
"local_hash",
",",
"source_sum",
"[",
"'hsum'",
"]",
")",
")",
"return",
"ret",
"else",
":",
"ret",
"[",
"'result'",
"]",
"=",
"True",
"ret",
"[",
"'comment'",
"]",
"=",
"'File {0} is present on the minion'",
".",
"format",
"(",
"full_path",
")",
"return",
"ret",
"else",
":",
"ret",
"[",
"'comment'",
"]",
"=",
"'File {0} is not present on the minion'",
".",
"format",
"(",
"full_path",
")",
"return",
"ret",
"local_copy",
"=",
"__salt__",
"[",
"'cp.is_cached'",
"]",
"(",
"name",
",",
"saltenv",
"=",
"saltenv",
")",
"if",
"local_copy",
":",
"# File is already cached",
"pre_hash",
"=",
"__salt__",
"[",
"'file.get_hash'",
"]",
"(",
"local_copy",
",",
"source_sum",
".",
"get",
"(",
"'hash_type'",
",",
"__opts__",
"[",
"'hash_type'",
"]",
")",
")",
"if",
"not",
"skip_verify",
"and",
"source_sum",
":",
"# Get the local copy's hash to compare with the hash that was",
"# specified via source_hash. If it matches, we can exit early from",
"# the state without going any further, because the file is cached",
"# with the correct hash.",
"if",
"pre_hash",
"==",
"source_sum",
"[",
"'hsum'",
"]",
":",
"ret",
"[",
"'result'",
"]",
"=",
"True",
"ret",
"[",
"'comment'",
"]",
"=",
"(",
"'File is already cached to {0} with hash {1}'",
".",
"format",
"(",
"local_copy",
",",
"pre_hash",
")",
")",
"else",
":",
"pre_hash",
"=",
"None",
"def",
"_try_cache",
"(",
"path",
",",
"checksum",
")",
":",
"'''\n This helper is not needed anymore in develop as the fileclient in the\n develop branch now has means of skipping a download if the existing\n hash matches one passed to cp.cache_file. Remove this helper and the\n code that invokes it, once we have merged forward into develop.\n '''",
"if",
"not",
"path",
"or",
"not",
"checksum",
":",
"return",
"True",
"form",
"=",
"salt",
".",
"utils",
".",
"files",
".",
"HASHES_REVMAP",
".",
"get",
"(",
"len",
"(",
"checksum",
")",
")",
"if",
"form",
"is",
"None",
":",
"# Shouldn't happen, an invalid checksum length should be caught",
"# before we get here. But in the event this gets through, don't let",
"# it cause any trouble, and just return True.",
"return",
"True",
"try",
":",
"return",
"salt",
".",
"utils",
".",
"hashutils",
".",
"get_hash",
"(",
"path",
",",
"form",
"=",
"form",
")",
"!=",
"checksum",
"except",
"(",
"IOError",
",",
"OSError",
",",
"ValueError",
")",
":",
"# Again, shouldn't happen, but don't let invalid input/permissions",
"# in the call to get_hash blow this up.",
"return",
"True",
"# Cache the file. Note that this will not actually download the file if",
"# either of the following is true:",
"# 1. source is a salt:// URL and the fileserver determines that the hash",
"# of the minion's copy matches that of the fileserver.",
"# 2. File is remote (http(s), ftp, etc.) and the specified source_hash",
"# matches the cached copy.",
"# Remote, non salt:// sources _will_ download if a copy of the file was",
"# not already present in the minion cache.",
"if",
"_try_cache",
"(",
"local_copy",
",",
"source_sum",
".",
"get",
"(",
"'hsum'",
")",
")",
":",
"# The _try_cache helper is obsolete in the develop branch. Once merged",
"# forward, remove the helper as well as this if statement, and dedent",
"# the below block.",
"try",
":",
"local_copy",
"=",
"__salt__",
"[",
"'cp.cache_file'",
"]",
"(",
"name",
",",
"saltenv",
"=",
"saltenv",
",",
"source_hash",
"=",
"source_sum",
".",
"get",
"(",
"'hsum'",
")",
")",
"except",
"Exception",
"as",
"exc",
":",
"ret",
"[",
"'comment'",
"]",
"=",
"salt",
".",
"utils",
".",
"url",
".",
"redact_http_basic_auth",
"(",
"exc",
".",
"__str__",
"(",
")",
")",
"return",
"ret",
"if",
"not",
"local_copy",
":",
"ret",
"[",
"'comment'",
"]",
"=",
"(",
"'Failed to cache {0}, check minion log for more '",
"'information'",
".",
"format",
"(",
"salt",
".",
"utils",
".",
"url",
".",
"redact_http_basic_auth",
"(",
"name",
")",
")",
")",
"return",
"ret",
"post_hash",
"=",
"__salt__",
"[",
"'file.get_hash'",
"]",
"(",
"local_copy",
",",
"source_sum",
".",
"get",
"(",
"'hash_type'",
",",
"__opts__",
"[",
"'hash_type'",
"]",
")",
")",
"if",
"pre_hash",
"!=",
"post_hash",
":",
"ret",
"[",
"'changes'",
"]",
"[",
"'hash'",
"]",
"=",
"{",
"'old'",
":",
"pre_hash",
",",
"'new'",
":",
"post_hash",
"}",
"# Check the hash, if we're enforcing one. Note that this will be the first",
"# hash check if the file was not previously cached, and the 2nd hash check",
"# if it was cached and the",
"if",
"not",
"skip_verify",
"and",
"source_sum",
":",
"if",
"post_hash",
"==",
"source_sum",
"[",
"'hsum'",
"]",
":",
"ret",
"[",
"'result'",
"]",
"=",
"True",
"ret",
"[",
"'comment'",
"]",
"=",
"(",
"'File is already cached to {0} with hash {1}'",
".",
"format",
"(",
"local_copy",
",",
"post_hash",
")",
")",
"else",
":",
"ret",
"[",
"'comment'",
"]",
"=",
"(",
"'File is cached to {0}, but the hash ({1}) does not match '",
"'the specified hash ({2})'",
".",
"format",
"(",
"local_copy",
",",
"post_hash",
",",
"source_sum",
"[",
"'hsum'",
"]",
")",
")",
"return",
"ret",
"# We're not enforcing a hash, and we already know that the file was",
"# successfully cached, so we know the state was successful.",
"ret",
"[",
"'result'",
"]",
"=",
"True",
"ret",
"[",
"'comment'",
"]",
"=",
"'File is cached to {0}'",
".",
"format",
"(",
"local_copy",
")",
"return",
"ret"
] | .. versionadded:: 2017.7.3
Ensures that a file is saved to the minion's cache. This state is primarily
invoked by other states to ensure that we do not re-download a source file
if we do not need to.
name
The URL of the file to be cached. To cache a file from an environment
other than ``base``, either use the ``saltenv`` argument or include the
saltenv in the URL (e.g. ``salt://path/to/file.conf?saltenv=dev``).
.. note::
A list of URLs is not supported, this must be a single URL. If a
local file is passed here, then the state will obviously not try to
download anything, but it will compare a hash if one is specified.
source_hash
See the documentation for this same argument in the
:py:func:`file.managed <salt.states.file.managed>` state.
.. note::
For remote files not originating from the ``salt://`` fileserver,
such as http(s) or ftp servers, this state will not re-download the
file if the locally-cached copy matches this hash. This is done to
prevent unnecessary downloading on repeated runs of this state. To
update the cached copy of a file, it is necessary to update this
hash.
source_hash_name
See the documentation for this same argument in the
:py:func:`file.managed <salt.states.file.managed>` state.
skip_verify
See the documentation for this same argument in the
:py:func:`file.managed <salt.states.file.managed>` state.
.. note::
Setting this to ``True`` will result in a copy of the file being
downloaded from a remote (http(s), ftp, etc.) source each time the
state is run.
saltenv
Used to specify the environment from which to download a file from the
Salt fileserver (i.e. those with ``salt://`` URL).
This state will in most cases not be useful in SLS files, but it is useful
when writing a state or remote-execution module that needs to make sure
that a file at a given URL has been downloaded to the cachedir. One example
of this is in the :py:func:`archive.extracted <salt.states.archive.extracted>`
state:
.. code-block:: python
result = __states__['file.cached'](source_match,
source_hash=source_hash,
source_hash_name=source_hash_name,
skip_verify=skip_verify,
saltenv=__env__)
This will return a dictionary containing the state's return data, including
a ``result`` key which will state whether or not the state was successful.
Note that this will not catch exceptions, so it is best used within a
try/except.
Once this state has been run from within another state or remote-execution
module, the actual location of the cached file can be obtained using
:py:func:`cp.is_cached <salt.modules.cp.is_cached>`:
.. code-block:: python
cached = __salt__['cp.is_cached'](source_match, saltenv=__env__)
This function will return the cached path of the file, or an empty string
if the file is not present in the minion cache. | [
"..",
"versionadded",
"::",
"2017",
".",
"7",
".",
"3"
] | python | train |
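The record above keys `salt.utils.files.HASHES_REVMAP` by digest length to recover the hash algorithm; a minimal standalone sketch of that trick (the map shown is an assumed subset, not the full Salt table):

    import hashlib

    HASHES_REVMAP = {32: 'md5', 40: 'sha1', 64: 'sha256'}  # hex digest length -> algorithm name

    def needs_download(path, checksum):
        # Mirror _try_cache: unknown digest lengths or read errors mean "re-download".
        form = HASHES_REVMAP.get(len(checksum))
        if form is None:
            return True
        try:
            with open(path, 'rb') as fh:
                return getattr(hashlib, form)(fh.read()).hexdigest() != checksum
        except (IOError, OSError, ValueError):
            return True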
Legobot/Legobot | Legobot/Connectors/Discord.py | https://github.com/Legobot/Legobot/blob/d13da172960a149681cb5151ce34b2f3a58ad32b/Legobot/Connectors/Discord.py#L194-L219 | def _parse_metadata(self, message):
"""
Sets metadata in Legobot message
Args:
message (dict): Full message from Discord websocket connection
Returns:
Legobot.Metadata
"""
metadata = Metadata(source=self.actor_urn).__dict__
if 'author' in message['d']:
metadata['source_user'] = message['d']['author']['username']
else:
metadata['source_user'] = None
if 'channel_id' in message['d']:
metadata['source_channel'] = message['d']['channel_id']
else:
metadata['source_channel'] = None
metadata['user_id'] = metadata['source_user']
metadata['display_name'] = metadata['source_user']
metadata['source_connector'] = 'discord'
return metadata | [
"def",
"_parse_metadata",
"(",
"self",
",",
"message",
")",
":",
"metadata",
"=",
"Metadata",
"(",
"source",
"=",
"self",
".",
"actor_urn",
")",
".",
"__dict__",
"if",
"'author'",
"in",
"message",
"[",
"'d'",
"]",
":",
"metadata",
"[",
"'source_user'",
"]",
"=",
"message",
"[",
"'d'",
"]",
"[",
"'author'",
"]",
"[",
"'username'",
"]",
"else",
":",
"metadata",
"[",
"'source_user'",
"]",
"=",
"None",
"if",
"'channel_id'",
"in",
"message",
"[",
"'d'",
"]",
":",
"metadata",
"[",
"'source_channel'",
"]",
"=",
"message",
"[",
"'d'",
"]",
"[",
"'channel_id'",
"]",
"else",
":",
"metadata",
"[",
"'source_channel'",
"]",
"=",
"None",
"metadata",
"[",
"'user_id'",
"]",
"=",
"metadata",
"[",
"'source_user'",
"]",
"metadata",
"[",
"'display_name'",
"]",
"=",
"metadata",
"[",
"'source_user'",
"]",
"metadata",
"[",
"'source_connector'",
"]",
"=",
"'discord'",
"return",
"metadata"
] | Sets metadata in Legobot message
Args:
message (dict): Full message from Discord websocket connection"
Returns:
Legobot.Metadata | [
"Sets",
"metadata",
"in",
"Legobot",
"message"
] | python | train |
xtuml/pyxtuml | xtuml/persist.py | https://github.com/xtuml/pyxtuml/blob/7dd9343b9a0191d1db1887ab9288d0a026608d9a/xtuml/persist.py#L211-L223 | def persist_unique_identifiers(metamodel, path, mode='w'):
'''
Persist all unique identifiers in a *metamodel* by serializing them and
saving to a *path* on disk.
'''
with open(path, mode) as f:
for metaclass in metamodel.metaclasses.values():
for index_name, attribute_names in metaclass.indices.items():
attribute_names = ', '.join(attribute_names)
s = 'CREATE UNIQUE INDEX %s ON %s (%s);\n' % (index_name,
metaclass.kind,
attribute_names)
f.write(s) | [
"def",
"persist_unique_identifiers",
"(",
"metamodel",
",",
"path",
",",
"mode",
"=",
"'w'",
")",
":",
"with",
"open",
"(",
"path",
",",
"mode",
")",
"as",
"f",
":",
"for",
"metaclass",
"in",
"metamodel",
".",
"metaclasses",
".",
"values",
"(",
")",
":",
"for",
"index_name",
",",
"attribute_names",
"in",
"metaclass",
".",
"indices",
".",
"items",
"(",
")",
":",
"attribute_names",
"=",
"', '",
".",
"join",
"(",
"attribute_names",
")",
"s",
"=",
"'CREATE UNIQUE INDEX %s ON %s (%s);\\n'",
"%",
"(",
"index_name",
",",
"metaclass",
".",
"kind",
",",
"attribute_names",
")",
"f",
".",
"write",
"(",
"s",
")"
] | Persist all unique identifiers in a *metamodel* by serializing them and
saving to a *path* on disk. | [
"Persist",
"all",
"unique",
"identifiers",
"in",
"a",
"*",
"metamodel",
"*",
"by",
"serializing",
"them",
"and",
"saving",
"to",
"a",
"*",
"path",
"*",
"on",
"disk",
"."
] | python | test |
log2timeline/plaso | docs/conf.py | https://github.com/log2timeline/plaso/blob/9c564698d2da3ffbe23607a3c54c0582ea18a6cc/docs/conf.py#L388-L407 | def find_and_replace(self, node):
"""Parses URIs containing .md and replaces them with their HTML page.
Args:
node(node): docutils node.
Returns:
node: docutils node.
"""
if isinstance(node, nodes.reference) and 'refuri' in node:
reference_uri = node['refuri']
if reference_uri.endswith('.md') and not reference_uri.startswith('http'):
reference_uri = reference_uri[:-3] + '.html'
node['refuri'] = reference_uri
else:
match = self.ANCHOR_REGEX.match(reference_uri)
if match:
node['refuri'] = '{0:s}.html#{1:s}'.format(
match.group('uri'), match.group('anchor'))
return node | [
"def",
"find_and_replace",
"(",
"self",
",",
"node",
")",
":",
"if",
"isinstance",
"(",
"node",
",",
"nodes",
".",
"reference",
")",
"and",
"'refuri'",
"in",
"node",
":",
"reference_uri",
"=",
"node",
"[",
"'refuri'",
"]",
"if",
"reference_uri",
".",
"endswith",
"(",
"'.md'",
")",
"and",
"not",
"reference_uri",
".",
"startswith",
"(",
"'http'",
")",
":",
"reference_uri",
"=",
"reference_uri",
"[",
":",
"-",
"3",
"]",
"+",
"'.html'",
"node",
"[",
"'refuri'",
"]",
"=",
"reference_uri",
"else",
":",
"match",
"=",
"self",
".",
"ANCHOR_REGEX",
".",
"match",
"(",
"reference_uri",
")",
"if",
"match",
":",
"node",
"[",
"'refuri'",
"]",
"=",
"'{0:s}.html#{1:s}'",
".",
"format",
"(",
"match",
".",
"group",
"(",
"'uri'",
")",
",",
"match",
".",
"group",
"(",
"'anchor'",
")",
")",
"return",
"node"
] | Parses URIs containing .md and replaces them with their HTML page.
Args:
node(node): docutils node.
Returns:
node: docutils node. | [
"Parses",
"URIs",
"containing",
".",
"md",
"and",
"replaces",
"them",
"with",
"their",
"HTML",
"page",
"."
] | python | train |
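A minimal sketch of the URI rewrite performed by find_and_replace, using plain docutils nodes; the anchor regex here is an assumption, since the record only shows `self.ANCHOR_REGEX` being used:

    import re
    from docutils import nodes

    ANCHOR_REGEX = re.compile(r'(?P<uri>.+)\.md#(?P<anchor>.+)')  # assumed shape of the class attribute

    def rewrite(node):
        # .md links become .html; anchored .md links keep their fragment.
        if isinstance(node, nodes.reference) and 'refuri' in node:
            refuri = node['refuri']
            if refuri.endswith('.md') and not refuri.startswith('http'):
                node['refuri'] = refuri[:-3] + '.html'
            else:
                match = ANCHOR_REGEX.match(refuri)
                if match:
                    node['refuri'] = '{0:s}.html#{1:s}'.format(match.group('uri'), match.group('anchor'))
        return node

    link = nodes.reference('', 'User guide', refuri='Users-Guide.md')
    print(rewrite(link)['refuri'])  # Users-Guide.html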
lesscpy/lesscpy | lesscpy/lessc/color.py | https://github.com/lesscpy/lesscpy/blob/51e392fb4a3cd4ccfb6175e0e42ce7d2f6b78126/lesscpy/lessc/color.py#L148-L163 | def hsl(self, *args):
""" Translate hsl(...) to color string
raises:
ValueError
returns:
str
"""
if len(args) == 4:
return self.hsla(*args)
elif len(args) == 3:
h, s, l = args
rgb = colorsys.hls_to_rgb(
int(h) / 360.0, utility.pc_or_float(l), utility.pc_or_float(s))
color = (utility.convergent_round(c * 255) for c in rgb)
return self._rgbatohex(color)
raise ValueError('Illegal color values') | [
"def",
"hsl",
"(",
"self",
",",
"*",
"args",
")",
":",
"if",
"len",
"(",
"args",
")",
"==",
"4",
":",
"return",
"self",
".",
"hsla",
"(",
"*",
"args",
")",
"elif",
"len",
"(",
"args",
")",
"==",
"3",
":",
"h",
",",
"s",
",",
"l",
"=",
"args",
"rgb",
"=",
"colorsys",
".",
"hls_to_rgb",
"(",
"int",
"(",
"h",
")",
"/",
"360.0",
",",
"utility",
".",
"pc_or_float",
"(",
"l",
")",
",",
"utility",
".",
"pc_or_float",
"(",
"s",
")",
")",
"color",
"=",
"(",
"utility",
".",
"convergent_round",
"(",
"c",
"*",
"255",
")",
"for",
"c",
"in",
"rgb",
")",
"return",
"self",
".",
"_rgbatohex",
"(",
"color",
")",
"raise",
"ValueError",
"(",
"'Illegal color values'",
")"
] | Translate hsl(...) to color string
raises:
ValueError
returns:
str | [
"Translate",
"hsl",
"(",
"...",
")",
"to",
"color",
"string",
"raises",
":",
"ValueError",
"returns",
":",
"str"
] | python | valid |
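A standalone sketch of the same hue/saturation/lightness conversion, assuming percentage arguments have already been normalized to 0..1 floats the way `utility.pc_or_float` does (built-in `round` stands in for `utility.convergent_round`):

    import colorsys

    def hsl_to_hex(h, s, l):
        # colorsys takes (hue, lightness, saturation), each in 0..1
        rgb = colorsys.hls_to_rgb(int(h) / 360.0, l, s)
        return '#%02x%02x%02x' % tuple(round(c * 255) for c in rgb)

    print(hsl_to_hex(120, 0.5, 0.5))  # #40bf40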
mitsei/dlkit | dlkit/handcar/repository/managers.py | https://github.com/mitsei/dlkit/blob/445f968a175d61c8d92c0f617a3c17dc1dc7c584/dlkit/handcar/repository/managers.py#L1441-L1464 | def get_composition_admin_session(self):
"""Gets a composition administration session for creating, updating
and deleting compositions.
return: (osid.repository.CompositionAdminSession) - a
CompositionAdminSession
raise: OperationFailed - unable to complete request
raise: Unimplemented - supports_composition_admin() is false
compliance: optional - This method must be implemented if
supports_composition_admin() is true.
"""
if not self.supports_composition_admin():
raise Unimplemented()
try:
from . import sessions
except ImportError:
raise # OperationFailed()
try:
session = sessions.CompositionAdminSession(proxy=self._proxy,
runtime=self._runtime)
except AttributeError:
raise # OperationFailed()
return session | [
"def",
"get_composition_admin_session",
"(",
"self",
")",
":",
"if",
"not",
"self",
".",
"supports_composition_admin",
"(",
")",
":",
"raise",
"Unimplemented",
"(",
")",
"try",
":",
"from",
".",
"import",
"sessions",
"except",
"ImportError",
":",
"raise",
"# OperationFailed()",
"try",
":",
"session",
"=",
"sessions",
".",
"CompositionAdminSession",
"(",
"proxy",
"=",
"self",
".",
"_proxy",
",",
"runtime",
"=",
"self",
".",
"_runtime",
")",
"except",
"AttributeError",
":",
"raise",
"# OperationFailed()",
"return",
"session"
] | Gets a composition administration session for creating, updating
and deleting compositions.
return: (osid.repository.CompositionAdminSession) - a
CompositionAdminSession
raise: OperationFailed - unable to complete request
raise: Unimplemented - supports_composition_admin() is false
compliance: optional - This method must be implemented if
supports_composition_admin() is true. | [
"Gets",
"a",
"composition",
"administration",
"session",
"for",
"creating",
"updating",
"and",
"deleting",
"compositions",
"."
] | python | train |
shexSpec/grammar | parsers/python/pyshexc/parser_impl/shex_shape_expression_parser.py | https://github.com/shexSpec/grammar/blob/4497cd1f73fa6703bca6e2cb53ba9c120f22e48c/parsers/python/pyshexc/parser_impl/shex_shape_expression_parser.py#L79-L88 | def visitInlineShapeAnd(self, ctx: ShExDocParser.InlineShapeAndContext):
""" inlineShapeAnd: inlineShapeNot (KW_AND inlineShapeNot)* """
if len(ctx.inlineShapeNot()) > 1:
self.expr = ShapeAnd(id=self.label, shapeExprs=[])
for sa in ctx.inlineShapeNot():
sep = ShexShapeExpressionParser(self.context)
sep.visit(sa)
self._and_collapser(self.expr, sep.expr)
else:
self.visit(ctx.inlineShapeNot(0)) | [
"def",
"visitInlineShapeAnd",
"(",
"self",
",",
"ctx",
":",
"ShExDocParser",
".",
"InlineShapeAndContext",
")",
":",
"if",
"len",
"(",
"ctx",
".",
"inlineShapeNot",
"(",
")",
")",
">",
"1",
":",
"self",
".",
"expr",
"=",
"ShapeAnd",
"(",
"id",
"=",
"self",
".",
"label",
",",
"shapeExprs",
"=",
"[",
"]",
")",
"for",
"sa",
"in",
"ctx",
".",
"inlineShapeNot",
"(",
")",
":",
"sep",
"=",
"ShexShapeExpressionParser",
"(",
"self",
".",
"context",
")",
"sep",
".",
"visit",
"(",
"sa",
")",
"self",
".",
"_and_collapser",
"(",
"self",
".",
"expr",
",",
"sep",
".",
"expr",
")",
"else",
":",
"self",
".",
"visit",
"(",
"ctx",
".",
"inlineShapeNot",
"(",
"0",
")",
")"
] | inlineShapeAnd: inlineShapeNot (KW_AND inlineShapeNot)* | [
"inlineShapeAnd",
":",
"inlineShapeNot",
"(",
"KW_AND",
"inlineShapeNot",
")",
"*"
] | python | train |
sernst/cauldron | cauldron/runner/__init__.py | https://github.com/sernst/cauldron/blob/4086aec9c038c402ea212c79fe8bd0d27104f9cf/cauldron/runner/__init__.py#L82-L123 | def reload_libraries(library_directories: list = None):
"""
Reload the libraries stored in the project's local and shared library
directories
"""
directories = library_directories or []
project = cauldron.project.get_internal_project()
if project:
directories += project.library_directories
if not directories:
return
def reload_module(path: str, library_directory: str):
path = os.path.dirname(path) if path.endswith('__init__.py') else path
start_index = len(library_directory) + 1
end_index = -3 if path.endswith('.py') else None
package_path = path[start_index:end_index]
module = sys.modules.get(package_path.replace(os.sep, '.'))
return importlib.reload(module) if module is not None else None
def reload_library(directory: str) -> list:
if not add_library_path(directory):
# If the library wasn't added because it doesn't exist, remove it
# in case the directory has recently been deleted and then return
# an empty result
remove_library_path(directory)
return []
glob_path = os.path.join(directory, '**', '*.py')
return [
reload_module(path, directory)
for path in glob.glob(glob_path, recursive=True)
]
return [
reloaded_module
for directory in directories
for reloaded_module in reload_library(directory)
if reloaded_module is not None
] | [
"def",
"reload_libraries",
"(",
"library_directories",
":",
"list",
"=",
"None",
")",
":",
"directories",
"=",
"library_directories",
"or",
"[",
"]",
"project",
"=",
"cauldron",
".",
"project",
".",
"get_internal_project",
"(",
")",
"if",
"project",
":",
"directories",
"+=",
"project",
".",
"library_directories",
"if",
"not",
"directories",
":",
"return",
"def",
"reload_module",
"(",
"path",
":",
"str",
",",
"library_directory",
":",
"str",
")",
":",
"path",
"=",
"os",
".",
"path",
".",
"dirname",
"(",
"path",
")",
"if",
"path",
".",
"endswith",
"(",
"'__init__.py'",
")",
"else",
"path",
"start_index",
"=",
"len",
"(",
"library_directory",
")",
"+",
"1",
"end_index",
"=",
"-",
"3",
"if",
"path",
".",
"endswith",
"(",
"'.py'",
")",
"else",
"None",
"package_path",
"=",
"path",
"[",
"start_index",
":",
"end_index",
"]",
"module",
"=",
"sys",
".",
"modules",
".",
"get",
"(",
"package_path",
".",
"replace",
"(",
"os",
".",
"sep",
",",
"'.'",
")",
")",
"return",
"importlib",
".",
"reload",
"(",
"module",
")",
"if",
"module",
"is",
"not",
"None",
"else",
"None",
"def",
"reload_library",
"(",
"directory",
":",
"str",
")",
"->",
"list",
":",
"if",
"not",
"add_library_path",
"(",
"directory",
")",
":",
"# If the library wasn't added because it doesn't exist, remove it",
"# in case the directory has recently been deleted and then return",
"# an empty result",
"remove_library_path",
"(",
"directory",
")",
"return",
"[",
"]",
"glob_path",
"=",
"os",
".",
"path",
".",
"join",
"(",
"directory",
",",
"'**'",
",",
"'*.py'",
")",
"return",
"[",
"reload_module",
"(",
"path",
",",
"directory",
")",
"for",
"path",
"in",
"glob",
".",
"glob",
"(",
"glob_path",
",",
"recursive",
"=",
"True",
")",
"]",
"return",
"[",
"reloaded_module",
"for",
"directory",
"in",
"directories",
"for",
"reloaded_module",
"in",
"reload_library",
"(",
"directory",
")",
"if",
"reload_module",
"is",
"not",
"None",
"]"
] | Reload the libraries stored in the project's local and shared library
directories | [
"Reload",
"the",
"libraries",
"stored",
"in",
"the",
"project",
"s",
"local",
"and",
"shared",
"library",
"directories"
] | python | train |
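The path-to-module-name translation inside `reload_module` above, isolated into a runnable sketch:

    import os

    def module_name_for(path, library_directory):
        # '/libs/pkg/__init__.py' -> 'pkg'; '/libs/pkg/mod.py' -> 'pkg.mod'
        if path.endswith('__init__.py'):
            path = os.path.dirname(path)
        package_path = path[len(library_directory) + 1:]
        if package_path.endswith('.py'):
            package_path = package_path[:-3]
        return package_path.replace(os.sep, '.')

    print(module_name_for('/libs/pkg/mod.py', '/libs'))       # pkg.mod
    print(module_name_for('/libs/pkg/__init__.py', '/libs'))  # pkg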
sdispater/orator | orator/orm/mixins/soft_deletes.py | https://github.com/sdispater/orator/blob/bd90bf198ee897751848f9a92e49d18e60a74136/orator/orm/mixins/soft_deletes.py#L92-L106 | def only_trashed(cls):
"""
Get a new query builder that only includes soft deletes
:type cls: orator.orm.model.Model
:rtype: orator.orm.builder.Builder
"""
instance = cls()
column = instance.get_qualified_deleted_at_column()
return instance.new_query_without_scope(SoftDeletingScope()).where_not_null(
column
) | [
"def",
"only_trashed",
"(",
"cls",
")",
":",
"instance",
"=",
"cls",
"(",
")",
"column",
"=",
"instance",
".",
"get_qualified_deleted_at_column",
"(",
")",
"return",
"instance",
".",
"new_query_without_scope",
"(",
"SoftDeletingScope",
"(",
")",
")",
".",
"where_not_null",
"(",
"column",
")"
] | Get a new query builder that only includes soft deletes
:type cls: orator.orm.model.Model
:rtype: orator.orm.builder.Builder | [
"Get",
"a",
"new",
"query",
"builder",
"that",
"only",
"includes",
"soft",
"deletes"
] | python | train |
DarkEnergySurvey/ugali | ugali/scratch/simulation/simulate_population.py | https://github.com/DarkEnergySurvey/ugali/blob/21e890b4117fc810afb6fb058e8055d564f03382/ugali/scratch/simulation/simulate_population.py#L84-L99 | def meanFracdet(map_fracdet, lon_population, lat_population, radius_population):
"""
Compute the mean fracdet within circular aperture (radius specified in decimal degrees).
lon, lat, and radius are taken to be arrays of the same length.
"""
nside_fracdet = healpy.npix2nside(len(map_fracdet))
map_fracdet_zero = np.where(map_fracdet >= 0., map_fracdet, 0.)
fracdet_population = np.empty(len(lon_population))
for ii in range(0, len(lon_population)):
fracdet_population[ii] = np.mean(map_fracdet_zero[ugali.utils.healpix.ang2disc(nside_fracdet,
lon_population[ii],
lat_population[ii],
radius_population if np.isscalar(radius_population) else radius_population[ii],
inclusive=True)])
return fracdet_population | [
"def",
"meanFracdet",
"(",
"map_fracdet",
",",
"lon_population",
",",
"lat_population",
",",
"radius_population",
")",
":",
"nside_fracdet",
"=",
"healpy",
".",
"npix2nside",
"(",
"len",
"(",
"map_fracdet",
")",
")",
"map_fracdet_zero",
"=",
"np",
".",
"where",
"(",
"map_fracdet",
">=",
"0.",
",",
"map_fracdet",
",",
"0.",
")",
"fracdet_population",
"=",
"np",
".",
"empty",
"(",
"len",
"(",
"lon_population",
")",
")",
"for",
"ii",
"in",
"range",
"(",
"0",
",",
"len",
"(",
"lon_population",
")",
")",
":",
"fracdet_population",
"[",
"ii",
"]",
"=",
"np",
".",
"mean",
"(",
"map_fracdet_zero",
"[",
"ugali",
".",
"utils",
".",
"healpix",
".",
"ang2disc",
"(",
"nside_fracdet",
",",
"lon_population",
"[",
"ii",
"]",
",",
"lat_population",
"[",
"ii",
"]",
",",
"radius_population",
"if",
"np",
".",
"isscalar",
"(",
"radius_population",
")",
"else",
"radius_population",
"[",
"ii",
"]",
",",
"inclusive",
"=",
"True",
")",
"]",
")",
"return",
"fracdet_population"
Compute the mean fracdet within circular aperture (radius specified in decimal degrees).
lon, lat, and radius are taken to be arrays of the same length.
"Compute",
"the",
"mean",
"fracdet",
"within",
"circular",
"aperture",
"(",
"radius",
"specified",
"in",
"decimal",
"degrees",
")"
] | python | train |
keenlabs/KeenClient-Python | keen/Padding.py | https://github.com/keenlabs/KeenClient-Python/blob/266387c3376d1e000d117e17c45045ae3439d43f/keen/Padding.py#L194-L205 | def removeSpacePadding(str, blocksize=AES_blocksize):
'Remove padding with spaces'
pad_len = 0
for char in str[::-1]: # str[::-1] reverses string
if char == ' ':
pad_len += 1
else:
break
str = str[:-pad_len]
return str | [
"def",
"removeSpacePadding",
"(",
"str",
",",
"blocksize",
"=",
"AES_blocksize",
")",
":",
"pad_len",
"=",
"0",
"for",
"char",
"in",
"str",
"[",
":",
":",
"-",
"1",
"]",
":",
"# str[::-1] reverses string",
"if",
"char",
"==",
"' '",
":",
"pad_len",
"+=",
"1",
"else",
":",
"break",
"str",
"=",
"str",
"[",
":",
"-",
"pad_len",
"]",
"return",
"str"
] | Remove padding with spaces | [
"Remove",
"padding",
"with",
"spaces"
] | python | train |
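A defensive variant of the space-unpadding above; note the original's `str[:-pad_len]` slice empties the whole string when `pad_len` is 0 (input with no trailing spaces), which the length arithmetic below avoids:

    def remove_space_padding(data):
        # Count trailing spaces, then strip exactly that many characters.
        pad_len = len(data) - len(data.rstrip(' '))
        return data[:len(data) - pad_len]

    print(repr(remove_space_padding('secret  ')))  # 'secret'
    print(repr(remove_space_padding('secret')))    # 'secret' (unchanged)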
AndrewAnnex/SpiceyPy | spiceypy/spiceypy.py | https://github.com/AndrewAnnex/SpiceyPy/blob/fc20a9b9de68b58eed5b332f0c051fb343a6e335/spiceypy/spiceypy.py#L15019-L15040 | def vrotv(v, axis, theta):
"""
Rotate a vector about a specified axis vector by a
specified angle and return the rotated vector.
http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/vrotv_c.html
:param v: Vector to be rotated.
:type v: 3-Element Array of floats
:param axis: Axis of the rotation.
:type axis: 3-Element Array of floats
:param theta: Angle of rotation (radians).
:type theta: float
:return: Result of rotating v about axis by theta
:rtype: 3-Element Array of floats
"""
v = stypes.toDoubleVector(v)
axis = stypes.toDoubleVector(axis)
theta = ctypes.c_double(theta)
r = stypes.emptyDoubleVector(3)
libspice.vrotv_c(v, axis, theta, r)
return stypes.cVectorToPython(r) | [
"def",
"vrotv",
"(",
"v",
",",
"axis",
",",
"theta",
")",
":",
"v",
"=",
"stypes",
".",
"toDoubleVector",
"(",
"v",
")",
"axis",
"=",
"stypes",
".",
"toDoubleVector",
"(",
"axis",
")",
"theta",
"=",
"ctypes",
".",
"c_double",
"(",
"theta",
")",
"r",
"=",
"stypes",
".",
"emptyDoubleVector",
"(",
"3",
")",
"libspice",
".",
"vrotv_c",
"(",
"v",
",",
"axis",
",",
"theta",
",",
"r",
")",
"return",
"stypes",
".",
"cVectorToPython",
"(",
"r",
")"
] | Rotate a vector about a specified axis vector by a
specified angle and return the rotated vector.
http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/vrotv_c.html
:param v: Vector to be rotated.
:type v: 3-Element Array of floats
:param axis: Axis of the rotation.
:type axis: 3-Element Array of floats
:param theta: Angle of rotation (radians).
:type theta: float
:return: Result of rotating v about axis by theta
:rtype: 3-Element Array of floats | [
"Rotate",
"a",
"vector",
"about",
"a",
"specified",
"axis",
"vector",
"by",
"a",
"specified",
"angle",
"and",
"return",
"the",
"rotated",
"vector",
"."
] | python | train |
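A NumPy cross-check of the rotation in the vrotv record, via the Rodrigues formula (assumes numpy; CSPICE is not needed for this sketch):

    import numpy as np

    def rotate_about_axis(v, axis, theta):
        # Rodrigues' rotation formula: rotate v about a unit axis by theta radians.
        v = np.asarray(v, dtype=float)
        k = np.asarray(axis, dtype=float)
        k = k / np.linalg.norm(k)
        return (v * np.cos(theta)
                + np.cross(k, v) * np.sin(theta)
                + k * np.dot(k, v) * (1.0 - np.cos(theta)))

    print(rotate_about_axis([1.0, 0.0, 0.0], [0.0, 0.0, 1.0], np.pi / 2))
    # ~[0. 1. 0.], the same result vrotv([1, 0, 0], [0, 0, 1], pi/2) returns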
ThreatConnect-Inc/tcex | tcex/tcex_bin_run.py | https://github.com/ThreatConnect-Inc/tcex/blob/dd4d7a1ef723af1561687120191886b9a2fd4b47/tcex/tcex_bin_run.py#L1591-L1605 | def stage_tc_indicator_entity(self, indicator_data):
"""Convert JSON data to TCEntity.
Args:
indicator_data (str): The indicator data to convert.
Returns:
(dict): The TCEntity data (value, type, ownerName, confidence, rating).
"""
path = '@.{value: summary, '
path += 'type: type, '
path += 'ownerName: ownerName, '
path += 'confidence: confidence || `0`, '
path += 'rating: rating || `0`}'
return self.path_data(indicator_data, path) | [
"def",
"stage_tc_indicator_entity",
"(",
"self",
",",
"indicator_data",
")",
":",
"path",
"=",
"'@.{value: summary, '",
"path",
"+=",
"'type: type, '",
"path",
"+=",
"'ownerName: ownerName, '",
"path",
"+=",
"'confidence: confidence || `0`, '",
"path",
"+=",
"'rating: rating || `0`}'",
"return",
"self",
".",
"path_data",
"(",
"indicator_data",
",",
"path",
")"
] | Convert JSON data to TCEntity.
Args:
indicator_data (str): The indicator data to convert.
Returns:
(dict): The TCEntity data (value, type, ownerName, confidence, rating). | [
"Convert",
"JSON",
"data",
"to",
"TCEntity",
"."
] | python | train |
exosite-labs/pyonep | pyonep/portals/__init__.py | https://github.com/exosite-labs/pyonep/blob/d27b621b00688a542e0adcc01f3e3354c05238a1/pyonep/portals/__init__.py#L501-L504 | def get_user_permission_from_email(self, email):
""" Returns a user's permissions object when given the user email."""
_id = self.get_user_id_from_email(email)
return self.get_user_permission(_id) | [
"def",
"get_user_permission_from_email",
"(",
"self",
",",
"email",
")",
":",
"_id",
"=",
"self",
".",
"get_user_id_from_email",
"(",
"email",
")",
"return",
"self",
".",
"get_user_permission",
"(",
"_id",
")"
] | Returns a user's permissions object when given the user email. | [
"Returns",
"a",
"user",
"s",
"permissions",
"object",
"when",
"given",
"the",
"user",
"email",
"."
] | python | train |
hydraplatform/hydra-base | hydra_base/util/dataset_util.py | https://github.com/hydraplatform/hydra-base/blob/9251ff7946505f7a272c87837390acd1c435bc6e/hydra_base/util/dataset_util.py#L214-L228 | def validate_ENUM(in_value, restriction):
"""
Test to ensure that the given value is contained in the provided list.
the value parameter must be either a single value or a 1-dimensional list.
All the values in this list must satisfy the ENUM
"""
value = _get_val(in_value)
if type(value) is list:
for subval in value:
if type(subval) is tuple:
subval = subval[1]
validate_ENUM(subval, restriction)
else:
if value not in restriction:
raise ValidationError("ENUM : %s"%(restriction)) | [
"def",
"validate_ENUM",
"(",
"in_value",
",",
"restriction",
")",
":",
"value",
"=",
"_get_val",
"(",
"in_value",
")",
"if",
"type",
"(",
"value",
")",
"is",
"list",
":",
"for",
"subval",
"in",
"value",
":",
"if",
"type",
"(",
"subval",
")",
"is",
"tuple",
":",
"subval",
"=",
"subval",
"[",
"1",
"]",
"validate_ENUM",
"(",
"subval",
",",
"restriction",
")",
"else",
":",
"if",
"value",
"not",
"in",
"restriction",
":",
"raise",
"ValidationError",
"(",
"\"ENUM : %s\"",
"%",
"(",
"restriction",
")",
")"
] | Test to ensure that the given value is contained in the provided list.
the value parameter must be either a single value or a 1-dimensional list.
All the values in this list must satisfy the ENUM | [
"Test",
"to",
"ensure",
"that",
"the",
"given",
"value",
"is",
"contained",
"in",
"the",
"provided",
"list",
".",
"the",
"value",
"parameter",
"must",
"be",
"either",
"a",
"single",
"value",
"or",
"a",
"1",
"-",
"dimensional",
"list",
".",
"All",
"the",
"values",
"in",
"this",
"list",
"must",
"satisfy",
"the",
"ENUM"
] | python | train |
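The ENUM check in isolation, with tuple unwrapping and `_get_val` omitted for brevity:

    class ValidationError(Exception):
        pass

    def validate_enum(value, allowed):
        # Recurse into lists; every leaf value must be a member of `allowed`.
        if isinstance(value, list):
            for item in value:
                validate_enum(item, allowed)
        elif value not in allowed:
            raise ValidationError("ENUM : %s" % (allowed,))

    validate_enum(['red', 'green'], ['red', 'green', 'blue'])  # passes silently
    try:
        validate_enum('purple', ['red', 'green', 'blue'])
    except ValidationError as err:
        print(err)  # ENUM : ['red', 'green', 'blue']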
mongodb/motor | motor/core.py | https://github.com/mongodb/motor/blob/6af22720723bde7c78eb8cb126962cfbfc034b2c/motor/core.py#L1161-L1170 | def close(self):
"""Explicitly kill this cursor on the server. Call like (in Tornado):
.. code-block:: python
yield cursor.close()
"""
if not self.closed:
self.closed = True
yield self._framework.yieldable(self._close()) | [
"def",
"close",
"(",
"self",
")",
":",
"if",
"not",
"self",
".",
"closed",
":",
"self",
".",
"closed",
"=",
"True",
"yield",
"self",
".",
"_framework",
".",
"yieldable",
"(",
"self",
".",
"_close",
"(",
")",
")"
] | Explicitly kill this cursor on the server. Call like (in Tornado):
.. code-block:: python
yield cursor.close() | [
"Explicitly",
"kill",
"this",
"cursor",
"on",
"the",
"server",
".",
"Call",
"like",
"(",
"in",
"Tornado",
")",
":"
] | python | train |
CalebBell/thermo | thermo/thermal_conductivity.py | https://github.com/CalebBell/thermo/blob/3857ed023a3e64fd3039a32d53576c24990ef1c3/thermo/thermal_conductivity.py#L1244-L1272 | def load_all_methods(self):
r'''Method to initialize the object by precomputing any values which
may be used repeatedly and by retrieving mixture-specific variables.
All data are stored as attributes. This method also sets :obj:`Tmin`,
:obj:`Tmax`, and :obj:`all_methods` as a set of methods which should
work to calculate the property.
Called on initialization only. See the source code for the variables at
which the coefficients are stored. The coefficients can safely be
altered once the class is initialized. This method can be called again
to reset the parameters.
'''
methods = [DIPPR_9H, SIMPLE]
if len(self.CASs) == 2:
methods.append(FILIPPOV)
if '7732-18-5' in self.CASs and len(self.CASs)>1:
wCASs = [i for i in self.CASs if i != '7732-18-5']
if all([i in Magomedovk_thermal_cond.index for i in wCASs]):
methods.append(MAGOMEDOV)
self.wCASs = wCASs
self.index_w = self.CASs.index('7732-18-5')
self.all_methods = set(methods)
Tmins = [i.Tmin for i in self.ThermalConductivityLiquids if i.Tmin]
Tmaxs = [i.Tmax for i in self.ThermalConductivityLiquids if i.Tmax]
if Tmins:
self.Tmin = max(Tmins)
if Tmaxs:
self.Tmax = max(Tmaxs) | [
"def",
"load_all_methods",
"(",
"self",
")",
":",
"methods",
"=",
"[",
"DIPPR_9H",
",",
"SIMPLE",
"]",
"if",
"len",
"(",
"self",
".",
"CASs",
")",
"==",
"2",
":",
"methods",
".",
"append",
"(",
"FILIPPOV",
")",
"if",
"'7732-18-5'",
"in",
"self",
".",
"CASs",
"and",
"len",
"(",
"self",
".",
"CASs",
")",
">",
"1",
":",
"wCASs",
"=",
"[",
"i",
"for",
"i",
"in",
"self",
".",
"CASs",
"if",
"i",
"!=",
"'7732-18-5'",
"]",
"if",
"all",
"(",
"[",
"i",
"in",
"Magomedovk_thermal_cond",
".",
"index",
"for",
"i",
"in",
"wCASs",
"]",
")",
":",
"methods",
".",
"append",
"(",
"MAGOMEDOV",
")",
"self",
".",
"wCASs",
"=",
"wCASs",
"self",
".",
"index_w",
"=",
"self",
".",
"CASs",
".",
"index",
"(",
"'7732-18-5'",
")",
"self",
".",
"all_methods",
"=",
"set",
"(",
"methods",
")",
"Tmins",
"=",
"[",
"i",
".",
"Tmin",
"for",
"i",
"in",
"self",
".",
"ThermalConductivityLiquids",
"if",
"i",
".",
"Tmin",
"]",
"Tmaxs",
"=",
"[",
"i",
".",
"Tmax",
"for",
"i",
"in",
"self",
".",
"ThermalConductivityLiquids",
"if",
"i",
".",
"Tmax",
"]",
"if",
"Tmins",
":",
"self",
".",
"Tmin",
"=",
"max",
"(",
"Tmins",
")",
"if",
"Tmaxs",
":",
"self",
".",
"Tmax",
"=",
"max",
"(",
"Tmaxs",
")"
] | r'''Method to initialize the object by precomputing any values which
may be used repeatedly and by retrieving mixture-specific variables.
All data are stored as attributes. This method also sets :obj:`Tmin`,
:obj:`Tmax`, and :obj:`all_methods` as a set of methods which should
work to calculate the property.
Called on initialization only. See the source code for the variables at
which the coefficients are stored. The coefficients can safely be
altered once the class is initialized. This method can be called again
to reset the parameters. | [
"r",
"Method",
"to",
"initialize",
"the",
"object",
"by",
"precomputing",
"any",
"values",
"which",
"may",
"be",
"used",
"repeatedly",
"and",
"by",
"retrieving",
"mixture",
"-",
"specific",
"variables",
".",
"All",
"data",
"are",
"stored",
"as",
"attributes",
".",
"This",
"method",
"also",
"sets",
":",
"obj",
":",
"Tmin",
":",
"obj",
":",
"Tmax",
"and",
":",
"obj",
":",
"all_methods",
"as",
"a",
"set",
"of",
"methods",
"which",
"should",
"work",
"to",
"calculate",
"the",
"property",
"."
] | python | valid |
shoebot/shoebot | lib/web/BeautifulSoup.py | https://github.com/shoebot/shoebot/blob/d554c1765c1899fa25727c9fc6805d221585562b/lib/web/BeautifulSoup.py#L711-L719 | def decompose(self):
"""Recursively destroys the contents of this tree."""
contents = [i for i in self.contents]
for i in contents:
if isinstance(i, Tag):
i.decompose()
else:
i.extract()
self.extract() | [
"def",
"decompose",
"(",
"self",
")",
":",
"contents",
"=",
"[",
"i",
"for",
"i",
"in",
"self",
".",
"contents",
"]",
"for",
"i",
"in",
"contents",
":",
"if",
"isinstance",
"(",
"i",
",",
"Tag",
")",
":",
"i",
".",
"decompose",
"(",
")",
"else",
":",
"i",
".",
"extract",
"(",
")",
"self",
".",
"extract",
"(",
")"
] | Recursively destroys the contents of this tree. | [
"Recursively",
"destroys",
"the",
"contents",
"of",
"this",
"tree",
"."
] | python | valid |
tumblr/pytumblr | pytumblr/__init__.py | https://github.com/tumblr/pytumblr/blob/4a5cd7c4b8ae78d12811d9fd52620afa1692a415/pytumblr/__init__.py#L96-L111 | def tagged(self, tag, **kwargs):
"""
Gets a list of posts tagged with the given tag
:param tag: a string, the tag you want to look for
:param before: a unix timestamp, the timestamp you want to start at
to look at posts.
:param limit: the number of results you want
:param filter: the post format that you want returned: html, text, raw
client.tagged("gif", limit=10)
:returns: a dict created from the JSON response
"""
kwargs.update({'tag': tag})
return self.send_api_request("get", '/v2/tagged', kwargs, ['before', 'limit', 'filter', 'tag', 'api_key'], True) | [
"def",
"tagged",
"(",
"self",
",",
"tag",
",",
"*",
"*",
"kwargs",
")",
":",
"kwargs",
".",
"update",
"(",
"{",
"'tag'",
":",
"tag",
"}",
")",
"return",
"self",
".",
"send_api_request",
"(",
"\"get\"",
",",
"'/v2/tagged'",
",",
"kwargs",
",",
"[",
"'before'",
",",
"'limit'",
",",
"'filter'",
",",
"'tag'",
",",
"'api_key'",
"]",
",",
"True",
")"
] | Gets a list of posts tagged with the given tag
:param tag: a string, the tag you want to look for
:param before: a unix timestamp, the timestamp you want to start at
to look at posts.
:param limit: the number of results you want
:param filter: the post format that you want returned: html, text, raw
client.tagged("gif", limit=10)
:returns: a dict created from the JSON response | [
"Gets",
"a",
"list",
"of",
"posts",
"tagged",
"with",
"the",
"given",
"tag"
] | python | train |
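A hedged usage sketch for the method above; `TumblrRestClient` is pytumblr's client class, the credential strings are placeholders, and the response is assumed to be a list of post dicts:

    import pytumblr

    client = pytumblr.TumblrRestClient(
        'CONSUMER_KEY', 'CONSUMER_SECRET',  # placeholder credentials
        'OAUTH_TOKEN', 'OAUTH_SECRET',
    )
    posts = client.tagged('gif', limit=10, filter='text')
    for post in posts:
        print(post.get('id'), post.get('post_url'))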
tanghaibao/goatools | goatools/godag_plot.py | https://github.com/tanghaibao/goatools/blob/407682e573a108864a79031f8ca19ee3bf377626/goatools/godag_plot.py#L26-L37 | def plot_results(fout_png, goea_results, *args, **kws):
"""Given a list of GOEA results, plot result GOs up to top."""
if "{NS}" not in fout_png:
plt_goea_results(fout_png, goea_results, *args, **kws)
else:
# Plot separately by NS: BP, MF, CC
ns2goea_results = cx.defaultdict(list)
for rec in goea_results:
ns2goea_results[rec.NS].append(rec)
for ns_name, ns_res in ns2goea_results.items():
png = fout_png.format(NS=ns_name)
plt_goea_results(png, ns_res, *args, **kws) | [
"def",
"plot_results",
"(",
"fout_png",
",",
"goea_results",
",",
"*",
"args",
",",
"*",
"*",
"kws",
")",
":",
"if",
"\"{NS}\"",
"not",
"in",
"fout_png",
":",
"plt_goea_results",
"(",
"fout_png",
",",
"goea_results",
",",
"*",
"args",
",",
"*",
"*",
"kws",
")",
"else",
":",
"# Plot separately by NS: BP, MF, CC",
"ns2goea_results",
"=",
"cx",
".",
"defaultdict",
"(",
"list",
")",
"for",
"rec",
"in",
"goea_results",
":",
"ns2goea_results",
"[",
"rec",
".",
"NS",
"]",
".",
"append",
"(",
"rec",
")",
"for",
"ns_name",
",",
"ns_res",
"in",
"ns2goea_results",
".",
"items",
"(",
")",
":",
"png",
"=",
"fout_png",
".",
"format",
"(",
"NS",
"=",
"ns_name",
")",
"plt_goea_results",
"(",
"png",
",",
"ns_res",
",",
"*",
"args",
",",
"*",
"*",
"kws",
")"
] | Given a list of GOEA results, plot result GOs up to top. | [
"Given",
"a",
"list",
"of",
"GOEA",
"results",
"plot",
"result",
"GOs",
"up",
"to",
"top",
"."
] | python | train |
nathan-hoad/outbox | outbox.py | https://github.com/nathan-hoad/outbox/blob/afd28cd14023fdbcd40ad8925ea09c2a9b4d98cb/outbox.py#L219-L235 | def add_attachment(message, attachment, rfc2231=True):
'''Attach an attachment to a message as a side effect.
Arguments:
message: MIMEMultipart instance.
attachment: Attachment instance.
'''
data = attachment.read()
part = MIMEBase('application', 'octet-stream')
part.set_payload(data)
encoders.encode_base64(part)
filename = attachment.name if rfc2231 else Header(attachment.name).encode()
part.add_header('Content-Disposition', 'attachment',
filename=filename)
message.attach(part) | [
"def",
"add_attachment",
"(",
"message",
",",
"attachment",
",",
"rfc2231",
"=",
"True",
")",
":",
"data",
"=",
"attachment",
".",
"read",
"(",
")",
"part",
"=",
"MIMEBase",
"(",
"'application'",
",",
"'octet-stream'",
")",
"part",
".",
"set_payload",
"(",
"data",
")",
"encoders",
".",
"encode_base64",
"(",
"part",
")",
"filename",
"=",
"attachment",
".",
"name",
"if",
"rfc2231",
"else",
"Header",
"(",
"attachment",
".",
"name",
")",
".",
"encode",
"(",
")",
"part",
".",
"add_header",
"(",
"'Content-Disposition'",
",",
"'attachment'",
",",
"filename",
"=",
"filename",
")",
"message",
".",
"attach",
"(",
"part",
")"
] | Attach an attachment to a message as a side effect.
Arguments:
message: MIMEMultipart instance.
attachment: Attachment instance. | [
"Attach",
"an",
"attachment",
"to",
"a",
"message",
"as",
"a",
"side",
"effect",
"."
] | python | train |
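A hedged usage sketch for add_attachment: FakeAttachment is a hypothetical stand-in exposing only the name attribute and read() method the function relies on (outbox's real Attachment class may differ), and add_attachment plus its email imports are assumed to be in scope from the snippet above:

from email.mime.multipart import MIMEMultipart

class FakeAttachment:  # hypothetical stand-in for outbox's Attachment
    def __init__(self, name, data):
        self.name = name
        self._data = data
    def read(self):
        return self._data

message = MIMEMultipart()
add_attachment(message, FakeAttachment('report.txt', b'hello'))
print(message.get_payload()[0]['Content-Disposition'])  # attachment; filename="report.txt"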
trevisanj/a99 | a99/textinterface.py | https://github.com/trevisanj/a99/blob/193e6e3c9b3e4f4a0ba7eb3eece846fe7045c539/a99/textinterface.py#L377-L395 | def markdown_table(data, headers):
"""
Creates MarkDown table. Returns list of strings
Arguments:
data -- [(cell00, cell01, ...), (cell10, cell11, ...), ...]
headers -- sequence of strings: (header0, header1, ...)
"""
maxx = [max([len(x) for x in column]) for column in zip(*data)]
maxx = [max(ll) for ll in zip(maxx, [len(x) for x in headers])]
mask = " | ".join(["%-{0:d}s".format(n) for n in maxx])
ret = [mask % headers]
ret.append(" | ".join(["-"*n for n in maxx]))
for line in data:
ret.append(mask % line)
return ret | [
"def",
"markdown_table",
"(",
"data",
",",
"headers",
")",
":",
"maxx",
"=",
"[",
"max",
"(",
"[",
"len",
"(",
"x",
")",
"for",
"x",
"in",
"column",
"]",
")",
"for",
"column",
"in",
"zip",
"(",
"*",
"data",
")",
"]",
"maxx",
"=",
"[",
"max",
"(",
"ll",
")",
"for",
"ll",
"in",
"zip",
"(",
"maxx",
",",
"[",
"len",
"(",
"x",
")",
"for",
"x",
"in",
"headers",
"]",
")",
"]",
"mask",
"=",
"\" | \"",
".",
"join",
"(",
"[",
"\"%-{0:d}s\"",
".",
"format",
"(",
"n",
")",
"for",
"n",
"in",
"maxx",
"]",
")",
"ret",
"=",
"[",
"mask",
"%",
"headers",
"]",
"ret",
".",
"append",
"(",
"\" | \"",
".",
"join",
"(",
"[",
"\"-\"",
"*",
"n",
"for",
"n",
"in",
"maxx",
"]",
")",
")",
"for",
"line",
"in",
"data",
":",
"ret",
".",
"append",
"(",
"mask",
"%",
"line",
")",
"return",
"ret"
] | Creates MarkDown table. Returns list of strings
Arguments:
data -- [(cell00, cell01, ...), (cell10, cell11, ...), ...]
headers -- sequence of strings: (header0, header1, ...) | [
"Creates",
"MarkDown",
"table",
".",
"Returns",
"list",
"of",
"strings",
"Arguments",
":",
"data",
"--",
"[",
"(",
"cell00",
"cell01",
"...",
")",
"(",
"cell10",
"cell11",
"...",
")",
"...",
"]",
"headers",
"--",
"sequence",
"of",
"strings",
":",
"(",
"header0",
"header1",
"...",
")"
] | python | train |
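Assuming markdown_table from the snippet above is in scope, a quick usage sketch (expected output shown in comments; cells are padded to the widest entry in each column):

rows = [('alpha', '1'), ('beta', '22')]
headers = ('name', 'value')     # must be a tuple, since it is used with mask % headers
for line in markdown_table(rows, headers):
    print(line)
# name  | value
# ----- | -----
# alpha | 1
# beta  | 22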
jenisys/parse_type | parse_type/cardinality.py | https://github.com/jenisys/parse_type/blob/7cad3a67a5ca725cb786da31f656fd473084289f/parse_type/cardinality.py#L67-L78 | def compute_group_count(self, pattern):
"""Compute the number of regexp match groups when the pattern is provided
to the :func:`Cardinality.make_pattern()` method.
:param pattern: Item regexp pattern (as string).
:return: Number of regexp match groups in the cardinality pattern.
"""
group_count = self.group_count
pattern_repeated = 1
if self.is_many():
pattern_repeated = 2
return group_count + pattern_repeated * pattern_group_count(pattern) | [
"def",
"compute_group_count",
"(",
"self",
",",
"pattern",
")",
":",
"group_count",
"=",
"self",
".",
"group_count",
"pattern_repeated",
"=",
"1",
"if",
"self",
".",
"is_many",
"(",
")",
":",
"pattern_repeated",
"=",
"2",
"return",
"group_count",
"+",
"pattern_repeated",
"*",
"pattern_group_count",
"(",
"pattern",
")"
] | Compute the number of regexp match groups when the pattern is provided
to the :func:`Cardinality.make_pattern()` method.
:param pattern: Item regexp pattern (as string).
:return: Number of regexp match groups in the cardinality pattern. | [
"Compute",
"the",
"number",
"of",
"regexp",
"match",
"groups",
"when",
"the",
"pattern",
"is",
"provided",
"to",
"the",
":",
"func",
":",
"Cardinality",
".",
"make_pattern",
"()",
"method",
"."
] | python | train |
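The helper pattern_group_count is not shown above; for ordinary regexps, re.compile(p).groups yields the same number, which is enough to illustrate why a "many" cardinality doubles the item's group count (a sketch under that assumption):

import re

def pattern_group_count(pattern):   # plausible stand-in, not the parse_type original
    return re.compile(pattern).groups

item = r'(\d+)'
group_count = 1                     # hypothetical value of self.group_count
total = group_count + 2 * pattern_group_count(item)  # is_many() repeats the item twice
print(total)  # 3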
mitsei/dlkit | dlkit/handcar/learning/sessions.py | https://github.com/mitsei/dlkit/blob/445f968a175d61c8d92c0f617a3c17dc1dc7c584/dlkit/handcar/learning/sessions.py#L154-L172 | def get_objective(self, objective_id=None):
"""Gets the Objective specified by its Id.
In plenary mode, the exact Id is found or a NotFound results.
Otherwise, the returned Objective may have a different Id than
requested, such as the case where a duplicate Id was assigned to
an Objective and retained for compatibility.
arg: objectiveId (osid.id.Id): Id of the Objective
return: (osid.learning.Objective) - the objective
raise: NotFound - objectiveId not found
raise: NullArgument - objectiveId is null
raise: OperationFailed - unable to complete request
raise: PermissionDenied - authorization failure
        compliance: mandatory - This method must be implemented.
"""
if objective_id is None:
raise NullArgument()
url_path = construct_url('objectives', obj_id=objective_id)
return objects.Objective(self._get_request(url_path)) | [
"def",
"get_objective",
"(",
"self",
",",
"objective_id",
"=",
"None",
")",
":",
"if",
"objective_id",
"is",
"None",
":",
"raise",
"NullArgument",
"(",
")",
"url_path",
"=",
"construct_url",
"(",
"'objectives'",
",",
"obj_id",
"=",
"objective_id",
")",
"return",
"objects",
".",
"Objective",
"(",
"self",
".",
"_get_request",
"(",
"url_path",
")",
")"
] | Gets the Objective specified by its Id.
In plenary mode, the exact Id is found or a NotFound results.
Otherwise, the returned Objective may have a different Id than
requested, such as the case where a duplicate Id was assigned to
an Objective and retained for compatibility.
arg: objectiveId (osid.id.Id): Id of the Objective
return: (osid.learning.Objective) - the objective
raise: NotFound - objectiveId not found
raise: NullArgument - objectiveId is null
raise: OperationFailed - unable to complete request
raise: PermissionDenied - authorization failure
compliance: mandatory - This method must be implemented. | [
"Gets",
"the",
"Objective",
"specified",
"by",
"its",
"Id",
".",
"In",
"plenary",
"mode",
"the",
"exact",
"Id",
"is",
"found",
"or",
"a",
"NotFound",
"results",
".",
"Otherwise",
"the",
"returned",
"Objective",
"may",
"have",
"a",
"different",
"Id",
"than",
"requested",
"such",
"as",
"the",
"case",
"where",
"a",
"duplicate",
"Id",
"was",
"assigned",
"to",
"an",
"Objective",
"and",
"retained",
"for",
"compatibility",
".",
"arg",
":",
"objectiveId",
"(",
"osid",
".",
"id",
".",
"Id",
")",
":",
"Id",
"of",
"the",
"Objective",
"return",
":",
"(",
"osid",
".",
"learning",
".",
"Objective",
")",
"-",
"the",
"objective",
"raise",
":",
"NotFound",
"-",
"objectiveId",
"not",
"found",
"raise",
":",
"NullArgument",
"-",
"objectiveId",
"is",
"null",
"raise",
":",
"OperationFailed",
"-",
"unable",
"to",
"complete",
"request",
"raise",
":",
"PermissionDenied",
"-",
"authorization",
"failure",
"compliance",
":",
"mandatory",
"-",
"This",
"method",
"is",
"must",
"be",
"implemented",
"."
] | python | train |
gwastro/pycbc | pycbc/tmpltbank/coord_utils.py | https://github.com/gwastro/pycbc/blob/7a64cdd104d263f1b6ea0b01e6841837d05a4cb3/pycbc/tmpltbank/coord_utils.py#L26-L73 | def estimate_mass_range(numPoints, massRangeParams, metricParams, fUpper,\
covary=True):
"""
This function will generate a large set of points with random masses and
spins (using pycbc.tmpltbank.get_random_mass) and translate these points
into the xi_i coordinate system for the given upper frequency cutoff.
Parameters
----------
numPoints : int
Number of systems to simulate
massRangeParams : massRangeParameters instance
Instance holding all the details of mass ranges and spin ranges.
metricParams : metricParameters instance
Structure holding all the options for construction of the metric
and the eigenvalues, eigenvectors and covariance matrix
needed to manipulate the space.
fUpper : float
The value of fUpper to use when getting the mu coordinates from the
lambda coordinates. This must be a key in metricParams.evals and
        metricParams.evecs (i.e. we must know how to do the transformation for
the given value of fUpper). It also must be a key in
metricParams.evecsCV if covary=True.
covary : boolean, optional (default = True)
If this is given then evecsCV will be used to rotate from the Cartesian
coordinate system into the principal coordinate direction (xi_i). If
not given then points in the original Cartesian coordinates are
returned.
Returns
-------
xis : numpy.array
A list of the positions of each point in the xi_i coordinate system.
"""
vals_set = get_random_mass(numPoints, massRangeParams)
mass1 = vals_set[0]
mass2 = vals_set[1]
spin1z = vals_set[2]
spin2z = vals_set[3]
if covary:
lambdas = get_cov_params(mass1, mass2, spin1z, spin2z, metricParams,
fUpper)
else:
lambdas = get_conv_params(mass1, mass2, spin1z, spin2z, metricParams,
fUpper)
return numpy.array(lambdas) | [
"def",
"estimate_mass_range",
"(",
"numPoints",
",",
"massRangeParams",
",",
"metricParams",
",",
"fUpper",
",",
"covary",
"=",
"True",
")",
":",
"vals_set",
"=",
"get_random_mass",
"(",
"numPoints",
",",
"massRangeParams",
")",
"mass1",
"=",
"vals_set",
"[",
"0",
"]",
"mass2",
"=",
"vals_set",
"[",
"1",
"]",
"spin1z",
"=",
"vals_set",
"[",
"2",
"]",
"spin2z",
"=",
"vals_set",
"[",
"3",
"]",
"if",
"covary",
":",
"lambdas",
"=",
"get_cov_params",
"(",
"mass1",
",",
"mass2",
",",
"spin1z",
",",
"spin2z",
",",
"metricParams",
",",
"fUpper",
")",
"else",
":",
"lambdas",
"=",
"get_conv_params",
"(",
"mass1",
",",
"mass2",
",",
"spin1z",
",",
"spin2z",
",",
"metricParams",
",",
"fUpper",
")",
"return",
"numpy",
".",
"array",
"(",
"lambdas",
")"
] | This function will generate a large set of points with random masses and
spins (using pycbc.tmpltbank.get_random_mass) and translate these points
into the xi_i coordinate system for the given upper frequency cutoff.
Parameters
----------
numPoints : int
Number of systems to simulate
massRangeParams : massRangeParameters instance
Instance holding all the details of mass ranges and spin ranges.
metricParams : metricParameters instance
Structure holding all the options for construction of the metric
and the eigenvalues, eigenvectors and covariance matrix
needed to manipulate the space.
fUpper : float
The value of fUpper to use when getting the mu coordinates from the
lambda coordinates. This must be a key in metricParams.evals and
metricParams.evecs (ie. we must know how to do the transformation for
the given value of fUpper). It also must be a key in
metricParams.evecsCV if covary=True.
covary : boolean, optional (default = True)
If this is given then evecsCV will be used to rotate from the Cartesian
coordinate system into the principal coordinate direction (xi_i). If
not given then points in the original Cartesian coordinates are
returned.
Returns
-------
xis : numpy.array
A list of the positions of each point in the xi_i coordinate system. | [
"This",
"function",
"will",
"generate",
"a",
"large",
"set",
"of",
"points",
"with",
"random",
"masses",
"and",
"spins",
"(",
"using",
"pycbc",
".",
"tmpltbank",
".",
"get_random_mass",
")",
"and",
"translate",
"these",
"points",
"into",
"the",
"xi_i",
"coordinate",
"system",
"for",
"the",
"given",
"upper",
"frequency",
"cutoff",
"."
] | python | train |
MLAB-project/pymlab | examples/I2CSPI_HBSTEP_CAMPAP.py | https://github.com/MLAB-project/pymlab/blob/d18d858ae83b203defcf2aead0dbd11b3c444658/examples/I2CSPI_HBSTEP_CAMPAP.py#L88-L98 | def Move(self, units):
' Move some distance units from current position '
steps = units * self.SPU # translate units to steps
if steps > 0: # look for direction
spi.SPI_write_byte(self.CS, 0x40 | (~self.Dir & 1))
else:
spi.SPI_write_byte(self.CS, 0x40 | (self.Dir & 1))
steps = int(abs(steps))
spi.SPI_write_byte(self.CS, (steps >> 16) & 0xFF)
spi.SPI_write_byte(self.CS, (steps >> 8) & 0xFF)
spi.SPI_write_byte(self.CS, steps & 0xFF) | [
"def",
"Move",
"(",
"self",
",",
"units",
")",
":",
"steps",
"=",
"units",
"*",
"self",
".",
"SPU",
"# translate units to steps ",
"if",
"steps",
">",
"0",
":",
"# look for direction",
"spi",
".",
"SPI_write_byte",
"(",
"self",
".",
"CS",
",",
"0x40",
"|",
"(",
"~",
"self",
".",
"Dir",
"&",
"1",
")",
")",
"else",
":",
"spi",
".",
"SPI_write_byte",
"(",
"self",
".",
"CS",
",",
"0x40",
"|",
"(",
"self",
".",
"Dir",
"&",
"1",
")",
")",
"steps",
"=",
"int",
"(",
"abs",
"(",
"steps",
")",
")",
"spi",
".",
"SPI_write_byte",
"(",
"self",
".",
"CS",
",",
"(",
"steps",
">>",
"16",
")",
"&",
"0xFF",
")",
"spi",
".",
"SPI_write_byte",
"(",
"self",
".",
"CS",
",",
"(",
"steps",
">>",
"8",
")",
"&",
"0xFF",
")",
"spi",
".",
"SPI_write_byte",
"(",
"self",
".",
"CS",
",",
"steps",
"&",
"0xFF",
")"
] | Move some distance units from current position | [
"Move",
"some",
"distance",
"units",
"from",
"current",
"position"
] | python | train |
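The three SPI writes above send the absolute step count as big-endian bytes; a standalone illustration of that packing:

steps = 123456  # 0x01E240
packed = [(steps >> 16) & 0xFF, (steps >> 8) & 0xFF, steps & 0xFF]
print([hex(b) for b in packed])  # ['0x1', '0xe2', '0x40']
assert (packed[0] << 16) | (packed[1] << 8) | packed[2] == steps  # round-trips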
pandas-dev/pandas | pandas/io/parsers.py | https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/io/parsers.py#L3535-L3571 | def get_rows(self, infer_nrows, skiprows=None):
"""
Read rows from self.f, skipping as specified.
We distinguish buffer_rows (the first <= infer_nrows
lines) from the rows returned to detect_colspecs
because it's simpler to leave the other locations
with skiprows logic alone than to modify them to
deal with the fact we skipped some rows here as
well.
Parameters
----------
infer_nrows : int
Number of rows to read from self.f, not counting
rows that are skipped.
skiprows: set, optional
Indices of rows to skip.
Returns
-------
detect_rows : list of str
A list containing the rows to read.
"""
if skiprows is None:
skiprows = set()
buffer_rows = []
detect_rows = []
for i, row in enumerate(self.f):
if i not in skiprows:
detect_rows.append(row)
buffer_rows.append(row)
if len(detect_rows) >= infer_nrows:
break
self.buffer = iter(buffer_rows)
return detect_rows | [
"def",
"get_rows",
"(",
"self",
",",
"infer_nrows",
",",
"skiprows",
"=",
"None",
")",
":",
"if",
"skiprows",
"is",
"None",
":",
"skiprows",
"=",
"set",
"(",
")",
"buffer_rows",
"=",
"[",
"]",
"detect_rows",
"=",
"[",
"]",
"for",
"i",
",",
"row",
"in",
"enumerate",
"(",
"self",
".",
"f",
")",
":",
"if",
"i",
"not",
"in",
"skiprows",
":",
"detect_rows",
".",
"append",
"(",
"row",
")",
"buffer_rows",
".",
"append",
"(",
"row",
")",
"if",
"len",
"(",
"detect_rows",
")",
">=",
"infer_nrows",
":",
"break",
"self",
".",
"buffer",
"=",
"iter",
"(",
"buffer_rows",
")",
"return",
"detect_rows"
] | Read rows from self.f, skipping as specified.
We distinguish buffer_rows (the first <= infer_nrows
lines) from the rows returned to detect_colspecs
because it's simpler to leave the other locations
with skiprows logic alone than to modify them to
deal with the fact we skipped some rows here as
well.
Parameters
----------
infer_nrows : int
Number of rows to read from self.f, not counting
rows that are skipped.
skiprows: set, optional
Indices of rows to skip.
Returns
-------
detect_rows : list of str
A list containing the rows to read. | [
"Read",
"rows",
"from",
"self",
".",
"f",
"skipping",
"as",
"specified",
"."
] | python | train |
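A self-contained sketch of the skip-while-buffering behaviour described above, with io.StringIO standing in for self.f:

from io import StringIO

f = StringIO('a\nb\nc\nd\ne\n')
skiprows, infer_nrows = {1, 3}, 2     # skip the 2nd and 4th lines (0-indexed)
buffer_rows, detect_rows = [], []
for i, row in enumerate(f):
    if i not in skiprows:
        detect_rows.append(row)
    buffer_rows.append(row)           # the buffer keeps the skipped lines too
    if len(detect_rows) >= infer_nrows:
        break
print(detect_rows)  # ['a\n', 'c\n']
print(buffer_rows)  # ['a\n', 'b\n', 'c\n']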
bluedynamics/cone.ugm | src/cone/ugm/browser/remote.py | https://github.com/bluedynamics/cone.ugm/blob/3c197075f3f6e94781289311c5637bb9c8e5597c/src/cone/ugm/browser/remote.py#L12-L124 | def remote_add_user(model, request):
"""Add user via remote service.
Returns a JSON response containing success state and a message indicating
what happened::
{
success: true, // respective false
message: 'message'
}
Expected request parameters:
id
New user id.
password
User password to be set initially (optional).
roles
Comma separated role names the user initially has.
groups
Comma separated group names the user should initially be a member of.
attr.*
User attributes to be set. I.e. ``attr.mail`` would set the mail
attribute for newly created user. All request parameters prefixed with
``attr`` get checked against user attribute attrmap from settings.
Restrictions - All values, whether single or multi valued, are passed
as string or list of strings to the create function.
"""
params = request.params
uid = params.get('id')
if not uid:
return {
'success': False,
'message': u"No user ID given.",
}
users = model.backend
if uid in users:
return {
'success': False,
'message': u"User with given ID already exists.",
}
password = params.get('password')
add_roles = params.get('roles', '')
add_roles = [val.strip() for val in add_roles.split(',') if val]
add_groups = params.get('groups', '')
add_groups = [val.strip() for val in add_groups.split(',') if val]
attrs = dict()
for key, val in params.items():
if not key.startswith('attr.'):
continue
key = key[key.find('.') + 1:]
attrs[key] = val
settings = ugm_users(model)
attrmap = settings.attrs.users_form_attrmap
exposed = settings.attrs.users_exposed_attributes
if not exposed:
exposed = list()
valid_attrs = attrmap.keys() + exposed
checked_attrs = dict()
for key in valid_attrs:
val = attrs.get(key)
if not val:
continue
checked_attrs[key] = val
try:
user = users.create(uid, **checked_attrs)
message = u""
from cone.app.security import DEFAULT_ROLES
available_roles = [role[0] for role in DEFAULT_ROLES]
for role in add_roles:
if role not in available_roles:
message += u"Role '%s' given but inexistent. " % role
continue
user.add_role(role)
groups = users.parent.groups
for group in add_groups:
if group not in groups:
message += u"Group '%s' given but inexistent. " % group
continue
groups[group].add(uid)
users.parent()
if password is not None:
users.passwd(uid, None, password)
message += u"Created user with ID '%s'." % uid
return {
'success': True,
'message': message,
}
except Exception as e:
return {
'success': False,
'message': str(e),
}
finally:
model.invalidate() | [
"def",
"remote_add_user",
"(",
"model",
",",
"request",
")",
":",
"params",
"=",
"request",
".",
"params",
"uid",
"=",
"params",
".",
"get",
"(",
"'id'",
")",
"if",
"not",
"uid",
":",
"return",
"{",
"'success'",
":",
"False",
",",
"'message'",
":",
"u\"No user ID given.\"",
",",
"}",
"users",
"=",
"model",
".",
"backend",
"if",
"uid",
"in",
"users",
":",
"return",
"{",
"'success'",
":",
"False",
",",
"'message'",
":",
"u\"User with given ID already exists.\"",
",",
"}",
"password",
"=",
"params",
".",
"get",
"(",
"'password'",
")",
"add_roles",
"=",
"params",
".",
"get",
"(",
"'roles'",
",",
"''",
")",
"add_roles",
"=",
"[",
"val",
".",
"strip",
"(",
")",
"for",
"val",
"in",
"add_roles",
".",
"split",
"(",
"','",
")",
"if",
"val",
"]",
"add_groups",
"=",
"params",
".",
"get",
"(",
"'groups'",
",",
"''",
")",
"add_groups",
"=",
"[",
"val",
".",
"strip",
"(",
")",
"for",
"val",
"in",
"add_groups",
".",
"split",
"(",
"','",
")",
"if",
"val",
"]",
"attrs",
"=",
"dict",
"(",
")",
"for",
"key",
",",
"val",
"in",
"params",
".",
"items",
"(",
")",
":",
"if",
"not",
"key",
".",
"startswith",
"(",
"'attr.'",
")",
":",
"continue",
"key",
"=",
"key",
"[",
"key",
".",
"find",
"(",
"'.'",
")",
"+",
"1",
":",
"]",
"attrs",
"[",
"key",
"]",
"=",
"val",
"settings",
"=",
"ugm_users",
"(",
"model",
")",
"attrmap",
"=",
"settings",
".",
"attrs",
".",
"users_form_attrmap",
"exposed",
"=",
"settings",
".",
"attrs",
".",
"users_exposed_attributes",
"if",
"not",
"exposed",
":",
"exposed",
"=",
"list",
"(",
")",
"valid_attrs",
"=",
"attrmap",
".",
"keys",
"(",
")",
"+",
"exposed",
"checked_attrs",
"=",
"dict",
"(",
")",
"for",
"key",
"in",
"valid_attrs",
":",
"val",
"=",
"attrs",
".",
"get",
"(",
"key",
")",
"if",
"not",
"val",
":",
"continue",
"checked_attrs",
"[",
"key",
"]",
"=",
"val",
"try",
":",
"user",
"=",
"users",
".",
"create",
"(",
"uid",
",",
"*",
"*",
"checked_attrs",
")",
"message",
"=",
"u\"\"",
"from",
"cone",
".",
"app",
".",
"security",
"import",
"DEFAULT_ROLES",
"available_roles",
"=",
"[",
"role",
"[",
"0",
"]",
"for",
"role",
"in",
"DEFAULT_ROLES",
"]",
"for",
"role",
"in",
"add_roles",
":",
"if",
"role",
"not",
"in",
"available_roles",
":",
"message",
"+=",
"u\"Role '%s' given but inexistent. \"",
"%",
"role",
"continue",
"user",
".",
"add_role",
"(",
"role",
")",
"groups",
"=",
"users",
".",
"parent",
".",
"groups",
"for",
"group",
"in",
"add_groups",
":",
"if",
"group",
"not",
"in",
"groups",
":",
"message",
"+=",
"u\"Group '%s' given but inexistent. \"",
"%",
"group",
"continue",
"groups",
"[",
"group",
"]",
".",
"add",
"(",
"uid",
")",
"users",
".",
"parent",
"(",
")",
"if",
"password",
"is",
"not",
"None",
":",
"users",
".",
"passwd",
"(",
"uid",
",",
"None",
",",
"password",
")",
"message",
"+=",
"u\"Created user with ID '%s'.\"",
"%",
"uid",
"return",
"{",
"'success'",
":",
"True",
",",
"'message'",
":",
"message",
",",
"}",
"except",
"Exception",
"as",
"e",
":",
"return",
"{",
"'success'",
":",
"False",
",",
"'message'",
":",
"str",
"(",
"e",
")",
",",
"}",
"finally",
":",
"model",
".",
"invalidate",
"(",
")"
] | Add user via remote service.
Returns a JSON response containing success state and a message indicating
what happened::
{
success: true, // respective false
message: 'message'
}
Expected request parameters:
id
New user id.
password
User password to be set initially (optional).
roles
Comma separated role names the user initially has.
groups
Comma separated group names the user should initially be a member of.
attr.*
User attributes to be set. I.e. ``attr.mail`` would set the mail
attribute for newly created user. All request parameters prefixed with
``attr`` get checked against user attribute attrmap from settings.
Restrictions - All values, whether single or multi valued, are passed
as string or list of strings to the create function. | [
"Add",
"user",
"via",
"remote",
"service",
"."
] | python | train |
annoviko/pyclustering | pyclustering/cluster/ga.py | https://github.com/annoviko/pyclustering/blob/98aa0dd89fd36f701668fb1eb29c8fb5662bf7d0/pyclustering/cluster/ga.py#L675-L706 | def _calc_fitness_function(centres, data, chromosomes):
"""!
@brief Calculate fitness function values for chromosomes.
@param[in] centres (list): Cluster centers.
@param[in] data (list): Input data that is used for clustering process.
@param[in] chromosomes (list): Chromosomes whose fitness function's values are calculated.
@return (list) Fitness function value for each chromosome correspondingly.
"""
# Get count of chromosomes and clusters
count_chromosome = len(chromosomes)
# Initialize fitness function values
fitness_function = np.zeros(count_chromosome)
# Calc fitness function for each chromosome
for _idx_chromosome in range(count_chromosome):
# Get centers for a selected chromosome
centres_data = np.zeros(data.shape)
# Fill data centres
for _idx in range(len(data)):
centres_data[_idx] = centres[_idx_chromosome][chromosomes[_idx_chromosome][_idx]]
# Get City Block distance for a chromosome
fitness_function[_idx_chromosome] += np.sum(abs(data - centres_data))
return fitness_function | [
"def",
"_calc_fitness_function",
"(",
"centres",
",",
"data",
",",
"chromosomes",
")",
":",
"# Get count of chromosomes and clusters",
"count_chromosome",
"=",
"len",
"(",
"chromosomes",
")",
"# Initialize fitness function values",
"fitness_function",
"=",
"np",
".",
"zeros",
"(",
"count_chromosome",
")",
"# Calc fitness function for each chromosome",
"for",
"_idx_chromosome",
"in",
"range",
"(",
"count_chromosome",
")",
":",
"# Get centers for a selected chromosome",
"centres_data",
"=",
"np",
".",
"zeros",
"(",
"data",
".",
"shape",
")",
"# Fill data centres",
"for",
"_idx",
"in",
"range",
"(",
"len",
"(",
"data",
")",
")",
":",
"centres_data",
"[",
"_idx",
"]",
"=",
"centres",
"[",
"_idx_chromosome",
"]",
"[",
"chromosomes",
"[",
"_idx_chromosome",
"]",
"[",
"_idx",
"]",
"]",
"# Get City Block distance for a chromosome",
"fitness_function",
"[",
"_idx_chromosome",
"]",
"+=",
"np",
".",
"sum",
"(",
"abs",
"(",
"data",
"-",
"centres_data",
")",
")",
"return",
"fitness_function"
] | !
@brief Calculate fitness function values for chromosomes.
@param[in] centres (list): Cluster centers.
@param[in] data (list): Input data that is used for clustering process.
@param[in] chromosomes (list): Chromosomes whose fitness function's values are calculated.
@return (list) Fitness function value for each chromosome correspondingly. | [
"!"
] | python | valid |
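A self-contained numpy example of the city-block fitness computed above, for two chromosomes on three 1-D points (smaller is better):

import numpy as np

data = np.array([[0.0], [1.0], [10.0]])
centres = [np.array([[0.5], [10.0]]),   # centers proposed by chromosome 0
           np.array([[0.0], [9.0]])]    # centers proposed by chromosome 1
chromosomes = [[0, 0, 1], [0, 0, 1]]    # point -> center index, per chromosome

fitness = np.zeros(len(chromosomes))
for c in range(len(chromosomes)):
    assigned = np.array([centres[c][chromosomes[c][i]] for i in range(len(data))])
    fitness[c] += np.sum(abs(data - assigned))
print(fitness)  # [1. 2.] -> chromosome 0 fits the data better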
NatLibFi/Skosify | skosify/skosify.py | https://github.com/NatLibFi/Skosify/blob/1d269987f10df08e706272dcf6a86aef4abebcde/skosify/skosify.py#L546-L569 | def setup_top_concepts(rdf, mark_top_concepts):
"""Determine the top concepts of each concept scheme and mark them using
hasTopConcept/topConceptOf."""
for cs in sorted(rdf.subjects(RDF.type, SKOS.ConceptScheme)):
for conc in sorted(rdf.subjects(SKOS.inScheme, cs)):
if (conc, RDF.type, SKOS.Concept) not in rdf:
continue # not a Concept, so can't be a top concept
# check whether it's a top concept
broader = rdf.value(conc, SKOS.broader, None, any=True)
if broader is None: # yes it is a top concept!
if (cs, SKOS.hasTopConcept, conc) not in rdf and \
(conc, SKOS.topConceptOf, cs) not in rdf:
if mark_top_concepts:
logging.info(
"Marking loose concept %s "
"as top concept of scheme %s", conc, cs)
rdf.add((cs, SKOS.hasTopConcept, conc))
rdf.add((conc, SKOS.topConceptOf, cs))
else:
logging.debug(
"Not marking loose concept %s as top concept "
"of scheme %s, as mark_top_concepts is disabled",
conc, cs) | [
"def",
"setup_top_concepts",
"(",
"rdf",
",",
"mark_top_concepts",
")",
":",
"for",
"cs",
"in",
"sorted",
"(",
"rdf",
".",
"subjects",
"(",
"RDF",
".",
"type",
",",
"SKOS",
".",
"ConceptScheme",
")",
")",
":",
"for",
"conc",
"in",
"sorted",
"(",
"rdf",
".",
"subjects",
"(",
"SKOS",
".",
"inScheme",
",",
"cs",
")",
")",
":",
"if",
"(",
"conc",
",",
"RDF",
".",
"type",
",",
"SKOS",
".",
"Concept",
")",
"not",
"in",
"rdf",
":",
"continue",
"# not a Concept, so can't be a top concept",
"# check whether it's a top concept",
"broader",
"=",
"rdf",
".",
"value",
"(",
"conc",
",",
"SKOS",
".",
"broader",
",",
"None",
",",
"any",
"=",
"True",
")",
"if",
"broader",
"is",
"None",
":",
"# yes it is a top concept!",
"if",
"(",
"cs",
",",
"SKOS",
".",
"hasTopConcept",
",",
"conc",
")",
"not",
"in",
"rdf",
"and",
"(",
"conc",
",",
"SKOS",
".",
"topConceptOf",
",",
"cs",
")",
"not",
"in",
"rdf",
":",
"if",
"mark_top_concepts",
":",
"logging",
".",
"info",
"(",
"\"Marking loose concept %s \"",
"\"as top concept of scheme %s\"",
",",
"conc",
",",
"cs",
")",
"rdf",
".",
"add",
"(",
"(",
"cs",
",",
"SKOS",
".",
"hasTopConcept",
",",
"conc",
")",
")",
"rdf",
".",
"add",
"(",
"(",
"conc",
",",
"SKOS",
".",
"topConceptOf",
",",
"cs",
")",
")",
"else",
":",
"logging",
".",
"debug",
"(",
"\"Not marking loose concept %s as top concept \"",
"\"of scheme %s, as mark_top_concepts is disabled\"",
",",
"conc",
",",
"cs",
")"
] | Determine the top concepts of each concept scheme and mark them using
hasTopConcept/topConceptOf. | [
"Determine",
"the",
"top",
"concepts",
"of",
"each",
"concept",
"scheme",
"and",
"mark",
"them",
"using",
"hasTopConcept",
"/",
"topConceptOf",
"."
] | python | train |
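A minimal rdflib sketch (assuming rdflib is installed and setup_top_concepts from above is in scope): a concept with no skos:broader gets marked as a top concept of its scheme:

from rdflib import Graph, RDF, URIRef
from rdflib.namespace import SKOS

g = Graph()
cs = URIRef('http://example.org/scheme')
top = URIRef('http://example.org/animals')
g.add((cs, RDF.type, SKOS.ConceptScheme))
g.add((top, RDF.type, SKOS.Concept))
g.add((top, SKOS.inScheme, cs))               # no skos:broader -> loose concept

setup_top_concepts(g, mark_top_concepts=True)
print((cs, SKOS.hasTopConcept, top) in g)     # True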
tchellomello/python-arlo | pyarlo/camera.py | https://github.com/tchellomello/python-arlo/blob/db70aeb81705309c56ad32bbab1094f6cd146524/pyarlo/camera.py#L304-L317 | def motion_detection_sensitivity(self):
"""Sensitivity level of Camera motion detection."""
if not self.triggers:
return None
for trigger in self.triggers:
if trigger.get("type") != "pirMotionActive":
continue
sensitivity = trigger.get("sensitivity")
if sensitivity:
return sensitivity.get("default")
return None | [
"def",
"motion_detection_sensitivity",
"(",
"self",
")",
":",
"if",
"not",
"self",
".",
"triggers",
":",
"return",
"None",
"for",
"trigger",
"in",
"self",
".",
"triggers",
":",
"if",
"trigger",
".",
"get",
"(",
"\"type\"",
")",
"!=",
"\"pirMotionActive\"",
":",
"continue",
"sensitivity",
"=",
"trigger",
".",
"get",
"(",
"\"sensitivity\"",
")",
"if",
"sensitivity",
":",
"return",
"sensitivity",
".",
"get",
"(",
"\"default\"",
")",
"return",
"None"
] | Sensitivity level of Camera motion detection. | [
"Sensitivity",
"level",
"of",
"Camera",
"motion",
"detection",
"."
] | python | train |
amzn/ion-python | amazon/ion/reader_text.py | https://github.com/amzn/ion-python/blob/0b21fa3ba7755f55f745e4aa970d86343b82449d/amazon/ion/reader_text.py#L474-L480 | def set_ion_type(self, ion_type):
"""Sets context to the given IonType."""
if ion_type is self.ion_type:
return self
self.ion_type = ion_type
self.line_comment = False
return self | [
"def",
"set_ion_type",
"(",
"self",
",",
"ion_type",
")",
":",
"if",
"ion_type",
"is",
"self",
".",
"ion_type",
":",
"return",
"self",
"self",
".",
"ion_type",
"=",
"ion_type",
"self",
".",
"line_comment",
"=",
"False",
"return",
"self"
] | Sets context to the given IonType. | [
"Sets",
"context",
"to",
"the",
"given",
"IonType",
"."
] | python | train |
Neurita/boyle | boyle/dicom/comparison.py | https://github.com/Neurita/boyle/blob/2dae7199849395a209c887d5f30506e1de8a9ad9/boyle/dicom/comparison.py#L521-L540 | def merge_groups(self, indices):
"""Extend the lists within the DICOM groups dictionary.
The indices will indicate which list have to be extended by which
other list.
Parameters
----------
        indices: list or tuple of 2 iterables of int, both having the same length
The indices of the lists that have to be merged, both iterables
items will be read pair by pair, the first is the index to the
list that will be extended with the list of the second index.
The indices can be constructed with Numpy e.g.,
indices = np.where(square_matrix)
"""
try:
merged = merge_dict_of_lists(self.dicom_groups, indices,
pop_later=True, copy=True)
self.dicom_groups = merged
except IndexError:
raise IndexError('Index out of range to merge DICOM groups.') | [
"def",
"merge_groups",
"(",
"self",
",",
"indices",
")",
":",
"try",
":",
"merged",
"=",
"merge_dict_of_lists",
"(",
"self",
".",
"dicom_groups",
",",
"indices",
",",
"pop_later",
"=",
"True",
",",
"copy",
"=",
"True",
")",
"self",
".",
"dicom_groups",
"=",
"merged",
"except",
"IndexError",
":",
"raise",
"IndexError",
"(",
"'Index out of range to merge DICOM groups.'",
")"
] | Extend the lists within the DICOM groups dictionary.
The indices will indicate which list have to be extended by which
other list.
Parameters
----------
indices: list or tuple of 2 iterables of int, bot having the same len
The indices of the lists that have to be merged, both iterables
items will be read pair by pair, the first is the index to the
list that will be extended with the list of the second index.
The indices can be constructed with Numpy e.g.,
indices = np.where(square_matrix) | [
"Extend",
"the",
"lists",
"within",
"the",
"DICOM",
"groups",
"dictionary",
".",
"The",
"indices",
"will",
"indicate",
"which",
"list",
"have",
"to",
"be",
"extended",
"by",
"which",
"other",
"list",
"."
] | python | valid |
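merge_dict_of_lists itself is not shown above; a standalone sketch of what the indices-driven merge plausibly does on a plain dict of lists (semantics inferred from the docstring, so treat the details as an assumption):

import numpy as np

groups = {'g0': [1], 'g1': [2, 3], 'g2': [4]}
keys = list(groups)
square = np.zeros((3, 3), dtype=bool)    # square[i, j]: extend group i with group j
square[0, 2] = True
to_extend, to_merge = np.where(square)   # the docstring's indices construction
popped = set()
for i, j in zip(to_extend, to_merge):
    groups[keys[i]].extend(groups[keys[j]])
    popped.add(keys[j])
for k in popped:                          # pop_later=True behaviour
    groups.pop(k)
print(groups)  # {'g0': [1, 4], 'g1': [2, 3]}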
mlperf/training | image_classification/tensorflow/official/resnet/imagenet_main.py | https://github.com/mlperf/training/blob/1c6ae725a81d15437a2b2df05cac0673fde5c3a4/image_classification/tensorflow/official/resnet/imagenet_main.py#L242-L273 | def _get_block_sizes(resnet_size):
"""Retrieve the size of each block_layer in the ResNet model.
The number of block layers used for the Resnet model varies according
to the size of the model. This helper grabs the layer set we want, throwing
an error if a non-standard size has been selected.
Args:
resnet_size: The number of convolutional layers needed in the model.
Returns:
A list of block sizes to use in building the model.
Raises:
KeyError: if invalid resnet_size is received.
"""
choices = {
18: [2, 2, 2, 2],
34: [3, 4, 6, 3],
50: [3, 4, 6, 3],
101: [3, 4, 23, 3],
152: [3, 8, 36, 3],
200: [3, 24, 36, 3]
}
try:
return choices[resnet_size]
except KeyError:
err = ('Could not find layers for selected Resnet size.\n'
'Size received: {}; sizes allowed: {}.'.format(
resnet_size, choices.keys()))
raise ValueError(err) | [
"def",
"_get_block_sizes",
"(",
"resnet_size",
")",
":",
"choices",
"=",
"{",
"18",
":",
"[",
"2",
",",
"2",
",",
"2",
",",
"2",
"]",
",",
"34",
":",
"[",
"3",
",",
"4",
",",
"6",
",",
"3",
"]",
",",
"50",
":",
"[",
"3",
",",
"4",
",",
"6",
",",
"3",
"]",
",",
"101",
":",
"[",
"3",
",",
"4",
",",
"23",
",",
"3",
"]",
",",
"152",
":",
"[",
"3",
",",
"8",
",",
"36",
",",
"3",
"]",
",",
"200",
":",
"[",
"3",
",",
"24",
",",
"36",
",",
"3",
"]",
"}",
"try",
":",
"return",
"choices",
"[",
"resnet_size",
"]",
"except",
"KeyError",
":",
"err",
"=",
"(",
"'Could not find layers for selected Resnet size.\\n'",
"'Size received: {}; sizes allowed: {}.'",
".",
"format",
"(",
"resnet_size",
",",
"choices",
".",
"keys",
"(",
")",
")",
")",
"raise",
"ValueError",
"(",
"err",
")"
] | Retrieve the size of each block_layer in the ResNet model.
The number of block layers used for the Resnet model varies according
to the size of the model. This helper grabs the layer set we want, throwing
an error if a non-standard size has been selected.
Args:
resnet_size: The number of convolutional layers needed in the model.
Returns:
A list of block sizes to use in building the model.
Raises:
KeyError: if invalid resnet_size is received. | [
"Retrieve",
"the",
"size",
"of",
"each",
"block_layer",
"in",
"the",
"ResNet",
"model",
"."
] | python | train |
deepmind/pysc2 | pysc2/lib/stopwatch.py | https://github.com/deepmind/pysc2/blob/df4cc4b00f07a2242be9ba153d4a7f4ad2017897/pysc2/lib/stopwatch.py#L254-L282 | def str(self, threshold=0.1):
"""Return a string representation of the timings."""
if not self._times:
return ""
total = sum(s.sum for k, s in six.iteritems(self._times) if "." not in k)
table = [["", "% total", "sum", "avg", "dev", "min", "max", "num"]]
for k, v in sorted(self._times.items()):
percent = 100 * v.sum / (total or 1)
if percent > threshold: # ignore anything below the threshold
table.append([
k,
"%.2f%%" % percent,
"%.4f" % v.sum,
"%.4f" % v.avg,
"%.4f" % v.dev,
"%.4f" % v.min,
"%.4f" % v.max,
"%d" % v.num,
])
col_widths = [max(len(row[i]) for row in table)
for i in range(len(table[0]))]
out = ""
for row in table:
out += " " + row[0].ljust(col_widths[0]) + " "
out += " ".join(
val.rjust(width) for val, width in zip(row[1:], col_widths[1:]))
out += "\n"
return out | [
"def",
"str",
"(",
"self",
",",
"threshold",
"=",
"0.1",
")",
":",
"if",
"not",
"self",
".",
"_times",
":",
"return",
"\"\"",
"total",
"=",
"sum",
"(",
"s",
".",
"sum",
"for",
"k",
",",
"s",
"in",
"six",
".",
"iteritems",
"(",
"self",
".",
"_times",
")",
"if",
"\".\"",
"not",
"in",
"k",
")",
"table",
"=",
"[",
"[",
"\"\"",
",",
"\"% total\"",
",",
"\"sum\"",
",",
"\"avg\"",
",",
"\"dev\"",
",",
"\"min\"",
",",
"\"max\"",
",",
"\"num\"",
"]",
"]",
"for",
"k",
",",
"v",
"in",
"sorted",
"(",
"self",
".",
"_times",
".",
"items",
"(",
")",
")",
":",
"percent",
"=",
"100",
"*",
"v",
".",
"sum",
"/",
"(",
"total",
"or",
"1",
")",
"if",
"percent",
">",
"threshold",
":",
"# ignore anything below the threshold",
"table",
".",
"append",
"(",
"[",
"k",
",",
"\"%.2f%%\"",
"%",
"percent",
",",
"\"%.4f\"",
"%",
"v",
".",
"sum",
",",
"\"%.4f\"",
"%",
"v",
".",
"avg",
",",
"\"%.4f\"",
"%",
"v",
".",
"dev",
",",
"\"%.4f\"",
"%",
"v",
".",
"min",
",",
"\"%.4f\"",
"%",
"v",
".",
"max",
",",
"\"%d\"",
"%",
"v",
".",
"num",
",",
"]",
")",
"col_widths",
"=",
"[",
"max",
"(",
"len",
"(",
"row",
"[",
"i",
"]",
")",
"for",
"row",
"in",
"table",
")",
"for",
"i",
"in",
"range",
"(",
"len",
"(",
"table",
"[",
"0",
"]",
")",
")",
"]",
"out",
"=",
"\"\"",
"for",
"row",
"in",
"table",
":",
"out",
"+=",
"\" \"",
"+",
"row",
"[",
"0",
"]",
".",
"ljust",
"(",
"col_widths",
"[",
"0",
"]",
")",
"+",
"\" \"",
"out",
"+=",
"\" \"",
".",
"join",
"(",
"val",
".",
"rjust",
"(",
"width",
")",
"for",
"val",
",",
"width",
"in",
"zip",
"(",
"row",
"[",
"1",
":",
"]",
",",
"col_widths",
"[",
"1",
":",
"]",
")",
")",
"out",
"+=",
"\"\\n\"",
"return",
"out"
] | Return a string representation of the timings. | [
"Return",
"a",
"string",
"representation",
"of",
"the",
"timings",
"."
] | python | train |
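The alignment logic above, extracted into a runnable mini-example: the first column is left-justified, the rest are right-justified to the widest cell in each column:

table = [['', 'sum', 'num'],
         ['step', '0.1234', '10'],
         ['total', '12.0', '3']]
col_widths = [max(len(row[i]) for row in table) for i in range(len(table[0]))]
out = ''
for row in table:
    out += ' ' + row[0].ljust(col_widths[0]) + ' '
    out += ' '.join(val.rjust(w) for val, w in zip(row[1:], col_widths[1:]))
    out += '\n'
print(out)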
saltstack/salt | salt/utils/aggregation.py | https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/utils/aggregation.py#L151-L185 | def levelise(level):
'''
Describe which levels are allowed to do deep merging.
level can be:
True
all levels are True
False
all levels are False
an int
only the first levels are True, the others are False
a sequence
it describes which levels are True, it can be:
* a list of bool and int values
* a string of 0 and 1 characters
'''
if not level: # False, 0, [] ...
return False, False
if level is True:
return True, True
if isinstance(level, int):
return True, level - 1
try: # a sequence
deep, subs = int(level[0]), level[1:]
return bool(deep), subs
except Exception as error:
log.warning(error)
raise | [
"def",
"levelise",
"(",
"level",
")",
":",
"if",
"not",
"level",
":",
"# False, 0, [] ...",
"return",
"False",
",",
"False",
"if",
"level",
"is",
"True",
":",
"return",
"True",
",",
"True",
"if",
"isinstance",
"(",
"level",
",",
"int",
")",
":",
"return",
"True",
",",
"level",
"-",
"1",
"try",
":",
"# a sequence",
"deep",
",",
"subs",
"=",
"int",
"(",
"level",
"[",
"0",
"]",
")",
",",
"level",
"[",
"1",
":",
"]",
"return",
"bool",
"(",
"deep",
")",
",",
"subs",
"except",
"Exception",
"as",
"error",
":",
"log",
".",
"warning",
"(",
"error",
")",
"raise"
] | Describe which levels are allowed to do deep merging.
level can be:
True
all levels are True
False
all levels are False
an int
only the first levels are True, the others are False
a sequence
it describes which levels are True, it can be:
* a list of bool and int values
* a string of 0 and 1 characters | [
"Describe",
"which",
"levels",
"are",
"allowed",
"to",
"do",
"deep",
"merging",
"."
] | python | train |
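Expected behaviour for a few inputs, as read from the code above (assuming levelise is in scope):

print(levelise(False))  # (False, False)
print(levelise(True))   # (True, True)
print(levelise(3))      # (True, 2)    -> deep now, integer countdown for deeper levels
print(levelise('101'))  # (True, '01') -> deep now, per-character plan for levels below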
ming060/robotframework-uiautomatorlibrary | uiautomatorlibrary/Mobile.py | https://github.com/ming060/robotframework-uiautomatorlibrary/blob/b70202b6a8aa68b4efd9d029c2845407fb33451a/uiautomatorlibrary/Mobile.py#L296-L302 | def swipe_bottom(self, steps=10, *args, **selectors):
"""
Swipe the UI object with *selectors* from center to bottom
See `Swipe Left` for more details.
"""
self.device(**selectors).swipe.down(steps=steps) | [
"def",
"swipe_bottom",
"(",
"self",
",",
"steps",
"=",
"10",
",",
"*",
"args",
",",
"*",
"*",
"selectors",
")",
":",
"self",
".",
"device",
"(",
"*",
"*",
"selectors",
")",
".",
"swipe",
".",
"down",
"(",
"steps",
"=",
"steps",
")"
] | Swipe the UI object with *selectors* from center to bottom
See `Swipe Left` for more details. | [
"Swipe",
"the",
"UI",
"object",
"with",
"*",
"selectors",
"*",
"from",
"center",
"to",
"bottom"
] | python | train |
ask/carrot | carrot/backends/queue.py | https://github.com/ask/carrot/blob/5889a25cd2e274642071c9bba39772f4b3e3d9da/carrot/backends/queue.py#L74-L78 | def queue_purge(self, queue, **kwargs):
"""Discard all messages in the queue."""
qsize = mqueue.qsize()
mqueue.queue.clear()
return qsize | [
"def",
"queue_purge",
"(",
"self",
",",
"queue",
",",
"*",
"*",
"kwargs",
")",
":",
"qsize",
"=",
"mqueue",
".",
"qsize",
"(",
")",
"mqueue",
".",
"queue",
".",
"clear",
"(",
")",
"return",
"qsize"
] | Discard all messages in the queue. | [
"Discard",
"all",
"messages",
"in",
"the",
"queue",
"."
] | python | train |
uber/rides-python-sdk | uber_rides/client.py | https://github.com/uber/rides-python-sdk/blob/76ecd75ab5235d792ec1010e36eca679ba285127/uber_rides/client.py#L211-L242 | def get_promotions(
self,
start_latitude,
start_longitude,
end_latitude,
end_longitude,
):
"""Get information about the promotions available to a user.
Parameters
start_latitude (float)
The latitude component of a start location.
start_longitude (float)
The longitude component of a start location.
end_latitude (float)
            The latitude component of an end location.
end_longitude (float)
            The longitude component of an end location.
Returns
(Response)
A Response object containing available promotions.
"""
args = OrderedDict([
('start_latitude', start_latitude),
('start_longitude', start_longitude),
('end_latitude', end_latitude),
('end_longitude', end_longitude)
])
return self._api_call('GET', 'v1.2/promotions', args=args) | [
"def",
"get_promotions",
"(",
"self",
",",
"start_latitude",
",",
"start_longitude",
",",
"end_latitude",
",",
"end_longitude",
",",
")",
":",
"args",
"=",
"OrderedDict",
"(",
"[",
"(",
"'start_latitude'",
",",
"start_latitude",
")",
",",
"(",
"'start_longitude'",
",",
"start_longitude",
")",
",",
"(",
"'end_latitude'",
",",
"end_latitude",
")",
",",
"(",
"'end_longitude'",
",",
"end_longitude",
")",
"]",
")",
"return",
"self",
".",
"_api_call",
"(",
"'GET'",
",",
"'v1.2/promotions'",
",",
"args",
"=",
"args",
")"
] | Get information about the promotions available to a user.
Parameters
start_latitude (float)
The latitude component of a start location.
start_longitude (float)
The longitude component of a start location.
end_latitude (float)
The latitude component of a end location.
end_longitude (float)
The longitude component of a end location.
Returns
(Response)
A Response object containing available promotions. | [
"Get",
"information",
"about",
"the",
"promotions",
"available",
"to",
"a",
"user",
"."
] | python | train |
networks-lab/metaknowledge | metaknowledge/medline/tagProcessing/tagFunctions.py | https://github.com/networks-lab/metaknowledge/blob/8162bf95e66bb6f9916081338e6e2a6132faff75/metaknowledge/medline/tagProcessing/tagFunctions.py#L218-L225 | def AD(val):
"""Affiliation
    Undoing what the parser does then splitting at the semicolons and dropping newlines; extra filtering is required because some AD's end with a semicolon"""
retDict = {}
for v in val:
split = v.split(' : ')
retDict[split[0]] = [s for s in' : '.join(split[1:]).replace('\n', '').split(';') if s != '']
return retDict | [
"def",
"AD",
"(",
"val",
")",
":",
"retDict",
"=",
"{",
"}",
"for",
"v",
"in",
"val",
":",
"split",
"=",
"v",
".",
"split",
"(",
"' : '",
")",
"retDict",
"[",
"split",
"[",
"0",
"]",
"]",
"=",
"[",
"s",
"for",
"s",
"in",
"' : '",
".",
"join",
"(",
"split",
"[",
"1",
":",
"]",
")",
".",
"replace",
"(",
"'\\n'",
",",
"''",
")",
".",
"split",
"(",
"';'",
")",
"if",
"s",
"!=",
"''",
"]",
"return",
"retDict"
] | Affiliation
Undoing what the parser does then splitting at the semicolons and dropping newlines; extra filtering is required because some AD's end with a semicolon | [
"Affiliation",
"Undoing",
"what",
"the",
"parser",
"does",
"then",
"splitting",
"at",
"the",
"semicolons",
"and",
"dropping",
"newlines",
"extra",
"fitlering",
"is",
"required",
"beacuse",
"some",
"AD",
"s",
"end",
"with",
"a",
"semicolon"
] | python | train |
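The same splitting logic run standalone on a made-up MEDLINE AD value (redefined locally so the snippet is self-contained):

def ad_demo(val):  # identical logic to AD above
    retDict = {}
    for v in val:
        split = v.split(' : ')
        retDict[split[0]] = [s for s in ' : '.join(split[1:]).replace('\n', '').split(';') if s != '']
    return retDict

print(ad_demo(['Smith J : Dept of Biology, Univ A;Institute B;']))
# {'Smith J': ['Dept of Biology, Univ A', 'Institute B']}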
Trax-air/swagger-parser | swagger_parser/swagger_parser.py | https://github.com/Trax-air/swagger-parser/blob/d97f962a417e76320c59c33dcb223e4373e516d5/swagger_parser/swagger_parser.py#L535-L569 | def _validate_type(self, properties_spec, value):
"""Validate the given value with the given property spec.
Args:
            properties_spec: specification of the property to check (from definition, not route).
value: value to check.
Returns:
True if the value is valid for the given spec.
"""
if 'type' not in properties_spec.keys():
# Validate sub definition
def_name = self.get_definition_name_from_ref(properties_spec['$ref'])
return self.validate_definition(def_name, value)
# Validate array
elif properties_spec['type'] == 'array':
if not isinstance(value, list):
return False
# Check type
if ('type' in properties_spec['items'].keys() and
any(not self.check_type(item, properties_spec['items']['type']) for item in value)):
return False
# Check ref
elif ('$ref' in properties_spec['items'].keys()):
def_name = self.get_definition_name_from_ref(properties_spec['items']['$ref'])
if any(not self.validate_definition(def_name, item) for item in value):
return False
else: # Classic types
if not self.check_type(value, properties_spec['type']):
return False
return True | [
"def",
"_validate_type",
"(",
"self",
",",
"properties_spec",
",",
"value",
")",
":",
"if",
"'type'",
"not",
"in",
"properties_spec",
".",
"keys",
"(",
")",
":",
"# Validate sub definition",
"def_name",
"=",
"self",
".",
"get_definition_name_from_ref",
"(",
"properties_spec",
"[",
"'$ref'",
"]",
")",
"return",
"self",
".",
"validate_definition",
"(",
"def_name",
",",
"value",
")",
"# Validate array",
"elif",
"properties_spec",
"[",
"'type'",
"]",
"==",
"'array'",
":",
"if",
"not",
"isinstance",
"(",
"value",
",",
"list",
")",
":",
"return",
"False",
"# Check type",
"if",
"(",
"'type'",
"in",
"properties_spec",
"[",
"'items'",
"]",
".",
"keys",
"(",
")",
"and",
"any",
"(",
"not",
"self",
".",
"check_type",
"(",
"item",
",",
"properties_spec",
"[",
"'items'",
"]",
"[",
"'type'",
"]",
")",
"for",
"item",
"in",
"value",
")",
")",
":",
"return",
"False",
"# Check ref",
"elif",
"(",
"'$ref'",
"in",
"properties_spec",
"[",
"'items'",
"]",
".",
"keys",
"(",
")",
")",
":",
"def_name",
"=",
"self",
".",
"get_definition_name_from_ref",
"(",
"properties_spec",
"[",
"'items'",
"]",
"[",
"'$ref'",
"]",
")",
"if",
"any",
"(",
"not",
"self",
".",
"validate_definition",
"(",
"def_name",
",",
"item",
")",
"for",
"item",
"in",
"value",
")",
":",
"return",
"False",
"else",
":",
"# Classic types",
"if",
"not",
"self",
".",
"check_type",
"(",
"value",
",",
"properties_spec",
"[",
"'type'",
"]",
")",
":",
"return",
"False",
"return",
"True"
] | Validate the given value with the given property spec.
Args:
properties_spec: specification of the property to check (from definition, not route).
value: value to check.
Returns:
True if the value is valid for the given spec. | [
"Validate",
"the",
"given",
"value",
"with",
"the",
"given",
"property",
"spec",
"."
] | python | train |
cldf/pycldf | src/pycldf/__main__.py | https://github.com/cldf/pycldf/blob/636f1eb3ea769394e14ad9e42a83b6096efa9728/src/pycldf/__main__.py#L37-L47 | def validate(args):
"""
cldf validate <DATASET>
Validate a dataset against the CLDF specification, i.e. check
- whether required tables and columns are present
- whether values for required columns are present
- the referential integrity of the dataset
"""
ds = _get_dataset(args)
ds.validate(log=args.log) | [
"def",
"validate",
"(",
"args",
")",
":",
"ds",
"=",
"_get_dataset",
"(",
"args",
")",
"ds",
".",
"validate",
"(",
"log",
"=",
"args",
".",
"log",
")"
] | cldf validate <DATASET>
Validate a dataset against the CLDF specification, i.e. check
- whether required tables and columns are present
- whether values for required columns are present
- the referential integrity of the dataset | [
"cldf",
"validate",
"<DATASET",
">"
] | python | valid |
pypa/pipenv | pipenv/vendor/urllib3/packages/backports/makefile.py | https://github.com/pypa/pipenv/blob/cae8d76c210b9777e90aab76e9c4b0e53bb19cde/pipenv/vendor/urllib3/packages/backports/makefile.py#L14-L53 | def backport_makefile(self, mode="r", buffering=None, encoding=None,
errors=None, newline=None):
"""
Backport of ``socket.makefile`` from Python 3.5.
"""
if not set(mode) <= {"r", "w", "b"}:
raise ValueError(
"invalid mode %r (only r, w, b allowed)" % (mode,)
)
writing = "w" in mode
reading = "r" in mode or not writing
assert reading or writing
binary = "b" in mode
rawmode = ""
if reading:
rawmode += "r"
if writing:
rawmode += "w"
raw = SocketIO(self, rawmode)
self._makefile_refs += 1
if buffering is None:
buffering = -1
if buffering < 0:
buffering = io.DEFAULT_BUFFER_SIZE
if buffering == 0:
if not binary:
raise ValueError("unbuffered streams must be binary")
return raw
if reading and writing:
buffer = io.BufferedRWPair(raw, raw, buffering)
elif reading:
buffer = io.BufferedReader(raw, buffering)
else:
assert writing
buffer = io.BufferedWriter(raw, buffering)
if binary:
return buffer
text = io.TextIOWrapper(buffer, encoding, errors, newline)
text.mode = mode
return text | [
"def",
"backport_makefile",
"(",
"self",
",",
"mode",
"=",
"\"r\"",
",",
"buffering",
"=",
"None",
",",
"encoding",
"=",
"None",
",",
"errors",
"=",
"None",
",",
"newline",
"=",
"None",
")",
":",
"if",
"not",
"set",
"(",
"mode",
")",
"<=",
"{",
"\"r\"",
",",
"\"w\"",
",",
"\"b\"",
"}",
":",
"raise",
"ValueError",
"(",
"\"invalid mode %r (only r, w, b allowed)\"",
"%",
"(",
"mode",
",",
")",
")",
"writing",
"=",
"\"w\"",
"in",
"mode",
"reading",
"=",
"\"r\"",
"in",
"mode",
"or",
"not",
"writing",
"assert",
"reading",
"or",
"writing",
"binary",
"=",
"\"b\"",
"in",
"mode",
"rawmode",
"=",
"\"\"",
"if",
"reading",
":",
"rawmode",
"+=",
"\"r\"",
"if",
"writing",
":",
"rawmode",
"+=",
"\"w\"",
"raw",
"=",
"SocketIO",
"(",
"self",
",",
"rawmode",
")",
"self",
".",
"_makefile_refs",
"+=",
"1",
"if",
"buffering",
"is",
"None",
":",
"buffering",
"=",
"-",
"1",
"if",
"buffering",
"<",
"0",
":",
"buffering",
"=",
"io",
".",
"DEFAULT_BUFFER_SIZE",
"if",
"buffering",
"==",
"0",
":",
"if",
"not",
"binary",
":",
"raise",
"ValueError",
"(",
"\"unbuffered streams must be binary\"",
")",
"return",
"raw",
"if",
"reading",
"and",
"writing",
":",
"buffer",
"=",
"io",
".",
"BufferedRWPair",
"(",
"raw",
",",
"raw",
",",
"buffering",
")",
"elif",
"reading",
":",
"buffer",
"=",
"io",
".",
"BufferedReader",
"(",
"raw",
",",
"buffering",
")",
"else",
":",
"assert",
"writing",
"buffer",
"=",
"io",
".",
"BufferedWriter",
"(",
"raw",
",",
"buffering",
")",
"if",
"binary",
":",
"return",
"buffer",
"text",
"=",
"io",
".",
"TextIOWrapper",
"(",
"buffer",
",",
"encoding",
",",
"errors",
",",
"newline",
")",
"text",
".",
"mode",
"=",
"mode",
"return",
"text"
] | Backport of ``socket.makefile`` from Python 3.5. | [
"Backport",
"of",
"socket",
".",
"makefile",
"from",
"Python",
"3",
".",
"5",
"."
] | python | train |
abourget/gevent-socketio | socketio/virtsocket.py | https://github.com/abourget/gevent-socketio/blob/1cdb1594a315326987a17ce0924ea448a82fab01/socketio/virtsocket.py#L331-L334 | def send_packet(self, pkt):
"""Low-level interface to queue a packet on the wire (encoded as wire
protocol"""
self.put_client_msg(packet.encode(pkt, self.json_dumps)) | [
"def",
"send_packet",
"(",
"self",
",",
"pkt",
")",
":",
"self",
".",
"put_client_msg",
"(",
"packet",
".",
"encode",
"(",
"pkt",
",",
"self",
".",
"json_dumps",
")",
")"
] | Low-level interface to queue a packet on the wire (encoded as wire
protocol) | [
"Low",
"-",
"level",
"interface",
"to",
"queue",
"a",
"packet",
"on",
"the",
"wire",
"(",
"encoded",
"as",
"wire",
"protocol"
] | python | valid |
RetailMeNotSandbox/acky | acky/ec2.py | https://github.com/RetailMeNotSandbox/acky/blob/fcd4d092c42892ede7c924cafc41e9cf4be3fb9f/acky/ec2.py#L260-L288 | def Launcher(self, config=None):
"""Provides a configurable launcher for EC2 instances."""
class _launcher(EC2ApiClient):
"""Configurable launcher for EC2 instances. Create the Launcher
(passing an optional dict of its attributes), set its attributes
(as described in the RunInstances API docs), then launch().
"""
def __init__(self, aws, config):
super(_launcher, self).__init__(aws)
self.config = config
self._attr = list(self.__dict__.keys()) + ['_attr']
def launch(self, ami, min_count, max_count=0):
"""Use given AMI to launch min_count instances with the
current configuration. Returns instance info list.
"""
params = config.copy()
params.update(dict([i for i in self.__dict__.items()
if i[0] not in self._attr]))
return self.call("RunInstances",
ImageId=ami,
MinCount=min_count,
MaxCount=max_count or min_count,
response_data_key="Instances",
**params)
if not config:
config = {}
return _launcher(self._aws, config) | [
"def",
"Launcher",
"(",
"self",
",",
"config",
"=",
"None",
")",
":",
"class",
"_launcher",
"(",
"EC2ApiClient",
")",
":",
"\"\"\"Configurable launcher for EC2 instances. Create the Launcher\n (passing an optional dict of its attributes), set its attributes\n (as described in the RunInstances API docs), then launch().\n \"\"\"",
"def",
"__init__",
"(",
"self",
",",
"aws",
",",
"config",
")",
":",
"super",
"(",
"_launcher",
",",
"self",
")",
".",
"__init__",
"(",
"aws",
")",
"self",
".",
"config",
"=",
"config",
"self",
".",
"_attr",
"=",
"list",
"(",
"self",
".",
"__dict__",
".",
"keys",
"(",
")",
")",
"+",
"[",
"'_attr'",
"]",
"def",
"launch",
"(",
"self",
",",
"ami",
",",
"min_count",
",",
"max_count",
"=",
"0",
")",
":",
"\"\"\"Use given AMI to launch min_count instances with the\n current configuration. Returns instance info list.\n \"\"\"",
"params",
"=",
"config",
".",
"copy",
"(",
")",
"params",
".",
"update",
"(",
"dict",
"(",
"[",
"i",
"for",
"i",
"in",
"self",
".",
"__dict__",
".",
"items",
"(",
")",
"if",
"i",
"[",
"0",
"]",
"not",
"in",
"self",
".",
"_attr",
"]",
")",
")",
"return",
"self",
".",
"call",
"(",
"\"RunInstances\"",
",",
"ImageId",
"=",
"ami",
",",
"MinCount",
"=",
"min_count",
",",
"MaxCount",
"=",
"max_count",
"or",
"min_count",
",",
"response_data_key",
"=",
"\"Instances\"",
",",
"*",
"*",
"params",
")",
"if",
"not",
"config",
":",
"config",
"=",
"{",
"}",
"return",
"_launcher",
"(",
"self",
".",
"_aws",
",",
"config",
")"
] | Provides a configurable launcher for EC2 instances. | [
"Provides",
"a",
"configurable",
"launcher",
"for",
"EC2",
"instances",
"."
] | python | train |
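The __dict__-snapshot trick used by _launcher above (attributes set after construction become request parameters), extracted into a standalone sketch with no AWS dependency; Configured is a hypothetical stand-in:

class Configured:  # hypothetical stand-in for _launcher's attribute handling
    def __init__(self, config):
        self.config = config
        self._attr = list(self.__dict__.keys()) + ['_attr']  # snapshot of baseline attrs
    def params(self):
        p = self.config.copy()
        p.update({k: v for k, v in self.__dict__.items() if k not in self._attr})
        return p

c = Configured({'KeyName': 'my-key'})
c.InstanceType = 'm3.medium'   # any RunInstances-style parameter, set as an attribute
print(c.params())              # {'KeyName': 'my-key', 'InstanceType': 'm3.medium'}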
cloudsmith-io/cloudsmith-cli | cloudsmith_cli/cli/validators.py | https://github.com/cloudsmith-io/cloudsmith-cli/blob/5bc245ca5d0bfa85380be48e7c206b4c86cc6c8e/cloudsmith_cli/cli/validators.py#L104-L108 | def validate_owner_repo_distro(ctx, param, value):
"""Ensure that owner/repo/distro/version is formatted correctly."""
# pylint: disable=unused-argument
form = "OWNER/REPO/DISTRO[/RELEASE]"
return validate_slashes(param, value, minimum=3, maximum=4, form=form) | [
"def",
"validate_owner_repo_distro",
"(",
"ctx",
",",
"param",
",",
"value",
")",
":",
"# pylint: disable=unused-argument",
"form",
"=",
"\"OWNER/REPO/DISTRO[/RELEASE]\"",
"return",
"validate_slashes",
"(",
"param",
",",
"value",
",",
"minimum",
"=",
"3",
",",
"maximum",
"=",
"4",
",",
"form",
"=",
"form",
")"
] | Ensure that owner/repo/distro/version is formatted correctly. | [
"Ensure",
"that",
"owner",
"/",
"repo",
"/",
"distro",
"/",
"version",
"is",
"formatted",
"correctly",
"."
] | python | train |
materialsproject/pymatgen | pymatgen/symmetry/groups.py | https://github.com/materialsproject/pymatgen/blob/4ca558cf72f8d5f8a1f21dfdfc0181a971c186da/pymatgen/symmetry/groups.py#L66-L77 | def is_subgroup(self, supergroup):
"""
True if this group is a subgroup of the supplied group.
Args:
supergroup (SymmetryGroup): Supergroup to test.
Returns:
True if this group is a subgroup of the supplied group.
"""
warnings.warn("This is not fully functional. Only trivial subsets are tested right now. ")
return set(self.symmetry_ops).issubset(supergroup.symmetry_ops) | [
"def",
"is_subgroup",
"(",
"self",
",",
"supergroup",
")",
":",
"warnings",
".",
"warn",
"(",
"\"This is not fully functional. Only trivial subsets are tested right now. \"",
")",
"return",
"set",
"(",
"self",
".",
"symmetry_ops",
")",
".",
"issubset",
"(",
"supergroup",
".",
"symmetry_ops",
")"
] | True if this group is a subgroup of the supplied group.
Args:
supergroup (SymmetryGroup): Supergroup to test.
Returns:
True if this group is a subgroup of the supplied group. | [
"True",
"if",
"this",
"group",
"is",
"a",
"subgroup",
"of",
"the",
"supplied",
"group",
"."
] | python | train |
flo-compbio/genometools | genometools/expression/matrix.py | https://github.com/flo-compbio/genometools/blob/dd962bb26d60a0f14ca14d8c9a4dd75768962c7d/genometools/expression/matrix.py#L469-L493 | def write_tsv(self, file_path: str, encoding: str = 'UTF-8',
sep: str = '\t'):
"""Write expression matrix to a tab-delimited text file.
Parameters
----------
file_path: str
The path of the output file.
encoding: str, optional
The file encoding. ("UTF-8")
Returns
-------
None
"""
#if six.PY2:
# sep = sep.encode('UTF-8')
self.to_csv(
file_path, sep=sep, float_format='%.5f', mode='w',
encoding=encoding, quoting=csv.QUOTE_NONE
)
_LOGGER.info('Wrote %d x %d expression matrix to "%s".',
self.p, self.n, file_path) | [
"def",
"write_tsv",
"(",
"self",
",",
"file_path",
":",
"str",
",",
"encoding",
":",
"str",
"=",
"'UTF-8'",
",",
"sep",
":",
"str",
"=",
"'\\t'",
")",
":",
"#if six.PY2:",
"# sep = sep.encode('UTF-8')",
"self",
".",
"to_csv",
"(",
"file_path",
",",
"sep",
"=",
"sep",
",",
"float_format",
"=",
"'%.5f'",
",",
"mode",
"=",
"'w'",
",",
"encoding",
"=",
"encoding",
",",
"quoting",
"=",
"csv",
".",
"QUOTE_NONE",
")",
"_LOGGER",
".",
"info",
"(",
"'Wrote %d x %d expression matrix to \"%s\".'",
",",
"self",
".",
"p",
",",
"self",
".",
"n",
",",
"file_path",
")"
] | Write expression matrix to a tab-delimited text file.
Parameters
----------
file_path: str
The path of the output file.
encoding: str, optional
The file encoding. ("UTF-8")
Returns
-------
None | [
"Write",
"expression",
"matrix",
"to",
"a",
"tab",
"-",
"delimited",
"text",
"file",
"."
] | python | train |
drbild/sslpsk | sslpsk/sslpsk.py | https://github.com/drbild/sslpsk/blob/583f7b1f775c33ddc1196a400188005c50cfeb0f/sslpsk/sslpsk.py#L38-L47 | def _python_psk_client_callback(ssl_id, hint):
"""Called by _sslpsk.c to return the (psk, identity) tuple for the socket with
the specified ssl socket.
"""
if ssl_id not in _callbacks:
return ("", "")
else:
res = _callbacks[ssl_id](hint)
return res if isinstance(res, tuple) else (res, "") | [
"def",
"_python_psk_client_callback",
"(",
"ssl_id",
",",
"hint",
")",
":",
"if",
"ssl_id",
"not",
"in",
"_callbacks",
":",
"return",
"(",
"\"\"",
",",
"\"\"",
")",
"else",
":",
"res",
"=",
"_callbacks",
"[",
"ssl_id",
"]",
"(",
"hint",
")",
"return",
"res",
"if",
"isinstance",
"(",
"res",
",",
"tuple",
")",
"else",
"(",
"res",
",",
"\"\"",
")"
] | Called by _sslpsk.c to return the (psk, identity) tuple for the socket with
the specified ssl id. | [
"Called",
"by",
"_sslpsk",
".",
"c",
"to",
"return",
"the",
"(",
"psk",
"identity",
")",
"tuple",
"for",
"the",
"socket",
"with",
"the",
"specified",
"ssl",
"socket",
"."
] | python | train |
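The C extension resolves callbacks through a module-level dict keyed by ssl id; a self-contained sketch of the same dispatch, with illustrative ids and secrets:

_callbacks = {}
_callbacks[42] = lambda hint: ('secret-psk', 'client-identity')

def psk_for(ssl_id, hint):
    # Same contract as above: unknown id -> empty pair, bare psk -> (psk, '').
    if ssl_id not in _callbacks:
        return ('', '')
    res = _callbacks[ssl_id](hint)
    return res if isinstance(res, tuple) else (res, '')

print(psk_for(42, b'server-hint'))   # ('secret-psk', 'client-identity')
print(psk_for(99, b'server-hint'))   # ('', '')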
shexSpec/grammar | parsers/python/pyshexc/parser_impl/shex_shape_expression_parser.py | https://github.com/shexSpec/grammar/blob/4497cd1f73fa6703bca6e2cb53ba9c120f22e48c/parsers/python/pyshexc/parser_impl/shex_shape_expression_parser.py#L46-L55 | def visitShapeOr(self, ctx: ShExDocParser.ShapeOrContext):
""" shapeOr: shapeAnd (KW_OR shapeAnd)* """
if len(ctx.shapeAnd()) > 1:
self.expr = ShapeOr(id=self.label, shapeExprs=[])
for sa in ctx.shapeAnd():
sep = ShexShapeExpressionParser(self.context)
sep.visit(sa)
self.expr.shapeExprs.append(sep.expr)
else:
self.visit(ctx.shapeAnd(0)) | [
"def",
"visitShapeOr",
"(",
"self",
",",
"ctx",
":",
"ShExDocParser",
".",
"ShapeOrContext",
")",
":",
"if",
"len",
"(",
"ctx",
".",
"shapeAnd",
"(",
")",
")",
">",
"1",
":",
"self",
".",
"expr",
"=",
"ShapeOr",
"(",
"id",
"=",
"self",
".",
"label",
",",
"shapeExprs",
"=",
"[",
"]",
")",
"for",
"sa",
"in",
"ctx",
".",
"shapeAnd",
"(",
")",
":",
"sep",
"=",
"ShexShapeExpressionParser",
"(",
"self",
".",
"context",
")",
"sep",
".",
"visit",
"(",
"sa",
")",
"self",
".",
"expr",
".",
"shapeExprs",
".",
"append",
"(",
"sep",
".",
"expr",
")",
"else",
":",
"self",
".",
"visit",
"(",
"ctx",
".",
"shapeAnd",
"(",
"0",
")",
")"
] | shapeOr: shapeAnd (KW_OR shapeAnd)* | [
"shapeOr",
":",
"shapeAnd",
"(",
"KW_OR",
"shapeAnd",
")",
"*"
] | python | train |
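The visitor only wraps its children in a ShapeOr node when there is more than one disjunct; a reduced stand-in for that fold, with mocked-up classes:

class ShapeOr:
    def __init__(self, shapeExprs):
        self.shapeExprs = shapeExprs

def fold_or(children, visit):
    # More than one disjunct: build an OR node; otherwise descend directly.
    if len(children) > 1:
        return ShapeOr([visit(c) for c in children])
    return visit(children[0])

print(fold_or(['a'], str.upper))                   # 'A'
print(fold_or(['a', 'b'], str.upper).shapeExprs)   # ['A', 'B']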
spyder-ide/spyder | spyder/plugins/ipythonconsole/plugin.py | https://github.com/spyder-ide/spyder/blob/f76836ce1b924bcc4efd3f74f2960d26a4e528e0/spyder/plugins/ipythonconsole/plugin.py#L191-L219 | def apply_plugin_settings(self, options):
"""Apply configuration file's plugin settings"""
font_n = 'plugin_font'
font_o = self.get_plugin_font()
help_n = 'connect_to_oi'
help_o = CONF.get('help', 'connect/ipython_console')
color_scheme_n = 'color_scheme_name'
color_scheme_o = CONF.get('appearance', 'selected')
show_time_n = 'show_elapsed_time'
show_time_o = self.get_option(show_time_n)
reset_namespace_n = 'show_reset_namespace_warning'
reset_namespace_o = self.get_option(reset_namespace_n)
ask_before_restart_n = 'ask_before_restart'
ask_before_restart_o = self.get_option(ask_before_restart_n)
for client in self.clients:
control = client.get_control()
if font_n in options:
client.set_font(font_o)
if help_n in options and control is not None:
control.set_help_enabled(help_o)
if color_scheme_n in options:
client.set_color_scheme(color_scheme_o)
if show_time_n in options:
client.show_time_action.setChecked(show_time_o)
client.set_elapsed_time_visible(show_time_o)
if reset_namespace_n in options:
client.reset_warning = reset_namespace_o
if ask_before_restart_n in options:
client.ask_before_restart = ask_before_restart_o | [
"def",
"apply_plugin_settings",
"(",
"self",
",",
"options",
")",
":",
"font_n",
"=",
"'plugin_font'",
"font_o",
"=",
"self",
".",
"get_plugin_font",
"(",
")",
"help_n",
"=",
"'connect_to_oi'",
"help_o",
"=",
"CONF",
".",
"get",
"(",
"'help'",
",",
"'connect/ipython_console'",
")",
"color_scheme_n",
"=",
"'color_scheme_name'",
"color_scheme_o",
"=",
"CONF",
".",
"get",
"(",
"'appearance'",
",",
"'selected'",
")",
"show_time_n",
"=",
"'show_elapsed_time'",
"show_time_o",
"=",
"self",
".",
"get_option",
"(",
"show_time_n",
")",
"reset_namespace_n",
"=",
"'show_reset_namespace_warning'",
"reset_namespace_o",
"=",
"self",
".",
"get_option",
"(",
"reset_namespace_n",
")",
"ask_before_restart_n",
"=",
"'ask_before_restart'",
"ask_before_restart_o",
"=",
"self",
".",
"get_option",
"(",
"ask_before_restart_n",
")",
"for",
"client",
"in",
"self",
".",
"clients",
":",
"control",
"=",
"client",
".",
"get_control",
"(",
")",
"if",
"font_n",
"in",
"options",
":",
"client",
".",
"set_font",
"(",
"font_o",
")",
"if",
"help_n",
"in",
"options",
"and",
"control",
"is",
"not",
"None",
":",
"control",
".",
"set_help_enabled",
"(",
"help_o",
")",
"if",
"color_scheme_n",
"in",
"options",
":",
"client",
".",
"set_color_scheme",
"(",
"color_scheme_o",
")",
"if",
"show_time_n",
"in",
"options",
":",
"client",
".",
"show_time_action",
".",
"setChecked",
"(",
"show_time_o",
")",
"client",
".",
"set_elapsed_time_visible",
"(",
"show_time_o",
")",
"if",
"reset_namespace_n",
"in",
"options",
":",
"client",
".",
"reset_warning",
"=",
"reset_namespace_o",
"if",
"ask_before_restart_n",
"in",
"options",
":",
"client",
".",
"ask_before_restart",
"=",
"ask_before_restart_o"
] | Apply configuration file's plugin settings | [
"Apply",
"configuration",
"file",
"s",
"plugin",
"settings"
] | python | train |
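A compact variant of the "apply only the changed options" loop above, using a dispatch table instead of repeated if-blocks; the option names come from the method, the client object is illustrative:

SETTERS = {
    'plugin_font': lambda c, v: c.set_font(v),
    'color_scheme_name': lambda c, v: c.set_color_scheme(v),
    'show_elapsed_time': lambda c, v: c.set_elapsed_time_visible(v),
}

def apply_changed(options, clients, values):
    for name in options:
        setter = SETTERS.get(name)
        if setter is None:
            continue
        for client in clients:
            setter(client, values[name])

The table keeps the change-detection logic in one place as the option list grows.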
stianaske/pybotvac | pybotvac/account.py | https://github.com/stianaske/pybotvac/blob/e3f655e81070ff209aaa4efb7880016cf2599e6d/pybotvac/account.py#L151-L163 | def refresh_persistent_maps(self):
"""
Get information about persistent maps of the robots.
:return:
"""
for robot in self._robots:
resp2 = (requests.get(urljoin(
self.ENDPOINT,
'users/me/robots/{}/persistent_maps'.format(robot.serial)),
headers=self._headers))
resp2.raise_for_status()
self._persistent_maps.update({robot.serial: resp2.json()}) | [
"def",
"refresh_persistent_maps",
"(",
"self",
")",
":",
"for",
"robot",
"in",
"self",
".",
"_robots",
":",
"resp2",
"=",
"(",
"requests",
".",
"get",
"(",
"urljoin",
"(",
"self",
".",
"ENDPOINT",
",",
"'users/me/robots/{}/persistent_maps'",
".",
"format",
"(",
"robot",
".",
"serial",
")",
")",
",",
"headers",
"=",
"self",
".",
"_headers",
")",
")",
"resp2",
".",
"raise_for_status",
"(",
")",
"self",
".",
"_persistent_maps",
".",
"update",
"(",
"{",
"robot",
".",
"serial",
":",
"resp2",
".",
"json",
"(",
")",
"}",
")"
] | Get information about persistent maps of the robots.
:return: | [
"Get",
"information",
"about",
"persistent",
"maps",
"of",
"the",
"robots",
"."
] | python | valid |
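The fetch-and-cache pattern in isolation; the endpoint, token, and serial numbers are placeholders, not Neato's real values:

import requests
from urllib.parse import urljoin

ENDPOINT = 'https://beehive.example.com/'
headers = {'Authorization': 'Token token=PLACEHOLDER'}
persistent_maps = {}
for serial in ['robot-1', 'robot-2']:
    resp = requests.get(
        urljoin(ENDPOINT, 'users/me/robots/{}/persistent_maps'.format(serial)),
        headers=headers)
    resp.raise_for_status()  # fail loudly instead of caching an error body
    persistent_maps[serial] = resp.json()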
poppy-project/pypot | pypot/dynamixel/io/io_320.py | https://github.com/poppy-project/pypot/blob/d9c6551bbc87d45d9d1f0bc15e35b616d0002afd/pypot/dynamixel/io/io_320.py#L32-L43 | def factory_reset(self, ids, except_ids=False, except_baudrate_and_ids=False):
""" Reset all motors on the bus to their factory default settings. """
mode = (0x02 if except_baudrate_and_ids else
0x01 if except_ids else 0xFF)
for id in ids:
try:
self._send_packet(self._protocol.DxlResetPacket(id, mode))
except (DxlTimeoutError, DxlCommunicationError):
pass | [
"def",
"factory_reset",
"(",
"self",
",",
"ids",
",",
"except_ids",
"=",
"False",
",",
"except_baudrate_and_ids",
"=",
"False",
")",
":",
"mode",
"=",
"(",
"0x02",
"if",
"except_baudrate_and_ids",
"else",
"0x01",
"if",
"except_ids",
"else",
"0xFF",
")",
"for",
"id",
"in",
"ids",
":",
"try",
":",
"self",
".",
"_send_packet",
"(",
"self",
".",
"_protocol",
".",
"DxlResetPacket",
"(",
"id",
",",
"mode",
")",
")",
"except",
"(",
"DxlTimeoutError",
",",
"DxlCommunicationError",
")",
":",
"pass"
] | Reset all motors on the bus to their factory default settings. | [
"Reset",
"all",
"motors",
"on",
"the",
"bus",
"to",
"their",
"factory",
"default",
"settings",
"."
] | python | train |
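The mode byte is chosen with a chained conditional expression; the same selection spelled out, with the reset modes taken from the method above:

def reset_mode(except_ids=False, except_baudrate_and_ids=False):
    if except_baudrate_and_ids:
        return 0x02  # keep baudrate and ids
    if except_ids:
        return 0x01  # keep ids
    return 0xFF      # full factory reset

assert reset_mode() == 0xFF
assert reset_mode(except_ids=True) == 0x01
assert reset_mode(except_baudrate_and_ids=True) == 0x02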
SeabornGames/RequestClient | example_bindings/user.py | https://github.com/SeabornGames/RequestClient/blob/21aeb951ddfdb6ee453ad0edc896ff224e06425d/example_bindings/user.py#L132-L142 | def post(self, user_ids=None, usernames=None, status=None):
"""
:param user_ids: list of int of the user_ids to return
:param usernames: list of str of the usernames to return
:param status: str of the status
:return: list of User
"""
return self.connection.post('user/array',
data=dict(user_ids=user_ids,
usernames=usernames,
status=status)) | [
"def",
"post",
"(",
"self",
",",
"user_ids",
"=",
"None",
",",
"usernames",
"=",
"None",
",",
"status",
"=",
"None",
")",
":",
"return",
"self",
".",
"connection",
".",
"post",
"(",
"'user/array'",
",",
"data",
"=",
"dict",
"(",
"user_ids",
"=",
"user_ids",
",",
"usernames",
"=",
"usernames",
",",
"status",
"=",
"status",
")",
")"
] | :param user_ids: list of int of the user_ids to return
:param usernames: list of str of the usernames to return
:param status: str of the status
:return: list of User | [
":",
"param",
"user_ids",
":",
"list",
"of",
"int",
"of",
"the",
"user_ids",
"to",
"return",
":",
"param",
"usernames",
":",
"list",
"of",
"str",
"of",
"the",
"usernames",
"to",
"return",
":",
"param",
"status",
":",
"str",
"of",
"the",
"status",
":",
"return",
":",
"list",
"of",
"User"
] | python | train |
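An equivalent raw HTTP call for the 'user/array' endpoint, assuming a JSON API at a placeholder base URL (the project's real server and auth are not shown in this row):

import requests

resp = requests.post('https://api.example.com/user/array',
                     json=dict(user_ids=[1, 2], usernames=None, status='active'))
users = resp.json()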
spyder-ide/spyder | spyder/plugins/editor/widgets/base.py | https://github.com/spyder-ide/spyder/blob/f76836ce1b924bcc4efd3f74f2960d26a4e528e0/spyder/plugins/editor/widgets/base.py#L520-L543 | def cursor_position_changed(self):
"""Brace matching"""
if self.bracepos is not None:
self.__highlight(self.bracepos, cancel=True)
self.bracepos = None
cursor = self.textCursor()
if cursor.position() == 0:
return
cursor.movePosition(QTextCursor.PreviousCharacter,
QTextCursor.KeepAnchor)
text = to_text_string(cursor.selectedText())
pos1 = cursor.position()
if text in (')', ']', '}'):
pos2 = self.find_brace_match(pos1, text, forward=False)
elif text in ('(', '[', '{'):
pos2 = self.find_brace_match(pos1, text, forward=True)
else:
return
if pos2 is not None:
self.bracepos = (pos1, pos2)
self.__highlight(self.bracepos, color=self.matched_p_color)
else:
self.bracepos = (pos1,)
self.__highlight(self.bracepos, color=self.unmatched_p_color) | [
"def",
"cursor_position_changed",
"(",
"self",
")",
":",
"if",
"self",
".",
"bracepos",
"is",
"not",
"None",
":",
"self",
".",
"__highlight",
"(",
"self",
".",
"bracepos",
",",
"cancel",
"=",
"True",
")",
"self",
".",
"bracepos",
"=",
"None",
"cursor",
"=",
"self",
".",
"textCursor",
"(",
")",
"if",
"cursor",
".",
"position",
"(",
")",
"==",
"0",
":",
"return",
"cursor",
".",
"movePosition",
"(",
"QTextCursor",
".",
"PreviousCharacter",
",",
"QTextCursor",
".",
"KeepAnchor",
")",
"text",
"=",
"to_text_string",
"(",
"cursor",
".",
"selectedText",
"(",
")",
")",
"pos1",
"=",
"cursor",
".",
"position",
"(",
")",
"if",
"text",
"in",
"(",
"')'",
",",
"']'",
",",
"'}'",
")",
":",
"pos2",
"=",
"self",
".",
"find_brace_match",
"(",
"pos1",
",",
"text",
",",
"forward",
"=",
"False",
")",
"elif",
"text",
"in",
"(",
"'('",
",",
"'['",
",",
"'{'",
")",
":",
"pos2",
"=",
"self",
".",
"find_brace_match",
"(",
"pos1",
",",
"text",
",",
"forward",
"=",
"True",
")",
"else",
":",
"return",
"if",
"pos2",
"is",
"not",
"None",
":",
"self",
".",
"bracepos",
"=",
"(",
"pos1",
",",
"pos2",
")",
"self",
".",
"__highlight",
"(",
"self",
".",
"bracepos",
",",
"color",
"=",
"self",
".",
"matched_p_color",
")",
"else",
":",
"self",
".",
"bracepos",
"=",
"(",
"pos1",
",",
")",
"self",
".",
"__highlight",
"(",
"self",
".",
"bracepos",
",",
"color",
"=",
"self",
".",
"unmatched_p_color",
")"
] | Brace matching | [
"Brace",
"matching"
] | python | train |
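The handler scans outward from the character before the cursor for a matching brace; the same scan over a plain string, with no Qt dependency:

PAIRS = {')': '(', ']': '[', '}': '{'}
OPENERS = {v: k for k, v in PAIRS.items()}

def find_brace_match(text, pos):
    """Return the index of the brace matching text[pos], or None."""
    ch = text[pos]
    if ch in PAIRS:            # closing brace: walk backwards
        target, step = PAIRS[ch], -1
    elif ch in OPENERS:        # opening brace: walk forwards
        target, step = OPENERS[ch], 1
    else:
        return None
    depth = 0
    i = pos + step
    while 0 <= i < len(text):
        if text[i] == ch:
            depth += 1
        elif text[i] == target:
            if depth == 0:
                return i
            depth -= 1
        i += step
    return None

print(find_brace_match('f(a, (b))', 1))  # 8, the outer ')'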
mozilla/mozilla-django-oidc | mozilla_django_oidc/contrib/drf.py | https://github.com/mozilla/mozilla-django-oidc/blob/e780130deacccbafc85a92f48d1407e042f5f955/mozilla_django_oidc/contrib/drf.py#L96-L120 | def get_access_token(self, request):
"""
Get the access token based on a request.
Returns None if no authentication details were provided. Raises
AuthenticationFailed if the token is incorrect.
"""
header = authentication.get_authorization_header(request)
if not header:
return None
header = header.decode(authentication.HTTP_HEADER_ENCODING)
auth = header.split()
if auth[0].lower() != 'bearer':
return None
if len(auth) == 1:
msg = 'Invalid "bearer" header: No credentials provided.'
raise exceptions.AuthenticationFailed(msg)
elif len(auth) > 2:
msg = 'Invalid "bearer" header: Credentials string should not contain spaces.'
raise exceptions.AuthenticationFailed(msg)
return auth[1] | [
"def",
"get_access_token",
"(",
"self",
",",
"request",
")",
":",
"header",
"=",
"authentication",
".",
"get_authorization_header",
"(",
"request",
")",
"if",
"not",
"header",
":",
"return",
"None",
"header",
"=",
"header",
".",
"decode",
"(",
"authentication",
".",
"HTTP_HEADER_ENCODING",
")",
"auth",
"=",
"header",
".",
"split",
"(",
")",
"if",
"auth",
"[",
"0",
"]",
".",
"lower",
"(",
")",
"!=",
"'bearer'",
":",
"return",
"None",
"if",
"len",
"(",
"auth",
")",
"==",
"1",
":",
"msg",
"=",
"'Invalid \"bearer\" header: No credentials provided.'",
"raise",
"exceptions",
".",
"AuthenticationFailed",
"(",
"msg",
")",
"elif",
"len",
"(",
"auth",
")",
">",
"2",
":",
"msg",
"=",
"'Invalid \"bearer\" header: Credentials string should not contain spaces.'",
"raise",
"exceptions",
".",
"AuthenticationFailed",
"(",
"msg",
")",
"return",
"auth",
"[",
"1",
"]"
] | Get the access token based on a request.
Returns None if no authentication details were provided. Raises
AuthenticationFailed if the token is incorrect. | [
"Get",
"the",
"access",
"token",
"based",
"on",
"a",
"request",
"."
] | python | train |
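The same header parsing in isolation, outside Django REST framework; the header strings are examples:

def parse_bearer(header):
    if not header:
        return None
    auth = header.split()
    if not auth or auth[0].lower() != 'bearer':
        return None
    if len(auth) != 2:
        raise ValueError('Invalid "bearer" header')
    return auth[1]

assert parse_bearer('Bearer abc123') == 'abc123'
assert parse_bearer('Basic dXNlcg==') is None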
oscarlazoarjona/fast | build/lib/fast/rk4.py | https://github.com/oscarlazoarjona/fast/blob/3e5400672af2a7b7cc616e7f4aa10d7672720222/build/lib/fast/rk4.py#L56-L411 | def write_rk4(path,name,laser,omega,gamma,r,Lij,states=None,verbose=1):
r"""
This function writes the Fortran code needed to calculate the time evolution of the density matrix elements
`\rho_{ij}` using the Runge-Kutta method of order 4.
INPUT:
- ``path`` - A string with the working directory where all files will be stored. It must end with ``/``.
- ``name`` - A string with the name of the experiment. All files produced will begin with this name.
- ``laser`` - A list of Laser objects (see the Laser class).
- ``omega`` - A matrix or list of lists containing the frequency differences `\omega_{ij}`.
- ``gamma`` - A matrix or list of lists containing the spontaneous decay frequencies `\gamma_{ij}`.
- ``r`` - A list of three matrices or lists of lists containing the components of the position operator `r_{-1ij},r_{0ij},r_{1ij}`.
- ``Lij`` - A list with elements of the form ``[i,j,[l1,l2,...]]`` representing the sets `L_{ij}` of which lasers excite which transitions. It does not need to contain an element for all ``i,j`` pairs, but only those which have a laser that excites them.
- ``Omega`` - A floating point number indicating the frequency scale for the equations. The frequencies ``omega`` and ``gamma`` are divided by this number. If ``None`` the equations and the input are taken in SI units.
OUTPUT:
- A file ``name.f90`` is created in ``path``.
"""
global omega_rescaled
t0=time()
Ne=len(omega[0])
Nl=len(laser)
if states==None: states=range(1,Ne+1)
#We make some checks
for i in range(Ne):
for j in range(Ne):
b1=not ('.' in str(omega[i][j]) or 'e' in str(omega[i][j]))
if b1: raise ValueError,'omega must be composed of floating point numbers.'
b2=not ('.' in str(gamma[i][j]) or 'e' in str(gamma[i][j]))
if b2: raise ValueError,'gamma must be composed of floating point numbers.'
#We rescale the frequencies as requested.
#~ if Omega != None:
#~ omega_rescaled=[[omega[i][j]/Omega for j in range(Ne)] for i in range(Ne)]
#~ #gamma=[[gamma[i][j]/Omega for j in range(Ne)] for i in range(Ne)]
#~ else:
#~ omega_rescaled=omega[:]
omega_rescaled=omega[:]
#We determine whether it is possible to eliminate explicit time dependence
theta=find_phase_transformation(Ne,Nl,r,Lij)
#We find the detunings if required
#We construct the correspondence i <-> I between degenerate and non-degenerate indices.
i_d,I_nd,Nnd=calculate_iI_correspondence(omega)
#We get which transitions each laser induces
detunings,detuningsij=laser_detunings(Lij,Nl,i_d,I_nd,Nnd)
#We get how many transitions each laser induces
detuning_indices=[len(detunings[i]) for i in range(Nl)]
#The number of detunings
Nd=sum([len(detunings[l]) for l in range(Nl)])
combinations=detuning_combinations(detuning_indices)
code0='''program evolution_rk4
implicit none
complex*16, dimension('''+str(Ne*(Ne+1)/2-1)+') :: x,k1,k2,k3,k4\n'
code0+=''' real*8 :: dt,t,ddelta,delta,delta0
integer :: i,j,n,ldelta,ndelta,detuning_index,n_aprox,n_mod
logical :: print_steps,run_spectrum\n'''
code0+=' real*8, dimension('+str(Nl)+') :: E0,detuning_knob\n'
code0+=' real*8, dimension('+str(Nd)+') :: detuning\n\n'
code0+=" open(unit=1,file='"+path+name+".dat',status='unknown')\n\n"
code0+=' n_aprox=1500\n'
code0+=' !We load the parameters\n'
code0+=" open(unit=2,file='"+path+name+"_params.dat',status='unknown')\n"
code0+=''' read(2,*) n
read(2,*) dt
read(2,*) print_steps
read(2,*) x
read(2,*) E0\n'''
code0+=' read(2,*) detuning_knob\n'
code0+=' read(2,*) run_spectrum\n\n'
code0+=''' if (run_spectrum) then
read(2,*) ldelta
read(2,*) ndelta
read(2,*) ddelta
close(2)
delta0=detuning_knob(ldelta)
n_mod=ndelta/n_aprox
else
ldelta=1; ndelta=1; ddelta=0; delta=0
close(2)
n_mod=n/n_aprox
end if
if (n_mod==0) n_mod=1\n\n\n'''
#We add the code to calculate all the initial detunings for each laser.
code0+=' !We calculate the initial detunings.\n'
#We find the minimal frequency corresponding to each laser.
omega_min,omega_min_indices=find_omega_min(omega_rescaled,Nl,detuningsij,i_d,I_nd)
det_index=1
for l in range(Nl):
omega0=omega_min[l]
for p in detuningsij[l]:
code0+=' detuning('+str(det_index)+')='
code0+=format_double(omega0-omega_rescaled[p[0]][p[1]])+'+detuning_knob('+str(l+1)+')\n'
det_index+=1
code0+='\n'
code0+=''' t=0
if (.not. run_spectrum) WRITE(1,*) t,real(x),imag(x('''+str(Ne)+''':))\n'''
code0+=''' !We start the detuning variation\n'''
code0+=' delta=detuning_knob(ldelta)\n'
code0+=''' do j=1,ndelta
!We run the Runge Kutta method
t=0.0
do i=1,n-1\n'''
code0+=' call f(x , t , k1, E0, detuning, detuning_knob)\n'
code0+=' call f(x+0.5*k1*dt, t+dt*0.5, k2, E0, detuning, detuning_knob)\n'
code0+=' call f(x+0.5*k2*dt, t+dt*0.5, k3, E0, detuning, detuning_knob)\n'
code0+=' call f(x +k3*dt, t+dt , k4, E0, detuning, detuning_knob)\n'
code0+=''' x= x+(k1+2*k2+2*k3+k4)*dt/6
if (print_steps.and. .not. run_spectrum) print*,'t=',t,'delta=',delta
t= t+ dt
if (isnan(real(x(1)))) stop 1
if (.not. run_spectrum .and. mod(i,n_mod)==0) WRITE(1,*) t,real(x),imag(x('''+str(Ne)+''':))
end do
if (print_steps) print*, 'delta=',delta,'percentage=',100*(delta-delta0)/(ddelta*ndelta)
!We recalculate the detunings
if (run_spectrum) then
if (mod(j,n_mod)==0) WRITE(1,*) delta,real(x),imag(x('''+str(Ne)+''':))
delta=delta+ddelta
detuning_knob(ldelta)=detuning_knob(ldelta)+ddelta\n'''
#We add the code to calculate all detunings for each laser
#This way of assigning a global index ll to the detunings amounts to
# ll= number_of_previous_detunings
# + number_of_detuning_ordered_by_row_and_from_left_to_right_column
#like this
#->
#-> ->
#-> -> ->
#for each l
#We find the minimal frequency corresponding to each laser
omega_min,omega_min_indices=find_omega_min(omega_rescaled,Nl,detuningsij,i_d,I_nd)
det_index=1
for l in range(Nl):
omega0=omega_min[l]
code0+=' if (ldelta=='+str(l+1)+') then\n'
for p in detuningsij[l]:
code0+=' detuning('+str(det_index)+')=detuning('+str(det_index)+')'
#code0+='+('+str(omega0-omega_rescaled[p[0]][p[1]])+'+ddelta\n'
code0+='+ddelta\n'
det_index+=1
code0+=' end if\n'
code0+=''' end if
end do
close(1)
end program\n\n'''
code0+='subroutine f(x,t,y, E0, detuning,detuning_knob)\n'
code0+=''' implicit none
real*8, intent(in) :: t\n'''
code0+=' complex*16, dimension('+str(Ne*(Ne+1)/2-1)+'), intent(in) :: x\n'
code0+=' complex*16, dimension('+str(Ne*(Ne+1)/2-1)+'), intent(out) :: y\n'
code0+=' real*8, dimension('+str(Nl)+'), intent(in) :: E0,detuning_knob\n'
code0+=' real*8, dimension('+str(Nd)+'), intent(in) :: detuning\n\n'
code0+=' complex*16 :: I,fact,aux\n'
code0+=' real*8 :: rho11\n\n'
code0+=' I=(0,1D0)\n'
#We establish the scaling of the equations
#~ if Omega==None:
#~ h =1.054571726e-34; e=1.602176565e-19
#~ code0+=' fact=I*'+str(e/h)+'\n'
#~ else:
#~ #code0+=' fact=I*'+str(float(Omega/sqrt(2)))+'\n'
#~ code0+=' fact=I*'+str(float(1/sqrt(2)))+'\n'
#~ #code0+=' fact=I*'+str(float(1/(sqrt(2)*Omega)))+'\n'
code0+=' fact=I*'+format_double(float(1/sqrt(2)))+'\n'
#We give the code to calculate rho11
code0+=' rho11=1\n'
for i in range(1,Ne):
code0+=' rho11=rho11 -x('+str(i)+')\n'
code0+='\n\n'
####################################################################
#We produce the code for the first order equations.
####################################################################
if len(theta)>0:
code=''
for mu in range(1,Ne*(Ne+1)/2):
i,j,s=IJ(mu,Ne)
#print 'ecuacion mu=',mu,',i,j=',i,j
eqmu=' y('+str(mu)+')= 0\n'
####################################################################
#We add the terms associated with the effective hamiltonian
#other than those associated with the phase transformation.
for k in range(1,Ne+1):
#Case 1
if k>=j:
for l in Lij[i-1][k-1]:
if k>i:
#print 'E0^',l,-1,'r',i,k,'rho',k,j,'case 1.1'
eqmu+=add_line(Ne,mu,'+',laser,l,-1, r,i,k, k,j)
elif k<i:
#print 'E0^',l, 1,'r',i,k,'rho',k,j,'case 1.2'
eqmu+=add_line(Ne,mu,'+',laser,l, 1, r,i,k, k,j)
#Case 2
elif k<j:
for l in Lij[i-1][k-1]:
if k>i:
#print 'E0^',l,-1,'r',i,k,'rhoa',j,k,'case 2.1'
eqmu+=add_line(Ne,mu,'+',laser,l,-1, r,i,k, j,k,True)
elif k<i:
#print 'E0^',l, 1,'r',i,k,'rhoa',j,k,'case 2.2'
eqmu+=add_line(Ne,mu,'+',laser,l, 1, r,i,k, j,k,True)
#Case 3
if k<=i:
for l in Lij[k-1][j-1]:
if k<j:
#print 'E0^',l,-1,'r',k,j,'rho',i,k,'case 3.1'
eqmu+=add_line(Ne,mu,'-',laser,l,-1, r,k,j, i,k)
elif k>j:
#print 'E0^',l, 1,'r',k,j,'rho',i,k,'case 3.2'
eqmu+=add_line(Ne,mu,'-',laser,l, 1, r,k,j, i,k)
#Case 4
elif k>i:
for l in Lij[k-1][j-1]:
if k<j:
#print 'E0^',l,-1,'r',k,j,'rhoa',k,i,'case 4.1'
eqmu+=add_line(Ne,mu,'-',laser,l,-1, r,k,j, k,i,True)
elif k>j:
#print 'E0^',l, 1,'r',k,j,'rhoa',k,i,'case 4.2'
eqmu+=add_line(Ne,mu,'-',laser,l, 1, r,k,j, k,i,True)
eqmu+=' y('+str(mu)+')=y('+str(mu)+')*fact\n'
####################################################################
#We add the terms associated with the phase transformation.
extra=Theta(i,j,theta,omega_rescaled,omega_min,detunings,detuningsij,
combinations,detuning_indices,Lij,i_d,I_nd,Nnd,
verbose=verbose,states=states)
if extra!='':
eqmu+=' y('+str(mu)+')=y('+str(mu)+') + I*('+extra+')*x('+str(mu)+')\n'
####################################################################
#~ if i==j:
#~ for k in range(1,Ne+1):
#~ if k < i:
#~ muii=Mu(i,i,s=1,N=Ne)
#~ eqmu+=' y('+str(mu)+')= y('+str(mu)+') - ('+format_double(gamma[i-1][k-1])+')*x('+str(muii)+')\n'
#~ elif k > i:
#~ mukk=Mu(k,k,s=1,N=Ne)
#~ eqmu+=' y('+str(mu)+')= y('+str(mu)+') - ('+format_double(gamma[i-1][k-1])+')*x('+str(mukk)+')\n'
#~ eqmu+='\n'
#~ else:
#~ eqmu+=' y('+str(mu)+')= y('+str(mu)+') - ('+format_double(gamma[i-1][j-1]/2)+')*x('+str(mu)+')\n'
#~
####################################################################
code+=eqmu+'\n'
#We add the terms associated with spontaneous decay.
#First for populations.
for i in range(2,Ne+1):
mu=Mu(i,i,1,Ne)
for k in range(1,Ne+1):
gams=0
if k<i:
gams+=gamma[i-1][k-1]
elif k>i:
nu=Mu(k,k,1,Ne)
ga=gamma[i-1][k-1]
if ga != 0:
code+=' y('+str(mu)+')=y('+str(mu)+')'
code+='-('+format_double(ga)+')*x('+str(nu)+')\n'
if gams!=0:
code+=' y('+str(mu)+')=y('+str(mu)+')'
code+='-('+format_double(gams)+')*x('+str(mu)+')\n'
#And now for coherences
for i in range(1,Ne+1):
for j in range(1,i):
gams=gamma[i-1][j-1]/2
if gams!=0:
for a in range(i+1,Ne+1):
mu=Mu(a,i,+1,Ne)
code+=' y('+str(mu)+')=y('+str(mu)+')'
code+='-('+format_double(gams)+')*x('+str(mu)+')\n'
#~ mu=Mu(a,i,-1,Ne)
#~ code+=' y('+str(mu)+')=y('+str(mu)+')'
#~ code+='-('+format_double(gams)+')*x('+str(mu)+')\n'
for b in range(1,i):
mu=Mu(i,b,+1,Ne)
code+=' y('+str(mu)+')=y('+str(mu)+')'
code+='-('+format_double(gams)+')*x('+str(mu)+')\n'
#~ mu=Mu(i,b,-1,Ne)
#~ code+=' y('+str(mu)+')=y('+str(mu)+')'
#~ code+='-('+format_double(gams)+')*x('+str(mu)+')\n'
####################################################################
####################################################################
####################################################################
#code+=' y=y/'+str(Omega)+'\n'
f=file(path+name+'.f90','w')
code=code0+code+'end subroutine\n'
f.write(code)
f.close()
return time()-t0
else:
print 'There was no phase transformation capable of eliminating explicit time dependence.'
"def",
"write_rk4",
"(",
"path",
",",
"name",
",",
"laser",
",",
"omega",
",",
"gamma",
",",
"r",
",",
"Lij",
",",
"states",
"=",
"None",
",",
"verbose",
"=",
"1",
")",
":",
"global",
"omega_rescaled",
"t0",
"=",
"time",
"(",
")",
"Ne",
"=",
"len",
"(",
"omega",
"[",
"0",
"]",
")",
"Nl",
"=",
"len",
"(",
"laser",
")",
"if",
"states",
"==",
"None",
":",
"states",
"=",
"range",
"(",
"1",
",",
"Ne",
"+",
"1",
")",
"#We make some checks",
"for",
"i",
"in",
"range",
"(",
"Ne",
")",
":",
"for",
"j",
"in",
"range",
"(",
"Ne",
")",
":",
"b1",
"=",
"not",
"(",
"'.'",
"in",
"str",
"(",
"omega",
"[",
"i",
"]",
"[",
"j",
"]",
")",
"or",
"'e'",
"in",
"str",
"(",
"omega",
"[",
"i",
"]",
"[",
"j",
"]",
")",
")",
"if",
"b1",
":",
"raise",
"ValueError",
",",
"'omega must be composed of floating point numbers.'",
"b2",
"=",
"not",
"(",
"'.'",
"in",
"str",
"(",
"gamma",
"[",
"i",
"]",
"[",
"j",
"]",
")",
"or",
"'e'",
"in",
"str",
"(",
"gamma",
"[",
"i",
"]",
"[",
"j",
"]",
")",
")",
"if",
"b2",
":",
"raise",
"ValueError",
",",
"'gamma must be composed of floating point numbers.'",
"#We rescale the frequencies as requested.",
"#~ if Omega != None:",
"#~ omega_rescaled=[[omega[i][j]/Omega for j in range(Ne)] for i in range(Ne)]",
"#~ #gamma=[[gamma[i][j]/Omega for j in range(Ne)] for i in range(Ne)]",
"#~ else:",
"#~ omega_rescaled=omega[:]",
"omega_rescaled",
"=",
"omega",
"[",
":",
"]",
"#We determine wether it is possible to eliminate explicit time-dependance",
"theta",
"=",
"find_phase_transformation",
"(",
"Ne",
",",
"Nl",
",",
"r",
",",
"Lij",
")",
"#We find the detunings if required",
"#We construct the correspondence i <-> I between degenerate and non-degenerate indices.",
"i_d",
",",
"I_nd",
",",
"Nnd",
"=",
"calculate_iI_correspondence",
"(",
"omega",
")",
"#We get wich transitions each laser induces",
"detunings",
",",
"detuningsij",
"=",
"laser_detunings",
"(",
"Lij",
",",
"Nl",
",",
"i_d",
",",
"I_nd",
",",
"Nnd",
")",
"#We get how many transitions each laser induces",
"detuning_indices",
"=",
"[",
"len",
"(",
"detunings",
"[",
"i",
"]",
")",
"for",
"i",
"in",
"range",
"(",
"Nl",
")",
"]",
"#The number of detunings",
"Nd",
"=",
"sum",
"(",
"[",
"len",
"(",
"detunings",
"[",
"l",
"]",
")",
"for",
"l",
"in",
"range",
"(",
"Nl",
")",
"]",
")",
"combinations",
"=",
"detuning_combinations",
"(",
"detuning_indices",
")",
"code0",
"=",
"'''program evolution_rk4\n\timplicit none\n\tcomplex*16, dimension('''",
"+",
"str",
"(",
"Ne",
"*",
"(",
"Ne",
"+",
"1",
")",
"/",
"2",
"-",
"1",
")",
"+",
"') :: x,k1,k2,k3,k4\\n'",
"code0",
"+=",
"''' real*8 :: dt,t,ddelta,delta,delta0\n\tinteger :: i,j,n,ldelta,ndelta,detuning_index,n_aprox,n_mod\n\n\tlogical :: print_steps,run_spectrum\\n'''",
"code0",
"+=",
"' real*8, dimension('",
"+",
"str",
"(",
"Nl",
")",
"+",
"') :: E0,detuning_knob\\n'",
"code0",
"+=",
"' real*8, dimension('",
"+",
"str",
"(",
"Nd",
")",
"+",
"') :: detuning\\n\\n'",
"code0",
"+=",
"\" open(unit=1,file='\"",
"+",
"path",
"+",
"name",
"+",
"\".dat',status='unknown')\\n\\n\"",
"code0",
"+=",
"' n_aprox=1500\\n'",
"code0",
"+=",
"' !We load the parameters\\n'",
"code0",
"+=",
"\" open(unit=2,file='\"",
"+",
"path",
"+",
"name",
"+",
"\"_params.dat',status='unknown')\\n\"",
"code0",
"+=",
"''' read(2,*) n\n read(2,*) dt\n read(2,*) print_steps\n read(2,*) x\n read(2,*) E0\\n'''",
"code0",
"+=",
"' read(2,*) detuning_knob\\n'",
"code0",
"+=",
"' read(2,*) run_spectrum\\n\\n'",
"code0",
"+=",
"''' if (run_spectrum) then\n\t\tread(2,*) ldelta\n\t\tread(2,*) ndelta\n\t\tread(2,*) ddelta\n\t\tclose(2)\n\t\tdelta0=detuning_knob(ldelta)\n\t\tn_mod=ndelta/n_aprox\n else\n\t\tldelta=1; ndelta=1; ddelta=0; delta=0\n\t\tclose(2)\n\t\tn_mod=n/n_aprox\n end if\n if (n_mod==0) n_mod=1\\n\\n\\n'''",
"#We add the code to caculate all the initial detunings for each laser.",
"code0",
"+=",
"'\t!We calculate the initial detunings.\\n'",
"#We find the minimal frequency corresponding to each laser.",
"omega_min",
",",
"omega_min_indices",
"=",
"find_omega_min",
"(",
"omega_rescaled",
",",
"Nl",
",",
"detuningsij",
",",
"i_d",
",",
"I_nd",
")",
"det_index",
"=",
"1",
"for",
"l",
"in",
"range",
"(",
"Nl",
")",
":",
"omega0",
"=",
"omega_min",
"[",
"l",
"]",
"for",
"p",
"in",
"detuningsij",
"[",
"l",
"]",
":",
"code0",
"+=",
"'\tdetuning('",
"+",
"str",
"(",
"det_index",
")",
"+",
"')='",
"code0",
"+=",
"format_double",
"(",
"omega0",
"-",
"omega_rescaled",
"[",
"p",
"[",
"0",
"]",
"]",
"[",
"p",
"[",
"1",
"]",
"]",
")",
"+",
"'+detuning_knob('",
"+",
"str",
"(",
"l",
"+",
"1",
")",
"+",
"')\\n'",
"det_index",
"+=",
"1",
"code0",
"+=",
"'\\n'",
"code0",
"+=",
"'''\tt=0\n\tif (.not. run_spectrum) WRITE(1,*) t,real(x),imag(x('''",
"+",
"str",
"(",
"Ne",
")",
"+",
"''':))\\n'''",
"code0",
"+=",
"'''\t!We start the detuning variation\\n'''",
"code0",
"+=",
"'\tdelta=detuning_knob(ldelta)\\n'",
"code0",
"+=",
"''' do j=1,ndelta\n\t\t!We run the Runge Kutta method\n\t\tt=0.0\n\t\tdo i=1,n-1\\n'''",
"code0",
"+=",
"' call f(x , t , k1, E0, detuning, detuning_knob)\\n'",
"code0",
"+=",
"' call f(x+0.5*k1*dt, t+dt*0.5, k2, E0, detuning, detuning_knob)\\n'",
"code0",
"+=",
"' call f(x+0.5*k2*dt, t+dt*0.5, k3, E0, detuning, detuning_knob)\\n'",
"code0",
"+=",
"' call f(x +k3*dt, t+dt , k4, E0, detuning, detuning_knob)\\n'",
"code0",
"+=",
"'''\t\t\tx= x+(k1+2*k2+2*k3+k4)*dt/6\n\t\t\tif (print_steps.and. .not. run_spectrum) print*,'t=',t,'delta=',delta\n\t\t\tt= t+ dt\n\t\t\t\n\t\t\tif (isnan(real(x(1)))) stop 1\n\t\t\tif (.not. run_spectrum .and. mod(i,n_mod)==0) WRITE(1,*) t,real(x),imag(x('''",
"+",
"str",
"(",
"Ne",
")",
"+",
"''':))\n\t\tend do\n\t\tif (print_steps) print*, 'delta=',delta,'percentage=',100*(delta-delta0)/(ddelta*ndelta)\n\t\t\n\t\t!We recalculate the detunings\n\t\tif (run_spectrum) then\n\t\t\tif (mod(j,n_mod)==0) WRITE(1,*) delta,real(x),imag(x('''",
"+",
"str",
"(",
"Ne",
")",
"+",
"''':))\n\t\t\tdelta=delta+ddelta\n\t\t\tdetuning_knob(ldelta)=detuning_knob(ldelta)+ddelta\\n'''",
"#We add the code to caculate all detunings for each laser",
"#This way of assigining a global index ll to the detunings ammounts to",
"# ll= number_of_previous_detunings ",
"# + number_of_detuning_ordered_by_row_and_from_left_to_right_column",
"#like this",
"#->",
"#-> ->",
"#-> -> ->",
"#for each l",
"#We find the minimal frequency corresponding to each laser\t\t",
"omega_min",
",",
"omega_min_indices",
"=",
"find_omega_min",
"(",
"omega_rescaled",
",",
"Nl",
",",
"detuningsij",
",",
"i_d",
",",
"I_nd",
")",
"det_index",
"=",
"1",
"for",
"l",
"in",
"range",
"(",
"Nl",
")",
":",
"omega0",
"=",
"omega_min",
"[",
"l",
"]",
"code0",
"+=",
"'\t\t\tif (ldelta=='",
"+",
"str",
"(",
"l",
"+",
"1",
")",
"+",
"') then\\n'",
"for",
"p",
"in",
"detuningsij",
"[",
"l",
"]",
":",
"code0",
"+=",
"'\t\t\t\tdetuning('",
"+",
"str",
"(",
"det_index",
")",
"+",
"')=detuning('",
"+",
"str",
"(",
"det_index",
")",
"+",
"')'",
"#code0+='+('+str(omega0-omega_rescaled[p[0]][p[1]])+'+ddelta\\n'",
"code0",
"+=",
"'+ddelta\\n'",
"det_index",
"+=",
"1",
"code0",
"+=",
"'\t\t\tend if\\n'",
"code0",
"+=",
"'''\t\tend if\n\t\n\t\n\tend do\n\t\n close(1)\n \nend program\\n\\n'''",
"code0",
"+=",
"'subroutine f(x,t,y, E0, detuning,detuning_knob)\\n'",
"code0",
"+=",
"''' implicit none\n real*8, intent(in) :: t\\n'''",
"code0",
"+=",
"' complex*16, dimension('",
"+",
"str",
"(",
"Ne",
"*",
"(",
"Ne",
"+",
"1",
")",
"/",
"2",
"-",
"1",
")",
"+",
"'), intent(in) :: x\\n'",
"code0",
"+=",
"' complex*16, dimension('",
"+",
"str",
"(",
"Ne",
"*",
"(",
"Ne",
"+",
"1",
")",
"/",
"2",
"-",
"1",
")",
"+",
"'), intent(out) :: y\\n'",
"code0",
"+=",
"' real*8, dimension('",
"+",
"str",
"(",
"Nl",
")",
"+",
"'), intent(in) :: E0,detuning_knob\\n'",
"code0",
"+=",
"' real*8, dimension('",
"+",
"str",
"(",
"Nd",
")",
"+",
"'), intent(in) :: detuning\\n\\n'",
"code0",
"+=",
"' complex*16 :: I,fact,aux\\n'",
"code0",
"+=",
"' real*8 :: rho11\\n\\n'",
"code0",
"+=",
"' I=(0,1D0)\\n'",
"#We establish the scaling of the equations",
"#~ if Omega==None:",
"#~ h =1.054571726e-34; e=1.602176565e-19",
"#~ code0+=' fact=I*'+str(e/h)+'\\n'",
"#~ else:",
"#~ #code0+=' fact=I*'+str(float(Omega/sqrt(2)))+'\\n'",
"#~ code0+=' fact=I*'+str(float(1/sqrt(2)))+'\\n'",
"#~ #code0+=' fact=I*'+str(float(1/(sqrt(2)*Omega)))+'\\n'",
"code0",
"+=",
"' fact=I*'",
"+",
"format_double",
"(",
"float",
"(",
"1",
"/",
"sqrt",
"(",
"2",
")",
")",
")",
"+",
"'\\n'",
"#We give the code to calculate rho11",
"code0",
"+=",
"' rho11=1\\n'",
"for",
"i",
"in",
"range",
"(",
"1",
",",
"Ne",
")",
":",
"code0",
"+=",
"' rho11=rho11 -x('",
"+",
"str",
"(",
"i",
")",
"+",
"')\\n'",
"code0",
"+=",
"'\\n\\n'",
"####################################################################",
"#We produce the code for the first order equations.",
"####################################################################",
"if",
"len",
"(",
"theta",
")",
">",
"0",
":",
"code",
"=",
"''",
"for",
"mu",
"in",
"range",
"(",
"1",
",",
"Ne",
"*",
"(",
"Ne",
"+",
"1",
")",
"/",
"2",
")",
":",
"i",
",",
"j",
",",
"s",
"=",
"IJ",
"(",
"mu",
",",
"Ne",
")",
"#print 'ecuacion mu=',mu,',i,j=',i,j",
"eqmu",
"=",
"' y('",
"+",
"str",
"(",
"mu",
")",
"+",
"')= 0\\n'",
"####################################################################",
"#We add the terms associated with the effective hamiltonian",
"#other than those associated with the phase transformation.",
"for",
"k",
"in",
"range",
"(",
"1",
",",
"Ne",
"+",
"1",
")",
":",
"#Case 1",
"if",
"k",
">=",
"j",
":",
"for",
"l",
"in",
"Lij",
"[",
"i",
"-",
"1",
"]",
"[",
"k",
"-",
"1",
"]",
":",
"if",
"k",
">",
"i",
":",
"#print 'E0^',l,-1,'r',i,k,'rho',k,j,'case 1.1'",
"eqmu",
"+=",
"add_line",
"(",
"Ne",
",",
"mu",
",",
"'+'",
",",
"laser",
",",
"l",
",",
"-",
"1",
",",
"r",
",",
"i",
",",
"k",
",",
"k",
",",
"j",
")",
"elif",
"k",
"<",
"i",
":",
"#print 'E0^',l, 1,'r',i,k,'rho',k,j,'case 1.2'",
"eqmu",
"+=",
"add_line",
"(",
"Ne",
",",
"mu",
",",
"'+'",
",",
"laser",
",",
"l",
",",
"1",
",",
"r",
",",
"i",
",",
"k",
",",
"k",
",",
"j",
")",
"#Case 2",
"elif",
"k",
"<",
"j",
":",
"for",
"l",
"in",
"Lij",
"[",
"i",
"-",
"1",
"]",
"[",
"k",
"-",
"1",
"]",
":",
"if",
"k",
">",
"i",
":",
"#print 'E0^',l,-1,'r',i,k,'rhoa',j,k,'case 2.1'",
"eqmu",
"+=",
"add_line",
"(",
"Ne",
",",
"mu",
",",
"'+'",
",",
"laser",
",",
"l",
",",
"-",
"1",
",",
"r",
",",
"i",
",",
"k",
",",
"j",
",",
"k",
",",
"True",
")",
"elif",
"k",
"<",
"i",
":",
"#print 'E0^',l, 1,'r',i,k,'rhoa',j,k,'case 2.2'",
"eqmu",
"+=",
"add_line",
"(",
"Ne",
",",
"mu",
",",
"'+'",
",",
"laser",
",",
"l",
",",
"1",
",",
"r",
",",
"i",
",",
"k",
",",
"j",
",",
"k",
",",
"True",
")",
"#Case 3",
"if",
"k",
"<=",
"i",
":",
"for",
"l",
"in",
"Lij",
"[",
"k",
"-",
"1",
"]",
"[",
"j",
"-",
"1",
"]",
":",
"if",
"k",
"<",
"j",
":",
"#print 'E0^',l,-1,'r',k,j,'rho',i,k,'case 3.1'",
"eqmu",
"+=",
"add_line",
"(",
"Ne",
",",
"mu",
",",
"'-'",
",",
"laser",
",",
"l",
",",
"-",
"1",
",",
"r",
",",
"k",
",",
"j",
",",
"i",
",",
"k",
")",
"elif",
"k",
">",
"j",
":",
"#print 'E0^',l, 1,'r',k,j,'rho',i,k,'case 3.2'",
"eqmu",
"+=",
"add_line",
"(",
"Ne",
",",
"mu",
",",
"'-'",
",",
"laser",
",",
"l",
",",
"1",
",",
"r",
",",
"k",
",",
"j",
",",
"i",
",",
"k",
")",
"#Case 4",
"elif",
"k",
">",
"i",
":",
"for",
"l",
"in",
"Lij",
"[",
"k",
"-",
"1",
"]",
"[",
"j",
"-",
"1",
"]",
":",
"if",
"k",
"<",
"j",
":",
"#print 'E0^',l,-1,'r',k,j,'rhoa',k,i,'case 4.1'",
"eqmu",
"+=",
"add_line",
"(",
"Ne",
",",
"mu",
",",
"'-'",
",",
"laser",
",",
"l",
",",
"-",
"1",
",",
"r",
",",
"k",
",",
"j",
",",
"k",
",",
"i",
",",
"True",
")",
"elif",
"k",
">",
"j",
":",
"#print 'E0^',l, 1,'r',k,j,'rhoa',k,i,'case 4.2'",
"eqmu",
"+=",
"add_line",
"(",
"Ne",
",",
"mu",
",",
"'-'",
",",
"laser",
",",
"l",
",",
"1",
",",
"r",
",",
"k",
",",
"j",
",",
"k",
",",
"i",
",",
"True",
")",
"eqmu",
"+=",
"' y('",
"+",
"str",
"(",
"mu",
")",
"+",
"')=y('",
"+",
"str",
"(",
"mu",
")",
"+",
"')*fact\\n'",
"####################################################################",
"#We add the terms associated with the phase transformation.",
"extra",
"=",
"Theta",
"(",
"i",
",",
"j",
",",
"theta",
",",
"omega_rescaled",
",",
"omega_min",
",",
"detunings",
",",
"detuningsij",
",",
"combinations",
",",
"detuning_indices",
",",
"Lij",
",",
"i_d",
",",
"I_nd",
",",
"Nnd",
",",
"verbose",
"=",
"verbose",
",",
"states",
"=",
"states",
")",
"if",
"extra",
"!=",
"''",
":",
"eqmu",
"+=",
"' y('",
"+",
"str",
"(",
"mu",
")",
"+",
"')=y('",
"+",
"str",
"(",
"mu",
")",
"+",
"') + I*('",
"+",
"extra",
"+",
"')*x('",
"+",
"str",
"(",
"mu",
")",
"+",
"')\\n'",
"####################################################################",
"#~ if i==j:",
"#~ for k in range(1,Ne+1):",
"#~ if k < i:",
"#~ muii=Mu(i,i,s=1,N=Ne)",
"#~ eqmu+=' y('+str(mu)+')= y('+str(mu)+') - ('+format_double(gamma[i-1][k-1])+')*x('+str(muii)+')\\n'",
"#~ elif k > i:",
"#~ mukk=Mu(k,k,s=1,N=Ne)",
"#~ eqmu+=' y('+str(mu)+')= y('+str(mu)+') - ('+format_double(gamma[i-1][k-1])+')*x('+str(mukk)+')\\n'",
"#~ eqmu+='\\n'",
"#~ else:",
"#~ eqmu+=' y('+str(mu)+')= y('+str(mu)+') - ('+format_double(gamma[i-1][j-1]/2)+')*x('+str(mu)+')\\n'",
"#~ ",
"####################################################################",
"code",
"+=",
"eqmu",
"+",
"'\\n'",
"#We add the terms associated with spontaneous decay.\t\t",
"#First for populations.",
"for",
"i",
"in",
"range",
"(",
"2",
",",
"Ne",
"+",
"1",
")",
":",
"mu",
"=",
"Mu",
"(",
"i",
",",
"i",
",",
"1",
",",
"Ne",
")",
"for",
"k",
"in",
"range",
"(",
"1",
",",
"Ne",
"+",
"1",
")",
":",
"gams",
"=",
"0",
"if",
"k",
"<",
"i",
":",
"gams",
"+=",
"gamma",
"[",
"i",
"-",
"1",
"]",
"[",
"k",
"-",
"1",
"]",
"elif",
"k",
">",
"i",
":",
"nu",
"=",
"Mu",
"(",
"k",
",",
"k",
",",
"1",
",",
"Ne",
")",
"ga",
"=",
"gamma",
"[",
"i",
"-",
"1",
"]",
"[",
"k",
"-",
"1",
"]",
"if",
"ga",
"!=",
"0",
":",
"code",
"+=",
"' y('",
"+",
"str",
"(",
"mu",
")",
"+",
"')=y('",
"+",
"str",
"(",
"mu",
")",
"+",
"')'",
"code",
"+=",
"'-('",
"+",
"format_double",
"(",
"ga",
")",
"+",
"')*x('",
"+",
"str",
"(",
"nu",
")",
"+",
"')\\n'",
"if",
"gams",
"!=",
"0",
":",
"code",
"+=",
"' y('",
"+",
"str",
"(",
"mu",
")",
"+",
"')=y('",
"+",
"str",
"(",
"mu",
")",
"+",
"')'",
"code",
"+=",
"'-('",
"+",
"format_double",
"(",
"gams",
")",
"+",
"')*x('",
"+",
"str",
"(",
"mu",
")",
"+",
"')\\n'",
"#And now for coherences\t",
"for",
"i",
"in",
"range",
"(",
"1",
",",
"Ne",
"+",
"1",
")",
":",
"for",
"j",
"in",
"range",
"(",
"1",
",",
"i",
")",
":",
"gams",
"=",
"gamma",
"[",
"i",
"-",
"1",
"]",
"[",
"j",
"-",
"1",
"]",
"/",
"2",
"if",
"gams",
"!=",
"0",
":",
"for",
"a",
"in",
"range",
"(",
"i",
"+",
"1",
",",
"Ne",
"+",
"1",
")",
":",
"mu",
"=",
"Mu",
"(",
"a",
",",
"i",
",",
"+",
"1",
",",
"Ne",
")",
"code",
"+=",
"' y('",
"+",
"str",
"(",
"mu",
")",
"+",
"')=y('",
"+",
"str",
"(",
"mu",
")",
"+",
"')'",
"code",
"+=",
"'-('",
"+",
"format_double",
"(",
"gams",
")",
"+",
"')*x('",
"+",
"str",
"(",
"mu",
")",
"+",
"')\\n'",
"#~ mu=Mu(a,i,-1,Ne)",
"#~ code+=' y('+str(mu)+')=y('+str(mu)+')'",
"#~ code+='-('+format_double(gams)+')*x('+str(mu)+')\\n'",
"for",
"b",
"in",
"range",
"(",
"1",
",",
"i",
")",
":",
"mu",
"=",
"Mu",
"(",
"i",
",",
"b",
",",
"+",
"1",
",",
"Ne",
")",
"code",
"+=",
"' y('",
"+",
"str",
"(",
"mu",
")",
"+",
"')=y('",
"+",
"str",
"(",
"mu",
")",
"+",
"')'",
"code",
"+=",
"'-('",
"+",
"format_double",
"(",
"gams",
")",
"+",
"')*x('",
"+",
"str",
"(",
"mu",
")",
"+",
"')\\n'",
"#~ mu=Mu(i,b,-1,Ne)",
"#~ code+=' y('+str(mu)+')=y('+str(mu)+')'",
"#~ code+='-('+format_double(gams)+')*x('+str(mu)+')\\n'",
"####################################################################",
"####################################################################",
"####################################################################",
"#code+=' y=y/'+str(Omega)+'\\n'",
"f",
"=",
"file",
"(",
"path",
"+",
"name",
"+",
"'.f90'",
",",
"'w'",
")",
"code",
"=",
"code0",
"+",
"code",
"+",
"'end subroutine\\n'",
"f",
".",
"write",
"(",
"code",
")",
"f",
".",
"close",
"(",
")",
"return",
"time",
"(",
")",
"-",
"t0",
"else",
":",
"print",
"'There was no phase transformation capable of eliminating explicit time dependance.'"
] | r"""
This function writes the Fortran code needed to calculate the time evolution of the density matrix elements
`\rho_{ij}` using the Runge-Kutta method of order 4.
INPUT:
- ``path`` - A string with the working directory where all files will be stored. It must end with ``/``.
- ``name`` - A string with the name of the experiment. All files produced will begin with this name.
- ``laser`` - A list of Laser objects (see the Laser class).
- ``omega`` - A matrix or list of lists containing the frequency differences `\omega_{ij}`.
- ``gamma`` - A matrix or list of lists containing the spontaneous decay frequencies `\gamma_{ij}`.
- ``r`` - A list of three matrices or lists of lists containing the components of the position operator `r_{-1ij},r_{0ij},r_{1ij}`.
- ``Lij`` - A list with elements of the form ``[i,j,[l1,l2,...]]`` representing the sets `L_{ij}` of which lasers excite which transitions. It does not need to contain an element for all ``i,j`` pairs, but only those which have a laser that excites them.
- ``Omega`` - A floating point number indicating the frequency scale for the equations. The frequencies ``omega`` and ``gamma`` are divided by this number. If ``None`` the equations and the input are taken in SI units.
OUTPUT:
- A file ``name.f90`` is created in ``path``. | [
"r",
"This",
"function",
"writes",
"the",
"Fortran",
"code",
"needed",
"to",
"calculate",
"the",
"time",
"evolution",
"of",
"the",
"density",
"matrix",
"elements",
"\\",
"rho_",
"{",
"ij",
"}",
"using",
"the",
"Runge",
"-",
"Kutta",
"method",
"of",
"order",
"4",
"."
] | python | train |
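The generated Fortran advances the density-matrix vector with a classical fourth-order Runge-Kutta step; the same update in plain numpy, with a toy decaying coherence standing in for the real right-hand side:

import numpy as np

def rk4_step(f, x, t, dt):
    k1 = f(x, t)
    k2 = f(x + 0.5 * dt * k1, t + 0.5 * dt)
    k3 = f(x + 0.5 * dt * k2, t + 0.5 * dt)
    k4 = f(x + dt * k3, t + dt)
    return x + dt * (k1 + 2 * k2 + 2 * k3 + k4) / 6.0

gamma = 0.5
x = np.array([1.0 + 0.0j])   # a single coherence rho_21
dt = 0.01
for i in range(100):
    x = rk4_step(lambda x, t: -gamma * x, x, i * dt, dt)
print(abs(x[0]))  # ~exp(-0.5), since drho/dt = -gamma * rho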
pypyr/pypyr-cli | pypyr/moduleloader.py | https://github.com/pypyr/pypyr-cli/blob/4003f999cd5eb030b4c7407317de728f5115a80f/pypyr/moduleloader.py#L15-L48 | def get_module(module_abs_import):
"""Use importlib to get the module dynamically.
Get instance of the module specified by the module_abs_import.
This means that module_abs_import must be resolvable from this package.
Args:
module_abs_import: string. Absolute name of module to import.
Raises:
PyModuleNotFoundError: if module not found.
"""
logger.debug("starting")
logger.debug(f"loading module {module_abs_import}")
try:
imported_module = importlib.import_module(module_abs_import)
logger.debug("done")
return imported_module
except ModuleNotFoundError as err:
msg = ("The module doesn't exist. Looking for a file like this: "
f"{module_abs_import}")
extended_msg = (f"{module_abs_import}.py should be in your working "
"dir or it should be installed to the python path."
"\nIf you have 'package.sub.mod' your current working "
"dir should contain ./package/sub/mod.py\n"
"If you specified 'mymodulename', your current "
"working dir should contain ./mymodulename.py\n"
"If the module is not in your current working dir, it "
"must exist in your current python path - so you "
"should have run pip install or setup.py")
logger.error(msg)
raise PyModuleNotFoundError(extended_msg) from err | [
"def",
"get_module",
"(",
"module_abs_import",
")",
":",
"logger",
".",
"debug",
"(",
"\"starting\"",
")",
"logger",
".",
"debug",
"(",
"f\"loading module {module_abs_import}\"",
")",
"try",
":",
"imported_module",
"=",
"importlib",
".",
"import_module",
"(",
"module_abs_import",
")",
"logger",
".",
"debug",
"(",
"\"done\"",
")",
"return",
"imported_module",
"except",
"ModuleNotFoundError",
"as",
"err",
":",
"msg",
"=",
"(",
"\"The module doesn't exist. Looking for a file like this: \"",
"f\"{module_abs_import}\"",
")",
"extended_msg",
"=",
"(",
"f\"{module_abs_import}.py should be in your working \"",
"\"dir or it should be installed to the python path.\"",
"\"\\nIf you have 'package.sub.mod' your current working \"",
"\"dir should contain ./package/sub/mod.py\\n\"",
"\"If you specified 'mymodulename', your current \"",
"\"working dir should contain ./mymodulename.py\\n\"",
"\"If the module is not in your current working dir, it \"",
"\"must exist in your current python path - so you \"",
"\"should have run pip install or setup.py\"",
")",
"logger",
".",
"error",
"(",
"msg",
")",
"raise",
"PyModuleNotFoundError",
"(",
"extended_msg",
")",
"from",
"err"
] | Use importlib to get the module dynamically.
Get instance of the module specified by the module_abs_import.
This means that module_abs_import must be resolvable from this package.
Args:
module_abs_import: string. Absolute name of module to import.
Raises:
PyModuleNotFoundError: if module not found. | [
"Use",
"importlib",
"to",
"get",
"the",
"module",
"dynamically",
"."
] | python | train |
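The core importlib call in isolation; 'json' stands in for a module that is always importable:

import importlib

mod = importlib.import_module('json')
print(mod.dumps({'ok': True}))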
zyga/json-schema-validator | json_schema_validator/validator.py | https://github.com/zyga/json-schema-validator/blob/0504605da5c0a9a5b5b05c41b37661aec9652144/json_schema_validator/validator.py#L166-L169 | def _push_property_schema(self, prop):
"""Construct a sub-schema from a property of the current schema."""
schema = Schema(self._schema.properties[prop])
self._push_schema(schema, ".properties." + prop) | [
"def",
"_push_property_schema",
"(",
"self",
",",
"prop",
")",
":",
"schema",
"=",
"Schema",
"(",
"self",
".",
"_schema",
".",
"properties",
"[",
"prop",
"]",
")",
"self",
".",
"_push_schema",
"(",
"schema",
",",
"\".properties.\"",
"+",
"prop",
")"
] | Construct a sub-schema from a property of the current schema. | [
"Construct",
"a",
"sub",
"-",
"schema",
"from",
"a",
"property",
"of",
"the",
"current",
"schema",
"."
] | python | train |
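A reduced sketch of the path bookkeeping implied by _push_schema, which is not shown in this row, so the stack layout is an assumption:

stack = [('<root schema>', '')]

def push(schema, suffix):
    # Each frame records the sub-schema and its dotted path from the root.
    stack.append((schema, stack[-1][1] + suffix))

push('<sub schema>', '.properties.name')
print(stack[-1][1])  # '.properties.name'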
Yubico/python-yubico | yubico/yubikey_neo_usb_hid.py | https://github.com/Yubico/python-yubico/blob/a72e8eddb90da6ee96e29f60912ca1f2872c9aea/yubico/yubikey_neo_usb_hid.py#L323-L329 | def to_frame(self, slot=SLOT.DEVICE_CONFIG):
"""
Return the current configuration as a YubiKeyFrame object.
"""
data = self.to_string()
payload = data.ljust(64, b'\0')
return yubikey_frame.YubiKeyFrame(command=slot, payload=payload) | [
"def",
"to_frame",
"(",
"self",
",",
"slot",
"=",
"SLOT",
".",
"DEVICE_CONFIG",
")",
":",
"data",
"=",
"self",
".",
"to_string",
"(",
")",
"payload",
"=",
"data",
".",
"ljust",
"(",
"64",
",",
"b'\\0'",
")",
"return",
"yubikey_frame",
".",
"YubiKeyFrame",
"(",
"command",
"=",
"slot",
",",
"payload",
"=",
"payload",
")"
] | Return the current configuration as a YubiKeyFrame object. | [
"Return",
"the",
"current",
"configuration",
"as",
"a",
"YubiKeyFrame",
"object",
"."
] | python | train |
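The padding step in isolation: the configuration bytes are zero-padded to the fixed 64-byte frame payload:

data = b'\x01\x02\x03'
payload = data.ljust(64, b'\x00')
assert len(payload) == 64 and payload.startswith(data)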
tomplus/kubernetes_asyncio | kubernetes_asyncio/client/api/settings_v1alpha1_api.py | https://github.com/tomplus/kubernetes_asyncio/blob/f9ab15317ec921409714c7afef11aeb0f579985d/kubernetes_asyncio/client/api/settings_v1alpha1_api.py#L755-L779 | def patch_namespaced_pod_preset(self, name, namespace, body, **kwargs): # noqa: E501
"""patch_namespaced_pod_preset # noqa: E501
partially update the specified PodPreset # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.patch_namespaced_pod_preset(name, namespace, body, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str name: name of the PodPreset (required)
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param UNKNOWN_BASE_TYPE body: (required)
:param str pretty: If 'true', then the output is pretty printed.
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:return: V1alpha1PodPreset
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.patch_namespaced_pod_preset_with_http_info(name, namespace, body, **kwargs) # noqa: E501
else:
(data) = self.patch_namespaced_pod_preset_with_http_info(name, namespace, body, **kwargs) # noqa: E501
return data | [
"def",
"patch_namespaced_pod_preset",
"(",
"self",
",",
"name",
",",
"namespace",
",",
"body",
",",
"*",
"*",
"kwargs",
")",
":",
"# noqa: E501",
"kwargs",
"[",
"'_return_http_data_only'",
"]",
"=",
"True",
"if",
"kwargs",
".",
"get",
"(",
"'async_req'",
")",
":",
"return",
"self",
".",
"patch_namespaced_pod_preset_with_http_info",
"(",
"name",
",",
"namespace",
",",
"body",
",",
"*",
"*",
"kwargs",
")",
"# noqa: E501",
"else",
":",
"(",
"data",
")",
"=",
"self",
".",
"patch_namespaced_pod_preset_with_http_info",
"(",
"name",
",",
"namespace",
",",
"body",
",",
"*",
"*",
"kwargs",
")",
"# noqa: E501",
"return",
"data"
] | patch_namespaced_pod_preset # noqa: E501
partially update the specified PodPreset # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.patch_namespaced_pod_preset(name, namespace, body, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str name: name of the PodPreset (required)
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param UNKNOWN_BASE_TYPE body: (required)
:param str pretty: If 'true', then the output is pretty printed.
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:return: V1alpha1PodPreset
If the method is called asynchronously,
returns the request thread. | [
"patch_namespaced_pod_preset",
"#",
"noqa",
":",
"E501"
] | python | train |
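A hedged usage sketch with kubernetes_asyncio; the kubeconfig, resource names, and patch body are placeholders, and error handling is omitted:

import asyncio
from kubernetes_asyncio import client, config

async def main():
    await config.load_kube_config()
    api = client.SettingsV1alpha1Api()
    body = {'spec': {'env': [{'name': 'STAGE', 'value': 'test'}]}}
    preset = await api.patch_namespaced_pod_preset('my-preset', 'default', body)
    print(preset.metadata.name)

asyncio.get_event_loop().run_until_complete(main())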
evhub/coconut | coconut/compiler/header.py | https://github.com/evhub/coconut/blob/ff97177344e7604e89a0a98a977a87ed2a56fc6d/coconut/compiler/header.py#L103-L234 | def process_header_args(which, target, use_hash, no_tco, strict):
"""Create the dictionary passed to str.format in the header, target_startswith, and target_info."""
target_startswith = one_num_ver(target)
target_info = get_target_info(target)
try_backport_lru_cache = r'''try:
from backports.functools_lru_cache import lru_cache
functools.lru_cache = lru_cache
except ImportError: pass
'''
try_import_trollius = r'''try:
import trollius as asyncio
except ImportError:
class you_need_to_install_trollius: pass
asyncio = you_need_to_install_trollius()
'''
format_dict = dict(
comment=comment(),
empty_dict="{}",
target_startswith=target_startswith,
default_encoding=default_encoding,
hash_line=hash_prefix + use_hash + "\n" if use_hash is not None else "",
typing_line="# type: ignore\n" if which == "__coconut__" else "",
VERSION_STR=VERSION_STR,
module_docstring='"""Built-in Coconut utilities."""\n\n' if which == "__coconut__" else "",
object="(object)" if target_startswith != "3" else "",
import_asyncio=_indent(
"" if not target or target_info >= (3, 5)
else "import asyncio\n" if target_info >= (3, 4)
else r'''if _coconut_sys.version_info >= (3, 4):
import asyncio
else:
''' + _indent(try_import_trollius) if target_info >= (3,)
else try_import_trollius,
),
import_pickle=_indent(
r'''if _coconut_sys.version_info < (3,):
import cPickle as pickle
else:
import pickle''' if not target
else "import cPickle as pickle" if target_info < (3,)
else "import pickle"
),
import_OrderedDict=_indent(
r'''if _coconut_sys.version_info >= (2, 7):
OrderedDict = collections.OrderedDict
else:
OrderedDict = dict'''
if not target
else "OrderedDict = collections.OrderedDict" if target_info >= (2, 7)
else "OrderedDict = dict"
),
import_collections_abc=_indent(
r'''if _coconut_sys.version_info < (3, 3):
abc = collections
else:
import collections.abc as abc'''
if target_startswith != "2"
else "abc = collections"
),
bind_lru_cache=_indent(
r'''if _coconut_sys.version_info < (3, 2):
''' + _indent(try_backport_lru_cache)
if not target
else try_backport_lru_cache if target_startswith == "2"
else ""
),
comma_bytearray=", bytearray" if target_startswith != "3" else "",
static_repr="staticmethod(repr)" if target_startswith != "3" else "repr",
with_ThreadPoolExecutor=(
r'''from multiprocessing import cpu_count # cpu_count() * 5 is the default Python 3.5 thread count
with ThreadPoolExecutor(cpu_count() * 5)''' if target_info < (3, 5)
else '''with ThreadPoolExecutor()'''
),
tco_decorator="@_coconut_tco\n" + " " * 8 if not no_tco else "",
tail_call_func_args_kwargs="func(*args, **kwargs)" if no_tco else "_coconut_tail_call(func, *args, **kwargs)",
comma_tco=", _coconut_tail_call, _coconut_tco" if not no_tco else "",
def_coconut_NamedTuple=(
r'''def _coconut_NamedTuple(name, fields):
return _coconut.collections.namedtuple(name, [x for x, t in fields])'''
if target_info < (3, 6)
else "from typing import NamedTuple as _coconut_NamedTuple"
),
def_prepattern=(
r'''def prepattern(base_func):
"""DEPRECATED: Use addpattern instead."""
def pattern_prepender(func):
return addpattern(func)(base_func)
return pattern_prepender
''' if not strict else ""
),
def_datamaker=(
r'''def datamaker(data_type):
"""DEPRECATED: Use makedata instead."""
return _coconut.functools.partial(makedata, data_type)
''' if not strict else ""
),
__coconut__=(
'"__coconut__"' if target_startswith == "3"
else 'b"__coconut__"' if target_startswith == "2"
else 'str("__coconut__")'
),
)
format_dict["underscore_imports"] = "_coconut, _coconut_NamedTuple, _coconut_MatchError{comma_tco}, _coconut_igetitem, _coconut_base_compose, _coconut_forward_compose, _coconut_back_compose, _coconut_forward_star_compose, _coconut_back_star_compose, _coconut_pipe, _coconut_star_pipe, _coconut_back_pipe, _coconut_back_star_pipe, _coconut_bool_and, _coconut_bool_or, _coconut_none_coalesce, _coconut_minus, _coconut_map, _coconut_partial".format(**format_dict)
# ._coconut_tco_func is used in main.coco, so don't remove it
# here without replacing its usage there
format_dict["def_tco"] = "" if no_tco else '''class _coconut_tail_call{object}:
__slots__ = ("func", "args", "kwargs")
def __init__(self, func, *args, **kwargs):
self.func, self.args, self.kwargs = func, args, kwargs
_coconut_tco_func_dict = {empty_dict}
def _coconut_tco(func):
@_coconut.functools.wraps(func)
def tail_call_optimized_func(*args, **kwargs):
call_func = func
while True:
wkref = _coconut_tco_func_dict.get(_coconut.id(call_func))
if wkref is not None and wkref() is call_func:
call_func = call_func._coconut_tco_func
result = call_func(*args, **kwargs) # pass --no-tco to clean up your traceback
if not isinstance(result, _coconut_tail_call):
return result
call_func, args, kwargs = result.func, result.args, result.kwargs
tail_call_optimized_func._coconut_tco_func = func
_coconut_tco_func_dict[_coconut.id(tail_call_optimized_func)] = _coconut.weakref.ref(tail_call_optimized_func)
return tail_call_optimized_func
'''.format(**format_dict)
return format_dict, target_startswith, target_info | [
"def",
"process_header_args",
"(",
"which",
",",
"target",
",",
"use_hash",
",",
"no_tco",
",",
"strict",
")",
":",
"target_startswith",
"=",
"one_num_ver",
"(",
"target",
")",
"target_info",
"=",
"get_target_info",
"(",
"target",
")",
"try_backport_lru_cache",
"=",
"r'''try:\n from backports.functools_lru_cache import lru_cache\n functools.lru_cache = lru_cache\nexcept ImportError: pass\n'''",
"try_import_trollius",
"=",
"r'''try:\n import trollius as asyncio\nexcept ImportError:\n class you_need_to_install_trollius: pass\n asyncio = you_need_to_install_trollius()\n'''",
"format_dict",
"=",
"dict",
"(",
"comment",
"=",
"comment",
"(",
")",
",",
"empty_dict",
"=",
"\"{}\"",
",",
"target_startswith",
"=",
"target_startswith",
",",
"default_encoding",
"=",
"default_encoding",
",",
"hash_line",
"=",
"hash_prefix",
"+",
"use_hash",
"+",
"\"\\n\"",
"if",
"use_hash",
"is",
"not",
"None",
"else",
"\"\"",
",",
"typing_line",
"=",
"\"# type: ignore\\n\"",
"if",
"which",
"==",
"\"__coconut__\"",
"else",
"\"\"",
",",
"VERSION_STR",
"=",
"VERSION_STR",
",",
"module_docstring",
"=",
"'\"\"\"Built-in Coconut utilities.\"\"\"\\n\\n'",
"if",
"which",
"==",
"\"__coconut__\"",
"else",
"\"\"",
",",
"object",
"=",
"\"(object)\"",
"if",
"target_startswith",
"!=",
"\"3\"",
"else",
"\"\"",
",",
"import_asyncio",
"=",
"_indent",
"(",
"\"\"",
"if",
"not",
"target",
"or",
"target_info",
">=",
"(",
"3",
",",
"5",
")",
"else",
"\"import asyncio\\n\"",
"if",
"target_info",
">=",
"(",
"3",
",",
"4",
")",
"else",
"r'''if _coconut_sys.version_info >= (3, 4):\n import asyncio\nelse:\n'''",
"+",
"_indent",
"(",
"try_import_trollius",
")",
"if",
"target_info",
">=",
"(",
"3",
",",
")",
"else",
"try_import_trollius",
",",
")",
",",
"import_pickle",
"=",
"_indent",
"(",
"r'''if _coconut_sys.version_info < (3,):\n import cPickle as pickle\nelse:\n import pickle'''",
"if",
"not",
"target",
"else",
"\"import cPickle as pickle\"",
"if",
"target_info",
"<",
"(",
"3",
",",
")",
"else",
"\"import pickle\"",
")",
",",
"import_OrderedDict",
"=",
"_indent",
"(",
"r'''if _coconut_sys.version_info >= (2, 7):\n OrderedDict = collections.OrderedDict\nelse:\n OrderedDict = dict'''",
"if",
"not",
"target",
"else",
"\"OrderedDict = collections.OrderedDict\"",
"if",
"target_info",
">=",
"(",
"2",
",",
"7",
")",
"else",
"\"OrderedDict = dict\"",
")",
",",
"import_collections_abc",
"=",
"_indent",
"(",
"r'''if _coconut_sys.version_info < (3, 3):\n abc = collections\nelse:\n import collections.abc as abc'''",
"if",
"target_startswith",
"!=",
"\"2\"",
"else",
"\"abc = collections\"",
")",
",",
"bind_lru_cache",
"=",
"_indent",
"(",
"r'''if _coconut_sys.version_info < (3, 2):\n'''",
"+",
"_indent",
"(",
"try_backport_lru_cache",
")",
"if",
"not",
"target",
"else",
"try_backport_lru_cache",
"if",
"target_startswith",
"==",
"\"2\"",
"else",
"\"\"",
")",
",",
"comma_bytearray",
"=",
"\", bytearray\"",
"if",
"target_startswith",
"!=",
"\"3\"",
"else",
"\"\"",
",",
"static_repr",
"=",
"\"staticmethod(repr)\"",
"if",
"target_startswith",
"!=",
"\"3\"",
"else",
"\"repr\"",
",",
"with_ThreadPoolExecutor",
"=",
"(",
"r'''from multiprocessing import cpu_count # cpu_count() * 5 is the default Python 3.5 thread count\n with ThreadPoolExecutor(cpu_count() * 5)'''",
"if",
"target_info",
"<",
"(",
"3",
",",
"5",
")",
"else",
"'''with ThreadPoolExecutor()'''",
")",
",",
"tco_decorator",
"=",
"\"@_coconut_tco\\n\"",
"+",
"\" \"",
"*",
"8",
"if",
"not",
"no_tco",
"else",
"\"\"",
",",
"tail_call_func_args_kwargs",
"=",
"\"func(*args, **kwargs)\"",
"if",
"no_tco",
"else",
"\"_coconut_tail_call(func, *args, **kwargs)\"",
",",
"comma_tco",
"=",
"\", _coconut_tail_call, _coconut_tco\"",
"if",
"not",
"no_tco",
"else",
"\"\"",
",",
"def_coconut_NamedTuple",
"=",
"(",
"r'''def _coconut_NamedTuple(name, fields):\n return _coconut.collections.namedtuple(name, [x for x, t in fields])'''",
"if",
"target_info",
"<",
"(",
"3",
",",
"6",
")",
"else",
"\"from typing import NamedTuple as _coconut_NamedTuple\"",
")",
",",
"def_prepattern",
"=",
"(",
"r'''def prepattern(base_func):\n \"\"\"DEPRECATED: Use addpattern instead.\"\"\"\n def pattern_prepender(func):\n return addpattern(func)(base_func)\n return pattern_prepender\n'''",
"if",
"not",
"strict",
"else",
"\"\"",
")",
",",
"def_datamaker",
"=",
"(",
"r'''def datamaker(data_type):\n \"\"\"DEPRECATED: Use makedata instead.\"\"\"\n return _coconut.functools.partial(makedata, data_type)\n'''",
"if",
"not",
"strict",
"else",
"\"\"",
")",
",",
"__coconut__",
"=",
"(",
"'\"__coconut__\"'",
"if",
"target_startswith",
"==",
"\"3\"",
"else",
"'b\"__coconut__\"'",
"if",
"target_startswith",
"==",
"\"2\"",
"else",
"'str(\"__coconut__\")'",
")",
",",
")",
"format_dict",
"[",
"\"underscore_imports\"",
"]",
"=",
"\"_coconut, _coconut_NamedTuple, _coconut_MatchError{comma_tco}, _coconut_igetitem, _coconut_base_compose, _coconut_forward_compose, _coconut_back_compose, _coconut_forward_star_compose, _coconut_back_star_compose, _coconut_pipe, _coconut_star_pipe, _coconut_back_pipe, _coconut_back_star_pipe, _coconut_bool_and, _coconut_bool_or, _coconut_none_coalesce, _coconut_minus, _coconut_map, _coconut_partial\"",
".",
"format",
"(",
"*",
"*",
"format_dict",
")",
"# ._coconut_tco_func is used in main.coco, so don't remove it",
"# here without replacing its usage there",
"format_dict",
"[",
"\"def_tco\"",
"]",
"=",
"\"\"",
"if",
"no_tco",
"else",
"'''class _coconut_tail_call{object}:\n __slots__ = (\"func\", \"args\", \"kwargs\")\n def __init__(self, func, *args, **kwargs):\n self.func, self.args, self.kwargs = func, args, kwargs\n_coconut_tco_func_dict = {empty_dict}\ndef _coconut_tco(func):\n @_coconut.functools.wraps(func)\n def tail_call_optimized_func(*args, **kwargs):\n call_func = func\n while True:\n wkref = _coconut_tco_func_dict.get(_coconut.id(call_func))\n if wkref is not None and wkref() is call_func:\n call_func = call_func._coconut_tco_func\n result = call_func(*args, **kwargs) # pass --no-tco to clean up your traceback\n if not isinstance(result, _coconut_tail_call):\n return result\n call_func, args, kwargs = result.func, result.args, result.kwargs\n tail_call_optimized_func._coconut_tco_func = func\n _coconut_tco_func_dict[_coconut.id(tail_call_optimized_func)] = _coconut.weakref.ref(tail_call_optimized_func)\n return tail_call_optimized_func\n'''",
".",
"format",
"(",
"*",
"*",
"format_dict",
")",
"return",
"format_dict",
",",
"target_startswith",
",",
"target_info"
] | Create the dictionary passed to str.format in the header, target_startswith, and target_info. | [
"Create",
"the",
"dictionary",
"passed",
"to",
"str",
".",
"format",
"in",
"the",
"header",
"target_startswith",
"and",
"target_info",
"."
] | python | train |
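The record above begins mid-function, but the signature recovered in its token stream is `process_header_args(which, target, use_hash, no_tco, strict)`. A minimal sketch of how the Coconut compiler might invoke it; the import path and all argument values are assumptions for illustration, not taken from the repository:

```python
from coconut.compiler.header import process_header_args  # module path assumed

# Hypothetical call; every argument value here is illustrative.
format_dict, target_startswith, target_info = process_header_args(
    which="__coconut__",  # header variant being generated
    target="",            # no fixed Python target version
    use_hash=None,        # omit the hash line
    no_tco=False,         # keep the tail-call-optimization boilerplate
    strict=False,         # keep deprecated aliases (prepattern, datamaker)
)
print(format_dict["def_tco"][:40])  # TCO source; empty string when no_tco=True
```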
pycontribs/pyrax | pyrax/autoscale.py | https://github.com/pycontribs/pyrax/blob/9ddfd5064b3a292d7337906f3b2d5dce95b50b99/pyrax/autoscale.py#L1041-L1053 | def update(self, scaling_group, name=None, cooldown=None, min_entities=None,
max_entities=None, metadata=None):
"""
Updates an existing ScalingGroup. One or more of the attributes can be
specified.
NOTE: if you specify metadata, it will *replace* any existing metadata.
If you want to add to it, you either need to pass the complete dict of
metadata, or call the update_metadata() method.
"""
return self._manager.update(scaling_group, name=name, cooldown=cooldown,
min_entities=min_entities, max_entities=max_entities,
metadata=metadata) | [
"def",
"update",
"(",
"self",
",",
"scaling_group",
",",
"name",
"=",
"None",
",",
"cooldown",
"=",
"None",
",",
"min_entities",
"=",
"None",
",",
"max_entities",
"=",
"None",
",",
"metadata",
"=",
"None",
")",
":",
"return",
"self",
".",
"_manager",
".",
"update",
"(",
"scaling_group",
",",
"name",
"=",
"name",
",",
"cooldown",
"=",
"cooldown",
",",
"min_entities",
"=",
"min_entities",
",",
"max_entities",
"=",
"max_entities",
",",
"metadata",
"=",
"metadata",
")"
] | Updates an existing ScalingGroup. One or more of the attributes can be
specified.
NOTE: if you specify metadata, it will *replace* any existing metadata.
If you want to add to it, you either need to pass the complete dict of
metadata, or call the update_metadata() method. | [
"Updates",
"an",
"existing",
"ScalingGroup",
".",
"One",
"or",
"more",
"of",
"the",
"attributes",
"can",
"be",
"specified",
"."
] | python | train |
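A hedged usage sketch for the `update` call above. The authentication setup and group lookup are assumptions (credentials omitted); note the docstring's warning that `metadata` replaces rather than merges:

```python
import pyrax

pyrax.set_setting("identity_type", "rackspace")  # assumed auth setup
au = pyrax.autoscale
sg = au.list()[0]  # pick some existing ScalingGroup

au.update(sg, name="web-workers", cooldown=120,
          min_entities=2, max_entities=10,
          metadata={"env": "prod"})  # replaces any existing metadata
```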
Infinidat/infi.clickhouse_orm | src/infi/clickhouse_orm/utils.py | https://github.com/Infinidat/infi.clickhouse_orm/blob/595f2023e334e3925a5c3fbfdd6083a5992a7169/src/infi/clickhouse_orm/utils.py#L50-L81 | def parse_array(array_string):
"""
Parse an array string as returned by clickhouse. For example:
"['hello', 'world']" ==> ["hello", "world"]
"[1,2,3]" ==> [1, 2, 3]
"""
# Sanity check
if len(array_string) < 2 or array_string[0] != '[' or array_string[-1] != ']':
raise ValueError('Invalid array string: "%s"' % array_string)
# Drop opening bracket
array_string = array_string[1:]
# Go over the string, lopping off each value at the beginning until nothing is left
values = []
while True:
if array_string == ']':
# End of array
return values
elif array_string[0] in ', ':
# In between values
array_string = array_string[1:]
elif array_string[0] == "'":
# Start of quoted value, find its end
match = re.search(r"[^\\]'", array_string)
if match is None:
raise ValueError('Missing closing quote: "%s"' % array_string)
values.append(array_string[1 : match.start() + 1])
array_string = array_string[match.end():]
else:
# Start of non-quoted value, find its end
match = re.search(r",|\]", array_string)
values.append(array_string[0 : match.start()])
array_string = array_string[match.end() - 1:] | [
"def",
"parse_array",
"(",
"array_string",
")",
":",
"# Sanity check",
"if",
"len",
"(",
"array_string",
")",
"<",
"2",
"or",
"array_string",
"[",
"0",
"]",
"!=",
"'['",
"or",
"array_string",
"[",
"-",
"1",
"]",
"!=",
"']'",
":",
"raise",
"ValueError",
"(",
"'Invalid array string: \"%s\"'",
"%",
"array_string",
")",
"# Drop opening brace",
"array_string",
"=",
"array_string",
"[",
"1",
":",
"]",
"# Go over the string, lopping off each value at the beginning until nothing is left",
"values",
"=",
"[",
"]",
"while",
"True",
":",
"if",
"array_string",
"==",
"']'",
":",
"# End of array",
"return",
"values",
"elif",
"array_string",
"[",
"0",
"]",
"in",
"', '",
":",
"# In between values",
"array_string",
"=",
"array_string",
"[",
"1",
":",
"]",
"elif",
"array_string",
"[",
"0",
"]",
"==",
"\"'\"",
":",
"# Start of quoted value, find its end",
"match",
"=",
"re",
".",
"search",
"(",
"r\"[^\\\\]'\"",
",",
"array_string",
")",
"if",
"match",
"is",
"None",
":",
"raise",
"ValueError",
"(",
"'Missing closing quote: \"%s\"'",
"%",
"array_string",
")",
"values",
".",
"append",
"(",
"array_string",
"[",
"1",
":",
"match",
".",
"start",
"(",
")",
"+",
"1",
"]",
")",
"array_string",
"=",
"array_string",
"[",
"match",
".",
"end",
"(",
")",
":",
"]",
"else",
":",
"# Start of non-quoted value, find its end",
"match",
"=",
"re",
".",
"search",
"(",
"r\",|\\]\"",
",",
"array_string",
")",
"values",
".",
"append",
"(",
"array_string",
"[",
"0",
":",
"match",
".",
"start",
"(",
")",
"]",
")",
"array_string",
"=",
"array_string",
"[",
"match",
".",
"end",
"(",
")",
"-",
"1",
":",
"]"
] | Parse an array string as returned by clickhouse. For example:
"['hello', 'world']" ==> ["hello", "world"]
"[1,2,3]" ==> [1, 2, 3] | [
"Parse",
"an",
"array",
"string",
"as",
"returned",
"by",
"clickhouse",
".",
"For",
"example",
":",
"[",
"hello",
"world",
"]",
"==",
">",
"[",
"hello",
"world",
"]",
"[",
"1",
"2",
"3",
"]",
"==",
">",
"[",
"1",
"2",
"3",
"]"
] | python | train |
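Usage follows directly from the implementation above. Because each element is sliced out of the input string, numeric elements come back as strings:

```python
from infi.clickhouse_orm.utils import parse_array

parse_array("['hello', 'world']")   # ['hello', 'world']
parse_array("[1,2,3]")              # ['1', '2', '3'] -- elements stay strings
parse_array("not an array")         # raises ValueError
```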
gwpy/gwpy | gwpy/io/nds2.py | https://github.com/gwpy/gwpy/blob/7a92b917e7dd2d99b15895293a1fa1d66cdb210a/gwpy/io/nds2.py#L524-L543 | def _strip_ctype(name, ctype, protocol=2):
"""Strip the ctype from a channel name for the given nds server version
This is needed because NDS1 servers store trend channels _including_
the suffix, but not raw channels, and NDS2 doesn't do this.
"""
# parse channel type from name (e.g. 'L1:GDS-CALIB_STRAIN,reduced')
try:
name, ctypestr = name.rsplit(',', 1)
except ValueError:
pass
else:
ctype = Nds2ChannelType.find(ctypestr).value
# NDS1 stores channels with trend suffix, so we put it back:
if protocol == 1 and ctype in (
Nds2ChannelType.STREND.value,
Nds2ChannelType.MTREND.value
):
name += ',{0}'.format(ctypestr)
return name, ctype | [
"def",
"_strip_ctype",
"(",
"name",
",",
"ctype",
",",
"protocol",
"=",
"2",
")",
":",
"# parse channel type from name (e.g. 'L1:GDS-CALIB_STRAIN,reduced')",
"try",
":",
"name",
",",
"ctypestr",
"=",
"name",
".",
"rsplit",
"(",
"','",
",",
"1",
")",
"except",
"ValueError",
":",
"pass",
"else",
":",
"ctype",
"=",
"Nds2ChannelType",
".",
"find",
"(",
"ctypestr",
")",
".",
"value",
"# NDS1 stores channels with trend suffix, so we put it back:",
"if",
"protocol",
"==",
"1",
"and",
"ctype",
"in",
"(",
"Nds2ChannelType",
".",
"STREND",
".",
"value",
",",
"Nds2ChannelType",
".",
"MTREND",
".",
"value",
")",
":",
"name",
"+=",
"',{0}'",
".",
"format",
"(",
"ctypestr",
")",
"return",
"name",
",",
"ctype"
] | Strip the ctype from a channel name for the given nds server version
This is needed because NDS1 servers store trend channels _including_
the suffix, but not raw channels, and NDS2 doesn't do this. | [
"Strip",
"the",
"ctype",
"from",
"a",
"channel",
"name",
"for",
"the",
"given",
"nds",
"server",
"version"
] | python | train |
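A sketch of the private helper's behavior. The suffix strings and `Nds2ChannelType` members are inferred from the code above, not verified against a live gwpy install:

```python
# NDS2-style name with an embedded ctype suffix: the suffix is stripped
# and the ctype is resolved from the suffix string.
name, ctype = _strip_ctype('L1:GDS-CALIB_STRAIN,reduced', 0, protocol=2)
# name == 'L1:GDS-CALIB_STRAIN'

# NDS1 plus a trend type: the suffix is re-appended after parsing.
name, ctype = _strip_ctype('L1:IMC-F_OUT_DQ,m-trend', 0, protocol=1)
# name == 'L1:IMC-F_OUT_DQ,m-trend'
```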
linnarsson-lab/loompy | loompy/loom_layer.py | https://github.com/linnarsson-lab/loompy/blob/62c8373a92b058753baa3a95331fb541f560f599/loompy/loom_layer.py#L146-L197 | def map(self, f_list: List[Callable[[np.ndarray], int]], axis: int = 0, chunksize: int = 1000, selection: np.ndarray = None) -> List[np.ndarray]:
"""
Apply a function along an axis without loading the entire dataset in memory.
Args:
f_list (list of func): Function(s) that takes a numpy ndarray as argument
axis (int): Axis along which to apply the function (0 = rows, 1 = columns)
chunksize (int): Number of rows (columns) to load per chunk
selection (array of bool): Columns (rows) to include
Returns:
numpy.ndarray result of function application
If you supply a list of functions, the result will be a list of numpy arrays. This is more
efficient than repeatedly calling map() one function at a time.
"""
if hasattr(f_list, '__call__'):
raise ValueError("f_list must be a list of functions, not a function itself")
result = []
if axis == 0:
rows_per_chunk = chunksize
for i in range(len(f_list)):
result.append(np.zeros(self.shape[0]))
ix = 0
while ix < self.shape[0]:
rows_per_chunk = min(self.shape[0] - ix, rows_per_chunk)
if selection is not None:
chunk = self[ix:ix + rows_per_chunk, :][:, selection]
else:
chunk = self[ix:ix + rows_per_chunk, :]
for i in range(len(f_list)):
result[i][ix:ix + rows_per_chunk] = np.apply_along_axis(f_list[i], 1, chunk)
ix = ix + rows_per_chunk
elif axis == 1:
cols_per_chunk = chunksize
for i in range(len(f_list)):
result.append(np.zeros(self.shape[1]))
ix = 0
while ix < self.shape[1]:
cols_per_chunk = min(self.shape[1] - ix, cols_per_chunk)
if selection is not None:
chunk = self[:, ix:ix + cols_per_chunk][selection, :]
else:
chunk = self[:, ix:ix + cols_per_chunk]
for i in range(len(f_list)):
result[i][ix:ix + cols_per_chunk] = np.apply_along_axis(f_list[i], 0, chunk)
ix = ix + cols_per_chunk
return result | [
"def",
"map",
"(",
"self",
",",
"f_list",
":",
"List",
"[",
"Callable",
"[",
"[",
"np",
".",
"ndarray",
"]",
",",
"int",
"]",
"]",
",",
"axis",
":",
"int",
"=",
"0",
",",
"chunksize",
":",
"int",
"=",
"1000",
",",
"selection",
":",
"np",
".",
"ndarray",
"=",
"None",
")",
"->",
"List",
"[",
"np",
".",
"ndarray",
"]",
":",
"if",
"hasattr",
"(",
"f_list",
",",
"'__call__'",
")",
":",
"raise",
"ValueError",
"(",
"\"f_list must be a list of functions, not a function itself\"",
")",
"result",
"=",
"[",
"]",
"if",
"axis",
"==",
"0",
":",
"rows_per_chunk",
"=",
"chunksize",
"for",
"i",
"in",
"range",
"(",
"len",
"(",
"f_list",
")",
")",
":",
"result",
".",
"append",
"(",
"np",
".",
"zeros",
"(",
"self",
".",
"shape",
"[",
"0",
"]",
")",
")",
"ix",
"=",
"0",
"while",
"ix",
"<",
"self",
".",
"shape",
"[",
"0",
"]",
":",
"rows_per_chunk",
"=",
"min",
"(",
"self",
".",
"shape",
"[",
"0",
"]",
"-",
"ix",
",",
"rows_per_chunk",
")",
"if",
"selection",
"is",
"not",
"None",
":",
"chunk",
"=",
"self",
"[",
"ix",
":",
"ix",
"+",
"rows_per_chunk",
",",
":",
"]",
"[",
":",
",",
"selection",
"]",
"else",
":",
"chunk",
"=",
"self",
"[",
"ix",
":",
"ix",
"+",
"rows_per_chunk",
",",
":",
"]",
"for",
"i",
"in",
"range",
"(",
"len",
"(",
"f_list",
")",
")",
":",
"result",
"[",
"i",
"]",
"[",
"ix",
":",
"ix",
"+",
"rows_per_chunk",
"]",
"=",
"np",
".",
"apply_along_axis",
"(",
"f_list",
"[",
"i",
"]",
",",
"1",
",",
"chunk",
")",
"ix",
"=",
"ix",
"+",
"rows_per_chunk",
"elif",
"axis",
"==",
"1",
":",
"cols_per_chunk",
"=",
"chunksize",
"for",
"i",
"in",
"range",
"(",
"len",
"(",
"f_list",
")",
")",
":",
"result",
".",
"append",
"(",
"np",
".",
"zeros",
"(",
"self",
".",
"shape",
"[",
"1",
"]",
")",
")",
"ix",
"=",
"0",
"while",
"ix",
"<",
"self",
".",
"shape",
"[",
"1",
"]",
":",
"cols_per_chunk",
"=",
"min",
"(",
"self",
".",
"shape",
"[",
"1",
"]",
"-",
"ix",
",",
"cols_per_chunk",
")",
"if",
"selection",
"is",
"not",
"None",
":",
"chunk",
"=",
"self",
"[",
":",
",",
"ix",
":",
"ix",
"+",
"cols_per_chunk",
"]",
"[",
"selection",
",",
":",
"]",
"else",
":",
"chunk",
"=",
"self",
"[",
":",
",",
"ix",
":",
"ix",
"+",
"cols_per_chunk",
"]",
"for",
"i",
"in",
"range",
"(",
"len",
"(",
"f_list",
")",
")",
":",
"result",
"[",
"i",
"]",
"[",
"ix",
":",
"ix",
"+",
"cols_per_chunk",
"]",
"=",
"np",
".",
"apply_along_axis",
"(",
"f_list",
"[",
"i",
"]",
",",
"0",
",",
"chunk",
")",
"ix",
"=",
"ix",
"+",
"cols_per_chunk",
"return",
"result"
] | Apply a function along an axis without loading the entire dataset in memory.
Args:
f_list (list of func): Function(s) that takes a numpy ndarray as argument
axis (int): Axis along which to apply the function (0 = rows, 1 = columns)
chunksize (int): Number of rows (columns) to load per chunk
selection (array of bool): Columns (rows) to include
Returns:
numpy.ndarray result of function application
If you supply a list of functions, the result will be a list of numpy arrays. This is more
efficient than repeatedly calling map() one function at a time. | [
"Apply",
"a",
"function",
"along",
"an",
"axis",
"without",
"loading",
"the",
"entire",
"dataset",
"in",
"memory",
"."
] | python | train |
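A hedged usage sketch for `map`. The file name is illustrative, and `ds.layers[""]` is assumed to expose the main matrix as a layer object carrying this method:

```python
import numpy as np
import loompy

with loompy.connect("cortex.loom") as ds:   # file name illustrative
    # Per-row (gene) mean and max, streamed 512 rows at a time.
    means, maxes = ds.layers[""].map([np.mean, np.max], axis=0, chunksize=512)
```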
amperser/proselint | proselint/checks/redundancy/misc.py | https://github.com/amperser/proselint/blob/cb619ee4023cc7856f5fb96aec2a33a2c9f1a2e2/proselint/checks/redundancy/misc.py#L126-L141 | def check_nordquist(text):
"""Suggest the preferred forms.
source: Richard Nordquist
source_url: http://grammar.about.com/bio/Richard-Nordquist-22176.htm
"""
err = "redundancy.nordquist"
msg = "Redundancy. Use '{}' instead of '{}'."
redundancies = [
["essential", ["absolutely essential"]],
["necessary", ["absolutely necessary"]],
["a.m.", ["a.m. in the morning"]],
["p.m.", ["p.m. at night"]],
]
return preferred_forms_check(text, redundancies, err, msg) | [
"def",
"check_nordquist",
"(",
"text",
")",
":",
"err",
"=",
"\"redundancy.nordquist\"",
"msg",
"=",
"\"Redundancy. Use '{}' instead of '{}'.\"",
"redundancies",
"=",
"[",
"[",
"\"essential\"",
",",
"[",
"\"absolutely essential\"",
"]",
"]",
",",
"[",
"\"necessary\"",
",",
"[",
"\"absolutely necessary\"",
"]",
"]",
",",
"[",
"\"a.m.\"",
",",
"[",
"\"a.m. in the morning\"",
"]",
"]",
",",
"[",
"\"p.m.\"",
",",
"[",
"\"p.m. at night\"",
"]",
"]",
",",
"]",
"return",
"preferred_forms_check",
"(",
"text",
",",
"redundancies",
",",
"err",
",",
"msg",
")"
] | Suggest the preferred forms.
source: Richard Nordquist
source_url: http://grammar.about.com/bio/Richard-Nordquist-22176.htm | [
"Suggest",
"the",
"preferred",
"forms",
"."
] | python | train |
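A sketch of calling the check directly. The exact shape of the returned error tuples comes from `preferred_forms_check`, which isn't shown here:

```python
text = "It is absolutely essential to leave by 8 a.m. in the morning."
for error in check_nordquist(text):
    print(error)   # positions plus "Redundancy. Use 'essential' ..." etc.
```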
saltstack/salt | salt/modules/rh_service.py | https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/rh_service.py#L364-L382 | def available(name, limit=''):
'''
Return True if the named service is available. Use the ``limit`` param to
restrict results to services of that type.
CLI Examples:
.. code-block:: bash
salt '*' service.available sshd
salt '*' service.available sshd limit=upstart
salt '*' service.available sshd limit=sysvinit
'''
if limit == 'upstart':
return _service_is_upstart(name)
elif limit == 'sysvinit':
return _service_is_sysv(name)
else:
return _service_is_upstart(name) or _service_is_sysv(name) or _service_is_chkconfig(name) | [
"def",
"available",
"(",
"name",
",",
"limit",
"=",
"''",
")",
":",
"if",
"limit",
"==",
"'upstart'",
":",
"return",
"_service_is_upstart",
"(",
"name",
")",
"elif",
"limit",
"==",
"'sysvinit'",
":",
"return",
"_service_is_sysv",
"(",
"name",
")",
"else",
":",
"return",
"_service_is_upstart",
"(",
"name",
")",
"or",
"_service_is_sysv",
"(",
"name",
")",
"or",
"_service_is_chkconfig",
"(",
"name",
")"
] | Return True if the named service is available. Use the ``limit`` param to
restrict results to services of that type.
CLI Examples:
.. code-block:: bash
salt '*' service.available sshd
salt '*' service.available sshd limit=upstart
salt '*' service.available sshd limit=sysvinit | [
"Return",
"True",
"if",
"the",
"named",
"service",
"is",
"available",
".",
"Use",
"the",
"limit",
"param",
"to",
"restrict",
"results",
"to",
"services",
"of",
"that",
"type",
"."
] | python | train |
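Besides the CLI examples in the docstring, the same execution-module function can be driven from Python via a LocalClient; this sketch assumes a running master/minion setup:

```python
import salt.client

local = salt.client.LocalClient()
# Equivalent of: salt '*' service.available sshd limit=sysvinit
result = local.cmd('*', 'service.available', ['sshd'],
                   kwarg={'limit': 'sysvinit'})
```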
laurencium/Causalinference | causalinference/core/summary.py | https://github.com/laurencium/Causalinference/blob/3b20ae0560c711628fba47975180c8484d8aa3e7/causalinference/core/summary.py#L40-L49 | def _summarize_pscore(self, pscore_c, pscore_t):
"""
Called by Strata class during initialization.
"""
self._dict['p_min'] = min(pscore_c.min(), pscore_t.min())
self._dict['p_max'] = max(pscore_c.max(), pscore_t.max())
self._dict['p_c_mean'] = pscore_c.mean()
self._dict['p_t_mean'] = pscore_t.mean() | [
"def",
"_summarize_pscore",
"(",
"self",
",",
"pscore_c",
",",
"pscore_t",
")",
":",
"self",
".",
"_dict",
"[",
"'p_min'",
"]",
"=",
"min",
"(",
"pscore_c",
".",
"min",
"(",
")",
",",
"pscore_t",
".",
"min",
"(",
")",
")",
"self",
".",
"_dict",
"[",
"'p_max'",
"]",
"=",
"max",
"(",
"pscore_c",
".",
"max",
"(",
")",
",",
"pscore_t",
".",
"max",
"(",
")",
")",
"self",
".",
"_dict",
"[",
"'p_c_mean'",
"]",
"=",
"pscore_c",
".",
"mean",
"(",
")",
"self",
".",
"_dict",
"[",
"'p_t_mean'",
"]",
"=",
"pscore_t",
".",
"mean",
"(",
")"
] | Called by Strata class during initialization. | [
"Called",
"by",
"Strata",
"class",
"during",
"initialization",
"."
] | python | train |
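The method only records four scalars. A self-contained sketch of the values it produces, using made-up propensity scores:

```python
import numpy as np

pscore_c = np.array([0.2, 0.3, 0.4])   # control-group propensity scores
pscore_t = np.array([0.5, 0.6, 0.9])   # treated-group propensity scores

summary = {
    'p_min': min(pscore_c.min(), pscore_t.min()),  # 0.2
    'p_max': max(pscore_c.max(), pscore_t.max()),  # 0.9
    'p_c_mean': pscore_c.mean(),                   # 0.3
    'p_t_mean': pscore_t.mean(),                   # ~0.667
}
```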
dmlc/gluon-nlp | src/gluonnlp/model/convolutional_encoder.py | https://github.com/dmlc/gluon-nlp/blob/4b83eb6bcc8881e5f1081a3675adaa19fac5c0ba/src/gluonnlp/model/convolutional_encoder.py#L135-L166 | def hybrid_forward(self, F, inputs, mask=None): # pylint: disable=arguments-differ
r"""
Forward computation for char_encoder
Parameters
----------
inputs: NDArray
The input tensor of shape `(seq_len, batch_size, embedding_size)`, in TNC layout.
mask: NDArray
The mask applied to the input of shape `(seq_len, batch_size)`, the mask will
be broadcasted along the embedding dimension.
Returns
----------
output: NDArray
The output of the encoder with shape `(batch_size, output_size)`
"""
if mask is not None:
inputs = F.broadcast_mul(inputs, mask.expand_dims(-1))
inputs = F.transpose(inputs, axes=(1, 2, 0))
output = self._convs(inputs)
if self._highways:
output = self._highways(output)
if self._projection:
output = self._projection(output)
return output | [
"def",
"hybrid_forward",
"(",
"self",
",",
"F",
",",
"inputs",
",",
"mask",
"=",
"None",
")",
":",
"# pylint: disable=arguments-differ",
"if",
"mask",
"is",
"not",
"None",
":",
"inputs",
"=",
"F",
".",
"broadcast_mul",
"(",
"inputs",
",",
"mask",
".",
"expand_dims",
"(",
"-",
"1",
")",
")",
"inputs",
"=",
"F",
".",
"transpose",
"(",
"inputs",
",",
"axes",
"=",
"(",
"1",
",",
"2",
",",
"0",
")",
")",
"output",
"=",
"self",
".",
"_convs",
"(",
"inputs",
")",
"if",
"self",
".",
"_highways",
":",
"output",
"=",
"self",
".",
"_highways",
"(",
"output",
")",
"if",
"self",
".",
"_projection",
":",
"output",
"=",
"self",
".",
"_projection",
"(",
"output",
")",
"return",
"output"
] | r"""
Forward computation for char_encoder
Parameters
----------
inputs: NDArray
The input tensor of shape `(seq_len, batch_size, embedding_size)`, in TNC layout.
mask: NDArray
The mask applied to the input of shape `(seq_len, batch_size)`, the mask will
be broadcasted along the embedding dimension.
Returns
----------
output: NDArray
The output of the encoder with shape `(batch_size, output_size)` | [
"r",
"Forward",
"computation",
"for",
"char_encoder"
] | python | train |
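A hedged sketch of driving the encoder end to end. The import path and constructor defaults (embedding size 15) are assumptions about this class, so treat the shapes as illustrative:

```python
import mxnet as mx
from gluonnlp.model import ConvolutionalEncoder   # import path assumed

encoder = ConvolutionalEncoder()    # defaults assumed: embed_size=15
encoder.initialize()

inputs = mx.nd.random.uniform(shape=(10, 4, 15))  # (seq_len, batch, emb) TNC
mask = mx.nd.ones((10, 4))                        # (seq_len, batch)
output = encoder(inputs, mask)                    # (batch_size, output_size)
```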
ianmiell/shutit | shutit_class.py | https://github.com/ianmiell/shutit/blob/19cd64cdfb23515b106b40213dccff4101617076/shutit_class.py#L2657-L2664 | def get_shutit_pexpect_session_from_id(self, shutit_pexpect_id):
"""Get the pexpect session from the given identifier.
"""
shutit_global.shutit_global_object.yield_to_draw()
for key in self.shutit_pexpect_sessions:
if self.shutit_pexpect_sessions[key].pexpect_session_id == shutit_pexpect_id:
return self.shutit_pexpect_sessions[key]
return self.fail('Should not get here in get_shutit_pexpect_session_from_id',throw_exception=True) | [
"def",
"get_shutit_pexpect_session_from_id",
"(",
"self",
",",
"shutit_pexpect_id",
")",
":",
"shutit_global",
".",
"shutit_global_object",
".",
"yield_to_draw",
"(",
")",
"for",
"key",
"in",
"self",
".",
"shutit_pexpect_sessions",
":",
"if",
"self",
".",
"shutit_pexpect_sessions",
"[",
"key",
"]",
".",
"pexpect_session_id",
"==",
"shutit_pexpect_id",
":",
"return",
"self",
".",
"shutit_pexpect_sessions",
"[",
"key",
"]",
"return",
"self",
".",
"fail",
"(",
"'Should not get here in get_shutit_pexpect_session_from_id'",
",",
"throw_exception",
"=",
"True",
")"
] | Get the pexpect session from the given identifier. | [
"Get",
"the",
"pexpect",
"session",
"from",
"the",
"given",
"identifier",
"."
] | python | train |
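An illustrative lookup; the session id string is hypothetical and depends on how sessions were registered on this ShutIt object:

```python
session = shutit.get_shutit_pexpect_session_from_id('target_child')  # id hypothetical
session.send('echo hello')
```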
usc-isi-i2/etk | etk/extractors/spacy_rule_extractor.py | https://github.com/usc-isi-i2/etk/blob/aab077c984ea20f5e8ae33af622fe11d3c4df866/etk/extractors/spacy_rule_extractor.py#L346-L361 | def _full_shape_filter(t: List, shapes: List) -> bool:
"""
Shape filter
Args:
t: List, list of tokens
shapes: List
Returns: bool
"""
if shapes:
for a_token in t:
if a_token._.full_shape not in shapes:
return False
return True | [
"def",
"_full_shape_filter",
"(",
"t",
":",
"List",
",",
"shapes",
":",
"List",
")",
"->",
"bool",
":",
"if",
"shapes",
":",
"for",
"a_token",
"in",
"t",
":",
"if",
"a_token",
".",
"_",
".",
"full_shape",
"not",
"in",
"shapes",
":",
"return",
"False",
"return",
"True"
] | Shape filter
Args:
t: List, list of tokens
shapes: List
Returns: bool | [
"Shape",
"filter",
"Args",
":",
"t",
":",
"List",
"list",
"of",
"tokens",
"shapes",
":",
"List"
] | python | train |
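Since the filter only touches the custom `_.full_shape` token attribute, its behavior can be demonstrated with stand-in objects instead of real spaCy tokens (shown as a plain call; in the source it is a static helper on the extractor class):

```python
from types import SimpleNamespace

def tok(shape):
    # Stand-in for a spaCy token carrying the custom `_.full_shape` attribute.
    return SimpleNamespace(_=SimpleNamespace(full_shape=shape))

_full_shape_filter([tok("Xxxx"), tok("dddd")], ["Xxxx", "dddd"])  # True
_full_shape_filter([tok("Xxxx")], ["dddd"])                       # False
_full_shape_filter([tok("Xxxx")], [])                             # True: empty list disables the filter
```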
tcalmant/ipopo | pelix/rsa/providers/distribution/__init__.py | https://github.com/tcalmant/ipopo/blob/2f9ae0c44cd9c34ef1a9d50837b3254e75678eb1/pelix/rsa/providers/distribution/__init__.py#L513-L529 | def _find_export(self, func):
# type: (Callable[[Tuple[Any, EndpointDescription]], bool]) -> Optional[Tuple[Any, EndpointDescription]]
"""
Look for an export using the given lookup method
The lookup method must accept a single parameter, which is a tuple
containing a service instance and endpoint description.
:param func: A function to look for the excepted export
:return: The found tuple or None
"""
with self._exported_instances_lock:
for val in self._exported_services.values():
if func(val):
return val
return None | [
"def",
"_find_export",
"(",
"self",
",",
"func",
")",
":",
"# type: (Callable[[Tuple[Any, EndpointDescription]], bool]) -> Optional[Tuple[Any, EndpointDescription]]",
"with",
"self",
".",
"_exported_instances_lock",
":",
"for",
"val",
"in",
"self",
".",
"_exported_services",
".",
"values",
"(",
")",
":",
"if",
"func",
"(",
"val",
")",
":",
"return",
"val",
"return",
"None"
] | Look for an export using the given lookup method
The lookup method must accept a single parameter, which is a tuple
containing a service instance and endpoint description.
:param func: A function to look for the expected export
:return: The found tuple or None | [
"Look",
"for",
"an",
"export",
"using",
"the",
"given",
"lookup",
"method"
] | python | train |
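A sketch of the kind of predicate `_find_export` expects; the `get_id()` accessor on the endpoint description is an assumption about the pelix RSA API:

```python
def _matching_export(self, endpoint_id):
    # Illustrative wrapper: val is a (service, EndpointDescription) tuple.
    return self._find_export(lambda val: val[1].get_id() == endpoint_id)
```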
jazzband/django-mongonaut | mongonaut/sites.py | https://github.com/jazzband/django-mongonaut/blob/5485b2e029dff8ae267a4cb39c92d0a72cb5b144/mongonaut/sites.py#L53-L55 | def has_delete_permission(self, request):
""" Can delete this object """
return request.user.is_authenticated and request.user.is_active and request.user.is_superuser | [
"def",
"has_delete_permission",
"(",
"self",
",",
"request",
")",
":",
"return",
"request",
".",
"user",
".",
"is_authenticated",
"and",
"request",
".",
"user",
".",
"is_active",
"and",
"request",
".",
"user",
".",
"is_superuser"
] | Can delete this object | [
"Can",
"delete",
"this",
"object"
] | python | valid |
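The default ties deletion to superusers; a subclass can loosen that policy, e.g. (class name and rule are illustrative):

```python
from mongonaut.sites import MongoAdmin

class ArticleAdmin(MongoAdmin):
    def has_delete_permission(self, request):
        # Allow any active staff member to delete (illustrative policy).
        return (request.user.is_authenticated and request.user.is_active
                and request.user.is_staff)
```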
pytroll/pyorbital | pyorbital/tlefile.py | https://github.com/pytroll/pyorbital/blob/647007934dc827a4c698629cf32a84a5167844b2/pyorbital/tlefile.py#L91-L97 | def read(platform, tle_file=None, line1=None, line2=None):
"""Read TLE for `platform` from `tle_file`
File is read from `line1` to `line2`, from the newest file provided in the
TLES pattern, or from internet if none is provided.
"""
return Tle(platform, tle_file=tle_file, line1=line1, line2=line2) | [
"def",
"read",
"(",
"platform",
",",
"tle_file",
"=",
"None",
",",
"line1",
"=",
"None",
",",
"line2",
"=",
"None",
")",
":",
"return",
"Tle",
"(",
"platform",
",",
"tle_file",
"=",
"tle_file",
",",
"line1",
"=",
"line1",
",",
"line2",
"=",
"line2",
")"
] | Read TLE for `platform` from `tle_file`
The TLE is read from `line1` and `line2` if given, from the newest file
matching the TLES pattern, or from the internet if none is provided. | [
"Read",
"TLE",
"for",
"platform",
"from",
"tle_file"
] | python | train |
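Usage is straightforward; the platform name must match what the TLE source uses, and the no-argument form needs internet access:

```python
from pyorbital.tlefile import read

tle = read('NOAA-20')                                    # fetch from the internet
tle = read('NOAA-20', tle_file='/path/to/weather.txt')   # or from a local file
print(tle.line1)
print(tle.line2)
```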