Dataset schema (field name, type, and observed value range per record):

repo              string, 7-54 chars
path              string, 4-192 chars
url               string, 87-284 chars
code              string, 78-104k chars
code_tokens       sequence of strings
docstring         string, 1-46.9k chars
docstring_tokens  sequence of strings
language          string class, 1 distinct value ("python")
partition         string class, 3 distinct values (e.g. "train", "valid")
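The records below pair a function's source with its token sequence and docstring. A minimal loading sketch, assuming the corpus is published as a Hugging Face dataset (the dataset path below is a hypothetical placeholder, not the actual name):

# Loading sketch; "user/code-docstring-corpus" is a placeholder path.
from datasets import load_dataset

ds = load_dataset("user/code-docstring-corpus", split="train")
for record in ds.select(range(3)):
    # Each record pairs a function's source with its docstring.
    print(record["repo"], record["path"])
    print(record["docstring"][:80])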
saltstack/salt
salt/modules/syslog_ng.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/syslog_ng.py#L458-L471
def _parse_typed_parameter_typed_value(values):
    '''
    Creates Arguments in a TypedParametervalue.
    '''
    type_, value = _expand_one_key_dictionary(values)
    _current_parameter_value.type = type_

    if _is_simple_type(value):
        arg = Argument(value)
        _current_parameter_value.add_argument(arg)
    elif isinstance(value, list):
        for idx in value:
            arg = Argument(idx)
            _current_parameter_value.add_argument(arg)
[ "def", "_parse_typed_parameter_typed_value", "(", "values", ")", ":", "type_", ",", "value", "=", "_expand_one_key_dictionary", "(", "values", ")", "_current_parameter_value", ".", "type", "=", "type_", "if", "_is_simple_type", "(", "value", ")", ":", "arg", "=", "Argument", "(", "value", ")", "_current_parameter_value", ".", "add_argument", "(", "arg", ")", "elif", "isinstance", "(", "value", ",", "list", ")", ":", "for", "idx", "in", "value", ":", "arg", "=", "Argument", "(", "idx", ")", "_current_parameter_value", ".", "add_argument", "(", "arg", ")" ]
Creates Arguments in a TypedParametervalue.
[ "Creates", "Arguments", "in", "a", "TypedParametervalue", "." ]
python
train
square/pylink
pylink/jlink.py
https://github.com/square/pylink/blob/81dda0a191d923a8b2627c52cb778aba24d279d7/pylink/jlink.py#L498-L517
def supported_device(self, index=0):
    """Gets the device at the given ``index``.

    Args:
      self (JLink): the ``JLink`` instance
      index (int): the index of the device whose information to get

    Returns:
      A ``JLinkDeviceInfo`` describing the requested device.

    Raises:
      ValueError: if index is less than 0 or >= supported device count.
    """
    if not util.is_natural(index) or index >= self.num_supported_devices():
        raise ValueError('Invalid index.')

    info = structs.JLinkDeviceInfo()

    result = self._dll.JLINKARM_DEVICE_GetInfo(index, ctypes.byref(info))

    return info
[ "def", "supported_device", "(", "self", ",", "index", "=", "0", ")", ":", "if", "not", "util", ".", "is_natural", "(", "index", ")", "or", "index", ">=", "self", ".", "num_supported_devices", "(", ")", ":", "raise", "ValueError", "(", "'Invalid index.'", ")", "info", "=", "structs", ".", "JLinkDeviceInfo", "(", ")", "result", "=", "self", ".", "_dll", ".", "JLINKARM_DEVICE_GetInfo", "(", "index", ",", "ctypes", ".", "byref", "(", "info", ")", ")", "return", "info" ]
Gets the device at the given ``index``. Args: self (JLink): the ``JLink`` instance index (int): the index of the device whose information to get Returns: A ``JLinkDeviceInfo`` describing the requested device. Raises: ValueError: if index is less than 0 or >= supported device count.
[ "Gets", "the", "device", "at", "the", "given", "index", "." ]
python
train
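A usage sketch for the supported_device record above; it assumes the SEGGER J-Link library is installed on the host so that pylink.JLink() can load it:

import pylink

jlink = pylink.JLink()  # requires the SEGGER J-Link shared library
num = jlink.num_supported_devices()
info = jlink.supported_device(0)  # raises ValueError for an invalid index
print(num, info)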
quantopian/serializable-traitlets
straitlets/traits.py
https://github.com/quantopian/serializable-traitlets/blob/dd334366d1130825aea55d3dfecd6756973594e0/straitlets/traits.py#L208-L217
def example_value(self):
    """
    If we're an instance of a Serializable, fall back to its
    `example_instance()` method.
    """
    from .serializable import Serializable
    inst = self._static_example_value()
    if inst is tr.Undefined and issubclass(self.klass, Serializable):
        return self.klass.example_instance()
    return inst
[ "def", "example_value", "(", "self", ")", ":", "from", ".", "serializable", "import", "Serializable", "inst", "=", "self", ".", "_static_example_value", "(", ")", "if", "inst", "is", "tr", ".", "Undefined", "and", "issubclass", "(", "self", ".", "klass", ",", "Serializable", ")", ":", "return", "self", ".", "klass", ".", "example_instance", "(", ")", "return", "inst" ]
If we're an instance of a Serializable, fall back to its `example_instance()` method.
[ "If", "we", "re", "an", "instance", "of", "a", "Serializable", "fall", "back", "to", "its", "example_instance", "()", "method", "." ]
python
train
tornadoweb/tornado
tornado/routing.py
https://github.com/tornadoweb/tornado/blob/b8b481770bcdb333a69afde5cce7eaa449128326/tornado/routing.py#L608-L640
def _find_groups(self) -> Tuple[Optional[str], Optional[int]]:
    """Returns a tuple (reverse string, group count) for a url.

    For example: Given the url pattern /([0-9]{4})/([a-z-]+)/, this method
    would return ('/%s/%s/', 2).
    """
    pattern = self.regex.pattern
    if pattern.startswith("^"):
        pattern = pattern[1:]
    if pattern.endswith("$"):
        pattern = pattern[:-1]

    if self.regex.groups != pattern.count("("):
        # The pattern is too complicated for our simplistic matching,
        # so we can't support reversing it.
        return None, None

    pieces = []
    for fragment in pattern.split("("):
        if ")" in fragment:
            paren_loc = fragment.index(")")
            if paren_loc >= 0:
                pieces.append("%s" + fragment[paren_loc + 1:])
        else:
            try:
                unescaped_fragment = re_unescape(fragment)
            except ValueError:
                # If we can't unescape part of it, we can't
                # reverse this url.
                return (None, None)
            pieces.append(unescaped_fragment)

    return "".join(pieces), self.regex.groups
[ "def", "_find_groups", "(", "self", ")", "->", "Tuple", "[", "Optional", "[", "str", "]", ",", "Optional", "[", "int", "]", "]", ":", "pattern", "=", "self", ".", "regex", ".", "pattern", "if", "pattern", ".", "startswith", "(", "\"^\"", ")", ":", "pattern", "=", "pattern", "[", "1", ":", "]", "if", "pattern", ".", "endswith", "(", "\"$\"", ")", ":", "pattern", "=", "pattern", "[", ":", "-", "1", "]", "if", "self", ".", "regex", ".", "groups", "!=", "pattern", ".", "count", "(", "\"(\"", ")", ":", "# The pattern is too complicated for our simplistic matching,", "# so we can't support reversing it.", "return", "None", ",", "None", "pieces", "=", "[", "]", "for", "fragment", "in", "pattern", ".", "split", "(", "\"(\"", ")", ":", "if", "\")\"", "in", "fragment", ":", "paren_loc", "=", "fragment", ".", "index", "(", "\")\"", ")", "if", "paren_loc", ">=", "0", ":", "pieces", ".", "append", "(", "\"%s\"", "+", "fragment", "[", "paren_loc", "+", "1", ":", "]", ")", "else", ":", "try", ":", "unescaped_fragment", "=", "re_unescape", "(", "fragment", ")", "except", "ValueError", ":", "# If we can't unescape part of it, we can't", "# reverse this url.", "return", "(", "None", ",", "None", ")", "pieces", ".", "append", "(", "unescaped_fragment", ")", "return", "\"\"", ".", "join", "(", "pieces", ")", ",", "self", ".", "regex", ".", "groups" ]
Returns a tuple (reverse string, group count) for a url. For example: Given the url pattern /([0-9]{4})/([a-z-]+)/, this method would return ('/%s/%s/', 2).
[ "Returns", "a", "tuple", "(", "reverse", "string", "group", "count", ")", "for", "a", "url", "." ]
python
train
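A standalone illustration of what _find_groups computes: the reverse string plus group count lets a concrete URL be rebuilt by substituting one value per capture group (the values here are made up):

# What _find_groups would return for /([0-9]{4})/([a-z-]+)/
reverse_string, group_count = '/%s/%s/', 2
args = ('2018', 'some-slug')
assert len(args) == group_count
print(reverse_string % args)  # -> /2018/some-slug/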
xenon-middleware/pyxenon
xenon/oop.py
https://github.com/xenon-middleware/pyxenon/blob/d61109ad339ee9bb9f0723471d532727b0f235ad/xenon/oop.py#L118-L127
def request_type(self):
    """Retrieve the type of the request, by fetching it from
    `xenon.proto.xenon_pb2`."""
    if self.static and not self.uses_request:
        return getattr(xenon_pb2, 'Empty')

    if not self.uses_request:
        return None

    return getattr(xenon_pb2, self.request_name)
[ "def", "request_type", "(", "self", ")", ":", "if", "self", ".", "static", "and", "not", "self", ".", "uses_request", ":", "return", "getattr", "(", "xenon_pb2", ",", "'Empty'", ")", "if", "not", "self", ".", "uses_request", ":", "return", "None", "return", "getattr", "(", "xenon_pb2", ",", "self", ".", "request_name", ")" ]
Retrieve the type of the request, by fetching it from `xenon.proto.xenon_pb2`.
[ "Retrieve", "the", "type", "of", "the", "request", "by", "fetching", "it", "from", "xenon", ".", "proto", ".", "xenon_pb2", "." ]
python
train
JamesRamm/longclaw
longclaw/orders/wagtail_hooks.py
https://github.com/JamesRamm/longclaw/blob/8bbf2e6d703271b815ec111813c7c5d1d4e4e810/longclaw/orders/wagtail_hooks.py#L111-L122
def get_admin_urls_for_registration(self):
    """
    Utilised by Wagtail's 'register_admin_urls' hook to register urls for
    our the views that class offers.
    """
    urls = super(OrderModelAdmin, self).get_admin_urls_for_registration()
    urls = urls + (
        url(self.url_helper.get_action_url_pattern('detail'),
            self.detail_view,
            name=self.url_helper.get_action_url_name('detail')),
    )
    return urls
[ "def", "get_admin_urls_for_registration", "(", "self", ")", ":", "urls", "=", "super", "(", "OrderModelAdmin", ",", "self", ")", ".", "get_admin_urls_for_registration", "(", ")", "urls", "=", "urls", "+", "(", "url", "(", "self", ".", "url_helper", ".", "get_action_url_pattern", "(", "'detail'", ")", ",", "self", ".", "detail_view", ",", "name", "=", "self", ".", "url_helper", ".", "get_action_url_name", "(", "'detail'", ")", ")", ",", ")", "return", "urls" ]
Utilised by Wagtail's 'register_admin_urls' hook to register urls for our the views that class offers.
[ "Utilised", "by", "Wagtail", "s", "register_admin_urls", "hook", "to", "register", "urls", "for", "our", "the", "views", "that", "class", "offers", "." ]
python
train
philgyford/django-spectator
spectator/events/templatetags/spectator_events.py
https://github.com/philgyford/django-spectator/blob/f3c72004f9caa1fde0f5a3b2f0d2bf285fc01ada/spectator/events/templatetags/spectator_events.py#L40-L58
def annual_event_counts_card(kind='all', current_year=None):
    """
    Displays years and the number of events per year.

    kind is an Event kind (like 'cinema', 'gig', etc.) or 'all' (default).
    current_year is an optional date object representing the year we're
    already showing information about.
    """
    if kind == 'all':
        card_title = 'Events per year'
    else:
        card_title = '{} per year'.format(Event.get_kind_name_plural(kind))

    return {
        'card_title': card_title,
        'kind': kind,
        'years': annual_event_counts(kind=kind),
        'current_year': current_year
    }
[ "def", "annual_event_counts_card", "(", "kind", "=", "'all'", ",", "current_year", "=", "None", ")", ":", "if", "kind", "==", "'all'", ":", "card_title", "=", "'Events per year'", "else", ":", "card_title", "=", "'{} per year'", ".", "format", "(", "Event", ".", "get_kind_name_plural", "(", "kind", ")", ")", "return", "{", "'card_title'", ":", "card_title", ",", "'kind'", ":", "kind", ",", "'years'", ":", "annual_event_counts", "(", "kind", "=", "kind", ")", ",", "'current_year'", ":", "current_year", "}" ]
Displays years and the number of events per year. kind is an Event kind (like 'cinema', 'gig', etc.) or 'all' (default). current_year is an optional date object representing the year we're already showing information about.
[ "Displays", "years", "and", "the", "number", "of", "events", "per", "year", "." ]
python
train
sdispater/pendulum
pendulum/__init__.py
https://github.com/sdispater/pendulum/blob/94d28b0d3cb524ae02361bd1ed7ea03e2e655e4e/pendulum/__init__.py#L308-L314
def period(
    start,  # type: DateTime
    end,  # type: DateTime
    absolute=False,  # type: bool
):  # type: (...) -> Period
    """
    Create a Period instance.
    """
    return Period(start, end, absolute=absolute)
[ "def", "period", "(", "start", ",", "end", ",", "absolute", "=", "False", "# type: DateTime # type: DateTime # type: bool", ")", ":", "# type: (...) -> Period", "return", "Period", "(", "start", ",", "end", ",", "absolute", "=", "absolute", ")" ]
Create a Period instance.
[ "Create", "a", "Period", "instance", "." ]
python
train
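A usage sketch for pendulum.period as it existed at the commit above (later pendulum releases renamed the Period helpers):

import pendulum

start = pendulum.datetime(2019, 1, 1)
end = pendulum.datetime(2019, 3, 1)
p = pendulum.period(start, end)
print(p.in_days())  # number of days between the two instants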
xiongchiamiov/pyfixit
pyfixit/step.py
https://github.com/xiongchiamiov/pyfixit/blob/808a0c852a26e4211b2e3a72da972ab34a586dc4/pyfixit/step.py#L38-L51
def refresh(self):
    '''Refetch instance data from the API.
    '''
    # There's no GET endpoint for steps, so get the parent guide and loop
    # through its steps until we find the right one.
    response = requests.get('%s/guides/%s' % (API_BASE_URL, self.guideid))
    attributes = response.json()
    for step in attributes['steps']:
        if step['stepid'] == self.stepid:
            self._update(step)
            return
    raise Exception('Step with id %s not found in guide %s.' \
                    % (self.stepid, self.guideid))
[ "def", "refresh", "(", "self", ")", ":", "# There's no GET endpoint for steps, so get the parent guide and loop", "# through its steps until we find the right one.", "response", "=", "requests", ".", "get", "(", "'%s/guides/%s'", "%", "(", "API_BASE_URL", ",", "self", ".", "guideid", ")", ")", "attributes", "=", "response", ".", "json", "(", ")", "for", "step", "in", "attributes", "[", "'steps'", "]", ":", "if", "step", "[", "'stepid'", "]", "==", "self", ".", "stepid", ":", "self", ".", "_update", "(", "step", ")", "return", "raise", "Exception", "(", "'Step with id %s not found in guide %s.'", "%", "(", "self", ".", "stepid", ",", "self", ".", "guideid", ")", ")" ]
Refetch instance data from the API.
[ "Refetch", "instance", "data", "from", "the", "API", "." ]
python
train
bitesofcode/projexui
projexui/widgets/xcalendarwidget/xcalendarscene.py
https://github.com/bitesofcode/projexui/blob/f18a73bec84df90b034ca69b9deea118dbedfc4d/projexui/widgets/xcalendarwidget/xcalendarscene.py#L70-L81
def addItem( self, item ):
    """
    Adds the item to the scene and redraws the item.

    :param      item | <QGraphicsItem>
    """
    result = super(XCalendarScene, self).addItem(item)

    if ( isinstance(item, XCalendarItem) ):
        item.rebuild()

    return result
[ "def", "addItem", "(", "self", ",", "item", ")", ":", "result", "=", "super", "(", "XCalendarScene", ",", "self", ")", ".", "addItem", "(", "item", ")", "if", "(", "isinstance", "(", "item", ",", "XCalendarItem", ")", ")", ":", "item", ".", "rebuild", "(", ")", "return", "result" ]
Adds the item to the scene and redraws the item. :param item | <QGraphicsItem>
[ "Adds", "the", "item", "to", "the", "scene", "and", "redraws", "the", "item", ".", ":", "param", "item", "|", "<QGraphicsItem", ">" ]
python
train
p3trus/slave
slave/protocol.py
https://github.com/p3trus/slave/blob/bdc74e73bd0f47b74a090c43aa2283c469cde3be/slave/protocol.py#L177-L185
def clear(self, transport):
    """Issues a device clear command."""
    logger.debug('IEC60488 clear')
    with transport:
        try:
            transport.clear()
        except AttributeError:
            clear_msg = self.create_message('*CLS')
            transport.write(clear_msg)
[ "def", "clear", "(", "self", ",", "transport", ")", ":", "logger", ".", "debug", "(", "'IEC60488 clear'", ")", "with", "transport", ":", "try", ":", "transport", ".", "clear", "(", ")", "except", "AttributeError", ":", "clear_msg", "=", "self", ".", "create_message", "(", "'*CLS'", ")", "transport", ".", "write", "(", "clear_msg", ")" ]
Issues a device clear command.
[ "Issues", "a", "device", "clear", "command", "." ]
python
train
wbond/asn1crypto
asn1crypto/core.py
https://github.com/wbond/asn1crypto/blob/ecda20176f55d37021cbca1f6da9083a8e491197/asn1crypto/core.py#L2510-L2523
def native(self):
    """
    The native Python datatype representation of this value

    :return:
        An integer or None
    """
    if self.contents is None:
        return None

    if self._native is None:
        self._native = int_from_bytes(self._merge_chunks())
    return self._native
[ "def", "native", "(", "self", ")", ":", "if", "self", ".", "contents", "is", "None", ":", "return", "None", "if", "self", ".", "_native", "is", "None", ":", "self", ".", "_native", "=", "int_from_bytes", "(", "self", ".", "_merge_chunks", "(", ")", ")", "return", "self", ".", "_native" ]
The native Python datatype representation of this value :return: An integer or None
[ "The", "native", "Python", "datatype", "representation", "of", "this", "value" ]
python
train
eventifyio/eventify
eventify/drivers/base.py
https://github.com/eventifyio/eventify/blob/0e519964a56bd07a879b266f21f177749c63aaed/eventify/drivers/base.py#L20-L66
async def onConnect(self):
    """
    Configure the component
    """
    # Add extra attribute
    # This allows for following crossbar/autobahn spec
    # without changing legacy configuration
    if not hasattr(self.config, 'extra'):
        original_config = {'config': self.config}
        self.config = objdict(self.config)
        setattr(self.config, 'extra', original_config)
        self.config.extra['handlers'] = self.handlers

    # setup transport host
    self.transport_host = self.config.extra['config']['transport_host']

    # subscription setup
    self.subscribe_options = SubscribeOptions(**self.config.extra['config']['sub_options'])
    self.replay_events = self.config.extra['config']['replay_events']

    # publishing setup
    self.publish_topic = self.config.extra['config']['publish_topic']['topic']
    self.publish_options = PublishOptions(**self.config.extra['config']['pub_options'])

    # setup callback
    self.handlers = self.config.extra['handlers']

    # optional subscribed topics from config.json
    self.subscribed_topics = self.config.extra['config']['subscribed_topics']

    # put name on session
    self.name = self.config.extra['config']['name']

    # setup db pool - optionally
    if self.config.extra['config']['pub_options']['retain'] is True:
        self.pool = await asyncpg.create_pool(
            user=EVENT_DB_USER,
            password=EVENT_DB_PASS,
            host=EVENT_DB_HOST,
            database=EVENT_DB_NAME
        )

    # Handle non crossbar drivers
    try:
        self.join(self.config.realm)
    except AttributeError:
        pass
[ "async", "def", "onConnect", "(", "self", ")", ":", "# Add extra attribute", "# This allows for following crossbar/autobahn spec", "# without changing legacy configuration", "if", "not", "hasattr", "(", "self", ".", "config", ",", "'extra'", ")", ":", "original_config", "=", "{", "'config'", ":", "self", ".", "config", "}", "self", ".", "config", "=", "objdict", "(", "self", ".", "config", ")", "setattr", "(", "self", ".", "config", ",", "'extra'", ",", "original_config", ")", "self", ".", "config", ".", "extra", "[", "'handlers'", "]", "=", "self", ".", "handlers", "# setup transport host", "self", ".", "transport_host", "=", "self", ".", "config", ".", "extra", "[", "'config'", "]", "[", "'transport_host'", "]", "# subscription setup", "self", ".", "subscribe_options", "=", "SubscribeOptions", "(", "*", "*", "self", ".", "config", ".", "extra", "[", "'config'", "]", "[", "'sub_options'", "]", ")", "self", ".", "replay_events", "=", "self", ".", "config", ".", "extra", "[", "'config'", "]", "[", "'replay_events'", "]", "# publishing setup", "self", ".", "publish_topic", "=", "self", ".", "config", ".", "extra", "[", "'config'", "]", "[", "'publish_topic'", "]", "[", "'topic'", "]", "self", ".", "publish_options", "=", "PublishOptions", "(", "*", "*", "self", ".", "config", ".", "extra", "[", "'config'", "]", "[", "'pub_options'", "]", ")", "# setup callback", "self", ".", "handlers", "=", "self", ".", "config", ".", "extra", "[", "'handlers'", "]", "# optional subscribed topics from config.json", "self", ".", "subscribed_topics", "=", "self", ".", "config", ".", "extra", "[", "'config'", "]", "[", "'subscribed_topics'", "]", "# put name on session", "self", ".", "name", "=", "self", ".", "config", ".", "extra", "[", "'config'", "]", "[", "'name'", "]", "# setup db pool - optionally", "if", "self", ".", "config", ".", "extra", "[", "'config'", "]", "[", "'pub_options'", "]", "[", "'retain'", "]", "is", "True", ":", "self", ".", "pool", "=", "await", "asyncpg", ".", "create_pool", "(", "user", "=", "EVENT_DB_USER", ",", "password", "=", "EVENT_DB_PASS", ",", "host", "=", "EVENT_DB_HOST", ",", "database", "=", "EVENT_DB_NAME", ")", "# Handle non crossbar drivers", "try", ":", "self", ".", "join", "(", "self", ".", "config", ".", "realm", ")", "except", "AttributeError", ":", "pass" ]
Configure the component
[ "Configure", "the", "component" ]
python
train
wummel/patool
patoolib/programs/py_lzma.py
https://github.com/wummel/patool/blob/d7e64d9fd60faaa4b3f824bd97c43ce59b185c40/patoolib/programs/py_lzma.py#L95-L97
def create_xz(archive, compression, cmd, verbosity, interactive, filenames):
    """Create an XZ archive with the lzma Python module."""
    return _create(archive, compression, cmd, 'xz', verbosity, filenames)
[ "def", "create_xz", "(", "archive", ",", "compression", ",", "cmd", ",", "verbosity", ",", "interactive", ",", "filenames", ")", ":", "return", "_create", "(", "archive", ",", "compression", ",", "cmd", ",", "'xz'", ",", "verbosity", ",", "filenames", ")" ]
Create an XZ archive with the lzma Python module.
[ "Create", "an", "XZ", "archive", "with", "the", "lzma", "Python", "module", "." ]
python
train
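create_xz is an internal helper; a sketch of patool's public entry point that dispatches to helpers like it, assuming a file data.txt exists in the working directory:

import patoolib

patoolib.create_archive("example.tar.xz", ("data.txt",))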
ricequant/rqalpha
rqalpha/model/portfolio.py
https://github.com/ricequant/rqalpha/blob/ac40a62d4e7eca9494b4d0a14f46facf5616820c/rqalpha/model/portfolio.py#L190-L194
def cash(self):
    """
    [float] 可用资金
    """
    return sum(account.cash for account in six.itervalues(self._accounts))
[ "def", "cash", "(", "self", ")", ":", "return", "sum", "(", "account", ".", "cash", "for", "account", "in", "six", ".", "itervalues", "(", "self", ".", "_accounts", ")", ")" ]
[float] 可用资金 (available cash)
[ "[", "float", "]", "可用资金" ]
python
train
ladybug-tools/ladybug
ladybug/rootfind.py
https://github.com/ladybug-tools/ladybug/blob/c08b7308077a48d5612f644943f92d5b5dade583/ladybug/rootfind.py#L6-L53
def secant(a, b, fn, epsilon):
    """
    One of the fasest root-finding algorithms.

    The method calculates the slope of the function fn and this enables it
    to converge to a solution very fast. However, if started too far away
    from a root, the method may not converge (returning a None). For this
    reason, it is recommended that this function be used first in any
    guess-and-check workflow and, if it fails to find a root, the bisect()
    method should be used.

    Args:
        a: The lowest possible boundary of the value you are tying to find.
        b: The highest possible boundary of the value you are tying to find.
        fn: A function representing the relationship between the value you are
            trying to find and the target condition you are trying to satisfy.
            It should typically be structured in the following way:
            `def fn(value_trying_to_find):
                funct(value_trying_to_find) - target_desired_from_funct`
            ...but the subtraction should be swtiched if value_trying_to_find
            has a negative relationship with the funct.
        epsilon: The acceptable error in the target_desired_from_funct.

    Returns:
        root: The value that gives the target_desired_from_funct.

    References
    ----------
    [1] Wikipedia contributors. (2018, December 29). Root-finding algorithm.
    In Wikipedia, The Free Encyclopedia. Retrieved 18:16, December 30, 2018,
    from https://en.wikipedia.org/wiki/Root-finding_algorithm#Secant_method
    """
    f1 = fn(a)
    if abs(f1) <= epsilon:
        return a
    f2 = fn(b)
    if abs(f2) <= epsilon:
        return b
    for i in range(100):
        slope = (f2 - f1) / (b - a)
        c = b - f2 / slope
        f3 = fn(c)
        if abs(f3) < epsilon:
            return c
        a = b
        b = c
        f1 = f2
        f2 = f3
    return None
[ "def", "secant", "(", "a", ",", "b", ",", "fn", ",", "epsilon", ")", ":", "f1", "=", "fn", "(", "a", ")", "if", "abs", "(", "f1", ")", "<=", "epsilon", ":", "return", "a", "f2", "=", "fn", "(", "b", ")", "if", "abs", "(", "f2", ")", "<=", "epsilon", ":", "return", "b", "for", "i", "in", "range", "(", "100", ")", ":", "slope", "=", "(", "f2", "-", "f1", ")", "/", "(", "b", "-", "a", ")", "c", "=", "b", "-", "f2", "/", "slope", "f3", "=", "fn", "(", "c", ")", "if", "abs", "(", "f3", ")", "<", "epsilon", ":", "return", "c", "a", "=", "b", "b", "=", "c", "f1", "=", "f2", "f2", "=", "f3", "return", "None" ]
One of the fasest root-finding algorithms. The method calculates the slope of the function fn and this enables it to converge to a solution very fast. However, if started too far away from a root, the method may not converge (returning a None). For this reason, it is recommended that this function be used first in any guess-and-check workflow and, if it fails to find a root, the bisect() method should be used. Args: a: The lowest possible boundary of the value you are tying to find. b: The highest possible boundary of the value you are tying to find. fn: A function representing the relationship between the value you are trying to find and the target condition you are trying to satisfy. It should typically be structured in the following way: `def fn(value_trying_to_find): funct(value_trying_to_find) - target_desired_from_funct` ...but the subtraction should be swtiched if value_trying_to_find has a negative relationship with the funct. epsilon: The acceptable error in the target_desired_from_funct. Returns: root: The value that gives the target_desired_from_funct. References ---------- [1] Wikipedia contributors. (2018, December 29). Root-finding algorithm. In Wikipedia, The Free Encyclopedia. Retrieved 18:16, December 30, 2018, from https://en.wikipedia.org/wiki/Root-finding_algorithm#Secant_method
[ "One", "of", "the", "fasest", "root", "-", "finding", "algorithms", ".", "The", "method", "calculates", "the", "slope", "of", "the", "function", "fn", "and", "this", "enables", "it", "to", "converge", "to", "a", "solution", "very", "fast", ".", "However", "if", "started", "too", "far", "away", "from", "a", "root", "the", "method", "may", "not", "converge", "(", "returning", "a", "None", ")", ".", "For", "this", "reason", "it", "is", "recommended", "that", "this", "function", "be", "used", "first", "in", "any", "guess", "-", "and", "-", "check", "workflow", "and", "if", "it", "fails", "to", "find", "a", "root", "the", "bisect", "()", "method", "should", "be", "used", "." ]
python
train
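A worked example for the secant record above, assuming the function is in scope: finding sqrt(2) as the root of f(x) = x**2 - 2.

def f(x):
    return x ** 2 - 2

root = secant(1.0, 2.0, f, epsilon=1e-9)
print(root)  # ~1.41421356, or None if the iteration failed to converge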
gem/oq-engine
openquake/hazardlib/gsim/cauzzi_2014.py
https://github.com/gem/oq-engine/blob/8294553a0b8aba33fd96437a35065d03547d0040/openquake/hazardlib/gsim/cauzzi_2014.py#L89-L103
def get_mean_and_stddevs(self, sites, rup, dists, imt, stddev_types):
    """
    See :meth:`superclass method
    <.base.GroundShakingIntensityModel.get_mean_and_stddevs>`
    for spec of input and result values.
    """
    # extract dictionaries of coefficients specific to required
    # intensity measure type
    C = self.COEFFS[imt]

    mean = self._compute_mean(C, rup, dists, sites, imt)
    stddevs = self._get_stddevs(C, stddev_types, sites.vs30.shape[0])

    return mean, stddevs
[ "def", "get_mean_and_stddevs", "(", "self", ",", "sites", ",", "rup", ",", "dists", ",", "imt", ",", "stddev_types", ")", ":", "# extract dictionaries of coefficients specific to required", "# intensity measure type", "C", "=", "self", ".", "COEFFS", "[", "imt", "]", "mean", "=", "self", ".", "_compute_mean", "(", "C", ",", "rup", ",", "dists", ",", "sites", ",", "imt", ")", "stddevs", "=", "self", ".", "_get_stddevs", "(", "C", ",", "stddev_types", ",", "sites", ".", "vs30", ".", "shape", "[", "0", "]", ")", "return", "mean", ",", "stddevs" ]
See :meth:`superclass method <.base.GroundShakingIntensityModel.get_mean_and_stddevs>` for spec of input and result values.
[ "See", ":", "meth", ":", "superclass", "method", "<", ".", "base", ".", "GroundShakingIntensityModel", ".", "get_mean_and_stddevs", ">", "for", "spec", "of", "input", "and", "result", "values", "." ]
python
train
CI-WATER/gsshapy
gsshapy/orm/prj.py
https://github.com/CI-WATER/gsshapy/blob/00fd4af0fd65f1614d75a52fe950a04fb0867f4c/gsshapy/orm/prj.py#L464-L509
def readOutput(self, directory, projectFileName, session, spatial=False, spatialReferenceID=None):
    """
    Read only output files for a GSSHA project to the database.

    Use this method to read a project when only post-processing tasks need to be performed.

    Args:
        directory (str): Directory containing all GSSHA model files. This method assumes that all files are located
            in the same directory.
        projectFileName (str): Name of the project file for the GSSHA model which will be read (e.g.: 'example.prj').
        session (:mod:`sqlalchemy.orm.session.Session`): SQLAlchemy session object bound to PostGIS enabled database
        spatial (bool, optional): If True, spatially enabled objects will be read in as PostGIS spatial objects.
            Defaults to False.
        spatialReferenceID (int, optional): Integer id of spatial reference system for the model. If no id is
            provided GsshaPy will attempt to automatically lookup the spatial reference ID. If this process fails,
            default srid will be used (4326 for WGS 84).
    """
    self.project_directory = directory

    with tmp_chdir(directory):
        # Add project file to session
        session.add(self)

        # Read Project File
        self.read(directory, projectFileName, session, spatial, spatialReferenceID)

        # Get the batch directory for output
        batchDirectory = self._getBatchDirectory(directory)

        # Read Mask (dependency of some output files)
        maskMap = WatershedMaskFile()
        maskMapFilename = self.getCard('WATERSHED_MASK').value.strip('"')
        maskMap.read(session=session, directory=directory, filename=maskMapFilename, spatial=spatial)
        maskMap.projectFile = self

        # Automatically derive the spatial reference system, if possible
        if spatialReferenceID is None:
            spatialReferenceID = self._automaticallyDeriveSpatialReferenceId(directory)

        # Read Output Files
        self._readXput(self.OUTPUT_FILES, batchDirectory, session, spatial=spatial,
                       spatialReferenceID=spatialReferenceID)

        # Read WMS Dataset Files
        self._readWMSDatasets(self.WMS_DATASETS, batchDirectory, session, spatial=spatial,
                              spatialReferenceID=spatialReferenceID)

        # Commit to database
        self._commit(session, self.COMMIT_ERROR_MESSAGE)
[ "def", "readOutput", "(", "self", ",", "directory", ",", "projectFileName", ",", "session", ",", "spatial", "=", "False", ",", "spatialReferenceID", "=", "None", ")", ":", "self", ".", "project_directory", "=", "directory", "with", "tmp_chdir", "(", "directory", ")", ":", "# Add project file to session", "session", ".", "add", "(", "self", ")", "# Read Project File", "self", ".", "read", "(", "directory", ",", "projectFileName", ",", "session", ",", "spatial", ",", "spatialReferenceID", ")", "# Get the batch directory for output", "batchDirectory", "=", "self", ".", "_getBatchDirectory", "(", "directory", ")", "# Read Mask (dependency of some output files)", "maskMap", "=", "WatershedMaskFile", "(", ")", "maskMapFilename", "=", "self", ".", "getCard", "(", "'WATERSHED_MASK'", ")", ".", "value", ".", "strip", "(", "'\"'", ")", "maskMap", ".", "read", "(", "session", "=", "session", ",", "directory", "=", "directory", ",", "filename", "=", "maskMapFilename", ",", "spatial", "=", "spatial", ")", "maskMap", ".", "projectFile", "=", "self", "# Automatically derive the spatial reference system, if possible", "if", "spatialReferenceID", "is", "None", ":", "spatialReferenceID", "=", "self", ".", "_automaticallyDeriveSpatialReferenceId", "(", "directory", ")", "# Read Output Files", "self", ".", "_readXput", "(", "self", ".", "OUTPUT_FILES", ",", "batchDirectory", ",", "session", ",", "spatial", "=", "spatial", ",", "spatialReferenceID", "=", "spatialReferenceID", ")", "# Read WMS Dataset Files", "self", ".", "_readWMSDatasets", "(", "self", ".", "WMS_DATASETS", ",", "batchDirectory", ",", "session", ",", "spatial", "=", "spatial", ",", "spatialReferenceID", "=", "spatialReferenceID", ")", "# Commit to database", "self", ".", "_commit", "(", "session", ",", "self", ".", "COMMIT_ERROR_MESSAGE", ")" ]
Read only output files for a GSSHA project to the database. Use this method to read a project when only post-processing tasks need to be performed. Args: directory (str): Directory containing all GSSHA model files. This method assumes that all files are located in the same directory. projectFileName (str): Name of the project file for the GSSHA model which will be read (e.g.: 'example.prj'). session (:mod:`sqlalchemy.orm.session.Session`): SQLAlchemy session object bound to PostGIS enabled database spatial (bool, optional): If True, spatially enabled objects will be read in as PostGIS spatial objects. Defaults to False. spatialReferenceID (int, optional): Integer id of spatial reference system for the model. If no id is provided GsshaPy will attempt to automatically lookup the spatial reference ID. If this process fails, default srid will be used (4326 for WGS 84).
[ "Read", "only", "output", "files", "for", "a", "GSSHA", "project", "to", "the", "database", "." ]
python
train
tensorlayer/tensorlayer
tensorlayer/layers/core.py
https://github.com/tensorlayer/tensorlayer/blob/aa9e52e36c7058a7e6fd81d36563ca6850b21956/tensorlayer/layers/core.py#L258-L286
def _get_init_args(self, skip=4):
    """Get all arguments of current layer for saving the graph."""
    stack = inspect.stack()

    if len(stack) < skip + 1:
        raise ValueError("The length of the inspection stack is shorter than the requested start position.")

    args, _, _, values = inspect.getargvalues(stack[skip][0])

    params = {}

    for arg in args:
        # some args dont need to be saved into the graph. e.g. the input placeholder
        if values[arg] is not None and arg not in ['self', 'prev_layer', 'inputs']:
            val = values[arg]

            # change function (e.g. act) into dictionary of module path and function name
            if inspect.isfunction(val):
                params[arg] = {"module_path": val.__module__, "func_name": val.__name__}
            # ignore more args e.g. TF class
            elif arg.endswith('init'):
                continue
            # for other data type, save them directly
            else:
                params[arg] = val

    return params
[ "def", "_get_init_args", "(", "self", ",", "skip", "=", "4", ")", ":", "stack", "=", "inspect", ".", "stack", "(", ")", "if", "len", "(", "stack", ")", "<", "skip", "+", "1", ":", "raise", "ValueError", "(", "\"The length of the inspection stack is shorter than the requested start position.\"", ")", "args", ",", "_", ",", "_", ",", "values", "=", "inspect", ".", "getargvalues", "(", "stack", "[", "skip", "]", "[", "0", "]", ")", "params", "=", "{", "}", "for", "arg", "in", "args", ":", "# some args dont need to be saved into the graph. e.g. the input placeholder", "if", "values", "[", "arg", "]", "is", "not", "None", "and", "arg", "not", "in", "[", "'self'", ",", "'prev_layer'", ",", "'inputs'", "]", ":", "val", "=", "values", "[", "arg", "]", "# change function (e.g. act) into dictionary of module path and function name", "if", "inspect", ".", "isfunction", "(", "val", ")", ":", "params", "[", "arg", "]", "=", "{", "\"module_path\"", ":", "val", ".", "__module__", ",", "\"func_name\"", ":", "val", ".", "__name__", "}", "# ignore more args e.g. TF class", "elif", "arg", ".", "endswith", "(", "'init'", ")", ":", "continue", "# for other data type, save them directly", "else", ":", "params", "[", "arg", "]", "=", "val", "return", "params" ]
Get all arguments of current layer for saving the graph.
[ "Get", "all", "arguments", "of", "current", "layer", "for", "saving", "the", "graph", "." ]
python
valid
googleapis/google-cloud-python
bigquery/google/cloud/bigquery/schema.py
https://github.com/googleapis/google-cloud-python/blob/85e80125a59cb10f8cb105f25ecc099e4b940b50/bigquery/google/cloud/bigquery/schema.py#L132-L147
def _key(self):
    """A tuple key that uniquely describes this field.

    Used to compute this instance's hashcode and evaluate equality.

    Returns:
        tuple: The contents of this
               :class:`~google.cloud.bigquery.schema.SchemaField`.
    """
    return (
        self._name,
        self._field_type.upper(),
        self._mode.upper(),
        self._description,
        self._fields,
    )
[ "def", "_key", "(", "self", ")", ":", "return", "(", "self", ".", "_name", ",", "self", ".", "_field_type", ".", "upper", "(", ")", ",", "self", ".", "_mode", ".", "upper", "(", ")", ",", "self", ".", "_description", ",", "self", ".", "_fields", ",", ")" ]
A tuple key that uniquely describes this field. Used to compute this instance's hashcode and evaluate equality. Returns: tuple: The contents of this :class:`~google.cloud.bigquery.schema.SchemaField`.
[ "A", "tuple", "key", "that", "uniquely", "describes", "this", "field", "." ]
python
train
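_key backs SchemaField equality and hashing, so equivalently defined fields compare equal and deduplicate in sets; a sketch against the public class:

from google.cloud.bigquery import SchemaField

a = SchemaField("age", "INTEGER", mode="NULLABLE")
b = SchemaField("age", "integer", mode="nullable")  # _key upper-cases type and mode
print(a == b)       # True
print(len({a, b}))  # 1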
wummel/linkchecker
linkcheck/logconf.py
https://github.com/wummel/linkchecker/blob/c2ce810c3fb00b895a841a7be6b2e78c64e7b042/linkcheck/logconf.py#L67-L72
def add_loghandler (handler):
    """Add log handler to root logger and LOG_ROOT and set formatting."""
    format = "%(levelname)s %(name)s %(asctime)s %(threadName)s %(message)s"
    handler.setFormatter(logging.Formatter(format))
    logging.getLogger(LOG_ROOT).addHandler(handler)
    logging.getLogger().addHandler(handler)
[ "def", "add_loghandler", "(", "handler", ")", ":", "format", "=", "\"%(levelname)s %(name)s %(asctime)s %(threadName)s %(message)s\"", "handler", ".", "setFormatter", "(", "logging", ".", "Formatter", "(", "format", ")", ")", "logging", ".", "getLogger", "(", "LOG_ROOT", ")", ".", "addHandler", "(", "handler", ")", "logging", ".", "getLogger", "(", ")", ".", "addHandler", "(", "handler", ")" ]
Add log handler to root logger and LOG_ROOT and set formatting.
[ "Add", "log", "handler", "to", "root", "logger", "and", "LOG_ROOT", "and", "set", "formatting", "." ]
python
train
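A usage sketch for add_loghandler; the logger name 'linkcheck' below is an assumption, since the value of the LOG_ROOT constant is not shown in this record:

import logging
from linkcheck import logconf

# Route linkchecker's log output to stderr with the standard format.
handler = logging.StreamHandler()
logconf.add_loghandler(handler)
logging.getLogger("linkcheck").info("handler attached")  # logger name assumed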
modlinltd/django-advanced-filters
advanced_filters/q_serializer.py
https://github.com/modlinltd/django-advanced-filters/blob/ba51e6946d1652796a82b2b95cceffbe1190a227/advanced_filters/q_serializer.py#L87-L117
def get_field_values_list(self, d):
    """
    Iterate over a (possibly nested) dict, and return a list of all
    children queries, as a dict of the following structure:

    {
        'field': 'some_field__iexact',
        'value': 'some_value',
        'value_from': 'optional_range_val1',
        'value_to': 'optional_range_val2',
        'negate': True,
    }

    OR relations are expressed as an extra "line" between queries.
    """
    fields = []
    children = d.get('children', [])
    for child in children:
        if isinstance(child, dict):
            fields.extend(self.get_field_values_list(child))
        else:
            f = {'field': child[0], 'value': child[1]}
            if self._is_range(child):
                f['value_from'] = child[1][0]
                f['value_to'] = child[1][1]
            f['negate'] = d.get('negated', False)
            fields.append(f)
        # add _OR line
        if d['connector'] == 'OR' and children[-1] != child:
            fields.append({'field': '_OR', 'value': 'null'})
    return fields
[ "def", "get_field_values_list", "(", "self", ",", "d", ")", ":", "fields", "=", "[", "]", "children", "=", "d", ".", "get", "(", "'children'", ",", "[", "]", ")", "for", "child", "in", "children", ":", "if", "isinstance", "(", "child", ",", "dict", ")", ":", "fields", ".", "extend", "(", "self", ".", "get_field_values_list", "(", "child", ")", ")", "else", ":", "f", "=", "{", "'field'", ":", "child", "[", "0", "]", ",", "'value'", ":", "child", "[", "1", "]", "}", "if", "self", ".", "_is_range", "(", "child", ")", ":", "f", "[", "'value_from'", "]", "=", "child", "[", "1", "]", "[", "0", "]", "f", "[", "'value_to'", "]", "=", "child", "[", "1", "]", "[", "1", "]", "f", "[", "'negate'", "]", "=", "d", ".", "get", "(", "'negated'", ",", "False", ")", "fields", ".", "append", "(", "f", ")", "# add _OR line", "if", "d", "[", "'connector'", "]", "==", "'OR'", "and", "children", "[", "-", "1", "]", "!=", "child", ":", "fields", ".", "append", "(", "{", "'field'", ":", "'_OR'", ",", "'value'", ":", "'null'", "}", ")", "return", "fields" ]
Iterate over a (possibly nested) dict, and return a list of all children queries, as a dict of the following structure: { 'field': 'some_field__iexact', 'value': 'some_value', 'value_from': 'optional_range_val1', 'value_to': 'optional_range_val2', 'negate': True, } OR relations are expressed as an extra "line" between queries.
[ "Iterate", "over", "a", "(", "possibly", "nested", ")", "dict", "and", "return", "a", "list", "of", "all", "children", "queries", "as", "a", "dict", "of", "the", "following", "structure", ":", "{", "field", ":", "some_field__iexact", "value", ":", "some_value", "value_from", ":", "optional_range_val1", "value_to", ":", "optional_range_val2", "negate", ":", "True", "}" ]
python
train
rwl/pylon
pyreto/util.py
https://github.com/rwl/pylon/blob/916514255db1ae1661406f0283df756baf960d14/pyreto/util.py#L129-L147
def plotGenCost(generators):
    """ Plots the costs of the given generators.
    """
    figure()
    plots = []
    for generator in generators:
        if generator.pcost_model == PW_LINEAR:
            x = [x for x, _ in generator.p_cost]
            y = [y for _, y in generator.p_cost]
        elif generator.pcost_model == POLYNOMIAL:
            x = scipy.arange(generator.p_min, generator.p_max, 5)
            y = scipy.polyval(scipy.array(generator.p_cost), x)
        else:
            raise
        plots.append(plot(x, y))
    xlabel("P (MW)")
    ylabel("Cost ($)")
    legend(plots, [g.name for g in generators])
    show()
[ "def", "plotGenCost", "(", "generators", ")", ":", "figure", "(", ")", "plots", "=", "[", "]", "for", "generator", "in", "generators", ":", "if", "generator", ".", "pcost_model", "==", "PW_LINEAR", ":", "x", "=", "[", "x", "for", "x", ",", "_", "in", "generator", ".", "p_cost", "]", "y", "=", "[", "y", "for", "_", ",", "y", "in", "generator", ".", "p_cost", "]", "elif", "generator", ".", "pcost_model", "==", "POLYNOMIAL", ":", "x", "=", "scipy", ".", "arange", "(", "generator", ".", "p_min", ",", "generator", ".", "p_max", ",", "5", ")", "y", "=", "scipy", ".", "polyval", "(", "scipy", ".", "array", "(", "generator", ".", "p_cost", ")", ",", "x", ")", "else", ":", "raise", "plots", ".", "append", "(", "plot", "(", "x", ",", "y", ")", ")", "xlabel", "(", "\"P (MW)\"", ")", "ylabel", "(", "\"Cost ($)\"", ")", "legend", "(", "plots", ",", "[", "g", ".", "name", "for", "g", "in", "generators", "]", ")", "show", "(", ")" ]
Plots the costs of the given generators.
[ "Plots", "the", "costs", "of", "the", "given", "generators", "." ]
python
train
tradenity/python-sdk
tradenity/resources/customer.py
https://github.com/tradenity/python-sdk/blob/d13fbe23f4d6ff22554c6d8d2deaf209371adaf1/tradenity/resources/customer.py#L627-L647
def get_customer_by_id(cls, customer_id, **kwargs):
    """Find Customer

    Return single instance of Customer by its ID.
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async=True
    >>> thread = api.get_customer_by_id(customer_id, async=True)
    >>> result = thread.get()

    :param async bool
    :param str customer_id: ID of customer to return (required)
    :return: Customer
             If the method is called asynchronously,
             returns the request thread.
    """
    kwargs['_return_http_data_only'] = True
    if kwargs.get('async'):
        return cls._get_customer_by_id_with_http_info(customer_id, **kwargs)
    else:
        (data) = cls._get_customer_by_id_with_http_info(customer_id, **kwargs)
        return data
[ "def", "get_customer_by_id", "(", "cls", ",", "customer_id", ",", "*", "*", "kwargs", ")", ":", "kwargs", "[", "'_return_http_data_only'", "]", "=", "True", "if", "kwargs", ".", "get", "(", "'async'", ")", ":", "return", "cls", ".", "_get_customer_by_id_with_http_info", "(", "customer_id", ",", "*", "*", "kwargs", ")", "else", ":", "(", "data", ")", "=", "cls", ".", "_get_customer_by_id_with_http_info", "(", "customer_id", ",", "*", "*", "kwargs", ")", "return", "data" ]
Find Customer Return single instance of Customer by its ID. This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async=True >>> thread = api.get_customer_by_id(customer_id, async=True) >>> result = thread.get() :param async bool :param str customer_id: ID of customer to return (required) :return: Customer If the method is called asynchronously, returns the request thread.
[ "Find", "Customer" ]
python
train
clalancette/pycdlib
pycdlib/dates.py
https://github.com/clalancette/pycdlib/blob/1e7b77a809e905d67dc71e12d70e850be26b6233/pycdlib/dates.py#L220-L260
def new(self, tm=0.0):
    # type: (float) -> None
    '''
    Create a new Volume Descriptor Date. If tm is None, then this Volume
    Descriptor Date will be full of zeros (meaning not specified). If tm
    is not None, it is expected to be a struct_time object, at which point
    this Volume Descriptor Date object will be filled in with data from that
    struct_time.

    Parameters:
     tm - struct_time object to base new VolumeDescriptorDate off of,
          or 0.0 for an empty VolumeDescriptorDate.
    Returns:
     Nothing.
    '''
    if self._initialized:
        raise pycdlibexception.PyCdlibInternalError('This Volume Descriptor Date object is already initialized')

    if tm != 0.0:
        local = time.localtime(tm)
        self.year = local.tm_year
        self.month = local.tm_mon
        self.dayofmonth = local.tm_mday
        self.hour = local.tm_hour
        self.minute = local.tm_min
        self.second = local.tm_sec
        self.hundredthsofsecond = 0
        self.gmtoffset = utils.gmtoffset_from_tm(tm, local)
        self.date_str = time.strftime(self.TIME_FMT, local).encode('utf-8') + '{:0<2}'.format(self.hundredthsofsecond).encode('utf-8') + struct.pack('=b', self.gmtoffset)
    else:
        self.year = 0
        self.month = 0
        self.dayofmonth = 0
        self.hour = 0
        self.minute = 0
        self.second = 0
        self.hundredthsofsecond = 0
        self.gmtoffset = 0
        self.date_str = self.EMPTY_STRING

    self._initialized = True
[ "def", "new", "(", "self", ",", "tm", "=", "0.0", ")", ":", "# type: (float) -> None", "if", "self", ".", "_initialized", ":", "raise", "pycdlibexception", ".", "PyCdlibInternalError", "(", "'This Volume Descriptor Date object is already initialized'", ")", "if", "tm", "!=", "0.0", ":", "local", "=", "time", ".", "localtime", "(", "tm", ")", "self", ".", "year", "=", "local", ".", "tm_year", "self", ".", "month", "=", "local", ".", "tm_mon", "self", ".", "dayofmonth", "=", "local", ".", "tm_mday", "self", ".", "hour", "=", "local", ".", "tm_hour", "self", ".", "minute", "=", "local", ".", "tm_min", "self", ".", "second", "=", "local", ".", "tm_sec", "self", ".", "hundredthsofsecond", "=", "0", "self", ".", "gmtoffset", "=", "utils", ".", "gmtoffset_from_tm", "(", "tm", ",", "local", ")", "self", ".", "date_str", "=", "time", ".", "strftime", "(", "self", ".", "TIME_FMT", ",", "local", ")", ".", "encode", "(", "'utf-8'", ")", "+", "'{:0<2}'", ".", "format", "(", "self", ".", "hundredthsofsecond", ")", ".", "encode", "(", "'utf-8'", ")", "+", "struct", ".", "pack", "(", "'=b'", ",", "self", ".", "gmtoffset", ")", "else", ":", "self", ".", "year", "=", "0", "self", ".", "month", "=", "0", "self", ".", "dayofmonth", "=", "0", "self", ".", "hour", "=", "0", "self", ".", "minute", "=", "0", "self", ".", "second", "=", "0", "self", ".", "hundredthsofsecond", "=", "0", "self", ".", "gmtoffset", "=", "0", "self", ".", "date_str", "=", "self", ".", "EMPTY_STRING", "self", ".", "_initialized", "=", "True" ]
Create a new Volume Descriptor Date. If tm is None, then this Volume Descriptor Date will be full of zeros (meaning not specified). If tm is not None, it is expected to be a struct_time object, at which point this Volume Descriptor Date object will be filled in with data from that struct_time. Parameters: tm - struct_time object to base new VolumeDescriptorDate off of, or 0.0 for an empty VolumeDescriptorDate. Returns: Nothing.
[ "Create", "a", "new", "Volume", "Descriptor", "Date", ".", "If", "tm", "is", "None", "then", "this", "Volume", "Descriptor", "Date", "will", "be", "full", "of", "zeros", "(", "meaning", "not", "specified", ")", ".", "If", "tm", "is", "not", "None", "it", "is", "expected", "to", "be", "a", "struct_time", "object", "at", "which", "point", "this", "Volume", "Descriptor", "Date", "object", "will", "be", "filled", "in", "with", "data", "from", "that", "struct_time", "." ]
python
train
jaysonsantos/python-binary-memcached
bmemcached/protocol.py
https://github.com/jaysonsantos/python-binary-memcached/blob/6a792829349c69204d9c5045e5c34b4231216dd6/bmemcached/protocol.py#L472-L538
def get_multi(self, keys):
    """
    Get multiple keys from server.

    Since keys are converted to b'' when six.PY3 the keys need to be
    decoded back into string . e.g key='test' is read as b'test' and then
    decoded back to 'test'
    This encode/decode does not work when key is already a six.binary_type
    hence this function remembers which keys were originally sent as str so
    that it only decoded those keys back to string which were sent as string

    :param keys: A list of keys to from server.
    :type keys: list
    :return: A dict with all requested keys.
    :rtype: dict
    """
    # pipeline N-1 getkq requests, followed by a regular getk to uncork the
    # server
    o_keys = keys
    keys, last = keys[:-1], str_to_bytes(keys[-1])
    if six.PY2:
        msg = ''
    else:
        msg = b''
    msg = msg.join([
        struct.pack(self.HEADER_STRUCT +
                    self.COMMANDS['getkq']['struct'] % (len(key)),
                    self.MAGIC['request'],
                    self.COMMANDS['getkq']['command'],
                    len(key), 0, 0, 0, len(key), 0, 0, str_to_bytes(key))
        for key in keys])
    msg += struct.pack(self.HEADER_STRUCT +
                       self.COMMANDS['getk']['struct'] % (len(last)),
                       self.MAGIC['request'],
                       self.COMMANDS['getk']['command'],
                       len(last), 0, 0, 0, len(last), 0, 0, last)

    self._send(msg)

    d = {}
    opcode = -1
    while opcode != self.COMMANDS['getk']['command']:
        (magic, opcode, keylen, extlen, datatype, status, bodylen,
         opaque, cas, extra_content) = self._get_response()

        if status == self.STATUS['success']:
            flags, key, value = struct.unpack(
                '!L%ds%ds' % (keylen, bodylen - keylen - 4),
                extra_content)
            if six.PY2:
                d[key] = self.deserialize(value, flags), cas
            else:
                try:
                    decoded_key = key.decode()
                except UnicodeDecodeError:
                    d[key] = self.deserialize(value, flags), cas
                else:
                    if decoded_key in o_keys:
                        d[decoded_key] = self.deserialize(value, flags), cas
                    else:
                        d[key] = self.deserialize(value, flags), cas
        elif status == self.STATUS['server_disconnected']:
            break
        elif status != self.STATUS['key_not_found']:
            raise MemcachedException('Code: %d Message: %s' % (status, extra_content), status)

    return d
[ "def", "get_multi", "(", "self", ",", "keys", ")", ":", "# pipeline N-1 getkq requests, followed by a regular getk to uncork the", "# server", "o_keys", "=", "keys", "keys", ",", "last", "=", "keys", "[", ":", "-", "1", "]", ",", "str_to_bytes", "(", "keys", "[", "-", "1", "]", ")", "if", "six", ".", "PY2", ":", "msg", "=", "''", "else", ":", "msg", "=", "b''", "msg", "=", "msg", ".", "join", "(", "[", "struct", ".", "pack", "(", "self", ".", "HEADER_STRUCT", "+", "self", ".", "COMMANDS", "[", "'getkq'", "]", "[", "'struct'", "]", "%", "(", "len", "(", "key", ")", ")", ",", "self", ".", "MAGIC", "[", "'request'", "]", ",", "self", ".", "COMMANDS", "[", "'getkq'", "]", "[", "'command'", "]", ",", "len", "(", "key", ")", ",", "0", ",", "0", ",", "0", ",", "len", "(", "key", ")", ",", "0", ",", "0", ",", "str_to_bytes", "(", "key", ")", ")", "for", "key", "in", "keys", "]", ")", "msg", "+=", "struct", ".", "pack", "(", "self", ".", "HEADER_STRUCT", "+", "self", ".", "COMMANDS", "[", "'getk'", "]", "[", "'struct'", "]", "%", "(", "len", "(", "last", ")", ")", ",", "self", ".", "MAGIC", "[", "'request'", "]", ",", "self", ".", "COMMANDS", "[", "'getk'", "]", "[", "'command'", "]", ",", "len", "(", "last", ")", ",", "0", ",", "0", ",", "0", ",", "len", "(", "last", ")", ",", "0", ",", "0", ",", "last", ")", "self", ".", "_send", "(", "msg", ")", "d", "=", "{", "}", "opcode", "=", "-", "1", "while", "opcode", "!=", "self", ".", "COMMANDS", "[", "'getk'", "]", "[", "'command'", "]", ":", "(", "magic", ",", "opcode", ",", "keylen", ",", "extlen", ",", "datatype", ",", "status", ",", "bodylen", ",", "opaque", ",", "cas", ",", "extra_content", ")", "=", "self", ".", "_get_response", "(", ")", "if", "status", "==", "self", ".", "STATUS", "[", "'success'", "]", ":", "flags", ",", "key", ",", "value", "=", "struct", ".", "unpack", "(", "'!L%ds%ds'", "%", "(", "keylen", ",", "bodylen", "-", "keylen", "-", "4", ")", ",", "extra_content", ")", "if", "six", ".", "PY2", ":", "d", "[", "key", "]", "=", "self", ".", "deserialize", "(", "value", ",", "flags", ")", ",", "cas", "else", ":", "try", ":", "decoded_key", "=", "key", ".", "decode", "(", ")", "except", "UnicodeDecodeError", ":", "d", "[", "key", "]", "=", "self", ".", "deserialize", "(", "value", ",", "flags", ")", ",", "cas", "else", ":", "if", "decoded_key", "in", "o_keys", ":", "d", "[", "decoded_key", "]", "=", "self", ".", "deserialize", "(", "value", ",", "flags", ")", ",", "cas", "else", ":", "d", "[", "key", "]", "=", "self", ".", "deserialize", "(", "value", ",", "flags", ")", ",", "cas", "elif", "status", "==", "self", ".", "STATUS", "[", "'server_disconnected'", "]", ":", "break", "elif", "status", "!=", "self", ".", "STATUS", "[", "'key_not_found'", "]", ":", "raise", "MemcachedException", "(", "'Code: %d Message: %s'", "%", "(", "status", ",", "extra_content", ")", ",", "status", ")", "return", "d" ]
Get multiple keys from server. Since keys are converted to b'' when six.PY3 the keys need to be decoded back into string . e.g key='test' is read as b'test' and then decoded back to 'test' This encode/decode does not work when key is already a six.binary_type hence this function remembers which keys were originally sent as str so that it only decoded those keys back to string which were sent as string :param keys: A list of keys to from server. :type keys: list :return: A dict with all requested keys. :rtype: dict
[ "Get", "multiple", "keys", "from", "server", "." ]
python
train
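A usage sketch for get_multi via the public client, assuming a memcached server is reachable on localhost:11211:

import bmemcached

client = bmemcached.Client(("127.0.0.1:11211",))
client.set("spam", 1)
client.set("eggs", 2)
print(client.get_multi(["spam", "eggs", "missing"]))  # missing keys are omitted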
datadesk/slackdown
slackdown/__init__.py
https://github.com/datadesk/slackdown/blob/2c5c2faf2673d0d58183f590f234d2c7e1fe8508/slackdown/__init__.py#L297-L313
def clean(self):
    """
    Goes through the txt input and cleans up any problematic HTML.
    """
    # Calls handle_starttag, handle_endtag, and handle_data
    self.feed()

    # Clean up any parent tags left open
    if self.current_parent_element['tag'] != '':
        self.cleaned_html += '</{}>'.format(self.current_parent_element['tag'])

    # Remove empty <p> added after lists
    self.cleaned_html = re.sub(r'(</[u|o]l>)<p></p>', r'\g<1>', self.cleaned_html)

    self._remove_pre_formatting()

    return self.cleaned_html
[ "def", "clean", "(", "self", ")", ":", "# Calls handle_starttag, handle_endtag, and handle_data", "self", ".", "feed", "(", ")", "# Clean up any parent tags left open", "if", "self", ".", "current_parent_element", "[", "'tag'", "]", "!=", "''", ":", "self", ".", "cleaned_html", "+=", "'</{}>'", ".", "format", "(", "self", ".", "current_parent_element", "[", "'tag'", "]", ")", "# Remove empty <p> added after lists", "self", ".", "cleaned_html", "=", "re", ".", "sub", "(", "r'(</[u|o]l>)<p></p>'", ",", "r'\\g<1>'", ",", "self", ".", "cleaned_html", ")", "self", ".", "_remove_pre_formatting", "(", ")", "return", "self", ".", "cleaned_html" ]
Goes through the txt input and cleans up any problematic HTML.
[ "Goes", "through", "the", "txt", "input", "and", "cleans", "up", "any", "problematic", "HTML", "." ]
python
train
PMEAL/OpenPNM
openpnm/models/phases/thermal_conductivity.py
https://github.com/PMEAL/OpenPNM/blob/0547b5724ffedc0a593aae48639d36fe10e0baed/openpnm/models/phases/thermal_conductivity.py#L4-L54
def water(target, temperature='pore.temperature', salinity='pore.salinity'):
    r"""
    Calculates thermal conductivity of pure water or seawater at atmospheric
    pressure using the correlation given by Jamieson and Tudhope. Values at
    temperature higher the normal boiling temperature are calculated at the
    saturation pressure.

    Parameters
    ----------
    target : OpenPNM Object
        The object for which these values are being calculated.  This
        controls the length of the calculated array, and also provides
        access to other necessary thermofluid properties.

    temperature : string
        The dictionary key containing the temperature values.  Temperature
        must be in Kelvin for this emperical equation to work

    salinity : string
        The dictionary key containing the salinity values.  Salinity must be
        expressed in g of salt per kg of solution (ppt).

    Returns
    -------
    The thermal conductivity of water/seawater in [W/m.K]

    Notes
    -----
    T must be in K, and S in g of salt per kg of phase, or ppt (parts per
    thousand)
    VALIDITY: 273 < T < 453 K; 0 < S < 160 g/kg;
    ACCURACY: 3 %

    References
    ----------
    D. T. Jamieson, and J. S. Tudhope, Desalination, 8, 393-401, 1970.
    """
    T = target[temperature]
    if salinity in target.keys():
        S = target[salinity]
    else:
        S = 0
    T68 = 1.00024*T  # convert from T_90 to T_68
    SP = S/1.00472   # convert from S to S_P
    k_sw = 0.001*(10**(sp.log10(240+0.0002*SP) +
                       0.434*(2.3-(343.5+0.037*SP)/T68) *
                       ((1-T68/(647.3+0.03*SP)))**(1/3)))
    value = k_sw
    return value
[ "def", "water", "(", "target", ",", "temperature", "=", "'pore.temperature'", ",", "salinity", "=", "'pore.salinity'", ")", ":", "T", "=", "target", "[", "temperature", "]", "if", "salinity", "in", "target", ".", "keys", "(", ")", ":", "S", "=", "target", "[", "salinity", "]", "else", ":", "S", "=", "0", "T68", "=", "1.00024", "*", "T", "# convert from T_90 to T_68", "SP", "=", "S", "/", "1.00472", "# convert from S to S_P", "k_sw", "=", "0.001", "*", "(", "10", "**", "(", "sp", ".", "log10", "(", "240", "+", "0.0002", "*", "SP", ")", "+", "0.434", "*", "(", "2.3", "-", "(", "343.5", "+", "0.037", "*", "SP", ")", "/", "T68", ")", "*", "(", "(", "1", "-", "T68", "/", "(", "647.3", "+", "0.03", "*", "SP", ")", ")", ")", "**", "(", "1", "/", "3", ")", ")", ")", "value", "=", "k_sw", "return", "value" ]
r""" Calculates thermal conductivity of pure water or seawater at atmospheric pressure using the correlation given by Jamieson and Tudhope. Values at temperature higher the normal boiling temperature are calculated at the saturation pressure. Parameters ---------- target : OpenPNM Object The object for which these values are being calculated. This controls the length of the calculated array, and also provides access to other necessary thermofluid properties. temperature : string The dictionary key containing the temperature values. Temperature must be in Kelvin for this emperical equation to work salinity : string The dictionary key containing the salinity values. Salinity must be expressed in g of salt per kg of solution (ppt). Returns ------- The thermal conductivity of water/seawater in [W/m.K] Notes ----- T must be in K, and S in g of salt per kg of phase, or ppt (parts per thousand) VALIDITY: 273 < T < 453 K; 0 < S < 160 g/kg; ACCURACY: 3 % References ---------- D. T. Jamieson, and J. S. Tudhope, Desalination, 8, 393-401, 1970.
[ "r", "Calculates", "thermal", "conductivity", "of", "pure", "water", "or", "seawater", "at", "atmospheric", "pressure", "using", "the", "correlation", "given", "by", "Jamieson", "and", "Tudhope", ".", "Values", "at", "temperature", "higher", "the", "normal", "boiling", "temperature", "are", "calculated", "at", "the", "saturation", "pressure", "." ]
python
train
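A quick way to sanity-check the correlation above is to evaluate it outside OpenPNM. The sketch below uses numpy (standing in for the module's `sp` alias) and plain floats instead of a `target` dictionary; the printed value is what the correlation gives for pure water at 25 °C.

```python
import numpy as np

def water_conductivity(T, S=0.0):
    """Jamieson-Tudhope correlation, W/(m.K); T in Kelvin, S in g/kg."""
    T68 = 1.00024 * T        # convert from T_90 to T_68
    SP = S / 1.00472         # convert from S to S_P
    return 0.001 * 10 ** (np.log10(240 + 0.0002 * SP)
                          + 0.434 * (2.3 - (343.5 + 0.037 * SP) / T68)
                          * (1 - T68 / (647.3 + 0.03 * SP)) ** (1 / 3))

print(round(water_conductivity(298.15), 3))  # ~0.611 for pure water at 25 C
```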
LogicalDash/LiSE
allegedb/allegedb/graph.py
https://github.com/LogicalDash/LiSE/blob/fe6fd4f0a7c1780e065f4c9babb9bc443af6bb84/allegedb/allegedb/graph.py#L65-L75
def disconnect(self, func): """No longer call the function when something changes here.""" if id(self) not in _alleged_receivers: return l = _alleged_receivers[id(self)] try: l.remove(func) except ValueError: return if not l: del _alleged_receivers[id(self)]
[ "def", "disconnect", "(", "self", ",", "func", ")", ":", "if", "id", "(", "self", ")", "not", "in", "_alleged_receivers", ":", "return", "l", "=", "_alleged_receivers", "[", "id", "(", "self", ")", "]", "try", ":", "l", ".", "remove", "(", "func", ")", "except", "ValueError", ":", "return", "if", "not", "l", ":", "del", "_alleged_receivers", "[", "id", "(", "self", ")", "]" ]
No longer call the function when something changes here.
[ "No", "longer", "call", "the", "function", "when", "something", "changes", "here", "." ]
python
train
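The `disconnect` method above is one half of a receiver-registry pattern: callbacks live in a module-level dict keyed by `id(obj)`, and emptied lists are deleted so the registry does not grow without bound. A minimal self-contained sketch (names here are illustrative, not allegedb's):

```python
_receivers = {}

def connect(obj, func):
    # register func to be called when obj changes
    _receivers.setdefault(id(obj), []).append(func)

def disconnect(obj, func):
    if id(obj) not in _receivers:
        return
    listeners = _receivers[id(obj)]
    try:
        listeners.remove(func)
    except ValueError:
        return                      # func was never connected; ignore
    if not listeners:
        del _receivers[id(obj)]     # drop empty entries to avoid leaks

class Graph:
    pass

g, cb = Graph(), (lambda: None)
connect(g, cb)
disconnect(g, cb)
assert id(g) not in _receivers
```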
marshallward/f90nml
f90nml/tokenizer.py
https://github.com/marshallward/f90nml/blob/4932cabc5221afc844ee6a5b4a05ceb8bd4a2711/f90nml/tokenizer.py#L186-L190
def update_chars(self):
    """Update the current characters in the tokenizer."""
    # NOTE: We spoof non-Unix files by returning '\n' on StopIteration
    self.prior_char, self.char = self.char, next(self.characters, '\n')
    self.idx += 1
[ "def", "update_chars", "(", "self", ")", ":", "# NOTE: We spoof non-Unix files by returning '\\n' on StopIteration", "self", ".", "prior_char", ",", "self", ".", "char", "=", "self", ".", "char", ",", "next", "(", "self", ".", "characters", ",", "'\\n'", ")", "self", ".", "idx", "+=", "1" ]
Update the current characters in the tokenizer.
[ "Update", "the", "current", "charters", "in", "the", "tokenizer", "." ]
python
train
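The sentinel trick above is easy to verify in isolation: `next(iterator, '\n')` returns `'\n'` instead of raising StopIteration once the input is exhausted, so downstream code always sees a terminating newline. A minimal sketch:

```python
class CharStream:
    def __init__(self, text):
        self.characters = iter(text)
        self.prior_char = self.char = ''
        self.idx = -1

    def update_chars(self):
        # '\n' default spoofs a final newline for non-Unix/truncated input
        self.prior_char, self.char = self.char, next(self.characters, '\n')
        self.idx += 1

s = CharStream('ab')          # note: no trailing newline
for _ in range(3):
    s.update_chars()
print(repr(s.prior_char), repr(s.char))   # 'b' '\n'
```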
chaoss/grimoirelab-manuscripts
manuscripts2/metrics/git.py
https://github.com/chaoss/grimoirelab-manuscripts/blob/94a3ad4f11bfbcd6c5190e01cb5d3e47a5187cd9/manuscripts2/metrics/git.py#L145-L166
def overview(index, start, end):
    """Compute metrics in the overview section for enriched git indexes.

    Returns a dictionary. Each key in the dictionary is the name of a metric;
    the value is the value of that metric. Value can be a complex object
    (e.g., a time series).

    :param index: index object
    :param start: start date to get the data from
    :param end: end date to get the data up to
    :return: dictionary with the value of the metrics
    """

    results = {
        "activity_metrics": [Commits(index, start, end)],
        "author_metrics": [Authors(index, start, end)],
        "bmi_metrics": [],
        "time_to_close_metrics": [],
        "projects_metrics": []
    }

    return results
[ "def", "overview", "(", "index", ",", "start", ",", "end", ")", ":", "results", "=", "{", "\"activity_metrics\"", ":", "[", "Commits", "(", "index", ",", "start", ",", "end", ")", "]", ",", "\"author_metrics\"", ":", "[", "Authors", "(", "index", ",", "start", ",", "end", ")", "]", ",", "\"bmi_metrics\"", ":", "[", "]", ",", "\"time_to_close_metrics\"", ":", "[", "]", ",", "\"projects_metrics\"", ":", "[", "]", "}", "return", "results" ]
Compute metrics in the overview section for enriched git indexes.

Returns a dictionary. Each key in the dictionary is the name of a metric;
the value is the value of that metric. Value can be a complex object
(e.g., a time series).

:param index: index object
:param start: start date to get the data from
:param end: end date to get the data up to
:return: dictionary with the value of the metrics
[ "Compute", "metrics", "in", "the", "overview", "section", "for", "enriched", "git", "indexes", "." ]
python
train
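Callers typically iterate the returned sections rather than index them directly. The sketch below stubs the metric classes so it runs stand-alone; `Commits`/`Authors` here are placeholders for the module's real metric objects:

```python
from datetime import datetime

class Metric:
    def __init__(self, index, start, end):
        self.index, self.start, self.end = index, start, end

Commits = Authors = Metric  # stand-ins for the real classes

def overview(index, start, end):
    return {"activity_metrics": [Commits(index, start, end)],
            "author_metrics": [Authors(index, start, end)],
            "bmi_metrics": [],
            "time_to_close_metrics": [],
            "projects_metrics": []}

sections = overview("git", datetime(2018, 1, 1), datetime(2019, 1, 1))
for name, metrics in sections.items():
    print(name, [type(m).__name__ for m in metrics])
```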
smarie/python-valid8
valid8/base.py
https://github.com/smarie/python-valid8/blob/5e15d1de11602933c5114eb9f73277ad91d97800/valid8/base.py#L406-L410
def get_context_for_help_msgs(self, context_dict): """ We override this method from HelpMsgMixIn to replace wrapped_func with its name """ context_dict = copy(context_dict) context_dict['wrapped_func'] = get_callable_name(context_dict['wrapped_func']) return context_dict
[ "def", "get_context_for_help_msgs", "(", "self", ",", "context_dict", ")", ":", "context_dict", "=", "copy", "(", "context_dict", ")", "context_dict", "[", "'wrapped_func'", "]", "=", "get_callable_name", "(", "context_dict", "[", "'wrapped_func'", "]", ")", "return", "context_dict" ]
We override this method from HelpMsgMixIn to replace wrapped_func with its name
[ "We", "override", "this", "method", "from", "HelpMsgMixIn", "to", "replace", "wrapped_func", "with", "its", "name" ]
python
train
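The point of the copy is that the caller's context dict keeps the real callable while the help message sees only its name. A standalone sketch of the same substitution (the `get_callable_name` helper is simplified here):

```python
from copy import copy

def get_callable_name(f):
    return getattr(f, '__name__', repr(f))

def get_context_for_help_msgs(context_dict):
    context_dict = copy(context_dict)      # shallow copy: caller's dict untouched
    context_dict['wrapped_func'] = get_callable_name(context_dict['wrapped_func'])
    return context_dict

ctx = {'wrapped_func': len}
print(get_context_for_help_msgs(ctx))      # {'wrapped_func': 'len'}
print(callable(ctx['wrapped_func']))       # True -- original unchanged
```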
cloud9ers/gurumate
environment/lib/python2.7/site-packages/IPython/external/decorators/_decorators.py
https://github.com/cloud9ers/gurumate/blob/075dc74d1ee62a8c6b7a8bf2b271364f01629d1e/environment/lib/python2.7/site-packages/IPython/external/decorators/_decorators.py#L99-L173
def skipif(skip_condition, msg=None): """ Make function raise SkipTest exception if a given condition is true. If the condition is a callable, it is used at runtime to dynamically make the decision. This is useful for tests that may require costly imports, to delay the cost until the test suite is actually executed. Parameters ---------- skip_condition : bool or callable Flag to determine whether to skip the decorated test. msg : str, optional Message to give on raising a SkipTest exception. Default is None. Returns ------- decorator : function Decorator which, when applied to a function, causes SkipTest to be raised when `skip_condition` is True, and the function to be called normally otherwise. Notes ----- The decorator itself is decorated with the ``nose.tools.make_decorator`` function in order to transmit function name, and various other metadata. """ def skip_decorator(f): # Local import to avoid a hard nose dependency and only incur the # import time overhead at actual test-time. import nose # Allow for both boolean or callable skip conditions. if callable(skip_condition): skip_val = lambda : skip_condition() else: skip_val = lambda : skip_condition def get_msg(func,msg=None): """Skip message with information about function being skipped.""" if msg is None: out = 'Test skipped due to test condition' else: out = '\n'+msg return "Skipping test: %s%s" % (func.__name__,out) # We need to define *two* skippers because Python doesn't allow both # return with value and yield inside the same function. def skipper_func(*args, **kwargs): """Skipper for normal test functions.""" if skip_val(): raise nose.SkipTest(get_msg(f,msg)) else: return f(*args, **kwargs) def skipper_gen(*args, **kwargs): """Skipper for test generators.""" if skip_val(): raise nose.SkipTest(get_msg(f,msg)) else: for x in f(*args, **kwargs): yield x # Choose the right skipper to use when building the actual decorator. if nose.util.isgenerator(f): skipper = skipper_gen else: skipper = skipper_func return nose.tools.make_decorator(f)(skipper) return skip_decorator
[ "def", "skipif", "(", "skip_condition", ",", "msg", "=", "None", ")", ":", "def", "skip_decorator", "(", "f", ")", ":", "# Local import to avoid a hard nose dependency and only incur the", "# import time overhead at actual test-time.", "import", "nose", "# Allow for both boolean or callable skip conditions.", "if", "callable", "(", "skip_condition", ")", ":", "skip_val", "=", "lambda", ":", "skip_condition", "(", ")", "else", ":", "skip_val", "=", "lambda", ":", "skip_condition", "def", "get_msg", "(", "func", ",", "msg", "=", "None", ")", ":", "\"\"\"Skip message with information about function being skipped.\"\"\"", "if", "msg", "is", "None", ":", "out", "=", "'Test skipped due to test condition'", "else", ":", "out", "=", "'\\n'", "+", "msg", "return", "\"Skipping test: %s%s\"", "%", "(", "func", ".", "__name__", ",", "out", ")", "# We need to define *two* skippers because Python doesn't allow both", "# return with value and yield inside the same function.", "def", "skipper_func", "(", "*", "args", ",", "*", "*", "kwargs", ")", ":", "\"\"\"Skipper for normal test functions.\"\"\"", "if", "skip_val", "(", ")", ":", "raise", "nose", ".", "SkipTest", "(", "get_msg", "(", "f", ",", "msg", ")", ")", "else", ":", "return", "f", "(", "*", "args", ",", "*", "*", "kwargs", ")", "def", "skipper_gen", "(", "*", "args", ",", "*", "*", "kwargs", ")", ":", "\"\"\"Skipper for test generators.\"\"\"", "if", "skip_val", "(", ")", ":", "raise", "nose", ".", "SkipTest", "(", "get_msg", "(", "f", ",", "msg", ")", ")", "else", ":", "for", "x", "in", "f", "(", "*", "args", ",", "*", "*", "kwargs", ")", ":", "yield", "x", "# Choose the right skipper to use when building the actual decorator.", "if", "nose", ".", "util", ".", "isgenerator", "(", "f", ")", ":", "skipper", "=", "skipper_gen", "else", ":", "skipper", "=", "skipper_func", "return", "nose", ".", "tools", ".", "make_decorator", "(", "f", ")", "(", "skipper", ")", "return", "skip_decorator" ]
Make function raise SkipTest exception if a given condition is true. If the condition is a callable, it is used at runtime to dynamically make the decision. This is useful for tests that may require costly imports, to delay the cost until the test suite is actually executed. Parameters ---------- skip_condition : bool or callable Flag to determine whether to skip the decorated test. msg : str, optional Message to give on raising a SkipTest exception. Default is None. Returns ------- decorator : function Decorator which, when applied to a function, causes SkipTest to be raised when `skip_condition` is True, and the function to be called normally otherwise. Notes ----- The decorator itself is decorated with the ``nose.tools.make_decorator`` function in order to transmit function name, and various other metadata.
[ "Make", "function", "raise", "SkipTest", "exception", "if", "a", "given", "condition", "is", "true", "." ]
python
test
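Typical usage of the decorator above, assuming nose is installed at test time: the boolean form decides at import, while the callable form defers the (possibly costly) check until the test actually runs.

```python
import sys

def missing_numpy():
    # deferred check: only runs when the test suite executes
    try:
        import numpy  # noqa: F401
        return False
    except ImportError:
        return True

@skipif(sys.platform == 'win32', "POSIX-only test")
def test_posix_sep():
    import os
    assert os.sep == '/'

@skipif(missing_numpy, "numpy not installed")
def test_numpy_sum():
    import numpy
    assert numpy.zeros(3).sum() == 0
```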
bulkan/robotframework-requests
src/RequestsLibrary/RequestsKeywords.py
https://github.com/bulkan/robotframework-requests/blob/11baa3277f1cb728712e26d996200703c15254a8/src/RequestsLibrary/RequestsKeywords.py#L1174-L1182
def _get_url(self, session, uri): """ Helper method to get the full url """ url = session.url if uri: slash = '' if uri.startswith('/') else '/' url = "%s%s%s" % (session.url, slash, uri) return url
[ "def", "_get_url", "(", "self", ",", "session", ",", "uri", ")", ":", "url", "=", "session", ".", "url", "if", "uri", ":", "slash", "=", "''", "if", "uri", ".", "startswith", "(", "'/'", ")", "else", "'/'", "url", "=", "\"%s%s%s\"", "%", "(", "session", ".", "url", ",", "slash", ",", "uri", ")", "return", "url" ]
Helper method to get the full url
[ "Helper", "method", "to", "get", "the", "full", "url" ]
python
train
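The slash normalisation is the whole trick: exactly one `/` ends up between base and uri when uri lacks a leading slash, and a falsy uri returns the base untouched. A session-free sketch (note that, as in the original, a trailing slash on the base plus a leading slash on the uri would still double up):

```python
def join_url(base, uri):
    url = base
    if uri:
        slash = '' if uri.startswith('/') else '/'
        url = "%s%s%s" % (base, slash, uri)
    return url

assert join_url("http://host:8080", "api/v1") == "http://host:8080/api/v1"
assert join_url("http://host:8080", "/api/v1") == "http://host:8080/api/v1"
assert join_url("http://host:8080", None) == "http://host:8080"
```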
ask/bundle
bundle/utils.py
https://github.com/ask/bundle/blob/bcd8f685f1039beeacce9fdf78dccf2d2e34d81f/bundle/utils.py#L17-L24
def quote(text, ws=plain):
    """Quote special characters in shell command arguments.

    E.g. ``--foo bar>=10.1`` becomes ``--foo bar\>\=10\.1``.

    """
    return "".join(chr in ws and chr or '\\' + chr
                   for chr in text)
[ "def", "quote", "(", "text", ",", "ws", "=", "plain", ")", ":", "return", "\"\"", ".", "join", "(", "chr", "in", "ws", "and", "chr", "or", "'\\\\'", "+", "chr", "for", "chr", "in", "text", ")" ]
Quote special characters in shell command arguments.

E.g. ``--foo bar>=10.1`` becomes ``--foo bar\>\=10\.1``.
[ "Quote", "special", "characters", "in", "shell", "command", "arguments", "." ]
python
train
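`plain` is defined elsewhere in the module, so the sketch below assumes a plausible whitelist of shell-safe characters (letters, digits, `-`, `_` and space); everything outside it gets a backslash, reproducing the docstring's example:

```python
import string

plain = string.ascii_letters + string.digits + '-_ '   # assumed whitelist

def quote(text, ws=plain):
    # and/or idiom: keep ch if whitelisted, else prefix a backslash
    return "".join(ch in ws and ch or '\\' + ch for ch in text)

print(quote("--foo bar>=10.1"))   # --foo bar\>\=10\.1
```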
tensorflow/tensorboard
tensorboard/plugins/interactive_inference/interactive_inference_plugin.py
https://github.com/tensorflow/tensorboard/blob/8e5f497b48e40f2a774f85416b8a35ac0693c35e/tensorboard/plugins/interactive_inference/interactive_inference_plugin.py#L301-L314
def _eligible_features_from_example_handler(self, request):
    """Returns a list of JSON objects for each feature in the example.

    Args:
      request: A request for features.

    Returns:
      A list with a JSON object for each feature.
      Numeric features are represented as {name: observedMin: observedMax:}.
      Categorical features are represented as {name: samples:[]}.
    """
    features_list = inference_utils.get_eligible_features(
        self.examples[0: NUM_EXAMPLES_TO_SCAN], NUM_MUTANTS)
    return http_util.Respond(request, features_list, 'application/json')
[ "def", "_eligible_features_from_example_handler", "(", "self", ",", "request", ")", ":", "features_list", "=", "inference_utils", ".", "get_eligible_features", "(", "self", ".", "examples", "[", "0", ":", "NUM_EXAMPLES_TO_SCAN", "]", ",", "NUM_MUTANTS", ")", "return", "http_util", ".", "Respond", "(", "request", ",", "features_list", ",", "'application/json'", ")" ]
Returns a list of JSON objects for each feature in the example.

Args:
  request: A request for features.

Returns:
  A list with a JSON object for each feature.
  Numeric features are represented as {name: observedMin: observedMax:}.
  Categorical features are represented as {name: samples:[]}.
[ "Returns", "a", "list", "of", "JSON", "objects", "for", "each", "feature", "in", "the", "example", "." ]
python
train
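The handler delegates the real work to `inference_utils`; what goes back over HTTP is just a JSON array in one of the two shapes named in the docstring. A sketch of those shapes (values are made up for illustration):

```python
import json

features_list = [
    # numeric feature: observed range over the scanned examples
    {"name": "age", "observedMin": 17.0, "observedMax": 90.0},
    # categorical feature: sampled values
    {"name": "workclass", "samples": ["Private", "Self-emp", "Federal-gov"]},
]
print(json.dumps(features_list, indent=2))
```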
MrYsLab/pymata-aio
pymata_aio/pymata_iot.py
https://github.com/MrYsLab/pymata-aio/blob/015081a4628b9d47dfe3f8d6c698ff903f107810/pymata_aio/pymata_iot.py#L127-L140
async def digital_read(self, command):
    """
    This method reads and returns the last reported value for a digital pin.
    Normally not used since digital pin updates will be provided automatically
    as they occur with the digital_message_reply being sent to the client after
    set_pin_mode is called.
    (see enable_digital_reporting for message format)

    :param command: {"method": "digital_read", "params": [PIN]}
    :returns: {"method": "digital_read_reply", "params": [PIN, DIGITAL_DATA_VALUE]}
    """
    pin = int(command[0])
    data_val = await self.core.digital_read(pin)
    reply = json.dumps({"method": "digital_read_reply", "params": [pin, data_val]})
    await self.websocket.send(reply)
[ "async", "def", "digital_read", "(", "self", ",", "command", ")", ":", "pin", "=", "int", "(", "command", "[", "0", "]", ")", "data_val", "=", "await", "self", ".", "core", ".", "digital_read", "(", "pin", ")", "reply", "=", "json", ".", "dumps", "(", "{", "\"method\"", ":", "\"digital_read_reply\"", ",", "\"params\"", ":", "[", "pin", ",", "data_val", "]", "}", ")", "await", "self", ".", "websocket", ".", "send", "(", "reply", ")" ]
This method reads and returns the last reported value for a digital pin.
Normally not used since digital pin updates will be provided automatically
as they occur with the digital_message_reply being sent to the client after
set_pin_mode is called.
(see enable_digital_reporting for message format)

:param command: {"method": "digital_read", "params": [PIN]}
:returns: {"method": "digital_read_reply", "params": [PIN, DIGITAL_DATA_VALUE]}
[ "This", "method", "reads", "and", "returns", "the", "last", "reported", "value", "for", "a", "digital", "pin", ".", "Normally", "not", "used", "since", "digital", "pin", "updates", "will", "be", "provided", "automatically", "as", "they", "occur", "with", "the", "digital_message_reply", "being", "sent", "to", "the", "client", "after", "set_pin_mode", "is", "called", "..", "(", "see", "enable_digital_reporting", "for", "message", "format", ")" ]
python
train
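The reply framing can be exercised without a board or a websocket by stubbing both ends; this sketch keeps the record's command/reply shapes but swaps the Firmata core and socket for fakes:

```python
import asyncio
import json

async def digital_read(command, read_pin, send):
    pin = int(command[0])
    data_val = await read_pin(pin)
    await send(json.dumps({"method": "digital_read_reply",
                           "params": [pin, data_val]}))

async def main():
    async def fake_read(pin):
        return 1                      # pretend the pin reads HIGH

    async def fake_send(msg):
        print(msg)

    await digital_read(["13"], fake_read, fake_send)

asyncio.run(main())   # {"method": "digital_read_reply", "params": [13, 1]}
```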
serge-sans-paille/pythran
pythran/analyses/range_values.py
https://github.com/serge-sans-paille/pythran/blob/7e1b5af2dddfabc50bd2a977f0178be269b349b5/pythran/analyses/range_values.py#L306-L324
def visit_IfExp(self, node): """ Use worst case for both possible values. >>> import gast as ast >>> from pythran import passmanager, backend >>> node = ast.parse(''' ... def foo(): ... a = 2 or 3 ... b = 4 or 5 ... c = a if a else b''') >>> pm = passmanager.PassManager("test") >>> res = pm.gather(RangeValues, node) >>> res['c'] Interval(low=2, high=5) """ self.visit(node.test) body_res = self.visit(node.body) orelse_res = self.visit(node.orelse) return self.add(node, orelse_res.union(body_res))
[ "def", "visit_IfExp", "(", "self", ",", "node", ")", ":", "self", ".", "visit", "(", "node", ".", "test", ")", "body_res", "=", "self", ".", "visit", "(", "node", ".", "body", ")", "orelse_res", "=", "self", ".", "visit", "(", "node", ".", "orelse", ")", "return", "self", ".", "add", "(", "node", ",", "orelse_res", ".", "union", "(", "body_res", ")", ")" ]
Use worst case for both possible values. >>> import gast as ast >>> from pythran import passmanager, backend >>> node = ast.parse(''' ... def foo(): ... a = 2 or 3 ... b = 4 or 5 ... c = a if a else b''') >>> pm = passmanager.PassManager("test") >>> res = pm.gather(RangeValues, node) >>> res['c'] Interval(low=2, high=5)
[ "Use", "worst", "case", "for", "both", "possible", "values", "." ]
python
train
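The union in the last line is what "worst case" means here: since the analysis cannot know which branch executes, the result interval must cover both. A toy model of that rule:

```python
from collections import namedtuple

class Interval(namedtuple('Interval', 'low high')):
    def union(self, other):
        # smallest interval containing both operands
        return Interval(min(self.low, other.low), max(self.high, other.high))

body, orelse = Interval(2, 3), Interval(4, 5)
print(orelse.union(body))   # Interval(low=2, high=5), as in the doctest
```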
openstack/horizon
openstack_dashboard/dashboards/project/instances/utils.py
https://github.com/openstack/horizon/blob/5601ea9477323e599d9b766fcac1f8be742935b2/openstack_dashboard/dashboards/project/instances/utils.py#L143-L168
def keypair_field_data(request, include_empty_option=False):
    """Returns a list of tuples of all keypairs.

    Generates a list of keypairs available to the user (request) and
    returns a list of (id, name) tuples.

    :param request: django http request object
    :param include_empty_option: flag to include an empty tuple at the
        front of the list
    :return: list of (id, name) tuples
    """
    keypair_list = []
    try:
        keypairs = api.nova.keypair_list(request)
        keypair_list = [(kp.name, kp.name) for kp in keypairs]
    except Exception:
        exceptions.handle(request, _('Unable to retrieve key pairs.'))

    if not keypair_list:
        if include_empty_option:
            return [("", _("No key pairs available")), ]
        return []

    if include_empty_option:
        return [("", _("Select a key pair")), ] + keypair_list
    return keypair_list
[ "def", "keypair_field_data", "(", "request", ",", "include_empty_option", "=", "False", ")", ":", "keypair_list", "=", "[", "]", "try", ":", "keypairs", "=", "api", ".", "nova", ".", "keypair_list", "(", "request", ")", "keypair_list", "=", "[", "(", "kp", ".", "name", ",", "kp", ".", "name", ")", "for", "kp", "in", "keypairs", "]", "except", "Exception", ":", "exceptions", ".", "handle", "(", "request", ",", "_", "(", "'Unable to retrieve key pairs.'", ")", ")", "if", "not", "keypair_list", ":", "if", "include_empty_option", ":", "return", "[", "(", "\"\"", ",", "_", "(", "\"No key pairs available\"", ")", ")", ",", "]", "return", "[", "]", "if", "include_empty_option", ":", "return", "[", "(", "\"\"", ",", "_", "(", "\"Select a key pair\"", ")", ")", ",", "]", "+", "keypair_list", "return", "keypair_list" ]
Returns a list of tuples of all keypairs.

Generates a list of keypairs available to the user (request) and
returns a list of (id, name) tuples.

:param request: django http request object
:param include_empty_option: flag to include an empty tuple at the
    front of the list
:return: list of (id, name) tuples
[ "Returns", "a", "list", "of", "tuples", "of", "all", "keypairs", "." ]
python
train
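The fetch-map-prepend shape is generic and worth seeing without the Django and Nova plumbing: fetch names, map them to `(value, label)` tuples, and prepend a placeholder only on request. A framework-free sketch:

```python
def to_choices(names, include_empty_option=False,
               empty_label="No items available",
               select_label="Select an item"):
    choices = [(n, n) for n in names]
    if not choices:
        return [("", empty_label)] if include_empty_option else []
    if include_empty_option:
        return [("", select_label)] + choices
    return choices

print(to_choices([], include_empty_option=True))
# [('', 'No items available')]
print(to_choices(["kp1", "kp2"], include_empty_option=True))
# [('', 'Select an item'), ('kp1', 'kp1'), ('kp2', 'kp2')]
```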
google/grr
grr/server/grr_response_server/throttle.py
https://github.com/google/grr/blob/5cef4e8e2f0d5df43ea4877e9c798e0bf60bfe74/grr/server/grr_response_server/throttle.py#L48-L82
def _LoadFlows(self, client_id, min_create_time, token): """Yields all flows for the given client_id and time range. Args: client_id: client URN min_create_time: minimum creation time (inclusive) token: acl token Yields: flow_objects.Flow objects """ if data_store.RelationalDBEnabled(): if isinstance(client_id, rdfvalue.RDFURN): client_id = client_id.Basename() flow_list = data_store.REL_DB.ReadAllFlowObjects( client_id=client_id, min_create_time=min_create_time, include_child_flows=False) for flow_obj in flow_list: yield flow_obj else: now = rdfvalue.RDFDatetime.Now() client_id_urn = rdf_client.ClientURN(client_id) flows_dir = aff4.FACTORY.Open(client_id_urn.Add("flows"), token=token) # Save DB roundtrips by checking both conditions at once. flow_list = flows_dir.ListChildren( age=(min_create_time.AsMicrosecondsSinceEpoch(), now.AsMicrosecondsSinceEpoch())) for flow_obj in aff4.FACTORY.MultiOpen(flow_list, token=token): yield rdf_flow_objects.Flow( args=flow_obj.args, flow_class_name=flow_obj.runner_args.flow_name, flow_id=flow_obj.urn.Basename(), create_time=flow_obj.context.create_time, creator=flow_obj.creator, )
[ "def", "_LoadFlows", "(", "self", ",", "client_id", ",", "min_create_time", ",", "token", ")", ":", "if", "data_store", ".", "RelationalDBEnabled", "(", ")", ":", "if", "isinstance", "(", "client_id", ",", "rdfvalue", ".", "RDFURN", ")", ":", "client_id", "=", "client_id", ".", "Basename", "(", ")", "flow_list", "=", "data_store", ".", "REL_DB", ".", "ReadAllFlowObjects", "(", "client_id", "=", "client_id", ",", "min_create_time", "=", "min_create_time", ",", "include_child_flows", "=", "False", ")", "for", "flow_obj", "in", "flow_list", ":", "yield", "flow_obj", "else", ":", "now", "=", "rdfvalue", ".", "RDFDatetime", ".", "Now", "(", ")", "client_id_urn", "=", "rdf_client", ".", "ClientURN", "(", "client_id", ")", "flows_dir", "=", "aff4", ".", "FACTORY", ".", "Open", "(", "client_id_urn", ".", "Add", "(", "\"flows\"", ")", ",", "token", "=", "token", ")", "# Save DB roundtrips by checking both conditions at once.", "flow_list", "=", "flows_dir", ".", "ListChildren", "(", "age", "=", "(", "min_create_time", ".", "AsMicrosecondsSinceEpoch", "(", ")", ",", "now", ".", "AsMicrosecondsSinceEpoch", "(", ")", ")", ")", "for", "flow_obj", "in", "aff4", ".", "FACTORY", ".", "MultiOpen", "(", "flow_list", ",", "token", "=", "token", ")", ":", "yield", "rdf_flow_objects", ".", "Flow", "(", "args", "=", "flow_obj", ".", "args", ",", "flow_class_name", "=", "flow_obj", ".", "runner_args", ".", "flow_name", ",", "flow_id", "=", "flow_obj", ".", "urn", ".", "Basename", "(", ")", ",", "create_time", "=", "flow_obj", ".", "context", ".", "create_time", ",", "creator", "=", "flow_obj", ".", "creator", ",", ")" ]
Yields all flows for the given client_id and time range. Args: client_id: client URN min_create_time: minimum creation time (inclusive) token: acl token Yields: flow_objects.Flow objects
[ "Yields", "all", "flows", "for", "the", "given", "client_id", "and", "time", "range", "." ]
python
train
rosenbrockc/acorn
acorn/logging/decoration.py
https://github.com/rosenbrockc/acorn/blob/9a44d1a1ad8bfc2c54a6b56d9efe54433a797820/acorn/logging/decoration.py#L606-L624
def pre(fqdn, parent, stackdepth, *argl, **argd): """Adds logging for a call to the specified function that is being handled by an external module. Args: fqdn (str): fully-qualified domain name of the function being logged. parent: *object* that the function belongs to. stackdepth (int): maximum stack depth before entries are ignored. argl (list): positional arguments passed to the function call. argd (dict): keyword arguments passed to the function call. """ global _atdepth_call, _cstack_call #We add +1 to stackdepth because this method had to be called in #addition to the wrapper method, so we would be off by 1. pcres = _pre_call(_atdepth_call, parent, fqdn, stackdepth+1, *argl, **argd) entry, _atdepth_call, reduced, bound, ekey = pcres _cstack_call.append(fqdn) return (entry, bound, ekey)
[ "def", "pre", "(", "fqdn", ",", "parent", ",", "stackdepth", ",", "*", "argl", ",", "*", "*", "argd", ")", ":", "global", "_atdepth_call", ",", "_cstack_call", "#We add +1 to stackdepth because this method had to be called in", "#addition to the wrapper method, so we would be off by 1.", "pcres", "=", "_pre_call", "(", "_atdepth_call", ",", "parent", ",", "fqdn", ",", "stackdepth", "+", "1", ",", "*", "argl", ",", "*", "*", "argd", ")", "entry", ",", "_atdepth_call", ",", "reduced", ",", "bound", ",", "ekey", "=", "pcres", "_cstack_call", ".", "append", "(", "fqdn", ")", "return", "(", "entry", ",", "bound", ",", "ekey", ")" ]
Adds logging for a call to the specified function that is being handled by an external module. Args: fqdn (str): fully-qualified domain name of the function being logged. parent: *object* that the function belongs to. stackdepth (int): maximum stack depth before entries are ignored. argl (list): positional arguments passed to the function call. argd (dict): keyword arguments passed to the function call.
[ "Adds", "logging", "for", "a", "call", "to", "the", "specified", "function", "that", "is", "being", "handled", "by", "an", "external", "module", "." ]
python
train
secure-systems-lab/securesystemslib
securesystemslib/formats.py
https://github.com/secure-systems-lab/securesystemslib/blob/beb3109d5bb462e5a60eed88fb40ed1167bd354e/securesystemslib/formats.py#L618-L643
def format_base64(data): """ <Purpose> Return the base64 encoding of 'data' with whitespace and '=' signs omitted. <Arguments> data: Binary or buffer of data to convert. <Exceptions> securesystemslib.exceptions.FormatError, if the base64 encoding fails or the argument is invalid. <Side Effects> None. <Returns> A base64-encoded string. """ try: return binascii.b2a_base64(data).decode('utf-8').rstrip('=\n ') except (TypeError, binascii.Error) as e: raise securesystemslib.exceptions.FormatError('Invalid base64' ' encoding: ' + str(e))
[ "def", "format_base64", "(", "data", ")", ":", "try", ":", "return", "binascii", ".", "b2a_base64", "(", "data", ")", ".", "decode", "(", "'utf-8'", ")", ".", "rstrip", "(", "'=\\n '", ")", "except", "(", "TypeError", ",", "binascii", ".", "Error", ")", "as", "e", ":", "raise", "securesystemslib", ".", "exceptions", ".", "FormatError", "(", "'Invalid base64'", "' encoding: '", "+", "str", "(", "e", ")", ")" ]
<Purpose> Return the base64 encoding of 'data' with whitespace and '=' signs omitted. <Arguments> data: Binary or buffer of data to convert. <Exceptions> securesystemslib.exceptions.FormatError, if the base64 encoding fails or the argument is invalid. <Side Effects> None. <Returns> A base64-encoded string.
[ "<Purpose", ">", "Return", "the", "base64", "encoding", "of", "data", "with", "whitespace", "and", "=", "signs", "omitted", "." ]
python
train
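The `rstrip` is doing two jobs at once: `b2a_base64` appends a trailing newline, and standard base64 padding ends in `=`; both are removed. Seen in isolation:

```python
import binascii

data = b'binary data'
raw = binascii.b2a_base64(data)
print(raw)                                   # b'YmluYXJ5IGRhdGE=\n'
print(raw.decode('utf-8').rstrip('=\n '))    # YmluYXJ5IGRhdGE
```

Note that the stripped form is no longer directly decodable without re-adding the padding.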
saltstack/salt
salt/modules/x509.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/x509.py#L1109-L1585
def create_certificate( path=None, text=False, overwrite=True, ca_server=None, **kwargs): ''' Create an X509 certificate. path: Path to write the certificate to. text: If ``True``, return the PEM text without writing to a file. Default ``False``. overwrite: If True(default), create_certificate will overwrite the entire pem file. Set False to preserve existing private keys and dh params that may exist in the pem file. kwargs: Any of the properties below can be included as additional keyword arguments. ca_server: Request a remotely signed certificate from ca_server. For this to work, a ``signing_policy`` must be specified, and that same policy must be configured on the ca_server (name or list of ca server). See ``signing_policy`` for details. Also the salt master must permit peers to call the ``sign_remote_certificate`` function. Example: /etc/salt/master.d/peer.conf .. code-block:: yaml peer: .*: - x509.sign_remote_certificate subject properties: Any of the values below can be included to set subject properties Any other subject properties supported by OpenSSL should also work. C: 2 letter Country code CN: Certificate common name, typically the FQDN. Email: Email address GN: Given Name L: Locality O: Organization OU: Organization Unit SN: SurName ST: State or Province signing_private_key: A path or string of the private key in PEM format that will be used to sign this certificate. If neither ``signing_cert``, ``public_key``, or ``csr`` are included, it will be assumed that this is a self-signed certificate, and the public key matching ``signing_private_key`` will be used to create the certificate. signing_private_key_passphrase: Passphrase used to decrypt the signing_private_key. signing_cert: A certificate matching the private key that will be used to sign this certificate. This is used to populate the issuer values in the resulting certificate. Do not include this value for self-signed certificates. public_key: The public key to be included in this certificate. This can be sourced from a public key, certificate, csr or private key. If a private key is used, the matching public key from the private key will be generated before any processing is done. This means you can request a certificate from a remote CA using a private key file as your public_key and only the public key will be sent across the network to the CA. If neither ``public_key`` or ``csr`` are specified, it will be assumed that this is a self-signed certificate, and the public key derived from ``signing_private_key`` will be used. Specify either ``public_key`` or ``csr``, not both. Because you can input a CSR as a public key or as a CSR, it is important to understand the difference. If you import a CSR as a public key, only the public key will be added to the certificate, subject or extension information in the CSR will be lost. public_key_passphrase: If the public key is supplied as a private key, this is the passphrase used to decrypt it. csr: A file or PEM string containing a certificate signing request. This will be used to supply the subject, extensions and public key of a certificate. Any subject or extensions specified explicitly will overwrite any in the CSR. basicConstraints: X509v3 Basic Constraints extension. extensions: The following arguments set X509v3 Extension values. If the value starts with ``critical``, the extension will be marked as critical. Some special extensions are ``subjectKeyIdentifier`` and ``authorityKeyIdentifier``. 
``subjectKeyIdentifier`` can be an explicit value or it can be the special string ``hash``. ``hash`` will set the subjectKeyIdentifier equal to the SHA1 hash of the modulus of the public key in this certificate. Note that this is not the exact same hashing method used by OpenSSL when using the hash value. ``authorityKeyIdentifier`` Use values acceptable to the openssl CLI tools. This will automatically populate ``authorityKeyIdentifier`` with the ``subjectKeyIdentifier`` of ``signing_cert``. If this is a self-signed cert these values will be the same. basicConstraints: X509v3 Basic Constraints keyUsage: X509v3 Key Usage extendedKeyUsage: X509v3 Extended Key Usage subjectKeyIdentifier: X509v3 Subject Key Identifier issuerAltName: X509v3 Issuer Alternative Name subjectAltName: X509v3 Subject Alternative Name crlDistributionPoints: X509v3 CRL distribution points issuingDistributionPoint: X509v3 Issuing Distribution Point certificatePolicies: X509v3 Certificate Policies policyConstraints: X509v3 Policy Constraints inhibitAnyPolicy: X509v3 Inhibit Any Policy nameConstraints: X509v3 Name Constraints noCheck: X509v3 OCSP No Check nsComment: Netscape Comment nsCertType: Netscape Certificate Type days_valid: The number of days this certificate should be valid. This sets the ``notAfter`` property of the certificate. Defaults to 365. version: The version of the X509 certificate. Defaults to 3. This is automatically converted to the version value, so ``version=3`` sets the certificate version field to 0x2. serial_number: The serial number to assign to this certificate. If omitted a random serial number of size ``serial_bits`` is generated. serial_bits: The number of bits to use when randomly generating a serial number. Defaults to 64. algorithm: The hashing algorithm to be used for signing this certificate. Defaults to sha256. copypath: An additional path to copy the resulting certificate to. Can be used to maintain a copy of all certificates issued for revocation purposes. prepend_cn: If set to True, the CN and a dash will be prepended to the copypath's filename. Example: /etc/pki/issued_certs/www.example.com-DE:CA:FB:AD:00:00:00:00.crt signing_policy: A signing policy that should be used to create this certificate. Signing policies should be defined in the minion configuration, or in a minion pillar. It should be a yaml formatted list of arguments which will override any arguments passed to this function. If the ``minions`` key is included in the signing policy, only minions matching that pattern (see match.glob and match.compound) will be permitted to remotely request certificates from that policy. Example: .. code-block:: yaml x509_signing_policies: www: - minions: 'www*' - signing_private_key: /etc/pki/ca.key - signing_cert: /etc/pki/ca.crt - C: US - ST: Utah - L: Salt Lake City - basicConstraints: "critical CA:false" - keyUsage: "critical cRLSign, keyCertSign" - subjectKeyIdentifier: hash - authorityKeyIdentifier: keyid,issuer:always - days_valid: 90 - copypath: /etc/pki/issued_certs/ The above signing policy can be invoked with ``signing_policy=www`` ext_mapping: Provide additional X509v3 extension mappings. This argument should be in the form of a dictionary and should include both the OID and the friendly name for the extension. .. versionadded:: Neon CLI Example: .. 
code-block:: bash salt '*' x509.create_certificate path=/etc/pki/myca.crt signing_private_key='/etc/pki/myca.key' csr='/etc/pki/myca.csr'} ''' if not path and not text and ('testrun' not in kwargs or kwargs['testrun'] is False): raise salt.exceptions.SaltInvocationError( 'Either path or text must be specified.') if path and text: raise salt.exceptions.SaltInvocationError( 'Either path or text must be specified, not both.') if 'public_key_passphrase' not in kwargs: kwargs['public_key_passphrase'] = None if ca_server: if 'signing_policy' not in kwargs: raise salt.exceptions.SaltInvocationError( 'signing_policy must be specified' 'if requesting remote certificate from ca_server {0}.' .format(ca_server)) if 'csr' in kwargs: kwargs['csr'] = get_pem_entry( kwargs['csr'], pem_type='CERTIFICATE REQUEST').replace('\n', '') if 'public_key' in kwargs: # Strip newlines to make passing through as cli functions easier kwargs['public_key'] = salt.utils.stringutils.to_str(get_public_key( kwargs['public_key'], passphrase=kwargs['public_key_passphrase'])).replace('\n', '') # Remove system entries in kwargs # Including listen_in and preqreuired because they are not included # in STATE_INTERNAL_KEYWORDS # for salt 2014.7.2 for ignore in list(_STATE_INTERNAL_KEYWORDS) + ['listen_in', 'preqrequired', '__prerequired__']: kwargs.pop(ignore, None) if not isinstance(ca_server, list): ca_server = [ca_server] random.shuffle(ca_server) for server in ca_server: certs = __salt__['publish.publish']( tgt=server, fun='x509.sign_remote_certificate', arg=six.text_type(kwargs)) if certs is None or not any(certs): continue else: cert_txt = certs[server] break if not any(certs): raise salt.exceptions.SaltInvocationError( 'ca_server did not respond' ' salt master must permit peers to' ' call the sign_remote_certificate function.') if path: return write_pem( text=cert_txt, overwrite=overwrite, path=path, pem_type='CERTIFICATE' ) else: return cert_txt signing_policy = {} if 'signing_policy' in kwargs: signing_policy = _get_signing_policy(kwargs['signing_policy']) if isinstance(signing_policy, list): dict_ = {} for item in signing_policy: dict_.update(item) signing_policy = dict_ # Overwrite any arguments in kwargs with signing_policy kwargs.update(signing_policy) for prop, default in six.iteritems(CERT_DEFAULTS): if prop not in kwargs: kwargs[prop] = default cert = M2Crypto.X509.X509() # X509 Version 3 has a value of 2 in the field. # Version 2 has a value of 1. # https://tools.ietf.org/html/rfc5280#section-4.1.2.1 cert.set_version(kwargs['version'] - 1) # Random serial number if not specified if 'serial_number' not in kwargs: kwargs['serial_number'] = _dec2hex( random.getrandbits(kwargs['serial_bits'])) serial_number = int(kwargs['serial_number'].replace(':', ''), 16) # With Python3 we occasionally end up with an INT that is greater than a C # long max_value. This causes an overflow error due to a bug in M2Crypto. # See issue: https://gitlab.com/m2crypto/m2crypto/issues/232 # Remove this after M2Crypto fixes the bug. 
if six.PY3: if salt.utils.platform.is_windows(): INT_MAX = 2147483647 if serial_number >= INT_MAX: serial_number -= int(serial_number / INT_MAX) * INT_MAX else: if serial_number >= sys.maxsize: serial_number -= int(serial_number / sys.maxsize) * sys.maxsize cert.set_serial_number(serial_number) # Set validity dates # pylint: disable=no-member not_before = M2Crypto.m2.x509_get_not_before(cert.x509) not_after = M2Crypto.m2.x509_get_not_after(cert.x509) M2Crypto.m2.x509_gmtime_adj(not_before, 0) M2Crypto.m2.x509_gmtime_adj(not_after, 60 * 60 * 24 * kwargs['days_valid']) # pylint: enable=no-member # If neither public_key or csr are included, this cert is self-signed if 'public_key' not in kwargs and 'csr' not in kwargs: kwargs['public_key'] = kwargs['signing_private_key'] if 'signing_private_key_passphrase' in kwargs: kwargs['public_key_passphrase'] = kwargs[ 'signing_private_key_passphrase'] csrexts = {} if 'csr' in kwargs: kwargs['public_key'] = kwargs['csr'] csr = _get_request_obj(kwargs['csr']) cert.set_subject(csr.get_subject()) csrexts = read_csr(kwargs['csr'])['X509v3 Extensions'] cert.set_pubkey(get_public_key(kwargs['public_key'], passphrase=kwargs['public_key_passphrase'], asObj=True)) subject = cert.get_subject() # pylint: disable=unused-variable for entry, num in six.iteritems(subject.nid): if entry in kwargs: setattr(subject, entry, kwargs[entry]) # pylint: enable=unused-variable if 'signing_cert' in kwargs: signing_cert = _get_certificate_obj(kwargs['signing_cert']) else: signing_cert = cert cert.set_issuer(signing_cert.get_subject()) if 'ext_mapping' in kwargs: EXT_NAME_MAPPINGS.update(kwargs['ext_mapping']) for extname, extlongname in six.iteritems(EXT_NAME_MAPPINGS): if (extname in kwargs or extlongname in kwargs or extname in csrexts or extlongname in csrexts) is False: continue # Use explicitly set values first, fall back to CSR values. extval = kwargs.get(extname) or kwargs.get(extlongname) or csrexts.get(extname) or csrexts.get(extlongname) critical = False if extval.startswith('critical '): critical = True extval = extval[9:] if extname == 'subjectKeyIdentifier' and 'hash' in extval: extval = extval.replace('hash', _get_pubkey_hash(cert)) issuer = None if extname == 'authorityKeyIdentifier': issuer = signing_cert if extname == 'subjectAltName': extval = extval.replace('IP Address', 'IP') ext = _new_extension( name=extname, value=extval, critical=critical, issuer=issuer) if not ext.x509_ext: log.info('Invalid X509v3 Extension. 
%s: %s', extname, extval) continue cert.add_ext(ext) if 'signing_private_key_passphrase' not in kwargs: kwargs['signing_private_key_passphrase'] = None if 'testrun' in kwargs and kwargs['testrun'] is True: cert_props = read_certificate(cert) cert_props['Issuer Public Key'] = get_public_key( kwargs['signing_private_key'], passphrase=kwargs['signing_private_key_passphrase']) return cert_props if not verify_private_key(private_key=kwargs['signing_private_key'], passphrase=kwargs[ 'signing_private_key_passphrase'], public_key=signing_cert): raise salt.exceptions.SaltInvocationError( 'signing_private_key: {0} ' 'does no match signing_cert: {1}'.format( kwargs['signing_private_key'], kwargs.get('signing_cert', '') ) ) cert.sign( _get_private_key_obj(kwargs['signing_private_key'], passphrase=kwargs['signing_private_key_passphrase']), kwargs['algorithm'] ) if not verify_signature(cert, signing_pub_key=signing_cert): raise salt.exceptions.SaltInvocationError( 'failed to verify certificate signature') if 'copypath' in kwargs: if 'prepend_cn' in kwargs and kwargs['prepend_cn'] is True: prepend = six.text_type(kwargs['CN']) + '-' else: prepend = '' write_pem(text=cert.as_pem(), path=os.path.join(kwargs['copypath'], prepend + kwargs['serial_number'] + '.crt'), pem_type='CERTIFICATE') if path: return write_pem( text=cert.as_pem(), overwrite=overwrite, path=path, pem_type='CERTIFICATE' ) else: return salt.utils.stringutils.to_str(cert.as_pem())
[ "def", "create_certificate", "(", "path", "=", "None", ",", "text", "=", "False", ",", "overwrite", "=", "True", ",", "ca_server", "=", "None", ",", "*", "*", "kwargs", ")", ":", "if", "not", "path", "and", "not", "text", "and", "(", "'testrun'", "not", "in", "kwargs", "or", "kwargs", "[", "'testrun'", "]", "is", "False", ")", ":", "raise", "salt", ".", "exceptions", ".", "SaltInvocationError", "(", "'Either path or text must be specified.'", ")", "if", "path", "and", "text", ":", "raise", "salt", ".", "exceptions", ".", "SaltInvocationError", "(", "'Either path or text must be specified, not both.'", ")", "if", "'public_key_passphrase'", "not", "in", "kwargs", ":", "kwargs", "[", "'public_key_passphrase'", "]", "=", "None", "if", "ca_server", ":", "if", "'signing_policy'", "not", "in", "kwargs", ":", "raise", "salt", ".", "exceptions", ".", "SaltInvocationError", "(", "'signing_policy must be specified'", "'if requesting remote certificate from ca_server {0}.'", ".", "format", "(", "ca_server", ")", ")", "if", "'csr'", "in", "kwargs", ":", "kwargs", "[", "'csr'", "]", "=", "get_pem_entry", "(", "kwargs", "[", "'csr'", "]", ",", "pem_type", "=", "'CERTIFICATE REQUEST'", ")", ".", "replace", "(", "'\\n'", ",", "''", ")", "if", "'public_key'", "in", "kwargs", ":", "# Strip newlines to make passing through as cli functions easier", "kwargs", "[", "'public_key'", "]", "=", "salt", ".", "utils", ".", "stringutils", ".", "to_str", "(", "get_public_key", "(", "kwargs", "[", "'public_key'", "]", ",", "passphrase", "=", "kwargs", "[", "'public_key_passphrase'", "]", ")", ")", ".", "replace", "(", "'\\n'", ",", "''", ")", "# Remove system entries in kwargs", "# Including listen_in and preqreuired because they are not included", "# in STATE_INTERNAL_KEYWORDS", "# for salt 2014.7.2", "for", "ignore", "in", "list", "(", "_STATE_INTERNAL_KEYWORDS", ")", "+", "[", "'listen_in'", ",", "'preqrequired'", ",", "'__prerequired__'", "]", ":", "kwargs", ".", "pop", "(", "ignore", ",", "None", ")", "if", "not", "isinstance", "(", "ca_server", ",", "list", ")", ":", "ca_server", "=", "[", "ca_server", "]", "random", ".", "shuffle", "(", "ca_server", ")", "for", "server", "in", "ca_server", ":", "certs", "=", "__salt__", "[", "'publish.publish'", "]", "(", "tgt", "=", "server", ",", "fun", "=", "'x509.sign_remote_certificate'", ",", "arg", "=", "six", ".", "text_type", "(", "kwargs", ")", ")", "if", "certs", "is", "None", "or", "not", "any", "(", "certs", ")", ":", "continue", "else", ":", "cert_txt", "=", "certs", "[", "server", "]", "break", "if", "not", "any", "(", "certs", ")", ":", "raise", "salt", ".", "exceptions", ".", "SaltInvocationError", "(", "'ca_server did not respond'", "' salt master must permit peers to'", "' call the sign_remote_certificate function.'", ")", "if", "path", ":", "return", "write_pem", "(", "text", "=", "cert_txt", ",", "overwrite", "=", "overwrite", ",", "path", "=", "path", ",", "pem_type", "=", "'CERTIFICATE'", ")", "else", ":", "return", "cert_txt", "signing_policy", "=", "{", "}", "if", "'signing_policy'", "in", "kwargs", ":", "signing_policy", "=", "_get_signing_policy", "(", "kwargs", "[", "'signing_policy'", "]", ")", "if", "isinstance", "(", "signing_policy", ",", "list", ")", ":", "dict_", "=", "{", "}", "for", "item", "in", "signing_policy", ":", "dict_", ".", "update", "(", "item", ")", "signing_policy", "=", "dict_", "# Overwrite any arguments in kwargs with signing_policy", "kwargs", ".", "update", "(", "signing_policy", ")", "for", "prop", ",", "default", "in", 
"six", ".", "iteritems", "(", "CERT_DEFAULTS", ")", ":", "if", "prop", "not", "in", "kwargs", ":", "kwargs", "[", "prop", "]", "=", "default", "cert", "=", "M2Crypto", ".", "X509", ".", "X509", "(", ")", "# X509 Version 3 has a value of 2 in the field.", "# Version 2 has a value of 1.", "# https://tools.ietf.org/html/rfc5280#section-4.1.2.1", "cert", ".", "set_version", "(", "kwargs", "[", "'version'", "]", "-", "1", ")", "# Random serial number if not specified", "if", "'serial_number'", "not", "in", "kwargs", ":", "kwargs", "[", "'serial_number'", "]", "=", "_dec2hex", "(", "random", ".", "getrandbits", "(", "kwargs", "[", "'serial_bits'", "]", ")", ")", "serial_number", "=", "int", "(", "kwargs", "[", "'serial_number'", "]", ".", "replace", "(", "':'", ",", "''", ")", ",", "16", ")", "# With Python3 we occasionally end up with an INT that is greater than a C", "# long max_value. This causes an overflow error due to a bug in M2Crypto.", "# See issue: https://gitlab.com/m2crypto/m2crypto/issues/232", "# Remove this after M2Crypto fixes the bug.", "if", "six", ".", "PY3", ":", "if", "salt", ".", "utils", ".", "platform", ".", "is_windows", "(", ")", ":", "INT_MAX", "=", "2147483647", "if", "serial_number", ">=", "INT_MAX", ":", "serial_number", "-=", "int", "(", "serial_number", "/", "INT_MAX", ")", "*", "INT_MAX", "else", ":", "if", "serial_number", ">=", "sys", ".", "maxsize", ":", "serial_number", "-=", "int", "(", "serial_number", "/", "sys", ".", "maxsize", ")", "*", "sys", ".", "maxsize", "cert", ".", "set_serial_number", "(", "serial_number", ")", "# Set validity dates", "# pylint: disable=no-member", "not_before", "=", "M2Crypto", ".", "m2", ".", "x509_get_not_before", "(", "cert", ".", "x509", ")", "not_after", "=", "M2Crypto", ".", "m2", ".", "x509_get_not_after", "(", "cert", ".", "x509", ")", "M2Crypto", ".", "m2", ".", "x509_gmtime_adj", "(", "not_before", ",", "0", ")", "M2Crypto", ".", "m2", ".", "x509_gmtime_adj", "(", "not_after", ",", "60", "*", "60", "*", "24", "*", "kwargs", "[", "'days_valid'", "]", ")", "# pylint: enable=no-member", "# If neither public_key or csr are included, this cert is self-signed", "if", "'public_key'", "not", "in", "kwargs", "and", "'csr'", "not", "in", "kwargs", ":", "kwargs", "[", "'public_key'", "]", "=", "kwargs", "[", "'signing_private_key'", "]", "if", "'signing_private_key_passphrase'", "in", "kwargs", ":", "kwargs", "[", "'public_key_passphrase'", "]", "=", "kwargs", "[", "'signing_private_key_passphrase'", "]", "csrexts", "=", "{", "}", "if", "'csr'", "in", "kwargs", ":", "kwargs", "[", "'public_key'", "]", "=", "kwargs", "[", "'csr'", "]", "csr", "=", "_get_request_obj", "(", "kwargs", "[", "'csr'", "]", ")", "cert", ".", "set_subject", "(", "csr", ".", "get_subject", "(", ")", ")", "csrexts", "=", "read_csr", "(", "kwargs", "[", "'csr'", "]", ")", "[", "'X509v3 Extensions'", "]", "cert", ".", "set_pubkey", "(", "get_public_key", "(", "kwargs", "[", "'public_key'", "]", ",", "passphrase", "=", "kwargs", "[", "'public_key_passphrase'", "]", ",", "asObj", "=", "True", ")", ")", "subject", "=", "cert", ".", "get_subject", "(", ")", "# pylint: disable=unused-variable", "for", "entry", ",", "num", "in", "six", ".", "iteritems", "(", "subject", ".", "nid", ")", ":", "if", "entry", "in", "kwargs", ":", "setattr", "(", "subject", ",", "entry", ",", "kwargs", "[", "entry", "]", ")", "# pylint: enable=unused-variable", "if", "'signing_cert'", "in", "kwargs", ":", "signing_cert", "=", "_get_certificate_obj", "(", "kwargs", "[", "'signing_cert'", 
"]", ")", "else", ":", "signing_cert", "=", "cert", "cert", ".", "set_issuer", "(", "signing_cert", ".", "get_subject", "(", ")", ")", "if", "'ext_mapping'", "in", "kwargs", ":", "EXT_NAME_MAPPINGS", ".", "update", "(", "kwargs", "[", "'ext_mapping'", "]", ")", "for", "extname", ",", "extlongname", "in", "six", ".", "iteritems", "(", "EXT_NAME_MAPPINGS", ")", ":", "if", "(", "extname", "in", "kwargs", "or", "extlongname", "in", "kwargs", "or", "extname", "in", "csrexts", "or", "extlongname", "in", "csrexts", ")", "is", "False", ":", "continue", "# Use explicitly set values first, fall back to CSR values.", "extval", "=", "kwargs", ".", "get", "(", "extname", ")", "or", "kwargs", ".", "get", "(", "extlongname", ")", "or", "csrexts", ".", "get", "(", "extname", ")", "or", "csrexts", ".", "get", "(", "extlongname", ")", "critical", "=", "False", "if", "extval", ".", "startswith", "(", "'critical '", ")", ":", "critical", "=", "True", "extval", "=", "extval", "[", "9", ":", "]", "if", "extname", "==", "'subjectKeyIdentifier'", "and", "'hash'", "in", "extval", ":", "extval", "=", "extval", ".", "replace", "(", "'hash'", ",", "_get_pubkey_hash", "(", "cert", ")", ")", "issuer", "=", "None", "if", "extname", "==", "'authorityKeyIdentifier'", ":", "issuer", "=", "signing_cert", "if", "extname", "==", "'subjectAltName'", ":", "extval", "=", "extval", ".", "replace", "(", "'IP Address'", ",", "'IP'", ")", "ext", "=", "_new_extension", "(", "name", "=", "extname", ",", "value", "=", "extval", ",", "critical", "=", "critical", ",", "issuer", "=", "issuer", ")", "if", "not", "ext", ".", "x509_ext", ":", "log", ".", "info", "(", "'Invalid X509v3 Extension. %s: %s'", ",", "extname", ",", "extval", ")", "continue", "cert", ".", "add_ext", "(", "ext", ")", "if", "'signing_private_key_passphrase'", "not", "in", "kwargs", ":", "kwargs", "[", "'signing_private_key_passphrase'", "]", "=", "None", "if", "'testrun'", "in", "kwargs", "and", "kwargs", "[", "'testrun'", "]", "is", "True", ":", "cert_props", "=", "read_certificate", "(", "cert", ")", "cert_props", "[", "'Issuer Public Key'", "]", "=", "get_public_key", "(", "kwargs", "[", "'signing_private_key'", "]", ",", "passphrase", "=", "kwargs", "[", "'signing_private_key_passphrase'", "]", ")", "return", "cert_props", "if", "not", "verify_private_key", "(", "private_key", "=", "kwargs", "[", "'signing_private_key'", "]", ",", "passphrase", "=", "kwargs", "[", "'signing_private_key_passphrase'", "]", ",", "public_key", "=", "signing_cert", ")", ":", "raise", "salt", ".", "exceptions", ".", "SaltInvocationError", "(", "'signing_private_key: {0} '", "'does no match signing_cert: {1}'", ".", "format", "(", "kwargs", "[", "'signing_private_key'", "]", ",", "kwargs", ".", "get", "(", "'signing_cert'", ",", "''", ")", ")", ")", "cert", ".", "sign", "(", "_get_private_key_obj", "(", "kwargs", "[", "'signing_private_key'", "]", ",", "passphrase", "=", "kwargs", "[", "'signing_private_key_passphrase'", "]", ")", ",", "kwargs", "[", "'algorithm'", "]", ")", "if", "not", "verify_signature", "(", "cert", ",", "signing_pub_key", "=", "signing_cert", ")", ":", "raise", "salt", ".", "exceptions", ".", "SaltInvocationError", "(", "'failed to verify certificate signature'", ")", "if", "'copypath'", "in", "kwargs", ":", "if", "'prepend_cn'", "in", "kwargs", "and", "kwargs", "[", "'prepend_cn'", "]", "is", "True", ":", "prepend", "=", "six", ".", "text_type", "(", "kwargs", "[", "'CN'", "]", ")", "+", "'-'", "else", ":", "prepend", "=", "''", "write_pem", "(", "text", "=", 
"cert", ".", "as_pem", "(", ")", ",", "path", "=", "os", ".", "path", ".", "join", "(", "kwargs", "[", "'copypath'", "]", ",", "prepend", "+", "kwargs", "[", "'serial_number'", "]", "+", "'.crt'", ")", ",", "pem_type", "=", "'CERTIFICATE'", ")", "if", "path", ":", "return", "write_pem", "(", "text", "=", "cert", ".", "as_pem", "(", ")", ",", "overwrite", "=", "overwrite", ",", "path", "=", "path", ",", "pem_type", "=", "'CERTIFICATE'", ")", "else", ":", "return", "salt", ".", "utils", ".", "stringutils", ".", "to_str", "(", "cert", ".", "as_pem", "(", ")", ")" ]
Create an X509 certificate. path: Path to write the certificate to. text: If ``True``, return the PEM text without writing to a file. Default ``False``. overwrite: If True(default), create_certificate will overwrite the entire pem file. Set False to preserve existing private keys and dh params that may exist in the pem file. kwargs: Any of the properties below can be included as additional keyword arguments. ca_server: Request a remotely signed certificate from ca_server. For this to work, a ``signing_policy`` must be specified, and that same policy must be configured on the ca_server (name or list of ca server). See ``signing_policy`` for details. Also the salt master must permit peers to call the ``sign_remote_certificate`` function. Example: /etc/salt/master.d/peer.conf .. code-block:: yaml peer: .*: - x509.sign_remote_certificate subject properties: Any of the values below can be included to set subject properties Any other subject properties supported by OpenSSL should also work. C: 2 letter Country code CN: Certificate common name, typically the FQDN. Email: Email address GN: Given Name L: Locality O: Organization OU: Organization Unit SN: SurName ST: State or Province signing_private_key: A path or string of the private key in PEM format that will be used to sign this certificate. If neither ``signing_cert``, ``public_key``, or ``csr`` are included, it will be assumed that this is a self-signed certificate, and the public key matching ``signing_private_key`` will be used to create the certificate. signing_private_key_passphrase: Passphrase used to decrypt the signing_private_key. signing_cert: A certificate matching the private key that will be used to sign this certificate. This is used to populate the issuer values in the resulting certificate. Do not include this value for self-signed certificates. public_key: The public key to be included in this certificate. This can be sourced from a public key, certificate, csr or private key. If a private key is used, the matching public key from the private key will be generated before any processing is done. This means you can request a certificate from a remote CA using a private key file as your public_key and only the public key will be sent across the network to the CA. If neither ``public_key`` or ``csr`` are specified, it will be assumed that this is a self-signed certificate, and the public key derived from ``signing_private_key`` will be used. Specify either ``public_key`` or ``csr``, not both. Because you can input a CSR as a public key or as a CSR, it is important to understand the difference. If you import a CSR as a public key, only the public key will be added to the certificate, subject or extension information in the CSR will be lost. public_key_passphrase: If the public key is supplied as a private key, this is the passphrase used to decrypt it. csr: A file or PEM string containing a certificate signing request. This will be used to supply the subject, extensions and public key of a certificate. Any subject or extensions specified explicitly will overwrite any in the CSR. basicConstraints: X509v3 Basic Constraints extension. extensions: The following arguments set X509v3 Extension values. If the value starts with ``critical``, the extension will be marked as critical. Some special extensions are ``subjectKeyIdentifier`` and ``authorityKeyIdentifier``. ``subjectKeyIdentifier`` can be an explicit value or it can be the special string ``hash``. 
``hash`` will set the subjectKeyIdentifier equal to the SHA1 hash of the modulus of the public key in this certificate. Note that this is not the exact same hashing method used by OpenSSL when using the hash value. ``authorityKeyIdentifier`` Use values acceptable to the openssl CLI tools. This will automatically populate ``authorityKeyIdentifier`` with the ``subjectKeyIdentifier`` of ``signing_cert``. If this is a self-signed cert these values will be the same. basicConstraints: X509v3 Basic Constraints keyUsage: X509v3 Key Usage extendedKeyUsage: X509v3 Extended Key Usage subjectKeyIdentifier: X509v3 Subject Key Identifier issuerAltName: X509v3 Issuer Alternative Name subjectAltName: X509v3 Subject Alternative Name crlDistributionPoints: X509v3 CRL distribution points issuingDistributionPoint: X509v3 Issuing Distribution Point certificatePolicies: X509v3 Certificate Policies policyConstraints: X509v3 Policy Constraints inhibitAnyPolicy: X509v3 Inhibit Any Policy nameConstraints: X509v3 Name Constraints noCheck: X509v3 OCSP No Check nsComment: Netscape Comment nsCertType: Netscape Certificate Type days_valid: The number of days this certificate should be valid. This sets the ``notAfter`` property of the certificate. Defaults to 365. version: The version of the X509 certificate. Defaults to 3. This is automatically converted to the version value, so ``version=3`` sets the certificate version field to 0x2. serial_number: The serial number to assign to this certificate. If omitted a random serial number of size ``serial_bits`` is generated. serial_bits: The number of bits to use when randomly generating a serial number. Defaults to 64. algorithm: The hashing algorithm to be used for signing this certificate. Defaults to sha256. copypath: An additional path to copy the resulting certificate to. Can be used to maintain a copy of all certificates issued for revocation purposes. prepend_cn: If set to True, the CN and a dash will be prepended to the copypath's filename. Example: /etc/pki/issued_certs/www.example.com-DE:CA:FB:AD:00:00:00:00.crt signing_policy: A signing policy that should be used to create this certificate. Signing policies should be defined in the minion configuration, or in a minion pillar. It should be a yaml formatted list of arguments which will override any arguments passed to this function. If the ``minions`` key is included in the signing policy, only minions matching that pattern (see match.glob and match.compound) will be permitted to remotely request certificates from that policy. Example: .. code-block:: yaml x509_signing_policies: www: - minions: 'www*' - signing_private_key: /etc/pki/ca.key - signing_cert: /etc/pki/ca.crt - C: US - ST: Utah - L: Salt Lake City - basicConstraints: "critical CA:false" - keyUsage: "critical cRLSign, keyCertSign" - subjectKeyIdentifier: hash - authorityKeyIdentifier: keyid,issuer:always - days_valid: 90 - copypath: /etc/pki/issued_certs/ The above signing policy can be invoked with ``signing_policy=www`` ext_mapping: Provide additional X509v3 extension mappings. This argument should be in the form of a dictionary and should include both the OID and the friendly name for the extension. .. versionadded:: Neon CLI Example: .. code-block:: bash salt '*' x509.create_certificate path=/etc/pki/myca.crt signing_private_key='/etc/pki/myca.key' csr='/etc/pki/myca.csr'}
[ "Create", "an", "X509", "certificate", "." ]
python
train
tevino/mongu
mongu.py
https://github.com/tevino/mongu/blob/98f15cdb9e5906062f2d5088c7bf774ab007c6e5/mongu.py#L34-L46
def register_model(self, model_cls):
        """Decorator for registering model."""
        if not getattr(model_cls, '_database_', None):
            raise ModelAttributeError('_database_ missing '
                                      'on %s!' % model_cls.__name__)
        if not getattr(model_cls, '_collection_', None):
            raise ModelAttributeError('_collection_ missing '
                                      'on %s!' % model_cls.__name__)

        model_cls._mongo_client_ = self.client
        logging.info('Registering Model ' + model_cls.__name__)
        return model_cls
[ "def", "register_model", "(", "self", ",", "model_cls", ")", ":", "if", "not", "getattr", "(", "model_cls", ",", "'_database_'", ")", ":", "raise", "ModelAttributeError", "(", "'_database_ missing '", "'on %s!'", "%", "model_cls", ".", "__name__", ")", "if", "not", "getattr", "(", "model_cls", ",", "'_collection_'", ")", ":", "raise", "ModelAttributeError", "(", "'_collection_ missing '", "'on %s!'", "%", "model_cls", ".", "__name__", ")", "model_cls", ".", "_mongo_client_", "=", "self", ".", "client", "logging", ".", "info", "(", "'Registering Model '", "+", "model_cls", ".", "__name__", ")", "return", "model_cls" ]
Decorator for registering model.
[ "Decorator", "for", "registering", "model", "." ]
python
train
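A minimal usage sketch for ``register_model`` above; the ``Mongu`` entry point and ``Model`` base class names are assumptions about the repo layout, only ``register_model``, ``_database_``, and ``_collection_`` come from the snippet itself.

.. code-block:: python

    from mongu import Mongu  # assumed entry point wrapping a MongoClient

    mongu = Mongu()

    @mongu.register_model
    class User(mongu.Model):      # base class name is an assumption
        _database_ = 'mydb'       # required, else ModelAttributeError
        _collection_ = 'users'    # required, else ModelAttributeError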
sernst/cauldron
cauldron/session/projects/project.py
https://github.com/sernst/cauldron/blob/4086aec9c038c402ea212c79fe8bd0d27104f9cf/cauldron/session/projects/project.py#L81-L107
def library_directories(self) -> typing.List[str]: """ The list of directories to all of the library locations """ def listify(value): return [value] if isinstance(value, str) else list(value) # If this is a project running remotely remove external library # folders as the remote shared libraries folder will contain all # of the necessary dependencies is_local_project = not self.is_remote_project folders = [ f for f in listify(self.settings.fetch('library_folders', ['libs'])) if is_local_project or not f.startswith('..') ] # Include the remote shared library folder as well folders.append('../__cauldron_shared_libs') # Include the project directory as well folders.append(self.source_directory) return [ environ.paths.clean(os.path.join(self.source_directory, folder)) for folder in folders ]
[ "def", "library_directories", "(", "self", ")", "->", "typing", ".", "List", "[", "str", "]", ":", "def", "listify", "(", "value", ")", ":", "return", "[", "value", "]", "if", "isinstance", "(", "value", ",", "str", ")", "else", "list", "(", "value", ")", "# If this is a project running remotely remove external library", "# folders as the remote shared libraries folder will contain all", "# of the necessary dependencies", "is_local_project", "=", "not", "self", ".", "is_remote_project", "folders", "=", "[", "f", "for", "f", "in", "listify", "(", "self", ".", "settings", ".", "fetch", "(", "'library_folders'", ",", "[", "'libs'", "]", ")", ")", "if", "is_local_project", "or", "not", "f", ".", "startswith", "(", "'..'", ")", "]", "# Include the remote shared library folder as well", "folders", ".", "append", "(", "'../__cauldron_shared_libs'", ")", "# Include the project directory as well", "folders", ".", "append", "(", "self", ".", "source_directory", ")", "return", "[", "environ", ".", "paths", ".", "clean", "(", "os", ".", "path", ".", "join", "(", "self", ".", "source_directory", ",", "folder", ")", ")", "for", "folder", "in", "folders", "]" ]
The list of directories to all of the library locations
[ "The", "list", "of", "directories", "to", "all", "of", "the", "library", "locations" ]
python
train
mongodb/mongo-python-driver
pymongo/collection.py
https://github.com/mongodb/mongo-python-driver/blob/c29c21449e3aae74154207058cf85fd94018d4cd/pymongo/collection.py#L1847-L1875
def __create_index(self, keys, index_options, session, **kwargs): """Internal create index helper. :Parameters: - `keys`: a list of tuples [(key, type), (key, type), ...] - `index_options`: a dict of index options. - `session` (optional): a :class:`~pymongo.client_session.ClientSession`. """ index_doc = helpers._index_document(keys) index = {"key": index_doc} collation = validate_collation_or_none( index_options.pop('collation', None)) index.update(index_options) with self._socket_for_writes(session) as sock_info: if collation is not None: if sock_info.max_wire_version < 5: raise ConfigurationError( 'Must be connected to MongoDB 3.4+ to use collations.') else: index['collation'] = collation cmd = SON([('createIndexes', self.name), ('indexes', [index])]) cmd.update(kwargs) self._command( sock_info, cmd, read_preference=ReadPreference.PRIMARY, codec_options=_UNICODE_REPLACE_CODEC_OPTIONS, write_concern=self._write_concern_for(session), session=session)
[ "def", "__create_index", "(", "self", ",", "keys", ",", "index_options", ",", "session", ",", "*", "*", "kwargs", ")", ":", "index_doc", "=", "helpers", ".", "_index_document", "(", "keys", ")", "index", "=", "{", "\"key\"", ":", "index_doc", "}", "collation", "=", "validate_collation_or_none", "(", "index_options", ".", "pop", "(", "'collation'", ",", "None", ")", ")", "index", ".", "update", "(", "index_options", ")", "with", "self", ".", "_socket_for_writes", "(", "session", ")", "as", "sock_info", ":", "if", "collation", "is", "not", "None", ":", "if", "sock_info", ".", "max_wire_version", "<", "5", ":", "raise", "ConfigurationError", "(", "'Must be connected to MongoDB 3.4+ to use collations.'", ")", "else", ":", "index", "[", "'collation'", "]", "=", "collation", "cmd", "=", "SON", "(", "[", "(", "'createIndexes'", ",", "self", ".", "name", ")", ",", "(", "'indexes'", ",", "[", "index", "]", ")", "]", ")", "cmd", ".", "update", "(", "kwargs", ")", "self", ".", "_command", "(", "sock_info", ",", "cmd", ",", "read_preference", "=", "ReadPreference", ".", "PRIMARY", ",", "codec_options", "=", "_UNICODE_REPLACE_CODEC_OPTIONS", ",", "write_concern", "=", "self", ".", "_write_concern_for", "(", "session", ")", ",", "session", "=", "session", ")" ]
Internal create index helper. :Parameters: - `keys`: a list of tuples [(key, type), (key, type), ...] - `index_options`: a dict of index options. - `session` (optional): a :class:`~pymongo.client_session.ClientSession`.
[ "Internal", "create", "index", "helper", "." ]
python
train
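``__create_index`` is the private helper behind PyMongo's public index API; a typical call through the public surface looks like this (standard, documented PyMongo usage):

.. code-block:: python

    from pymongo import MongoClient, ASCENDING, DESCENDING

    coll = MongoClient()['test_db']['events']
    # create_index() builds the (key, direction) list and the options dict,
    # then delegates to the helper above via a createIndexes command.
    coll.create_index([('user_id', ASCENDING), ('ts', DESCENDING)],
                      unique=True, name='user_ts_idx')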
PMBio/limix-backup
limix/deprecated/core.py
https://github.com/PMBio/limix-backup/blob/1e201fdb5c694d0d5506f207f3de65d8ef66146c/limix/deprecated/core.py#L2799-L2832
def check_covariance_Kgrad_x(covar, relchange=1E-5, threshold=1E-2, check_diag=True): """ check_covariance_Kgrad_x(ACovarianceFunction covar, limix::mfloat_t relchange=1E-5, limix::mfloat_t threshold=1E-2, bool check_diag=True) -> bool Parameters ---------- covar: limix::ACovarianceFunction & relchange: limix::mfloat_t threshold: limix::mfloat_t check_diag: bool check_covariance_Kgrad_x(ACovarianceFunction covar, limix::mfloat_t relchange=1E-5, limix::mfloat_t threshold=1E-2) -> bool Parameters ---------- covar: limix::ACovarianceFunction & relchange: limix::mfloat_t threshold: limix::mfloat_t check_covariance_Kgrad_x(ACovarianceFunction covar, limix::mfloat_t relchange=1E-5) -> bool Parameters ---------- covar: limix::ACovarianceFunction & relchange: limix::mfloat_t check_covariance_Kgrad_x(ACovarianceFunction covar) -> bool Parameters ---------- covar: limix::ACovarianceFunction & """ return _core.ACovarianceFunction_check_covariance_Kgrad_x(covar, relchange, threshold, check_diag)
[ "def", "check_covariance_Kgrad_x", "(", "covar", ",", "relchange", "=", "1E-5", ",", "threshold", "=", "1E-2", ",", "check_diag", "=", "True", ")", ":", "return", "_core", ".", "ACovarianceFunction_check_covariance_Kgrad_x", "(", "covar", ",", "relchange", ",", "threshold", ",", "check_diag", ")" ]
check_covariance_Kgrad_x(ACovarianceFunction covar, limix::mfloat_t relchange=1E-5, limix::mfloat_t threshold=1E-2, bool check_diag=True) -> bool Parameters ---------- covar: limix::ACovarianceFunction & relchange: limix::mfloat_t threshold: limix::mfloat_t check_diag: bool check_covariance_Kgrad_x(ACovarianceFunction covar, limix::mfloat_t relchange=1E-5, limix::mfloat_t threshold=1E-2) -> bool Parameters ---------- covar: limix::ACovarianceFunction & relchange: limix::mfloat_t threshold: limix::mfloat_t check_covariance_Kgrad_x(ACovarianceFunction covar, limix::mfloat_t relchange=1E-5) -> bool Parameters ---------- covar: limix::ACovarianceFunction & relchange: limix::mfloat_t check_covariance_Kgrad_x(ACovarianceFunction covar) -> bool Parameters ---------- covar: limix::ACovarianceFunction &
[ "check_covariance_Kgrad_x", "(", "ACovarianceFunction", "covar", "limix", "::", "mfloat_t", "relchange", "=", "1E", "-", "5", "limix", "::", "mfloat_t", "threshold", "=", "1E", "-", "2", "bool", "check_diag", "=", "True", ")", "-", ">", "bool" ]
python
train
sibirrer/lenstronomy
lenstronomy/Util/simulation_util.py
https://github.com/sibirrer/lenstronomy/blob/4edb100a4f3f4fdc4fac9b0032d2b0283d0aa1d6/lenstronomy/Util/simulation_util.py#L10-L33
def data_configure_simple(numPix, deltaPix, exposure_time=1, sigma_bkg=1, inverse=False):
    """
    configures the data keyword arguments with a coordinate grid centered at zero.

    :param numPix: number of pixel (numPix x numPix)
    :param deltaPix: pixel size (in angular units)
    :param exposure_time: exposure time
    :param sigma_bkg: background noise (Gaussian sigma)
    :param inverse: if True, coordinate system is ra to the left, if False, to the right
    :return: keyword arguments that can be used to construct a Data() class instance of lenstronomy
    """
    mean = 0.  # background mean flux (default zero)

    # 1d list of coordinates (x,y) of a numPix x numPix square grid, centered to zero
    x_grid, y_grid, ra_at_xy_0, dec_at_xy_0, x_at_radec_0, y_at_radec_0, Mpix2coord, Mcoord2pix = util.make_grid_with_coordtransform(numPix=numPix, deltapix=deltaPix, subgrid_res=1, inverse=inverse)
    # mask (1= model this pixel, 0= leave blank)
    exposure_map = np.ones((numPix, numPix)) * exposure_time  # individual exposure time/weight per pixel

    kwargs_data = {
        'background_rms': sigma_bkg,
        'exposure_map': exposure_map,
        'ra_at_xy_0': ra_at_xy_0,
        'dec_at_xy_0': dec_at_xy_0,
        'transform_pix2angle': Mpix2coord,
        'image_data': np.zeros((numPix, numPix))
    }
    return kwargs_data
[ "def", "data_configure_simple", "(", "numPix", ",", "deltaPix", ",", "exposure_time", "=", "1", ",", "sigma_bkg", "=", "1", ",", "inverse", "=", "False", ")", ":", "mean", "=", "0.", "# background mean flux (default zero)", "# 1d list of coordinates (x,y) of a numPix x numPix square grid, centered to zero", "x_grid", ",", "y_grid", ",", "ra_at_xy_0", ",", "dec_at_xy_0", ",", "x_at_radec_0", ",", "y_at_radec_0", ",", "Mpix2coord", ",", "Mcoord2pix", "=", "util", ".", "make_grid_with_coordtransform", "(", "numPix", "=", "numPix", ",", "deltapix", "=", "deltaPix", ",", "subgrid_res", "=", "1", ",", "inverse", "=", "inverse", ")", "# mask (1= model this pixel, 0= leave blanck)", "exposure_map", "=", "np", ".", "ones", "(", "(", "numPix", ",", "numPix", ")", ")", "*", "exposure_time", "# individual exposure time/weight per pixel", "kwargs_data", "=", "{", "'background_rms'", ":", "sigma_bkg", ",", "'exposure_map'", ":", "exposure_map", ",", "'ra_at_xy_0'", ":", "ra_at_xy_0", ",", "'dec_at_xy_0'", ":", "dec_at_xy_0", ",", "'transform_pix2angle'", ":", "Mpix2coord", ",", "'image_data'", ":", "np", ".", "zeros", "(", "(", "numPix", ",", "numPix", ")", ")", "}", "return", "kwargs_data" ]
configures the data keyword arguments with a coordinate grid centered at zero. :param numPix: number of pixel (numPix x numPix) :param deltaPix: pixel size (in angular units) :param exposure_time: exposure time :param sigma_bkg: background noise (Gaussian sigma) :param inverse: if True, coordinate system is ra to the left, if False, to the right :return: keyword arguments that can be used to construct a Data() class instance of lenstronomy
[ "configures", "the", "data", "keyword", "arguments", "with", "a", "coordinate", "grid", "centered", "at", "zero", "." ]
python
train
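A consumption sketch for the helper above; the ``ImageData`` import path reflects the current lenstronomy layout and is an assumption:

.. code-block:: python

    from lenstronomy.Data.imaging_data import ImageData  # path is an assumption
    from lenstronomy.Util.simulation_util import data_configure_simple

    kwargs_data = data_configure_simple(numPix=100, deltaPix=0.05,
                                        exposure_time=100, sigma_bkg=0.01)
    data = ImageData(**kwargs_data)  # 100x100 pixel grid centered at zero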
geographika/mappyfile
mappyfile/transformer.py
https://github.com/geographika/mappyfile/blob/aecbc5e66ec06896bc4c5db41313503468829d00/mappyfile/transformer.py#L293-L316
def check_composite_tokens(self, name, tokens): """ Return the key and contents of a KEY..END block for PATTERN, POINTS, and PROJECTION """ assert len(tokens) >= 2 key = tokens[0] assert key.value.lower() == name assert tokens[-1].value.lower() == "end" if len(tokens) == 2: body = [] # empty TYPE..END block else: body = tokens[1:-1] body_tokens = [] for t in body: if isinstance(t, dict): body_tokens.append(t["__tokens__"]) else: body_tokens.append(t) return key, body_tokens
[ "def", "check_composite_tokens", "(", "self", ",", "name", ",", "tokens", ")", ":", "assert", "len", "(", "tokens", ")", ">=", "2", "key", "=", "tokens", "[", "0", "]", "assert", "key", ".", "value", ".", "lower", "(", ")", "==", "name", "assert", "tokens", "[", "-", "1", "]", ".", "value", ".", "lower", "(", ")", "==", "\"end\"", "if", "len", "(", "tokens", ")", "==", "2", ":", "body", "=", "[", "]", "# empty TYPE..END block", "else", ":", "body", "=", "tokens", "[", "1", ":", "-", "1", "]", "body_tokens", "=", "[", "]", "for", "t", "in", "body", ":", "if", "isinstance", "(", "t", ",", "dict", ")", ":", "body_tokens", ".", "append", "(", "t", "[", "\"__tokens__\"", "]", ")", "else", ":", "body_tokens", ".", "append", "(", "t", ")", "return", "key", ",", "body_tokens" ]
Return the key and contents of a KEY..END block for PATTERN, POINTS, and PROJECTION
[ "Return", "the", "key", "and", "contents", "of", "a", "KEY", "..", "END", "block", "for", "PATTERN", "POINTS", "and", "PROJECTION" ]
python
train
iotaledger/iota.lib.py
iota/bin/__init__.py
https://github.com/iotaledger/iota.lib.py/blob/97cdd1e241498446b46157b79b2a1ea2ec6d387a/iota/bin/__init__.py#L63-L78
def run_from_argv(self, argv=None): # type: (Optional[tuple]) -> int """ Executes the command from a collection of arguments (e.g., :py:data`sys.argv`) and returns the exit code. :param argv: Arguments to pass to the argument parser. If ``None``, defaults to ``sys.argv[1:]``. """ exit_code = self.execute(**self.parse_argv(argv)) if exit_code is None: exit_code = 0 return exit_code
[ "def", "run_from_argv", "(", "self", ",", "argv", "=", "None", ")", ":", "# type: (Optional[tuple]) -> int", "exit_code", "=", "self", ".", "execute", "(", "*", "*", "self", ".", "parse_argv", "(", "argv", ")", ")", "if", "exit_code", "is", "None", ":", "exit_code", "=", "0", "return", "exit_code" ]
Executes the command from a collection of arguments (e.g., :py:data`sys.argv`) and returns the exit code. :param argv: Arguments to pass to the argument parser. If ``None``, defaults to ``sys.argv[1:]``.
[ "Executes", "the", "command", "from", "a", "collection", "of", "arguments", "(", "e", ".", "g", ".", ":", "py", ":", "data", "sys", ".", "argv", ")", "and", "returns", "the", "exit", "code", "." ]
python
test
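A sketch of the intended subclass pattern; the ``HelloCommand`` class is hypothetical and the base class name is an assumption, only ``run_from_argv`` and its ``None`` -> ``0`` exit-code mapping come from the snippet:

.. code-block:: python

    import sys

    class HelloCommand(IotaCommandLineApp):  # base class name is an assumption
        def execute(self, **kwargs):
            print('hello')
            # Returning None is mapped to exit code 0 by run_from_argv().

    if __name__ == '__main__':
        sys.exit(HelloCommand().run_from_argv())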
mozilla-releng/scriptworker
scriptworker/ed25519.py
https://github.com/mozilla-releng/scriptworker/blob/8e97bbd83b9b578565ec57904c966dd6ae4ef0ae/scriptworker/ed25519.py#L42-L57
def ed25519_public_key_from_string(string):
    """Create an ed25519 public key from ``string``, a base64-encoded public key.

    Args:
        string (str): the base64-encoded public key bytes.

    Returns:
        Ed25519PublicKey: the public key

    """
    try:
        return Ed25519PublicKey.from_public_bytes(
            base64.b64decode(string)
        )
    except (UnsupportedAlgorithm, Base64Error) as exc:
        raise ScriptWorkerEd25519Error("Can't create Ed25519PublicKey: {}!".format(str(exc)))
[ "def", "ed25519_public_key_from_string", "(", "string", ")", ":", "try", ":", "return", "Ed25519PublicKey", ".", "from_public_bytes", "(", "base64", ".", "b64decode", "(", "string", ")", ")", "except", "(", "UnsupportedAlgorithm", ",", "Base64Error", ")", "as", "exc", ":", "raise", "ScriptWorkerEd25519Error", "(", "\"Can't create Ed25519PublicKey: {}!\"", ".", "format", "(", "str", "(", "exc", ")", ")", ")" ]
Create an ed25519 public key from ``string``, a base64-encoded public key.

    Args:
        string (str): the base64-encoded public key bytes.

    Returns:
        Ed25519PublicKey: the public key
[ "Create", "an", "ed25519", "public", "key", "from", "string", "which", "is", "a", "seed", "." ]
python
train
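A round-trip sketch using the ``cryptography`` primitives the helper wraps; ``Ed25519PrivateKey.generate`` and raw-encoded ``public_bytes`` are documented parts of that library's API:

.. code-block:: python

    import base64
    from cryptography.hazmat.primitives.asymmetric.ed25519 import Ed25519PrivateKey
    from cryptography.hazmat.primitives.serialization import Encoding, PublicFormat

    # Generate a key pair and serialize the public half as base64 text.
    raw = Ed25519PrivateKey.generate().public_key().public_bytes(
        Encoding.Raw, PublicFormat.Raw)
    b64_text = base64.b64encode(raw).decode('ascii')

    public_key = ed25519_public_key_from_string(b64_text)  # helper from above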
jbloomlab/phydms
phydmslib/models.py
https://github.com/jbloomlab/phydms/blob/9cdebc10bafbe543c552d79486c7f950780ed3c0/phydmslib/models.py#L2011-L2017
def dlogprior(self, param): """Equal to value of `basemodel.dlogprior`.""" assert param in self.freeparams, "Invalid param: {0}".format(param) if param in self.distributionparams: return 0.0 else: return self._models[0].dlogprior(param)
[ "def", "dlogprior", "(", "self", ",", "param", ")", ":", "assert", "param", "in", "self", ".", "freeparams", ",", "\"Invalid param: {0}\"", ".", "format", "(", "param", ")", "if", "param", "in", "self", ".", "distributionparams", ":", "return", "0.0", "else", ":", "return", "self", ".", "_models", "[", "0", "]", ".", "dlogprior", "(", "param", ")" ]
Equal to value of `basemodel.dlogprior`.
[ "Equal", "to", "value", "of", "basemodel", ".", "dlogprior", "." ]
python
train
wmayner/pyphi
pyphi/macro.py
https://github.com/wmayner/pyphi/blob/deeca69a084d782a6fde7bf26f59e93b593c5d77/pyphi/macro.py#L610-L630
def reindex(self): """Squeeze the indices of this blackboxing to ``0..n``. Returns: Blackbox: a new, reindexed |Blackbox|. Example: >>> partition = ((3,), (2, 4)) >>> output_indices = (2, 3) >>> blackbox = Blackbox(partition, output_indices) >>> blackbox.reindex() Blackbox(partition=((1,), (0, 2)), output_indices=(0, 1)) """ _map = dict(zip(self.micro_indices, reindex(self.micro_indices))) partition = tuple( tuple(_map[index] for index in group) for group in self.partition ) output_indices = tuple(_map[i] for i in self.output_indices) return Blackbox(partition, output_indices)
[ "def", "reindex", "(", "self", ")", ":", "_map", "=", "dict", "(", "zip", "(", "self", ".", "micro_indices", ",", "reindex", "(", "self", ".", "micro_indices", ")", ")", ")", "partition", "=", "tuple", "(", "tuple", "(", "_map", "[", "index", "]", "for", "index", "in", "group", ")", "for", "group", "in", "self", ".", "partition", ")", "output_indices", "=", "tuple", "(", "_map", "[", "i", "]", "for", "i", "in", "self", ".", "output_indices", ")", "return", "Blackbox", "(", "partition", ",", "output_indices", ")" ]
Squeeze the indices of this blackboxing to ``0..n``. Returns: Blackbox: a new, reindexed |Blackbox|. Example: >>> partition = ((3,), (2, 4)) >>> output_indices = (2, 3) >>> blackbox = Blackbox(partition, output_indices) >>> blackbox.reindex() Blackbox(partition=((1,), (0, 2)), output_indices=(0, 1))
[ "Squeeze", "the", "indices", "of", "this", "blackboxing", "to", "0", "..", "n", "." ]
python
train
programa-stic/barf-project
barf/core/smt/smttranslator.py
https://github.com/programa-stic/barf-project/blob/18ed9e5eace55f7bf6015ec57f037c364099021c/barf/core/smt/smttranslator.py#L385-L405
def _translate_div(self, oprnd1, oprnd2, oprnd3):
        """Return a formula representation of a DIV instruction.
        """
        assert oprnd1.size and oprnd2.size and oprnd3.size
        assert oprnd1.size == oprnd2.size

        op1_var = self._translate_src_oprnd(oprnd1)
        op2_var = self._translate_src_oprnd(oprnd2)
        op3_var, op3_var_constrs = self._translate_dst_oprnd(oprnd3)

        if oprnd3.size > oprnd1.size:
            op1_var_zx = smtfunction.zero_extend(op1_var, oprnd3.size)
            op2_var_zx = smtfunction.zero_extend(op2_var, oprnd3.size)

            result = op1_var_zx.udiv(op2_var_zx)
        elif oprnd3.size < oprnd1.size:
            result = smtfunction.extract(op1_var.udiv(op2_var), 0, oprnd3.size)
        else:
            result = op1_var.udiv(op2_var)

        return [op3_var == result] + op3_var_constrs
[ "def", "_translate_div", "(", "self", ",", "oprnd1", ",", "oprnd2", ",", "oprnd3", ")", ":", "assert", "oprnd1", ".", "size", "and", "oprnd2", ".", "size", "and", "oprnd3", ".", "size", "assert", "oprnd1", ".", "size", "==", "oprnd2", ".", "size", "op1_var", "=", "self", ".", "_translate_src_oprnd", "(", "oprnd1", ")", "op2_var", "=", "self", ".", "_translate_src_oprnd", "(", "oprnd2", ")", "op3_var", ",", "op3_var_constrs", "=", "self", ".", "_translate_dst_oprnd", "(", "oprnd3", ")", "if", "oprnd3", ".", "size", ">", "oprnd1", ".", "size", ":", "op1_var_zx", "=", "smtfunction", ".", "zero_extend", "(", "op1_var", ",", "oprnd3", ".", "size", ")", "op2_var_zx", "=", "smtfunction", ".", "zero_extend", "(", "op2_var", ",", "oprnd3", ".", "size", ")", "result", "=", "op1_var_zx", ".", "udiv", "(", "op2_var_zx", ")", "elif", "oprnd3", ".", "size", "<", "oprnd1", ".", "size", ":", "result", "=", "smtfunction", ".", "extract", "(", "op1_var", ".", "udiv", "(", "op2_var", ")", ",", "0", ",", "oprnd3", ".", "size", ")", "else", ":", "result", "=", "op1_var", ".", "udiv", "(", "op2_var", ")", "return", "[", "op3_var", "==", "result", "]", "+", "op3_var_constrs" ]
Return a formula representation of a DIV instruction.
[ "Return", "a", "formula", "representation", "of", "an", "DIV", "instruction", "." ]
python
train
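The operand-width handling above (zero-extend when the destination is wider, extract the low bits when it is narrower) can be mirrored with plain integers:

.. code-block:: python

    def udiv_with_width(a, b, src_size, dst_size):
        """Unsigned division with the same size handling as _translate_div."""
        mask = (1 << src_size) - 1
        result = (a & mask) // (b & mask)       # zero-extension is a no-op on ints
        return result & ((1 << dst_size) - 1)   # extract the low dst_size bits

    assert udiv_with_width(0x1F0, 0x10, src_size=16, dst_size=8) == 0x1F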
Qiskit/qiskit-terra
qiskit/providers/basicaer/qasm_simulator.py
https://github.com/Qiskit/qiskit-terra/blob/d4f58d903bc96341b816f7c35df936d6421267d1/qiskit/providers/basicaer/qasm_simulator.py#L374-L404
def run(self, qobj, backend_options=None):
        """Run qobj asynchronously.

        Args:
            qobj (Qobj): payload of the experiment
            backend_options (dict): backend options

        Returns:
            BasicAerJob: derived from BaseJob

        Additional Information:
            backend_options: Is a dict of options for the backend. It may contain
                * "initial_statevector": vector_like

            The "initial_statevector" option specifies a custom initial
            statevector for the simulator to be used instead of the all
            zero state. The size of this vector must be correct for the number
            of qubits in all experiments in the qobj.

            Example::

                backend_options = {
                    "initial_statevector": np.array([1, 0, 0, 1j]) / np.sqrt(2),
                }
        """
        self._set_options(qobj_config=qobj.config,
                          backend_options=backend_options)
        job_id = str(uuid.uuid4())
        job = BasicAerJob(self, job_id, self._run_job, qobj)
        job.submit()
        return job
[ "def", "run", "(", "self", ",", "qobj", ",", "backend_options", "=", "None", ")", ":", "self", ".", "_set_options", "(", "qobj_config", "=", "qobj", ".", "config", ",", "backend_options", "=", "backend_options", ")", "job_id", "=", "str", "(", "uuid", ".", "uuid4", "(", ")", ")", "job", "=", "BasicAerJob", "(", "self", ",", "job_id", ",", "self", ".", "_run_job", ",", "qobj", ")", "job", ".", "submit", "(", ")", "return", "job" ]
Run qobj asynchronously.

        Args:
            qobj (Qobj): payload of the experiment
            backend_options (dict): backend options

        Returns:
            BasicAerJob: derived from BaseJob

        Additional Information:
            backend_options: Is a dict of options for the backend. It may contain
                * "initial_statevector": vector_like

            The "initial_statevector" option specifies a custom initial
            statevector for the simulator to be used instead of the all
            zero state. The size of this vector must be correct for the number
            of qubits in all experiments in the qobj.

            Example::

                backend_options = {
                    "initial_statevector": np.array([1, 0, 0, 1j]) / np.sqrt(2),
                }
[ "Run", "qobj", "asynchronously", "." ]
python
test
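A hedged end-to-end sketch of the ``backend_options`` hook documented above; the ``assemble`` and ``BasicAer`` names follow the Terra generation this snippet comes from and may differ in later releases:

.. code-block:: python

    import numpy as np
    from qiskit import BasicAer, QuantumCircuit
    from qiskit.compiler import assemble

    qc = QuantumCircuit(2, 2)
    qc.cx(0, 1)
    qc.measure([0, 1], [0, 1])

    backend = BasicAer.get_backend('qasm_simulator')
    qobj = assemble(qc, backend, shots=1024)
    job = backend.run(qobj, backend_options={
        'initial_statevector': np.array([1, 0, 0, 1j]) / np.sqrt(2),
    })
    print(job.result().get_counts())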
gtalarico/airtable-python-wrapper
airtable/airtable.py
https://github.com/gtalarico/airtable-python-wrapper/blob/48b2d806178085b52a31817571e5a1fc3dce4045/airtable/airtable.py#L512-L534
def delete_by_field(self, field_name, field_value, **options): """ Deletes first record to match provided ``field_name`` and ``field_value``. >>> record = airtable.delete_by_field('Employee Id', 'DD13332454') Args: field_name (``str``): Name of field to match (column name). field_value (``str``): Value of field to match. Keyword Args: view (``str``, optional): The name or ID of a view. See :any:`ViewParam`. sort (``list``, optional): List of fields to sort by. Default order is ascending. See :any:`SortParam`. Returns: record (``dict``): Deleted Record """ record = self.match(field_name, field_value, **options) record_url = self.record_url(record['id']) return self._delete(record_url)
[ "def", "delete_by_field", "(", "self", ",", "field_name", ",", "field_value", ",", "*", "*", "options", ")", ":", "record", "=", "self", ".", "match", "(", "field_name", ",", "field_value", ",", "*", "*", "options", ")", "record_url", "=", "self", ".", "record_url", "(", "record", "[", "'id'", "]", ")", "return", "self", ".", "_delete", "(", "record_url", ")" ]
Deletes first record to match provided ``field_name`` and ``field_value``. >>> record = airtable.delete_by_field('Employee Id', 'DD13332454') Args: field_name (``str``): Name of field to match (column name). field_value (``str``): Value of field to match. Keyword Args: view (``str``, optional): The name or ID of a view. See :any:`ViewParam`. sort (``list``, optional): List of fields to sort by. Default order is ascending. See :any:`SortParam`. Returns: record (``dict``): Deleted Record
[ "Deletes", "first", "record", "to", "match", "provided", "field_name", "and", "field_value", ".", ">>>", "record", "=", "airtable", ".", "delete_by_field", "(", "Employee", "Id", "DD13332454", ")", "Args", ":", "field_name", "(", "str", ")", ":", "Name", "of", "field", "to", "match", "(", "column", "name", ")", ".", "field_value", "(", "str", ")", ":", "Value", "of", "field", "to", "match", ".", "Keyword", "Args", ":", "view", "(", "str", "optional", ")", ":", "The", "name", "or", "ID", "of", "a", "view", ".", "See", ":", "any", ":", "ViewParam", ".", "sort", "(", "list", "optional", ")", ":", "List", "of", "fields", "to", "sort", "by", ".", "Default", "order", "is", "ascending", ".", "See", ":", "any", ":", "SortParam", ".", "Returns", ":", "record", "(", "dict", ")", ":", "Deleted", "Record" ]
python
train
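Usage sketch; the base key, table name, and API key are placeholders:

.. code-block:: python

    from airtable import Airtable

    airtable = Airtable('appExampleBase', 'Employees', api_key='keyXXXXXXXX')
    deleted = airtable.delete_by_field('Employee Id', 'DD13332454',
                                       view='All Employees')
    print(deleted['id'])  # id of the removed record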
EmbodiedCognition/pagoda
pagoda/skeleton.py
https://github.com/EmbodiedCognition/pagoda/blob/8892f847026d98aba8646ecbc4589397e6dec7bd/pagoda/skeleton.py#L348-L372
def set_target_angles(self, angles): '''Move each joint toward a target angle. This method uses a PID controller to set a target angular velocity for each degree of freedom in the skeleton, based on the difference between the current and the target angle for the respective DOF. PID parameters are by default set to achieve a tiny bit less than complete convergence in one time step, using only the P term (i.e., the P coefficient is set to 1 - \delta, while I and D coefficients are set to 0). PID parameters can be updated by calling the `set_pid_params` method. Parameters ---------- angles : list of float A list of the target angles for every joint in the skeleton. ''' j = 0 for joint in self.joints: velocities = [ ctrl(tgt - cur, self.world.dt) for cur, tgt, ctrl in zip(joint.angles, angles[j:j+joint.ADOF], joint.controllers)] joint.velocities = velocities j += joint.ADOF
[ "def", "set_target_angles", "(", "self", ",", "angles", ")", ":", "j", "=", "0", "for", "joint", "in", "self", ".", "joints", ":", "velocities", "=", "[", "ctrl", "(", "tgt", "-", "cur", ",", "self", ".", "world", ".", "dt", ")", "for", "cur", ",", "tgt", ",", "ctrl", "in", "zip", "(", "joint", ".", "angles", ",", "angles", "[", "j", ":", "j", "+", "joint", ".", "ADOF", "]", ",", "joint", ".", "controllers", ")", "]", "joint", ".", "velocities", "=", "velocities", "j", "+=", "joint", ".", "ADOF" ]
Move each joint toward a target angle. This method uses a PID controller to set a target angular velocity for each degree of freedom in the skeleton, based on the difference between the current and the target angle for the respective DOF. PID parameters are by default set to achieve a tiny bit less than complete convergence in one time step, using only the P term (i.e., the P coefficient is set to 1 - \delta, while I and D coefficients are set to 0). PID parameters can be updated by calling the `set_pid_params` method. Parameters ---------- angles : list of float A list of the target angles for every joint in the skeleton.
[ "Move", "each", "joint", "toward", "a", "target", "angle", "." ]
python
valid
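A standalone numeric sketch of the P-only control law described above: with the P coefficient at 1 - delta (delta = 0.01 here, an illustrative value), a single step covers almost the whole remaining error:

.. code-block:: python

    dt = 1.0 / 60.0                      # world time step
    kp = (1.0 - 0.01) / dt               # P gain; I and D terms are zero

    current, target = 0.20, 1.00         # joint angle in radians
    velocity = kp * (target - current)   # commanded angular velocity
    current += velocity * dt             # -> 0.992, just short of the target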
ultradns/python_rest_api_client
ultra_rest_client/ultra_rest_client.py
https://github.com/ultradns/python_rest_api_client/blob/e4095f28f5cb5e258b768c06ef7cf8b1915aa5ec/ultra_rest_client/ultra_rest_client.py#L283-L308
def get_rrsets_by_type_owner(self, zone_name, rtype, owner_name, q=None, **kwargs): """Returns the list of RRSets in the specified zone of the specified type. Arguments: zone_name -- The name of the zone. rtype -- The type of the RRSets. This can be numeric (1) or if a well-known name is defined for the type (A), you can use it instead. owner_name -- The owner name for the RRSet. If no trailing dot is supplied, the owner_name is assumed to be relative (foo). If a trailing dot is supplied, the owner name is assumed to be absolute (foo.zonename.com.) Keyword Arguments: q -- The search parameters, in a dict. Valid keys are: ttl - must match the TTL for the rrset value - substring match of the first BIND field value sort -- The sort column used to order the list. Valid values for the sort field are: TTL TYPE reverse -- Whether the list is ascending(False) or descending(True) offset -- The position in the list for the first returned element(0 based) limit -- The maximum number of rows to be returned. """ uri = "/v1/zones/" + zone_name + "/rrsets/" + rtype + "/" + owner_name params = build_params(q, kwargs) return self.rest_api_connection.get(uri, params)
[ "def", "get_rrsets_by_type_owner", "(", "self", ",", "zone_name", ",", "rtype", ",", "owner_name", ",", "q", "=", "None", ",", "*", "*", "kwargs", ")", ":", "uri", "=", "\"/v1/zones/\"", "+", "zone_name", "+", "\"/rrsets/\"", "+", "rtype", "+", "\"/\"", "+", "owner_name", "params", "=", "build_params", "(", "q", ",", "kwargs", ")", "return", "self", ".", "rest_api_connection", ".", "get", "(", "uri", ",", "params", ")" ]
Returns the list of RRSets in the specified zone of the specified type. Arguments: zone_name -- The name of the zone. rtype -- The type of the RRSets. This can be numeric (1) or if a well-known name is defined for the type (A), you can use it instead. owner_name -- The owner name for the RRSet. If no trailing dot is supplied, the owner_name is assumed to be relative (foo). If a trailing dot is supplied, the owner name is assumed to be absolute (foo.zonename.com.) Keyword Arguments: q -- The search parameters, in a dict. Valid keys are: ttl - must match the TTL for the rrset value - substring match of the first BIND field value sort -- The sort column used to order the list. Valid values for the sort field are: TTL TYPE reverse -- Whether the list is ascending(False) or descending(True) offset -- The position in the list for the first returned element(0 based) limit -- The maximum number of rows to be returned.
[ "Returns", "the", "list", "of", "RRSets", "in", "the", "specified", "zone", "of", "the", "specified", "type", "." ]
python
train
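Usage sketch; credentials and the zone are placeholders, and the client constructor follows this wrapper's usual ``(username, password)`` form:

.. code-block:: python

    from ultra_rest_client import RestApiClient

    client = RestApiClient('username', 'password')
    rrsets = client.get_rrsets_by_type_owner(
        'example.com.', 'A', 'www',
        q={'value': '192.0.2'}, sort='TTL', reverse=True, limit=50)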
adafruit/Adafruit_Python_PN532
Adafruit_PN532/PN532.py
https://github.com/adafruit/Adafruit_Python_PN532/blob/343521a8ec842ea82f680a5ed868fee16e9609bd/Adafruit_PN532/PN532.py#L301-L330
def call_function(self, command, response_length=0, params=[], timeout_sec=1): """Send specified command to the PN532 and expect up to response_length bytes back in a response. Note that less than the expected bytes might be returned! Params can optionally specify an array of bytes to send as parameters to the function call. Will wait up to timeout_secs seconds for a response and return a bytearray of response bytes, or None if no response is available within the timeout. """ # Build frame data with command and parameters. data = bytearray(2+len(params)) data[0] = PN532_HOSTTOPN532 data[1] = command & 0xFF data[2:] = params # Send frame and wait for response. self._write_frame(data) if not self._wait_ready(timeout_sec): return None # Verify ACK response and wait to be ready for function response. response = self._read_data(len(PN532_ACK)) if response != PN532_ACK: raise RuntimeError('Did not receive expected ACK from PN532!') if not self._wait_ready(timeout_sec): return None # Read response bytes. response = self._read_frame(response_length+2) # Check that response is for the called function. if not (response[0] == PN532_PN532TOHOST and response[1] == (command+1)): raise RuntimeError('Received unexpected command response!') # Return response data. return response[2:]
[ "def", "call_function", "(", "self", ",", "command", ",", "response_length", "=", "0", ",", "params", "=", "[", "]", ",", "timeout_sec", "=", "1", ")", ":", "# Build frame data with command and parameters.", "data", "=", "bytearray", "(", "2", "+", "len", "(", "params", ")", ")", "data", "[", "0", "]", "=", "PN532_HOSTTOPN532", "data", "[", "1", "]", "=", "command", "&", "0xFF", "data", "[", "2", ":", "]", "=", "params", "# Send frame and wait for response.", "self", ".", "_write_frame", "(", "data", ")", "if", "not", "self", ".", "_wait_ready", "(", "timeout_sec", ")", ":", "return", "None", "# Verify ACK response and wait to be ready for function response.", "response", "=", "self", ".", "_read_data", "(", "len", "(", "PN532_ACK", ")", ")", "if", "response", "!=", "PN532_ACK", ":", "raise", "RuntimeError", "(", "'Did not receive expected ACK from PN532!'", ")", "if", "not", "self", ".", "_wait_ready", "(", "timeout_sec", ")", ":", "return", "None", "# Read response bytes.", "response", "=", "self", ".", "_read_frame", "(", "response_length", "+", "2", ")", "# Check that response is for the called function.", "if", "not", "(", "response", "[", "0", "]", "==", "PN532_PN532TOHOST", "and", "response", "[", "1", "]", "==", "(", "command", "+", "1", ")", ")", ":", "raise", "RuntimeError", "(", "'Received unexpected command response!'", ")", "# Return response data.", "return", "response", "[", "2", ":", "]" ]
Send specified command to the PN532 and expect up to response_length bytes back in a response. Note that less than the expected bytes might be returned! Params can optionally specify an array of bytes to send as parameters to the function call. Will wait up to timeout_secs seconds for a response and return a bytearray of response bytes, or None if no response is available within the timeout.
[ "Send", "specified", "command", "to", "the", "PN532", "and", "expect", "up", "to", "response_length", "bytes", "back", "in", "a", "response", ".", "Note", "that", "less", "than", "the", "expected", "bytes", "might", "be", "returned!", "Params", "can", "optionally", "specify", "an", "array", "of", "bytes", "to", "send", "as", "parameters", "to", "the", "function", "call", ".", "Will", "wait", "up", "to", "timeout_secs", "seconds", "for", "a", "response", "and", "return", "a", "bytearray", "of", "response", "bytes", "or", "None", "if", "no", "response", "is", "available", "within", "the", "timeout", "." ]
python
train
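A sketch of issuing one concrete command through ``call_function``; ``pn532`` stands for an initialized driver instance, and the command byte follows the PN532 datasheet (GetFirmwareVersion = 0x02):

.. code-block:: python

    PN532_COMMAND_GETFIRMWAREVERSION = 0x02

    response = pn532.call_function(PN532_COMMAND_GETFIRMWAREVERSION,
                                   response_length=4)
    if response is None:
        raise RuntimeError('Timed out waiting for the PN532!')
    ic, ver, rev, support = response      # four payload bytes
    print('Firmware version: {0}.{1}'.format(ver, rev))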
ehansis/ozelot
ozelot/cache.py
https://github.com/ehansis/ozelot/blob/948675e02eb6fca940450f5cb814f53e97159e5b/ozelot/cache.py#L224-L239
def has(self, url, xpath=None):
        """Check if a URL (and xpath) exists in the cache

        If DB has not been initialized yet, returns ``False`` for any URL.

        Args:
            url (str): URL to look up
            xpath (str): xpath to search (may be ``None``)

        Returns:
            bool: ``True`` if URL exists, ``False`` otherwise
        """
        if not path.exists(self.db_path):
            return False

        return self._query(url, xpath).count() > 0
[ "def", "has", "(", "self", ",", "url", ",", "xpath", "=", "None", ")", ":", "if", "not", "path", ".", "exists", "(", "self", ".", "db_path", ")", ":", "return", "False", "return", "self", ".", "_query", "(", "url", ",", "xpath", ")", ".", "count", "(", ")", ">", "0" ]
Check if a URL (and xpath) exists in the cache

If DB has not been initialized yet, returns ``False`` for any URL.

Args:
    url (str): URL to look up
    xpath (str): xpath to search (may be ``None``)

Returns:
    bool: ``True`` if URL exists, ``False`` otherwise
[ "Check", "if", "a", "URL", "(", "and", "xpath", ")", "exists", "in", "the", "cache" ]
python
train
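Usage sketch; the cache class name is an assumption, only ``has()`` and its signature come from the code above:

.. code-block:: python

    from ozelot.cache import RequestCache  # class name is an assumption

    cache = RequestCache()
    if not cache.has('http://example.com/page', xpath='//table'):
        pass  # fetch the page and store it before querying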
zengbin93/zb
zb/dev/decorator.py
https://github.com/zengbin93/zb/blob/ccdb384a0b5801b459933220efcb71972c2b89a7/zb/dev/decorator.py#L63-L71
def _update_doc(self, func_doc):
        """Merge and reformat the original docstring: the first line becomes
        deprecated_doc (``Deprecated: tip_info``), followed by the original
        func_doc on the next line."""
        deprecated_doc = "Deprecated"
        if self.tip_info:
            deprecated_doc = "{}: {}".format(deprecated_doc, self.tip_info)
        if func_doc:
            func_doc = "{}\n{}".format(deprecated_doc, func_doc)
        return func_doc
[ "def", "_update_doc", "(", "self", ",", "func_doc", ")", ":", "deprecated_doc", "=", "\"Deprecated\"", "if", "self", ".", "tip_info", ":", "deprecated_doc", "=", "\"{}: {}\"", ".", "format", "(", "deprecated_doc", ",", "self", ".", "tip_info", ")", "if", "func_doc", ":", "func_doc", "=", "\"{}\\n{}\"", ".", "format", "(", "deprecated_doc", ",", "func_doc", ")", "return", "func_doc" ]
Merge and reformat the original docstring: the first line becomes deprecated_doc (Deprecated: tip_info), followed by the original func_doc on the next line.
[ "更新文档信息,把原来的文档信息进行合并格式化", "即第一行为deprecated_doc", "(", "Deprecated", ":", "tip_info", ")", ",下一行为原始func_doc" ]
python
train
zhanglab/psamm
psamm/fluxanalysis.py
https://github.com/zhanglab/psamm/blob/dc427848c4f9d109ca590f0afa024c63b685b3f4/psamm/fluxanalysis.py#L293-L320
def flux_balance(model, reaction, tfba, solver):
    """Run flux balance analysis on the given model.

    Yields the reaction id and flux value for each reaction in the model.

    This is a convenience function for setting up and running the
    FluxBalanceProblem. If the FBA is solved for more than one parameter
    it is recommended to set up and reuse the FluxBalanceProblem manually
    for a speed up.

    This is an implementation of flux balance analysis (FBA) as described in
    [Orth10]_ and [Fell86]_.

    Args:
        model: MetabolicModel to solve.
        reaction: Reaction to maximize. If a dict is given, this instead
            represents the objective function weights on each reaction.
        tfba: If True enable thermodynamic constraints.
        solver: LP solver instance to use.

    Returns:
        Iterator over reaction ID and reaction flux pairs.
    """

    fba = _get_fba_problem(model, tfba, solver)
    fba.maximize(reaction)
    for reaction in model.reactions:
        yield reaction, fba.get_flux(reaction)
[ "def", "flux_balance", "(", "model", ",", "reaction", ",", "tfba", ",", "solver", ")", ":", "fba", "=", "_get_fba_problem", "(", "model", ",", "tfba", ",", "solver", ")", "fba", ".", "maximize", "(", "reaction", ")", "for", "reaction", "in", "model", ".", "reactions", ":", "yield", "reaction", ",", "fba", ".", "get_flux", "(", "reaction", ")" ]
Run flux balance analysis on the given model.

    Yields the reaction id and flux value for each reaction in the model.

    This is a convenience function for setting up and running the
    FluxBalanceProblem. If the FBA is solved for more than one parameter
    it is recommended to set up and reuse the FluxBalanceProblem manually
    for a speed up.

    This is an implementation of flux balance analysis (FBA) as described in
    [Orth10]_ and [Fell86]_.

    Args:
        model: MetabolicModel to solve.
        reaction: Reaction to maximize. If a dict is given, this instead
            represents the objective function weights on each reaction.
        tfba: If True enable thermodynamic constraints.
        solver: LP solver instance to use.

    Returns:
        Iterator over reaction ID and reaction flux pairs.
[ "Run", "flux", "balance", "analysis", "on", "the", "given", "model", "." ]
python
train
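A consumption sketch: ``flux_balance`` yields (reaction id, flux) pairs lazily; the solver construction follows psamm's generic LP-solver module and the reaction name is a placeholder:

.. code-block:: python

    from psamm.lpsolver import generic

    solver = generic.Solver()
    for reaction_id, flux in flux_balance(model, 'Biomass', tfba=False,
                                          solver=solver):
        if abs(flux) > 1e-9:               # skip numerically zero fluxes
            print('{}\t{}'.format(reaction_id, flux))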
Yelp/kafka-utils
kafka_utils/util/offsets.py
https://github.com/Yelp/kafka-utils/blob/cdb4d64308f3079ee0873250bf7b34d0d94eca50/kafka_utils/util/offsets.py#L526-L536
def nullify_offsets(offsets): """Modify offsets metadata so that the partition offsets have null payloads. :param offsets: dict {<topic>: {<partition>: <offset>}} :returns: a dict topic: partition: offset """ result = {} for topic, partition_offsets in six.iteritems(offsets): result[topic] = _nullify_partition_offsets(partition_offsets) return result
[ "def", "nullify_offsets", "(", "offsets", ")", ":", "result", "=", "{", "}", "for", "topic", ",", "partition_offsets", "in", "six", ".", "iteritems", "(", "offsets", ")", ":", "result", "[", "topic", "]", "=", "_nullify_partition_offsets", "(", "partition_offsets", ")", "return", "result" ]
Modify offsets metadata so that the partition offsets have null payloads. :param offsets: dict {<topic>: {<partition>: <offset>}} :returns: a dict topic: partition: offset
[ "Modify", "offsets", "metadata", "so", "that", "the", "partition", "offsets", "have", "null", "payloads", "." ]
python
train
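A shape sketch only: topic and partition keys survive unchanged, while each per-partition payload is rewritten by ``_nullify_partition_offsets`` (not shown here):

.. code-block:: python

    offsets = {
        'topic_a': {0: 42, 1: 17},
        'topic_b': {0: 7},
    }
    nullified = nullify_offsets(offsets)
    assert set(nullified) == {'topic_a', 'topic_b'}  # topic keys preserved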
dddomodossola/remi
remi/gui.py
https://github.com/dddomodossola/remi/blob/85206f62220662bb7ecd471042268def71ccad28/remi/gui.py#L526-L537
def set_style(self, style): """ Allows to set style properties for the widget. Args: style (str or dict): The style property dictionary or json string. """ if style is not None: try: self.style.update(style) except ValueError: for s in style.split(';'): k, v = s.split(':', 1) self.style[k.strip()] = v.strip()
[ "def", "set_style", "(", "self", ",", "style", ")", ":", "if", "style", "is", "not", "None", ":", "try", ":", "self", ".", "style", ".", "update", "(", "style", ")", "except", "ValueError", ":", "for", "s", "in", "style", ".", "split", "(", "';'", ")", ":", "k", ",", "v", "=", "s", ".", "split", "(", "':'", ",", "1", ")", "self", ".", "style", "[", "k", ".", "strip", "(", ")", "]", "=", "v", ".", "strip", "(", ")" ]
Allows to set style properties for the widget. Args: style (str or dict): The style property dictionary or json string.
[ "Allows", "to", "set", "style", "properties", "for", "the", "widget", ".", "Args", ":", "style", "(", "str", "or", "dict", ")", ":", "The", "style", "property", "dictionary", "or", "json", "string", "." ]
python
train
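Both accepted input forms from the docstring above; a string falls back to the ``k: v; ...`` parser because updating a dict from a string raises ValueError:

.. code-block:: python

    # `widget` is any remi widget instance
    widget.set_style({'background-color': 'red', 'font-size': '12px'})
    widget.set_style('background-color: red; font-size: 12px')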
drericstrong/pyedna
pyedna/ezdna.py
https://github.com/drericstrong/pyedna/blob/b8f8f52def4f26bb4f3a993ce3400769518385f6/pyedna/ezdna.py#L609-L642
def GetTagDescription(tag_name): """ Gets the current description of a point configured in a real-time eDNA service. :param tag_name: fully-qualified (site.service.tag) eDNA tag :return: tag description """ # Check if the point even exists if not DoesIDExist(tag_name): warnings.warn("WARNING- " + tag_name + " does not exist or " + "connection was dropped. Try again if tag does exist.") return None # To get the point information for the service, we need the Site.Service split_tag = tag_name.split(".") # If the full Site.Service.Tag was not supplied, return the tag_name if len(split_tag) < 3: warnings.warn("WARNING- Please supply the full Site.Service.Tag.") return tag_name # The Site.Service will be the first two split strings site_service = split_tag[0] + "." + split_tag[1] # GetPoints will return a DataFrame with point information points = GetPoints(site_service) if tag_name in points.Tag.values: description = points[points.Tag == tag_name].Description.values[0] if description: return description else: return tag_name else: warnings.warn("WARNING- " + tag_name + " not found in service.") return None
[ "def", "GetTagDescription", "(", "tag_name", ")", ":", "# Check if the point even exists\r", "if", "not", "DoesIDExist", "(", "tag_name", ")", ":", "warnings", ".", "warn", "(", "\"WARNING- \"", "+", "tag_name", "+", "\" does not exist or \"", "+", "\"connection was dropped. Try again if tag does exist.\"", ")", "return", "None", "# To get the point information for the service, we need the Site.Service\r", "split_tag", "=", "tag_name", ".", "split", "(", "\".\"", ")", "# If the full Site.Service.Tag was not supplied, return the tag_name\r", "if", "len", "(", "split_tag", ")", "<", "3", ":", "warnings", ".", "warn", "(", "\"WARNING- Please supply the full Site.Service.Tag.\"", ")", "return", "tag_name", "# The Site.Service will be the first two split strings\r", "site_service", "=", "split_tag", "[", "0", "]", "+", "\".\"", "+", "split_tag", "[", "1", "]", "# GetPoints will return a DataFrame with point information\r", "points", "=", "GetPoints", "(", "site_service", ")", "if", "tag_name", "in", "points", ".", "Tag", ".", "values", ":", "description", "=", "points", "[", "points", ".", "Tag", "==", "tag_name", "]", ".", "Description", ".", "values", "[", "0", "]", "if", "description", ":", "return", "description", "else", ":", "return", "tag_name", "else", ":", "warnings", ".", "warn", "(", "\"WARNING- \"", "+", "tag_name", "+", "\" not found in service.\"", ")", "return", "None" ]
Gets the current description of a point configured in a real-time eDNA service. :param tag_name: fully-qualified (site.service.tag) eDNA tag :return: tag description
[ "Gets", "the", "current", "description", "of", "a", "point", "configured", "in", "a", "real", "-", "time", "eDNA", "service", ".", ":", "param", "tag_name", ":", "fully", "-", "qualified", "(", "site", ".", "service", ".", "tag", ")", "eDNA", "tag", ":", "return", ":", "tag", "description" ]
python
train
pywbem/pywbem
pywbem/cim_types.py
https://github.com/pywbem/pywbem/blob/e54ecb82c2211e289a268567443d60fdd489f1e4/pywbem/cim_types.py#L997-L1071
def cimtype(obj): """ Return the CIM data type name of a CIM typed object, as a string. For an array, the type is determined from the first array element (CIM arrays must be homogeneous w.r.t. the type of their elements). If the array is empty, that is not possible and :exc:`~py:exceptions.ValueError` is raised. Note that Python :term:`numbers <number>` are not valid input objects because determining their CIM data type (e.g. :class:`~pywbem.Uint8`, :class:`~pywbem.Real32`) would require knowing the value range. Therefore, :exc:`~py:exceptions.TypeError` is raised in this case. Parameters: obj (:term:`CIM data type`): The object whose CIM data type name is returned. Returns: :term:`string`: The CIM data type name of the object (e.g. ``"uint8"``). Raises: TypeError: The object does not have a valid CIM data type. ValueError: Cannot determine CIM data type from an empty array. """ if isinstance(obj, CIMType): return obj.cimtype if isinstance(obj, bool): return 'boolean' if isinstance(obj, (six.binary_type, six.text_type)): # accept both possible types return 'string' if isinstance(obj, list): try: obj = obj[0] except IndexError: raise ValueError("Cannot determine CIM data type from empty array") return cimtype(obj) if isinstance(obj, (datetime, timedelta)): return 'datetime' try: instancename_type = CIMInstanceName except NameError: # Defer import due to circular import dependencies: from pywbem.cim_obj import CIMInstanceName as instancename_type if isinstance(obj, instancename_type): return 'reference' try: instance_type = CIMInstance except NameError: # Defer import due to circular import dependencies: from pywbem.cim_obj import CIMInstance as instance_type if isinstance(obj, instance_type): # embedded instance return 'string' try: class_type = CIMClass except NameError: # Defer import due to circular import dependencies: from pywbem.cim_obj import CIMClass as class_type if isinstance(obj, class_type): # embedded class return 'string' raise TypeError( _format("Object does not have a valid CIM data type: {0!A}", obj))
[ "def", "cimtype", "(", "obj", ")", ":", "if", "isinstance", "(", "obj", ",", "CIMType", ")", ":", "return", "obj", ".", "cimtype", "if", "isinstance", "(", "obj", ",", "bool", ")", ":", "return", "'boolean'", "if", "isinstance", "(", "obj", ",", "(", "six", ".", "binary_type", ",", "six", ".", "text_type", ")", ")", ":", "# accept both possible types", "return", "'string'", "if", "isinstance", "(", "obj", ",", "list", ")", ":", "try", ":", "obj", "=", "obj", "[", "0", "]", "except", "IndexError", ":", "raise", "ValueError", "(", "\"Cannot determine CIM data type from empty array\"", ")", "return", "cimtype", "(", "obj", ")", "if", "isinstance", "(", "obj", ",", "(", "datetime", ",", "timedelta", ")", ")", ":", "return", "'datetime'", "try", ":", "instancename_type", "=", "CIMInstanceName", "except", "NameError", ":", "# Defer import due to circular import dependencies:", "from", "pywbem", ".", "cim_obj", "import", "CIMInstanceName", "as", "instancename_type", "if", "isinstance", "(", "obj", ",", "instancename_type", ")", ":", "return", "'reference'", "try", ":", "instance_type", "=", "CIMInstance", "except", "NameError", ":", "# Defer import due to circular import dependencies:", "from", "pywbem", ".", "cim_obj", "import", "CIMInstance", "as", "instance_type", "if", "isinstance", "(", "obj", ",", "instance_type", ")", ":", "# embedded instance", "return", "'string'", "try", ":", "class_type", "=", "CIMClass", "except", "NameError", ":", "# Defer import due to circular import dependencies:", "from", "pywbem", ".", "cim_obj", "import", "CIMClass", "as", "class_type", "if", "isinstance", "(", "obj", ",", "class_type", ")", ":", "# embedded class", "return", "'string'", "raise", "TypeError", "(", "_format", "(", "\"Object does not have a valid CIM data type: {0!A}\"", ",", "obj", ")", ")" ]
Return the CIM data type name of a CIM typed object, as a string. For an array, the type is determined from the first array element (CIM arrays must be homogeneous w.r.t. the type of their elements). If the array is empty, that is not possible and :exc:`~py:exceptions.ValueError` is raised. Note that Python :term:`numbers <number>` are not valid input objects because determining their CIM data type (e.g. :class:`~pywbem.Uint8`, :class:`~pywbem.Real32`) would require knowing the value range. Therefore, :exc:`~py:exceptions.TypeError` is raised in this case. Parameters: obj (:term:`CIM data type`): The object whose CIM data type name is returned. Returns: :term:`string`: The CIM data type name of the object (e.g. ``"uint8"``). Raises: TypeError: The object does not have a valid CIM data type. ValueError: Cannot determine CIM data type from an empty array.
[ "Return", "the", "CIM", "data", "type", "name", "of", "a", "CIM", "typed", "object", "as", "a", "string", "." ]
python
train
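A behavior sketch of the dispatch above; importing ``Uint8`` and ``cimtype`` from pywbem's top level is an assumption about the package's export surface:

.. code-block:: python

    from pywbem import Uint8, cimtype

    assert cimtype(Uint8(42)) == 'uint8'
    assert cimtype(True) == 'boolean'
    assert cimtype([Uint8(1), Uint8(2)]) == 'uint8'  # type of first element
    try:
        cimtype(42)                  # plain Python numbers are rejected...
    except TypeError:
        pass                         # ...because the value range is unknown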
PBR/MQ2
MQ2/__init__.py
https://github.com/PBR/MQ2/blob/6d84dea47e6751333004743f588f03158e35c28d/MQ2/__init__.py#L51-L92
def extract_zip(filename, extract_dir):
    """ Extract the sources in a temporary folder.

    :arg filename, name of the zip file containing the data from MapQTL
        which will be extracted
    :arg extract_dir, folder in which to extract the archive.

    """
    LOG.info("Extracting %s in %s " % (filename, extract_dir))
    if not os.path.exists(extract_dir):
        try:
            os.mkdir(extract_dir)
        except IOError as err:  # pragma: no cover
            LOG.info("Could not generate the folder %s" % extract_dir)
            LOG.debug("Error: %s" % err)
            return
    if zipfile.is_zipfile(filename):
        try:
            zfile = zipfile.ZipFile(filename, "r")
            for name in zfile.namelist():
                if os.path.dirname(name):
                    curdir = os.path.join(extract_dir, os.path.dirname(name))
                    if not os.path.exists(curdir):
                        os.makedirs(curdir)
                    if not os.path.basename(name):
                        # Pure directory entry, nothing to write out
                        continue
                outfile = open(os.path.join(extract_dir, name), 'wb')
                outfile.write(zfile.read(name))
                outfile.flush()
                outfile.close()
            zfile.close()
        except IOError as err:  # pragma: no cover
            LOG.info("Error while extracting the zip archive.")
            LOG.debug("Error: %s" % err)
    else:
        try:  # pragma: no cover We only have zipfile to test with
            tar = tarfile.open(filename)
            tar.extractall(extract_dir)
            tar.close()
        except tarfile.ReadError as err:  # pragma: no cover
            LOG.info("Error while extracting the tarball.")
            LOG.debug("Error: %s" % err)
    return extract_dir
[ "def", "extract_zip", "(", "filename", ",", "extract_dir", ")", ":", "LOG", ".", "info", "(", "\"Extracting %s in %s \"", "%", "(", "filename", ",", "extract_dir", ")", ")", "if", "not", "os", ".", "path", ".", "exists", "(", "extract_dir", ")", ":", "try", ":", "os", ".", "mkdir", "(", "extract_dir", ")", "except", "IOError", "as", "err", ":", "# pragma: no cover", "LOG", ".", "info", "(", "\"Could not generate the folder %s\"", "%", "extract_dir", ")", "LOG", ".", "debug", "(", "\"Error: %s\"", "%", "err", ")", "return", "if", "zipfile", ".", "is_zipfile", "(", "filename", ")", ":", "try", ":", "zfile", "=", "zipfile", ".", "ZipFile", "(", "filename", ",", "\"r\"", ")", "for", "name", "in", "zfile", ".", "namelist", "(", ")", ":", "if", "os", ".", "path", ".", "dirname", "(", "name", ")", ":", "curdir", "=", "os", ".", "path", ".", "join", "(", "extract_dir", ",", "os", ".", "path", ".", "dirname", "(", "name", ")", ")", "if", "not", "os", ".", "path", ".", "exists", "(", "curdir", ")", ":", "os", ".", "mkdir", "(", "curdir", ")", "continue", "outfile", "=", "open", "(", "os", ".", "path", ".", "join", "(", "extract_dir", ",", "name", ")", ",", "'wb'", ")", "outfile", ".", "write", "(", "zfile", ".", "read", "(", "name", ")", ")", "outfile", ".", "flush", "(", ")", "outfile", ".", "close", "(", ")", "zfile", ".", "close", "(", ")", "except", "IOError", "as", "err", ":", "# pragma: no cover", "LOG", ".", "info", "(", "\"Error while extracting the zip archive.\"", ")", "LOG", ".", "debug", "(", "\"Error: %s\"", "%", "err", ")", "else", ":", "try", ":", "# pragma: no cover We only have zipfile to test with", "tar", "=", "tarfile", ".", "open", "(", "filename", ")", "tar", ".", "extractall", "(", "extract_dir", ")", "tar", ".", "close", "(", ")", "except", "tarfile", ".", "ReadError", "as", "err", ":", "# pragma: no cover", "LOG", ".", "info", "(", "\"Error while extracting the tarball.\"", ")", "LOG", ".", "debug", "(", "\"Error: %s\"", "%", "err", ")", "return", "extract_dir" ]
Extract the sources in a temporary folder. :arg filename, name of the zip file containing the data from MapQTL which will be extracted :arg extract_dir, folder in which to extract the archive.
[ "Extract", "the", "sources", "in", "a", "temporary", "folder", ".", ":", "arg", "filename", "name", "of", "the", "zip", "file", "containing", "the", "data", "from", "MapQTL", "which", "will", "be", "extracted", ":", "arg", "extract_dir", "folder", "in", "which", "to", "extract", "the", "archive", "." ]
python
train
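Usage sketch with a throwaway extraction directory:

.. code-block:: python

    import tempfile

    extract_dir = extract_zip('mapqtl_output.zip',
                              tempfile.mkdtemp(prefix='mq2_'))
    # extract_dir now contains the archive contents (zip or tarball)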
jxtech/wechatpy
wechatpy/enterprise/client/api/chat.py
https://github.com/jxtech/wechatpy/blob/4df0da795618c0895a10f1c2cde9e9d5c0a93aaa/wechatpy/enterprise/client/api/chat.py#L161-L170
def send_single_text(self, sender, receiver, content):
        """
        Send a single-chat text message

        :param sender: message sender
        :param receiver: receiving member ID
        :param content: message content
        :return: JSON data returned by the API
        """
        return self.send_text(sender, 'single', receiver, content)
[ "def", "send_single_text", "(", "self", ",", "sender", ",", "receiver", ",", "content", ")", ":", "return", "self", ".", "send_text", "(", "sender", ",", "'single'", ",", "receiver", ",", "content", ")" ]
Send a single-chat text message

    :param sender: message sender
    :param receiver: receiving member ID
    :param content: message content
    :return: JSON data returned by the API
[ "发送单聊文本消息" ]
python
train
codelv/enaml-native-maps
src/googlemaps/android/android_map_view.py
https://github.com/codelv/enaml-native-maps/blob/5b6dda745cede05755dd40d29775cc0544226c29/src/googlemaps/android/android_map_view.py#L980-L1002
def on_marker(self, marker): """ Convert our options into the actual marker object""" mid, pos = marker self.marker = Marker(__id__=mid) mapview = self.parent() # Save ref mapview.markers[mid] = self # Required so the packer can pass the id self.marker.setTag(mid) # If we have a child widget we must configure the map to use the # custom adapter for w in self.child_widgets(): mapview.init_info_window_adapter() break d = self.declaration if d.show_info: self.set_show_info(d.show_info) #: Can free the options now del self.options
[ "def", "on_marker", "(", "self", ",", "marker", ")", ":", "mid", ",", "pos", "=", "marker", "self", ".", "marker", "=", "Marker", "(", "__id__", "=", "mid", ")", "mapview", "=", "self", ".", "parent", "(", ")", "# Save ref", "mapview", ".", "markers", "[", "mid", "]", "=", "self", "# Required so the packer can pass the id", "self", ".", "marker", ".", "setTag", "(", "mid", ")", "# If we have a child widget we must configure the map to use the", "# custom adapter", "for", "w", "in", "self", ".", "child_widgets", "(", ")", ":", "mapview", ".", "init_info_window_adapter", "(", ")", "break", "d", "=", "self", ".", "declaration", "if", "d", ".", "show_info", ":", "self", ".", "set_show_info", "(", "d", ".", "show_info", ")", "#: Can free the options now", "del", "self", ".", "options" ]
Convert our options into the actual marker object
[ "Convert", "our", "options", "into", "the", "actual", "marker", "object" ]
python
valid
ladybug-tools/ladybug
ladybug/designday.py
https://github.com/ladybug-tools/ladybug/blob/c08b7308077a48d5612f644943f92d5b5dade583/ladybug/designday.py#L1357-L1365
def to_json(self): """Convert the Sky Condition to a dictionary.""" return { 'solar_model': self.solar_model, 'month': self.month, 'day_of_month': self.day_of_month, 'clearness': self.clearness, 'daylight_savings_indicator': self.daylight_savings_indicator }
[ "def", "to_json", "(", "self", ")", ":", "return", "{", "'solar_model'", ":", "self", ".", "solar_model", ",", "'month'", ":", "self", ".", "month", ",", "'day_of_month'", ":", "self", ".", "day_of_month", ",", "'clearness'", ":", "self", ".", "clearness", ",", "'daylight_savings_indicator'", ":", "self", ".", "daylight_savings_indicator", "}" ]
Convert the Sky Condition to a dictionary.
[ "Convert", "the", "Sky", "Condition", "to", "a", "dictionary", "." ]
python
train
hfaran/progressive
progressive/bar.py
https://github.com/hfaran/progressive/blob/e39c7fb17405dbe997c3417a5993b94ef16dab0a/progressive/bar.py#L169-L184
def full_line_width(self): """Find actual length of bar_str e.g., Progress [ | ] 10/10 """ bar_str_len = sum([ self._indent, ((len(self.title) + 1) if self._title_pos in ["left", "right"] else 0), # Title if present len(self.start_char), self.max_width, # Progress bar len(self.end_char), 1, # Space between end_char and amount_complete_str len(str(self.max_value)) * 2 + 1 # 100/100 ]) return bar_str_len
[ "def", "full_line_width", "(", "self", ")", ":", "bar_str_len", "=", "sum", "(", "[", "self", ".", "_indent", ",", "(", "(", "len", "(", "self", ".", "title", ")", "+", "1", ")", "if", "self", ".", "_title_pos", "in", "[", "\"left\"", ",", "\"right\"", "]", "else", "0", ")", ",", "# Title if present", "len", "(", "self", ".", "start_char", ")", ",", "self", ".", "max_width", ",", "# Progress bar", "len", "(", "self", ".", "end_char", ")", ",", "1", ",", "# Space between end_char and amount_complete_str", "len", "(", "str", "(", "self", ".", "max_value", ")", ")", "*", "2", "+", "1", "# 100/100", "]", ")", "return", "bar_str_len" ]
Find actual length of bar_str e.g., Progress [ | ] 10/10
[ "Find", "actual", "length", "of", "bar_str" ]
python
train
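As a quick check of the width arithmetic above, here is a minimal standalone sketch with invented values standing in for the Bar attributes; it reproduces the same sum, term by term.

indent = 4
title = "Progress"
title_pos = "left"              # title printed beside the bar
start_char, end_char = "[", "]"
max_width = 40
max_value = 100

full_width = sum([
    indent,
    (len(title) + 1) if title_pos in ("left", "right") else 0,
    len(start_char),
    max_width,
    len(end_char),
    1,                            # space before the "done/total" counter
    len(str(max_value)) * 2 + 1,  # e.g. "100/100"
])
print(full_width)                 # 63 with these values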
saltstack/salt
salt/modules/drbd.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/drbd.py#L70-L84
def _add_res(line): ''' Analyse the line of local resource of ``drbdadm status`` ''' global resource fields = line.strip().split() if resource: ret.append(resource) resource = {} resource["resource name"] = fields[0] resource["local role"] = fields[1].split(":")[1] resource["local volumes"] = [] resource["peer nodes"] = []
[ "def", "_add_res", "(", "line", ")", ":", "global", "resource", "fields", "=", "line", ".", "strip", "(", ")", ".", "split", "(", ")", "if", "resource", ":", "ret", ".", "append", "(", "resource", ")", "resource", "=", "{", "}", "resource", "[", "\"resource name\"", "]", "=", "fields", "[", "0", "]", "resource", "[", "\"local role\"", "]", "=", "fields", "[", "1", "]", ".", "split", "(", "\":\"", ")", "[", "1", "]", "resource", "[", "\"local volumes\"", "]", "=", "[", "]", "resource", "[", "\"peer nodes\"", "]", "=", "[", "]" ]
Analyse a line describing a local resource in the output of ``drbdadm status``
[ "Analyse", "the", "line", "of", "local", "resource", "of", "drbdadm", "status" ]
python
train
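A standalone sketch of the same field splitting, run on an invented but representative ``drbdadm status`` resource line (the real parser also maintains the module-level ``ret`` list, omitted here):

line = "res0 role:Primary suspended:no"
fields = line.strip().split()
resource = {
    "resource name": fields[0],
    "local role": fields[1].split(":")[1],
    "local volumes": [],
    "peer nodes": [],
}
print(resource["resource name"], resource["local role"])  # res0 Primary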
markovmodel/PyEMMA
pyemma/coordinates/data/util/reader_utils.py
https://github.com/markovmodel/PyEMMA/blob/5c3124398217de05ba5ce9c8fb01519222481ab8/pyemma/coordinates/data/util/reader_utils.py#L134-L151
def copy_traj_attributes(target, origin, start): """ Inserts certain attributes of origin into target :param target: target trajectory object :param origin: origin trajectory object :param start: :py:obj:`origin` attributes will be inserted in :py:obj:`target` starting at this index :return: target: the md trajectory with the attributes of :py:obj:`origin` inserted """ # The list of copied attributes can be extended here with time # Or perhaps ask the mdtraj guys to implement something similar? stop = start+origin.n_frames target.xyz[start:stop] = origin.xyz target.unitcell_lengths[start:stop] = origin.unitcell_lengths target.unitcell_angles[start:stop] = origin.unitcell_angles target.time[start:stop] = origin.time return target
[ "def", "copy_traj_attributes", "(", "target", ",", "origin", ",", "start", ")", ":", "# The list of copied attributes can be extended here with time", "# Or perhaps ask the mdtraj guys to implement something similar?", "stop", "=", "start", "+", "origin", ".", "n_frames", "target", ".", "xyz", "[", "start", ":", "stop", "]", "=", "origin", ".", "xyz", "target", ".", "unitcell_lengths", "[", "start", ":", "stop", "]", "=", "origin", ".", "unitcell_lengths", "target", ".", "unitcell_angles", "[", "start", ":", "stop", "]", "=", "origin", ".", "unitcell_angles", "target", ".", "time", "[", "start", ":", "stop", "]", "=", "origin", ".", "time", "return", "target" ]
Inserts certain attributes of origin into target :param target: target trajectory object :param origin: origin trajectory object :param start: :py:obj:`origin` attributes will be inserted in :py:obj:`target` starting at this index :return: target: the md trajectory with the attributes of :py:obj:`origin` inserted
[ "Inserts", "certain", "attributes", "of", "origin", "into", "target", ":", "param", "target", ":", "target", "trajectory", "object", ":", "param", "origin", ":", "origin", "trajectory", "object", ":", "param", "start", ":", ":", "py", ":", "obj", ":", "origin", "attributes", "will", "be", "inserted", "in", ":", "py", ":", "obj", ":", "target", "starting", "at", "this", "index", ":", "return", ":", "target", ":", "the", "md", "trajectory", "with", "the", "attributes", "of", ":", "py", ":", "obj", ":", "origin", "inserted" ]
python
train
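The core of copy_traj_attributes is plain slice assignment; a minimal numpy sketch of that pattern, with arrays standing in for the mdtraj Trajectory attributes (shapes are illustrative):

import numpy as np

target_xyz = np.zeros((10, 5, 3))     # 10 frames, 5 atoms, xyz
origin_xyz = np.ones((4, 5, 3))       # 4 frames to insert
start = 3
stop = start + origin_xyz.shape[0]
target_xyz[start:stop] = origin_xyz   # frames 3..6 now come from origin
print(target_xyz[:, 0, 0])            # [0. 0. 0. 1. 1. 1. 1. 0. 0. 0.]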
heronotears/lazyxml
lazyxml/parser.py
https://github.com/heronotears/lazyxml/blob/e3f1ebd3f34cfa03d022ddec90e17d60c1c81953/lazyxml/parser.py#L102-L109
def guess_xml_encoding(self, content): r"""Guess encoding from xml header declaration. :param content: xml content :rtype: str or None """ matchobj = self.__regex['xml_encoding'].match(content) return matchobj and matchobj.group(1).lower()
[ "def", "guess_xml_encoding", "(", "self", ",", "content", ")", ":", "matchobj", "=", "self", ".", "__regex", "[", "'xml_encoding'", "]", ".", "match", "(", "content", ")", "return", "matchobj", "and", "matchobj", ".", "group", "(", "1", ")", ".", "lower", "(", ")" ]
r"""Guess encoding from xml header declaration. :param content: xml content :rtype: str or None
[ "r", "Guess", "encoding", "from", "xml", "header", "declaration", "." ]
python
train
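The pattern stored in self.__regex['xml_encoding'] is not shown above, so the regex below is an assumption; it demonstrates the same match-and-lowercase flow on a sample header:

import re

xml_encoding = re.compile(r'<\?xml[^>]*\bencoding=["\']([\w.-]+)["\']')
content = '<?xml version="1.0" encoding="UTF-8"?><root/>'
matchobj = xml_encoding.match(content)
print(matchobj and matchobj.group(1).lower())  # utf-8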
mikedh/trimesh
trimesh/path/path.py
https://github.com/mikedh/trimesh/blob/25e059bf6d4caa74f62ffd58ce4f61a90ee4e518/trimesh/path/path.py#L1381-L1387
def identifier_md5(self): """ Return an MD5 of the identifier """ as_int = (self.identifier * 1e4).astype(np.int64) hashed = util.md5_object(as_int.tostring(order='C')) return hashed
[ "def", "identifier_md5", "(", "self", ")", ":", "as_int", "=", "(", "self", ".", "identifier", "*", "1e4", ")", ".", "astype", "(", "np", ".", "int64", ")", "hashed", "=", "util", ".", "md5_object", "(", "as_int", ".", "tostring", "(", "order", "=", "'C'", ")", ")", "return", "hashed" ]
Return an MD5 of the identifier
[ "Return", "an", "MD5", "of", "the", "identifier" ]
python
train
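A self-contained sketch of the same hashing recipe, using hashlib directly in place of trimesh's util.md5_object and numpy's tobytes in place of the deprecated tostring (the identifier values are made up):

import hashlib
import numpy as np

identifier = np.array([3.14159, 2.71828, 1.41421])
as_int = (identifier * 1e4).astype(np.int64)   # quantize to 1e-4 before hashing
hashed = hashlib.md5(as_int.tobytes(order="C")).hexdigest()
print(hashed)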
mmp2/megaman
megaman/utils/k_means_clustering.py
https://github.com/mmp2/megaman/blob/faccaf267aad0a8b18ec8a705735fd9dd838ca1e/megaman/utils/k_means_clustering.py#L49-L74
def orthogonal_initialization(X,K): """ Initialize the centrodis by orthogonal_initialization. Parameters -------------------- X(data): array-like, shape= (m_samples,n_samples) K: integer number of K clusters Returns ------- centroids: array-like, shape (K,n_samples) data_norms: array-like, shape=(1,n_samples) """ N,M = X.shape centroids= X[np.random.randint(0, N-1,1),:] data_norms = np.linalg.norm(X, axis = 1)# contains the norm of each data point, only do this once center_norms = np.linalg.norm(centroids, axis=1) # contains the norms of the centers, will need to be updated when new center added for k in range(1,K): ## Here's where we compute the cosine of the angle between them: # Compute the dot (inner) product between each data point and each center new_center_index,new_center = new_orthogonal_center(X,data_norms,centroids,center_norms =center_norms) centroids = np.vstack((centroids,new_center)) center_norms = np.hstack((center_norms,data_norms[new_center_index])) return centroids,data_norms
[ "def", "orthogonal_initialization", "(", "X", ",", "K", ")", ":", "N", ",", "M", "=", "X", ".", "shape", "centroids", "=", "X", "[", "np", ".", "random", ".", "randint", "(", "0", ",", "N", "-", "1", ",", "1", ")", ",", ":", "]", "data_norms", "=", "np", ".", "linalg", ".", "norm", "(", "X", ",", "axis", "=", "1", ")", "# contains the norm of each data point, only do this once", "center_norms", "=", "np", ".", "linalg", ".", "norm", "(", "centroids", ",", "axis", "=", "1", ")", "# contains the norms of the centers, will need to be updated when new center added", "for", "k", "in", "range", "(", "1", ",", "K", ")", ":", "## Here's where we compute the cosine of the angle between them:", "# Compute the dot (inner) product between each data point and each center", "new_center_index", ",", "new_center", "=", "new_orthogonal_center", "(", "X", ",", "data_norms", ",", "centroids", ",", "center_norms", "=", "center_norms", ")", "centroids", "=", "np", ".", "vstack", "(", "(", "centroids", ",", "new_center", ")", ")", "center_norms", "=", "np", ".", "hstack", "(", "(", "center_norms", ",", "data_norms", "[", "new_center_index", "]", ")", ")", "return", "centroids", ",", "data_norms" ]
Initialize the centroids by orthogonal_initialization. Parameters -------------------- X(data): array-like, shape= (m_samples,n_samples) K: integer number of K clusters Returns ------- centroids: array-like, shape (K,n_samples) data_norms: array-like, shape=(1,n_samples)
[ "Initialize", "the", "centrodis", "by", "orthogonal_initialization", ".", "Parameters", "--------------------", "X", "(", "data", ")", ":", "array", "-", "like", "shape", "=", "(", "m_samples", "n_samples", ")", "K", ":", "integer", "number", "of", "K", "clusters", "Returns", "-------", "centroids", ":", "array", "-", "like", "shape", "(", "K", "n_samples", ")", "data_norms", ":", "array", "-", "like", "shape", "=", "(", "1", "n_samples", ")" ]
python
train
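new_orthogonal_center is not shown above, so this sketch only covers the initialization steps that are: seed one centroid from a random data point and precompute the row norms once (toy data, fixed seed):

import numpy as np

rng = np.random.default_rng(0)
X = rng.normal(size=(50, 3))
N, M = X.shape
centroids = X[rng.integers(0, N, 1), :]
data_norms = np.linalg.norm(X, axis=1)        # one norm per data point, computed once
center_norms = np.linalg.norm(centroids, axis=1)
print(centroids.shape, data_norms.shape)      # (1, 3) (50,)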
rosenbrockc/fortpy
fortpy/elements.py
https://github.com/rosenbrockc/fortpy/blob/1ed0757c52d549e41d9d44bdea68cb89529293a5/fortpy/elements.py#L1870-L1881
def charindex(self, line, column): """Gets the absolute character index of the line and column in the continuous string.""" #Make sure that we have chars and lines to work from if this #gets called before linenum() does. if len(self._lines) == 0: self.linenum(1) if line < len(self._chars): return self._chars[line - 1] + column else: return len(self.refstring)
[ "def", "charindex", "(", "self", ",", "line", ",", "column", ")", ":", "#Make sure that we have chars and lines to work from if this", "#gets called before linenum() does.", "if", "len", "(", "self", ".", "_lines", ")", "==", "0", ":", "self", ".", "linenum", "(", "1", ")", "if", "line", "<", "len", "(", "self", ".", "_chars", ")", ":", "return", "self", ".", "_chars", "[", "line", "-", "1", "]", "+", "column", "else", ":", "return", "len", "(", "self", ".", "refstring", ")" ]
Gets the absolute character index of the line and column in the continuous string.
[ "Gets", "the", "absolute", "character", "index", "of", "the", "line", "and", "column", "in", "the", "continuous", "string", "." ]
python
train
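A minimal sketch of the offset arithmetic, where the offsets list plays the role of self._chars (the cumulative character index at the start of each line):

text = "first\nsecond\nthird"
offsets, total = [], 0
for l in text.split("\n"):
    offsets.append(total)
    total += len(l) + 1                        # +1 for the newline

def charindex(line, column):
    if line <= len(offsets):
        return offsets[line - 1] + column
    return len(text)

print(charindex(2, 3), text[charindex(2, 3)])  # 9 o  ('o' in "second")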
tensorflow/probability
tensorflow_probability/python/distributions/variational_gaussian_process.py
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/distributions/variational_gaussian_process.py#L728-L842
def variational_loss(self, observations, observation_index_points=None, kl_weight=1., name='variational_loss'): """Variational loss for the VGP. Given `observations` and `observation_index_points`, compute the negative variational lower bound as specified in [Hensman, 2013][1]. Args: observations: `float` `Tensor` representing collection, or batch of collections, of observations corresponding to `observation_index_points`. Shape has the form `[b1, ..., bB, e]`, which must be brodcastable with the batch and example shapes of `observation_index_points`. The batch shape `[b1, ..., bB]` must be broadcastable with the shapes of all other batched parameters (`kernel.batch_shape`, `observation_index_points`, etc.). observation_index_points: `float` `Tensor` representing finite (batch of) vector(s) of points where observations are defined. Shape has the form `[b1, ..., bB, e1, f1, ..., fF]` where `F` is the number of feature dimensions and must equal `kernel.feature_ndims` and `e1` is the number (size) of index points in each batch (we denote it `e1` to distinguish it from the numer of inducing index points, denoted `e2` below). If set to `None` uses `index_points` as the origin for observations. Default value: None. kl_weight: Amount by which to scale the KL divergence loss between prior and posterior. Default value: 1. name: Python `str` name prefixed to Ops created by this class. Default value: "GaussianProcess". Returns: loss: Scalar tensor representing the negative variational lower bound. Can be directly used in a `tf.Optimizer`. Raises: ValueError: if `mean_fn` is not `None` and is not callable. #### References [1]: Hensman, J., Lawrence, N. "Gaussian Processes for Big Data", 2013 https://arxiv.org/abs/1309.6835 """ with tf.name_scope(name or 'variational_gp_loss'): if observation_index_points is None: observation_index_points = self._index_points observation_index_points = tf.convert_to_tensor( value=observation_index_points, dtype=self._dtype, name='observation_index_points') observations = tf.convert_to_tensor( value=observations, dtype=self._dtype, name='observations') kl_weight = tf.convert_to_tensor( value=kl_weight, dtype=self._dtype, name='kl_weight') # The variational loss is a negative ELBO. The ELBO can be broken down # into three terms: # 1. a likelihood term # 2. 
a trace term arising from the covariance of the posterior predictive kzx = self.kernel.matrix(self._inducing_index_points, observation_index_points) kzx_linop = tf.linalg.LinearOperatorFullMatrix(kzx) loc = (self._mean_fn(observation_index_points) + kzx_linop.matvec(self._kzz_inv_varloc, adjoint=True)) likelihood = independent.Independent( normal.Normal( loc=loc, scale=tf.sqrt(self._observation_noise_variance + self._jitter), name='NormalLikelihood'), reinterpreted_batch_ndims=1) obs_ll = likelihood.log_prob(observations) chol_kzz_linop = tf.linalg.LinearOperatorLowerTriangular(self._chol_kzz) chol_kzz_inv_kzx = chol_kzz_linop.solve(kzx) kzz_inv_kzx = chol_kzz_linop.solve(chol_kzz_inv_kzx, adjoint=True) kxx_diag = tf.linalg.diag_part( self.kernel.matrix( observation_index_points, observation_index_points)) ktilde_trace_term = ( tf.reduce_sum(input_tensor=kxx_diag, axis=-1) - tf.reduce_sum(input_tensor=chol_kzz_inv_kzx ** 2, axis=[-2, -1])) # Tr(SB) # where S = A A.T, A = variational_inducing_observations_scale # and B = Kzz^-1 Kzx Kzx.T Kzz^-1 # # Now Tr(SB) = Tr(A A.T Kzz^-1 Kzx Kzx.T Kzz^-1) # = Tr(A.T Kzz^-1 Kzx Kzx.T Kzz^-1 A) # = sum_ij (A.T Kzz^-1 Kzx)_{ij}^2 other_trace_term = tf.reduce_sum( input_tensor=( self._variational_inducing_observations_posterior.scale.matmul( kzz_inv_kzx) ** 2), axis=[-2, -1]) trace_term = (.5 * (ktilde_trace_term + other_trace_term) / self._observation_noise_variance) inducing_prior = gaussian_process.GaussianProcess( kernel=self._kernel, mean_fn=self._mean_fn, index_points=self._inducing_index_points, observation_noise_variance=self._observation_noise_variance) kl_term = kl_weight * kullback_leibler.kl_divergence( self._variational_inducing_observations_posterior, inducing_prior) lower_bound = (obs_ll - trace_term - kl_term) return -tf.reduce_mean(input_tensor=lower_bound)
[ "def", "variational_loss", "(", "self", ",", "observations", ",", "observation_index_points", "=", "None", ",", "kl_weight", "=", "1.", ",", "name", "=", "'variational_loss'", ")", ":", "with", "tf", ".", "name_scope", "(", "name", "or", "'variational_gp_loss'", ")", ":", "if", "observation_index_points", "is", "None", ":", "observation_index_points", "=", "self", ".", "_index_points", "observation_index_points", "=", "tf", ".", "convert_to_tensor", "(", "value", "=", "observation_index_points", ",", "dtype", "=", "self", ".", "_dtype", ",", "name", "=", "'observation_index_points'", ")", "observations", "=", "tf", ".", "convert_to_tensor", "(", "value", "=", "observations", ",", "dtype", "=", "self", ".", "_dtype", ",", "name", "=", "'observations'", ")", "kl_weight", "=", "tf", ".", "convert_to_tensor", "(", "value", "=", "kl_weight", ",", "dtype", "=", "self", ".", "_dtype", ",", "name", "=", "'kl_weight'", ")", "# The variational loss is a negative ELBO. The ELBO can be broken down", "# into three terms:", "# 1. a likelihood term", "# 2. a trace term arising from the covariance of the posterior predictive", "kzx", "=", "self", ".", "kernel", ".", "matrix", "(", "self", ".", "_inducing_index_points", ",", "observation_index_points", ")", "kzx_linop", "=", "tf", ".", "linalg", ".", "LinearOperatorFullMatrix", "(", "kzx", ")", "loc", "=", "(", "self", ".", "_mean_fn", "(", "observation_index_points", ")", "+", "kzx_linop", ".", "matvec", "(", "self", ".", "_kzz_inv_varloc", ",", "adjoint", "=", "True", ")", ")", "likelihood", "=", "independent", ".", "Independent", "(", "normal", ".", "Normal", "(", "loc", "=", "loc", ",", "scale", "=", "tf", ".", "sqrt", "(", "self", ".", "_observation_noise_variance", "+", "self", ".", "_jitter", ")", ",", "name", "=", "'NormalLikelihood'", ")", ",", "reinterpreted_batch_ndims", "=", "1", ")", "obs_ll", "=", "likelihood", ".", "log_prob", "(", "observations", ")", "chol_kzz_linop", "=", "tf", ".", "linalg", ".", "LinearOperatorLowerTriangular", "(", "self", ".", "_chol_kzz", ")", "chol_kzz_inv_kzx", "=", "chol_kzz_linop", ".", "solve", "(", "kzx", ")", "kzz_inv_kzx", "=", "chol_kzz_linop", ".", "solve", "(", "chol_kzz_inv_kzx", ",", "adjoint", "=", "True", ")", "kxx_diag", "=", "tf", ".", "linalg", ".", "diag_part", "(", "self", ".", "kernel", ".", "matrix", "(", "observation_index_points", ",", "observation_index_points", ")", ")", "ktilde_trace_term", "=", "(", "tf", ".", "reduce_sum", "(", "input_tensor", "=", "kxx_diag", ",", "axis", "=", "-", "1", ")", "-", "tf", ".", "reduce_sum", "(", "input_tensor", "=", "chol_kzz_inv_kzx", "**", "2", ",", "axis", "=", "[", "-", "2", ",", "-", "1", "]", ")", ")", "# Tr(SB)", "# where S = A A.T, A = variational_inducing_observations_scale", "# and B = Kzz^-1 Kzx Kzx.T Kzz^-1", "#", "# Now Tr(SB) = Tr(A A.T Kzz^-1 Kzx Kzx.T Kzz^-1)", "# = Tr(A.T Kzz^-1 Kzx Kzx.T Kzz^-1 A)", "# = sum_ij (A.T Kzz^-1 Kzx)_{ij}^2", "other_trace_term", "=", "tf", ".", "reduce_sum", "(", "input_tensor", "=", "(", "self", ".", "_variational_inducing_observations_posterior", ".", "scale", ".", "matmul", "(", "kzz_inv_kzx", ")", "**", "2", ")", ",", "axis", "=", "[", "-", "2", ",", "-", "1", "]", ")", "trace_term", "=", "(", ".5", "*", "(", "ktilde_trace_term", "+", "other_trace_term", ")", "/", "self", ".", "_observation_noise_variance", ")", "inducing_prior", "=", "gaussian_process", ".", "GaussianProcess", "(", "kernel", "=", "self", ".", "_kernel", ",", "mean_fn", "=", "self", ".", "_mean_fn", ",", "index_points", "=", "self", ".", 
"_inducing_index_points", ",", "observation_noise_variance", "=", "self", ".", "_observation_noise_variance", ")", "kl_term", "=", "kl_weight", "*", "kullback_leibler", ".", "kl_divergence", "(", "self", ".", "_variational_inducing_observations_posterior", ",", "inducing_prior", ")", "lower_bound", "=", "(", "obs_ll", "-", "trace_term", "-", "kl_term", ")", "return", "-", "tf", ".", "reduce_mean", "(", "input_tensor", "=", "lower_bound", ")" ]
Variational loss for the VGP. Given `observations` and `observation_index_points`, compute the negative variational lower bound as specified in [Hensman, 2013][1]. Args: observations: `float` `Tensor` representing collection, or batch of collections, of observations corresponding to `observation_index_points`. Shape has the form `[b1, ..., bB, e]`, which must be broadcastable with the batch and example shapes of `observation_index_points`. The batch shape `[b1, ..., bB]` must be broadcastable with the shapes of all other batched parameters (`kernel.batch_shape`, `observation_index_points`, etc.). observation_index_points: `float` `Tensor` representing finite (batch of) vector(s) of points where observations are defined. Shape has the form `[b1, ..., bB, e1, f1, ..., fF]` where `F` is the number of feature dimensions and must equal `kernel.feature_ndims` and `e1` is the number (size) of index points in each batch (we denote it `e1` to distinguish it from the number of inducing index points, denoted `e2` below). If set to `None` uses `index_points` as the origin for observations. Default value: None. kl_weight: Amount by which to scale the KL divergence loss between prior and posterior. Default value: 1. name: Python `str` name prefixed to Ops created by this class. Default value: "GaussianProcess". Returns: loss: Scalar tensor representing the negative variational lower bound. Can be directly used in a `tf.Optimizer`. Raises: ValueError: if `mean_fn` is not `None` and is not callable. #### References [1]: Hensman, J., Lawrence, N. "Gaussian Processes for Big Data", 2013 https://arxiv.org/abs/1309.6835
[ "Variational", "loss", "for", "the", "VGP", "." ]
python
test
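The comment block behind other_trace_term relies on the identity Tr(S B) = sum_ij (A^T C)_ij^2 for S = A A^T and B = C C^T (with C standing for Kzz^-1 Kzx). A quick numeric check with random stand-in matrices:

import numpy as np

rng = np.random.default_rng(1)
A = rng.normal(size=(5, 5))      # variational scale factor
C = rng.normal(size=(5, 7))      # stands in for Kzz^-1 Kzx
S, B = A @ A.T, C @ C.T
lhs = np.trace(S @ B)
rhs = np.sum((A.T @ C) ** 2)
print(np.allclose(lhs, rhs))     # True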
Microsoft/azure-devops-python-api
azure-devops/azure/devops/v5_0/identity/identity_client.py
https://github.com/Microsoft/azure-devops-python-api/blob/4777ffda2f5052fabbaddb2abe9cb434e0cf1aa8/azure-devops/azure/devops/v5_0/identity/identity_client.py#L148-L184
def read_identities(self, descriptors=None, identity_ids=None, subject_descriptors=None, search_filter=None, filter_value=None, query_membership=None, properties=None, include_restricted_visibility=None, options=None): """ReadIdentities. :param str descriptors: :param str identity_ids: :param str subject_descriptors: :param str search_filter: :param str filter_value: :param str query_membership: :param str properties: :param bool include_restricted_visibility: :param str options: :rtype: [Identity] """ query_parameters = {} if descriptors is not None: query_parameters['descriptors'] = self._serialize.query('descriptors', descriptors, 'str') if identity_ids is not None: query_parameters['identityIds'] = self._serialize.query('identity_ids', identity_ids, 'str') if subject_descriptors is not None: query_parameters['subjectDescriptors'] = self._serialize.query('subject_descriptors', subject_descriptors, 'str') if search_filter is not None: query_parameters['searchFilter'] = self._serialize.query('search_filter', search_filter, 'str') if filter_value is not None: query_parameters['filterValue'] = self._serialize.query('filter_value', filter_value, 'str') if query_membership is not None: query_parameters['queryMembership'] = self._serialize.query('query_membership', query_membership, 'str') if properties is not None: query_parameters['properties'] = self._serialize.query('properties', properties, 'str') if include_restricted_visibility is not None: query_parameters['includeRestrictedVisibility'] = self._serialize.query('include_restricted_visibility', include_restricted_visibility, 'bool') if options is not None: query_parameters['options'] = self._serialize.query('options', options, 'str') response = self._send(http_method='GET', location_id='28010c54-d0c0-4c89-a5b0-1c9e188b9fb7', version='5.0', query_parameters=query_parameters) return self._deserialize('[Identity]', self._unwrap_collection(response))
[ "def", "read_identities", "(", "self", ",", "descriptors", "=", "None", ",", "identity_ids", "=", "None", ",", "subject_descriptors", "=", "None", ",", "search_filter", "=", "None", ",", "filter_value", "=", "None", ",", "query_membership", "=", "None", ",", "properties", "=", "None", ",", "include_restricted_visibility", "=", "None", ",", "options", "=", "None", ")", ":", "query_parameters", "=", "{", "}", "if", "descriptors", "is", "not", "None", ":", "query_parameters", "[", "'descriptors'", "]", "=", "self", ".", "_serialize", ".", "query", "(", "'descriptors'", ",", "descriptors", ",", "'str'", ")", "if", "identity_ids", "is", "not", "None", ":", "query_parameters", "[", "'identityIds'", "]", "=", "self", ".", "_serialize", ".", "query", "(", "'identity_ids'", ",", "identity_ids", ",", "'str'", ")", "if", "subject_descriptors", "is", "not", "None", ":", "query_parameters", "[", "'subjectDescriptors'", "]", "=", "self", ".", "_serialize", ".", "query", "(", "'subject_descriptors'", ",", "subject_descriptors", ",", "'str'", ")", "if", "search_filter", "is", "not", "None", ":", "query_parameters", "[", "'searchFilter'", "]", "=", "self", ".", "_serialize", ".", "query", "(", "'search_filter'", ",", "search_filter", ",", "'str'", ")", "if", "filter_value", "is", "not", "None", ":", "query_parameters", "[", "'filterValue'", "]", "=", "self", ".", "_serialize", ".", "query", "(", "'filter_value'", ",", "filter_value", ",", "'str'", ")", "if", "query_membership", "is", "not", "None", ":", "query_parameters", "[", "'queryMembership'", "]", "=", "self", ".", "_serialize", ".", "query", "(", "'query_membership'", ",", "query_membership", ",", "'str'", ")", "if", "properties", "is", "not", "None", ":", "query_parameters", "[", "'properties'", "]", "=", "self", ".", "_serialize", ".", "query", "(", "'properties'", ",", "properties", ",", "'str'", ")", "if", "include_restricted_visibility", "is", "not", "None", ":", "query_parameters", "[", "'includeRestrictedVisibility'", "]", "=", "self", ".", "_serialize", ".", "query", "(", "'include_restricted_visibility'", ",", "include_restricted_visibility", ",", "'bool'", ")", "if", "options", "is", "not", "None", ":", "query_parameters", "[", "'options'", "]", "=", "self", ".", "_serialize", ".", "query", "(", "'options'", ",", "options", ",", "'str'", ")", "response", "=", "self", ".", "_send", "(", "http_method", "=", "'GET'", ",", "location_id", "=", "'28010c54-d0c0-4c89-a5b0-1c9e188b9fb7'", ",", "version", "=", "'5.0'", ",", "query_parameters", "=", "query_parameters", ")", "return", "self", ".", "_deserialize", "(", "'[Identity]'", ",", "self", ".", "_unwrap_collection", "(", "response", ")", ")" ]
ReadIdentities. :param str descriptors: :param str identity_ids: :param str subject_descriptors: :param str search_filter: :param str filter_value: :param str query_membership: :param str properties: :param bool include_restricted_visibility: :param str options: :rtype: [Identity]
[ "ReadIdentities", ".", ":", "param", "str", "descriptors", ":", ":", "param", "str", "identity_ids", ":", ":", "param", "str", "subject_descriptors", ":", ":", "param", "str", "search_filter", ":", ":", "param", "str", "filter_value", ":", ":", "param", "str", "query_membership", ":", ":", "param", "str", "properties", ":", ":", "param", "bool", "include_restricted_visibility", ":", ":", "param", "str", "options", ":", ":", "rtype", ":", "[", "Identity", "]" ]
python
train
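Stripped of the msrest serializer, the request-building idiom above reduces to "send only the parameters the caller actually set"; a small sketch of that pattern with invented values:

def build_query(**kwargs):
    return {name: value for name, value in kwargs.items() if value is not None}

print(build_query(searchFilter="General", filterValue="jsmith", options=None))
# {'searchFilter': 'General', 'filterValue': 'jsmith'}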
tanghaibao/goatools
goatools/cli/compare_gos.py
https://github.com/tanghaibao/goatools/blob/407682e573a108864a79031f8ca19ee3bf377626/goatools/cli/compare_gos.py#L129-L135
def _get_fncsortnt(flds): """Return a sort function for sorting header GO IDs found in sections.""" if 'tinfo' in flds: return lambda ntgo: [ntgo.NS, -1*ntgo.tinfo, ntgo.depth, ntgo.alt] if 'dcnt' in flds: return lambda ntgo: [ntgo.NS, -1*ntgo.dcnt, ntgo.depth, ntgo.alt] return lambda ntgo: [ntgo.NS, -1*ntgo.depth, ntgo.alt]
[ "def", "_get_fncsortnt", "(", "flds", ")", ":", "if", "'tinfo'", "in", "flds", ":", "return", "lambda", "ntgo", ":", "[", "ntgo", ".", "NS", ",", "-", "1", "*", "ntgo", ".", "tinfo", ",", "ntgo", ".", "depth", ",", "ntgo", ".", "alt", "]", "if", "'dcnt'", "in", "flds", ":", "return", "lambda", "ntgo", ":", "[", "ntgo", ".", "NS", ",", "-", "1", "*", "ntgo", ".", "dcnt", ",", "ntgo", ".", "depth", ",", "ntgo", ".", "alt", "]", "return", "lambda", "ntgo", ":", "[", "ntgo", ".", "NS", ",", "-", "1", "*", "ntgo", ".", "depth", ",", "ntgo", ".", "alt", "]" ]
Return a sort function for sorting header GO IDs found in sections.
[ "Return", "a", "sort", "function", "for", "sorting", "header", "GO", "IDs", "found", "in", "sections", "." ]
python
train
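A self-contained sketch of the same factory idea: pick a sort key based on which optional fields are available (the namedtuple and its values are invented stand-ins for the GO-term records):

from collections import namedtuple

NtGo = namedtuple("NtGo", "NS dcnt depth alt")

def get_fncsortnt(flds):
    if "dcnt" in flds:
        return lambda nt: [nt.NS, -1 * nt.dcnt, nt.depth, nt.alt]
    return lambda nt: [nt.NS, -1 * nt.depth, nt.alt]

gos = [NtGo("BP", 5, 2, ""), NtGo("BP", 9, 1, "")]
print(sorted(gos, key=get_fncsortnt(["dcnt"]))[0].dcnt)  # 9: larger dcnt sorts first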
tensorlayer/tensorlayer
examples/data_process/tutorial_tfrecord3.py
https://github.com/tensorlayer/tensorlayer/blob/aa9e52e36c7058a7e6fd81d36563ca6850b21956/examples/data_process/tutorial_tfrecord3.py#L41-L45
def _int64_feature_list(values): """Wrapper for inserting an int64 FeatureList into a SequenceExample proto, e.g, sentence in list of ints """ return tf.train.FeatureList(feature=[_int64_feature(v) for v in values])
[ "def", "_int64_feature_list", "(", "values", ")", ":", "return", "tf", ".", "train", ".", "FeatureList", "(", "feature", "=", "[", "_int64_feature", "(", "v", ")", "for", "v", "in", "values", "]", ")" ]
Wrapper for inserting an int64 FeatureList into a SequenceExample proto, e.g., a sentence as a list of ints
[ "Wrapper", "for", "inserting", "an", "int64", "FeatureList", "into", "a", "SequenceExample", "proto", "e", ".", "g", "sentence", "in", "list", "of", "ints" ]
python
valid
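_int64_feature itself is not shown above; assuming it is the usual wrapper around tf.train.Int64List, a runnable sketch (requires TensorFlow) would look like:

import tensorflow as tf

def _int64_feature(value):
    return tf.train.Feature(int64_list=tf.train.Int64List(value=[value]))

def _int64_feature_list(values):
    return tf.train.FeatureList(feature=[_int64_feature(v) for v in values])

sentence = [2, 315, 5928, 3]           # word ids (invented)
print(_int64_feature_list(sentence))   # a FeatureList proto with 4 int64 features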
hermanschaaf/mafan
mafan/third_party/jianfan/__init__.py
https://github.com/hermanschaaf/mafan/blob/373ddf299aeb2bd8413bf921c71768af7a8170ea/mafan/third_party/jianfan/__init__.py#L23-L45
def _t(unistr, charset_from, charset_to): """ This is a unexposed function, is responsibility for translation internal. """ # if type(unistr) is str: # try: # unistr = unistr.decode('utf-8') # # Python 3 returns AttributeError when .decode() is called on a str # # This means it is already unicode. # except AttributeError: # pass # try: # if type(unistr) is not unicode: # return unistr # # Python 3 returns NameError because unicode is not a type. # except NameError: # pass chars = [] for c in unistr: idx = charset_from.find(c) chars.append(charset_to[idx] if idx!=-1 else c) return u''.join(chars)
[ "def", "_t", "(", "unistr", ",", "charset_from", ",", "charset_to", ")", ":", "# if type(unistr) is str:", "# try:", "# unistr = unistr.decode('utf-8')", "# # Python 3 returns AttributeError when .decode() is called on a str", "# # This means it is already unicode.", "# except AttributeError:", "# pass", "# try:", "# if type(unistr) is not unicode:", "# return unistr", "# # Python 3 returns NameError because unicode is not a type.", "# except NameError:", "# pass", "chars", "=", "[", "]", "for", "c", "in", "unistr", ":", "idx", "=", "charset_from", ".", "find", "(", "c", ")", "chars", ".", "append", "(", "charset_to", "[", "idx", "]", "if", "idx", "!=", "-", "1", "else", "c", ")", "return", "u''", ".", "join", "(", "chars", ")" ]
This is an unexposed function, responsible for the internal translation.
[ "This", "is", "a", "unexposed", "function", "is", "responsibility", "for", "translation", "internal", "." ]
python
train
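With the commented-out Python 2/3 shims removed, _t is a character table lookup; a standalone sketch with toy two-character tables in place of the real traditional/simplified charsets:

def translate(unistr, charset_from, charset_to):
    chars = []
    for c in unistr:
        idx = charset_from.find(c)
        chars.append(charset_to[idx] if idx != -1 else c)  # pass unknown chars through
    return "".join(chars)

print(translate("abcd", "bd", "BD"))   # aBcD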
xflr6/bitsets
bitsets/transform.py
https://github.com/xflr6/bitsets/blob/ddcfe17e7c7a11f71f1c6764b2cecf7db05d9cdf/bitsets/transform.py#L106-L116
def unpackbools(integers, dtype='L'): """Yield booleans unpacking integers of dtype bit-length. >>> list(unpackbools([42], 'B')) [False, True, False, True, False, True, False, False] """ atoms = ATOMS[dtype] for chunk in integers: for a in atoms: yield not not chunk & a
[ "def", "unpackbools", "(", "integers", ",", "dtype", "=", "'L'", ")", ":", "atoms", "=", "ATOMS", "[", "dtype", "]", "for", "chunk", "in", "integers", ":", "for", "a", "in", "atoms", ":", "yield", "not", "not", "chunk", "&", "a" ]
Yield booleans unpacking integers of dtype bit-length. >>> list(unpackbools([42], 'B')) [False, True, False, True, False, True, False, False]
[ "Yield", "booleans", "unpacking", "integers", "of", "dtype", "bit", "-", "length", "." ]
python
train
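The ATOMS table is not shown above; it is presumably one single-bit mask per bit of the chosen integer width, so a self-contained version can build it inline:

def unpackbools(integers, nbits=8):
    atoms = [1 << i for i in range(nbits)]   # one mask per bit position
    for chunk in integers:
        for a in atoms:
            yield bool(chunk & a)

print(list(unpackbools([42])))
# [False, True, False, True, False, True, False, False]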
OSSOS/MOP
src/ossos/core/ossos/planning/obs_planner.py
https://github.com/OSSOS/MOP/blob/94f91d32ad5ec081d5a1ebd67604a838003465af/src/ossos/core/ossos/planning/obs_planner.py#L893-L1058
def save_pointings(self): """Print the currently defined FOVs""" i = 0 if self.pointing_format.get() in ['GEMINI ET', 'CFHT ET', 'CFHT API']: logging.info('Beginning table pointing save.') for pointing in self.pointings: name = pointing["label"]["text"] camera = pointing["camera"] ccds = numpy.radians(camera.geometry) polygons = [] for ccd in ccds: polygon = Polygon.Polygon(((ccd[0], ccd[1]), (ccd[0], ccd[3]), (ccd[2], ccd[3]), (ccd[2], ccd[1]), (ccd[0], ccd[1]))) polygons.append(polygon) et = EphemTarget(name, ephem_format=self.pointing_format.get()) # determine the mean motion of target KBOs in this field. field_kbos = [] center_ra = 0 center_dec = 0 pointing_date = mpc.Time(self.date.get(), scale='utc') start_date = mpc.Time(self.date.get(), scale='utc') - TimeDelta(8.1*units.day) end_date = start_date + TimeDelta(17*units.day) time_step = TimeDelta(3.0*units.hour) # Compute the mean position of KBOs in the field on current date. for kbo_name, kbo in self.kbos.items(): if kbo_name in Neptune or kbo_name in tracking_termination: print 'skipping', kbo_name continue kbo.predict(pointing_date) ra = kbo.coordinate.ra dec = kbo.coordinate.dec if kbo_name in name: print "{} matches pointing {} by name, adding to field.".format(kbo_name, name) field_kbos.append(kbo) center_ra += ra.radian center_dec += dec.radian else: for polygon in polygons: if polygon.isInside(ra.radian, dec.radian): print "{} inside pointing {} polygon, adding to field.".format(kbo_name, name) field_kbos.append(kbo) center_ra += ra.radian center_dec += dec.radian # logging.critical("KBOs in field {0}: {1}".format(name, ', '.join([n.name for n in field_kbos]))) today = start_date while today < end_date: today += time_step mean_motion = (0, 0) max_mag = 0.0 if len(field_kbos) > 0: current_ra = 0 current_dec = 0 for kbo in field_kbos: kbo.predict(today) max_mag = max(max_mag, kbo.mag) current_ra += kbo.coordinate.ra.radian current_dec += kbo.coordinate.dec.radian mean_motion = ((current_ra - center_ra) / len(field_kbos), (current_dec - center_dec) / len(field_kbos)) ra = pointing['camera'].coordinate.ra.radian + mean_motion[0] dec = pointing['camera'].coordinate.dec.radian + mean_motion[1] cc = SkyCoord(ra=ra, dec=dec, unit=(units.radian, units.radian), obstime=today) dt = pointing_date - today cc.dra = (mean_motion[0] * units.radian / dt.to(units.hour)).to(units.arcsec/units.hour).value*math.cos(dec) cc.ddec = (mean_motion[1] * units.radian / dt.to(units.hour)).to(units.arcsec/units.hour).value cc.mag = max_mag et.append(cc) et.save() return f = tkFileDialog.asksaveasfile() if self.pointing_format.get() == 'Subaru': for pointing in self.pointings: (sra, sdec) = str(pointing["camera"]).split() ra = sra.replace(":", "") dec = sdec.replace(":", "") name = pointing["label"]["text"] f.write("""{}=OBJECT="{}" RA={} DEC={} EQUINOX=2000.0 INSROT_PA=90\n""".format(name, name, ra, dec)) return if self.pointing_format.get() == 'CFHT PH': f.write("""<?xml version = "1.0"?> <!DOCTYPE ASTRO SYSTEM "http://vizier.u-strasbg.fr/xml/astrores.dtd"> <ASTRO ID="v0.8" xmlns:ASTRO="http://vizier.u-strasbg.fr/doc/astrores.htx"> <TABLE ID="Table"> <NAME>Fixed Targets</NAME> <TITLE>Fixed Targets for CFHT QSO</TITLE> <!-- Definition of each field --> <FIELD name="NAME" datatype="A" width="20"> <DESCRIPTION>Name of target</DESCRIPTION> </FIELD> <FIELD name="RA" ref="" datatype="A" width="11" unit="&quot;h:m:s&quot;"> <DESCRIPTION>Right ascension of target</DESCRIPTION> </FIELD> <FIELD name="DEC" ref="" datatype="A" width="11" unit="&quot;d:m:s&quot;"> 
<DESCRIPTION>Declination of target</DESCRIPTION> </FIELD> <FIELD name="EPOCH" datatype="F" width="6"> <DESCRIPTION>Epoch of coordinates</DESCRIPTION> </FIELD> <FIELD name="POINT" datatype="A" width="5"> <DESCRIPTION>Pointing name</DESCRIPTION> </FIELD> <!-- Data table --> <DATA><CSV headlines="4" colsep="|"><![CDATA[ NAME |RA |DEC |EPOCH |POINT| |hh:mm:ss.ss|+dd:mm:ss.s| | | 12345678901234567890|12345678901|12345678901|123456|12345| --------------------|-----------|-----------|------|-----|\n""") if self.pointing_format.get() == 'Palomar': f.write("index\n") for pointing in self.pointings: i = i + 1 name = pointing["label"]["text"] (sra, sdec) = str(pointing["camera"]).split() ra = sra.split(":") dec = sdec.split(":") dec[0] = str(int(dec[0])) if int(dec[0]) >= 0: dec[0] = '+' + dec[0] if self.pointing_format.get() == 'Palomar': f.write("%5d %16s %2s %2s %4s %3s %2s %4s 2000\n" % (i, name, ra[0].zfill(2), ra[1].zfill(2), ra[2].zfill(2), dec[0].zfill(3), dec[1].zfill(2), dec[2].zfill(2))) elif self.pointing_format.get() == 'CFHT PH': # f.write("%f %f\n" % (pointing["camera"].ra,pointing["camera"].dec)) f.write("%-20s|%11s|%11s|%6.1f|%-5d|\n" % (name, sra, sdec, 2000.0, 1)) elif self.pointing_format.get() == 'KPNO/CTIO': str1 = sra.replace(":", " ") str2 = sdec.replace(":", " ") f.write("%16s %16s %16s 2000\n" % ( name, str1, str2)) elif self.pointing_format.get() == 'SSim': ra = [] dec = [] for ccd in pointing["camera"].geometry: ra.append(ccd[0]) ra.append(ccd[2]) dec.append(ccd[1]) dec.append(ccd[3]) dra = math.degrees(math.fabs(max(ra) - min(ra))) ddec = math.degrees(math.fabs(max(dec) - min(dec))) f.write("%f %f %16s %16s DATE 1.00 1.00 500 FILE\n" % (dra, ddec, sra, sdec )) if self.pointing_format.get() == 'CFHT PH': f.write("""]]</CSV></DATA> </TABLE> </ASTRO> """) f.close()
[ "def", "save_pointings", "(", "self", ")", ":", "i", "=", "0", "if", "self", ".", "pointing_format", ".", "get", "(", ")", "in", "[", "'GEMINI ET'", ",", "'CFHT ET'", ",", "'CFHT API'", "]", ":", "logging", ".", "info", "(", "'Beginning table pointing save.'", ")", "for", "pointing", "in", "self", ".", "pointings", ":", "name", "=", "pointing", "[", "\"label\"", "]", "[", "\"text\"", "]", "camera", "=", "pointing", "[", "\"camera\"", "]", "ccds", "=", "numpy", ".", "radians", "(", "camera", ".", "geometry", ")", "polygons", "=", "[", "]", "for", "ccd", "in", "ccds", ":", "polygon", "=", "Polygon", ".", "Polygon", "(", "(", "(", "ccd", "[", "0", "]", ",", "ccd", "[", "1", "]", ")", ",", "(", "ccd", "[", "0", "]", ",", "ccd", "[", "3", "]", ")", ",", "(", "ccd", "[", "2", "]", ",", "ccd", "[", "3", "]", ")", ",", "(", "ccd", "[", "2", "]", ",", "ccd", "[", "1", "]", ")", ",", "(", "ccd", "[", "0", "]", ",", "ccd", "[", "1", "]", ")", ")", ")", "polygons", ".", "append", "(", "polygon", ")", "et", "=", "EphemTarget", "(", "name", ",", "ephem_format", "=", "self", ".", "pointing_format", ".", "get", "(", ")", ")", "# determine the mean motion of target KBOs in this field.", "field_kbos", "=", "[", "]", "center_ra", "=", "0", "center_dec", "=", "0", "pointing_date", "=", "mpc", ".", "Time", "(", "self", ".", "date", ".", "get", "(", ")", ",", "scale", "=", "'utc'", ")", "start_date", "=", "mpc", ".", "Time", "(", "self", ".", "date", ".", "get", "(", ")", ",", "scale", "=", "'utc'", ")", "-", "TimeDelta", "(", "8.1", "*", "units", ".", "day", ")", "end_date", "=", "start_date", "+", "TimeDelta", "(", "17", "*", "units", ".", "day", ")", "time_step", "=", "TimeDelta", "(", "3.0", "*", "units", ".", "hour", ")", "# Compute the mean position of KBOs in the field on current date.", "for", "kbo_name", ",", "kbo", "in", "self", ".", "kbos", ".", "items", "(", ")", ":", "if", "kbo_name", "in", "Neptune", "or", "kbo_name", "in", "tracking_termination", ":", "print", "'skipping'", ",", "kbo_name", "continue", "kbo", ".", "predict", "(", "pointing_date", ")", "ra", "=", "kbo", ".", "coordinate", ".", "ra", "dec", "=", "kbo", ".", "coordinate", ".", "dec", "if", "kbo_name", "in", "name", ":", "print", "\"{} matches pointing {} by name, adding to field.\"", ".", "format", "(", "kbo_name", ",", "name", ")", "field_kbos", ".", "append", "(", "kbo", ")", "center_ra", "+=", "ra", ".", "radian", "center_dec", "+=", "dec", ".", "radian", "else", ":", "for", "polygon", "in", "polygons", ":", "if", "polygon", ".", "isInside", "(", "ra", ".", "radian", ",", "dec", ".", "radian", ")", ":", "print", "\"{} inside pointing {} polygon, adding to field.\"", ".", "format", "(", "kbo_name", ",", "name", ")", "field_kbos", ".", "append", "(", "kbo", ")", "center_ra", "+=", "ra", ".", "radian", "center_dec", "+=", "dec", ".", "radian", "# logging.critical(\"KBOs in field {0}: {1}\".format(name, ', '.join([n.name for n in field_kbos])))", "today", "=", "start_date", "while", "today", "<", "end_date", ":", "today", "+=", "time_step", "mean_motion", "=", "(", "0", ",", "0", ")", "max_mag", "=", "0.0", "if", "len", "(", "field_kbos", ")", ">", "0", ":", "current_ra", "=", "0", "current_dec", "=", "0", "for", "kbo", "in", "field_kbos", ":", "kbo", ".", "predict", "(", "today", ")", "max_mag", "=", "max", "(", "max_mag", ",", "kbo", ".", "mag", ")", "current_ra", "+=", "kbo", ".", "coordinate", ".", "ra", ".", "radian", "current_dec", "+=", "kbo", ".", "coordinate", ".", "dec", ".", "radian", "mean_motion", "=", "(", "(", 
"current_ra", "-", "center_ra", ")", "/", "len", "(", "field_kbos", ")", ",", "(", "current_dec", "-", "center_dec", ")", "/", "len", "(", "field_kbos", ")", ")", "ra", "=", "pointing", "[", "'camera'", "]", ".", "coordinate", ".", "ra", ".", "radian", "+", "mean_motion", "[", "0", "]", "dec", "=", "pointing", "[", "'camera'", "]", ".", "coordinate", ".", "dec", ".", "radian", "+", "mean_motion", "[", "1", "]", "cc", "=", "SkyCoord", "(", "ra", "=", "ra", ",", "dec", "=", "dec", ",", "unit", "=", "(", "units", ".", "radian", ",", "units", ".", "radian", ")", ",", "obstime", "=", "today", ")", "dt", "=", "pointing_date", "-", "today", "cc", ".", "dra", "=", "(", "mean_motion", "[", "0", "]", "*", "units", ".", "radian", "/", "dt", ".", "to", "(", "units", ".", "hour", ")", ")", ".", "to", "(", "units", ".", "arcsec", "/", "units", ".", "hour", ")", ".", "value", "*", "math", ".", "cos", "(", "dec", ")", "cc", ".", "ddec", "=", "(", "mean_motion", "[", "1", "]", "*", "units", ".", "radian", "/", "dt", ".", "to", "(", "units", ".", "hour", ")", ")", ".", "to", "(", "units", ".", "arcsec", "/", "units", ".", "hour", ")", ".", "value", "cc", ".", "mag", "=", "max_mag", "et", ".", "append", "(", "cc", ")", "et", ".", "save", "(", ")", "return", "f", "=", "tkFileDialog", ".", "asksaveasfile", "(", ")", "if", "self", ".", "pointing_format", ".", "get", "(", ")", "==", "'Subaru'", ":", "for", "pointing", "in", "self", ".", "pointings", ":", "(", "sra", ",", "sdec", ")", "=", "str", "(", "pointing", "[", "\"camera\"", "]", ")", ".", "split", "(", ")", "ra", "=", "sra", ".", "replace", "(", "\":\"", ",", "\"\"", ")", "dec", "=", "sdec", ".", "replace", "(", "\":\"", ",", "\"\"", ")", "name", "=", "pointing", "[", "\"label\"", "]", "[", "\"text\"", "]", "f", ".", "write", "(", "\"\"\"{}=OBJECT=\"{}\" RA={} DEC={} EQUINOX=2000.0 INSROT_PA=90\\n\"\"\"", ".", "format", "(", "name", ",", "name", ",", "ra", ",", "dec", ")", ")", "return", "if", "self", ".", "pointing_format", ".", "get", "(", ")", "==", "'CFHT PH'", ":", "f", ".", "write", "(", "\"\"\"<?xml version = \"1.0\"?>\n<!DOCTYPE ASTRO SYSTEM \"http://vizier.u-strasbg.fr/xml/astrores.dtd\">\n<ASTRO ID=\"v0.8\" xmlns:ASTRO=\"http://vizier.u-strasbg.fr/doc/astrores.htx\">\n<TABLE ID=\"Table\">\n<NAME>Fixed Targets</NAME>\n<TITLE>Fixed Targets for CFHT QSO</TITLE>\n<!-- Definition of each field -->\n<FIELD name=\"NAME\" datatype=\"A\" width=\"20\">\n <DESCRIPTION>Name of target</DESCRIPTION>\n</FIELD>\n<FIELD name=\"RA\" ref=\"\" datatype=\"A\" width=\"11\" unit=\"&quot;h:m:s&quot;\">\n <DESCRIPTION>Right ascension of target</DESCRIPTION>\n</FIELD> \n<FIELD name=\"DEC\" ref=\"\" datatype=\"A\" width=\"11\" unit=\"&quot;d:m:s&quot;\">\n <DESCRIPTION>Declination of target</DESCRIPTION>\n</FIELD> \n<FIELD name=\"EPOCH\" datatype=\"F\" width=\"6\">\n <DESCRIPTION>Epoch of coordinates</DESCRIPTION>\n</FIELD> \n<FIELD name=\"POINT\" datatype=\"A\" width=\"5\">\n<DESCRIPTION>Pointing name</DESCRIPTION>\n</FIELD> \n<!-- Data table --> \n<DATA><CSV headlines=\"4\" colsep=\"|\"><![CDATA[\nNAME |RA |DEC |EPOCH |POINT|\n |hh:mm:ss.ss|+dd:mm:ss.s| | |\n12345678901234567890|12345678901|12345678901|123456|12345|\n--------------------|-----------|-----------|------|-----|\\n\"\"\"", ")", "if", "self", ".", "pointing_format", ".", "get", "(", ")", "==", "'Palomar'", ":", "f", ".", "write", "(", "\"index\\n\"", ")", "for", "pointing", "in", "self", ".", "pointings", ":", "i", "=", "i", "+", "1", "name", "=", "pointing", "[", "\"label\"", "]", "[", "\"text\"", "]", "(", 
"sra", ",", "sdec", ")", "=", "str", "(", "pointing", "[", "\"camera\"", "]", ")", ".", "split", "(", ")", "ra", "=", "sra", ".", "split", "(", "\":\"", ")", "dec", "=", "sdec", ".", "split", "(", "\":\"", ")", "dec", "[", "0", "]", "=", "str", "(", "int", "(", "dec", "[", "0", "]", ")", ")", "if", "int", "(", "dec", "[", "0", "]", ")", ">=", "0", ":", "dec", "[", "0", "]", "=", "'+'", "+", "dec", "[", "0", "]", "if", "self", ".", "pointing_format", ".", "get", "(", ")", "==", "'Palomar'", ":", "f", ".", "write", "(", "\"%5d %16s %2s %2s %4s %3s %2s %4s 2000\\n\"", "%", "(", "i", ",", "name", ",", "ra", "[", "0", "]", ".", "zfill", "(", "2", ")", ",", "ra", "[", "1", "]", ".", "zfill", "(", "2", ")", ",", "ra", "[", "2", "]", ".", "zfill", "(", "2", ")", ",", "dec", "[", "0", "]", ".", "zfill", "(", "3", ")", ",", "dec", "[", "1", "]", ".", "zfill", "(", "2", ")", ",", "dec", "[", "2", "]", ".", "zfill", "(", "2", ")", ")", ")", "elif", "self", ".", "pointing_format", ".", "get", "(", ")", "==", "'CFHT PH'", ":", "# f.write(\"%f %f\\n\" % (pointing[\"camera\"].ra,pointing[\"camera\"].dec))", "f", ".", "write", "(", "\"%-20s|%11s|%11s|%6.1f|%-5d|\\n\"", "%", "(", "name", ",", "sra", ",", "sdec", ",", "2000.0", ",", "1", ")", ")", "elif", "self", ".", "pointing_format", ".", "get", "(", ")", "==", "'KPNO/CTIO'", ":", "str1", "=", "sra", ".", "replace", "(", "\":\"", ",", "\" \"", ")", "str2", "=", "sdec", ".", "replace", "(", "\":\"", ",", "\" \"", ")", "f", ".", "write", "(", "\"%16s %16s %16s 2000\\n\"", "%", "(", "name", ",", "str1", ",", "str2", ")", ")", "elif", "self", ".", "pointing_format", ".", "get", "(", ")", "==", "'SSim'", ":", "ra", "=", "[", "]", "dec", "=", "[", "]", "for", "ccd", "in", "pointing", "[", "\"camera\"", "]", ".", "geometry", ":", "ra", ".", "append", "(", "ccd", "[", "0", "]", ")", "ra", ".", "append", "(", "ccd", "[", "2", "]", ")", "dec", ".", "append", "(", "ccd", "[", "1", "]", ")", "dec", ".", "append", "(", "ccd", "[", "3", "]", ")", "dra", "=", "math", ".", "degrees", "(", "math", ".", "fabs", "(", "max", "(", "ra", ")", "-", "min", "(", "ra", ")", ")", ")", "ddec", "=", "math", ".", "degrees", "(", "math", ".", "fabs", "(", "max", "(", "dec", ")", "-", "min", "(", "dec", ")", ")", ")", "f", ".", "write", "(", "\"%f %f %16s %16s DATE 1.00 1.00 500 FILE\\n\"", "%", "(", "dra", ",", "ddec", ",", "sra", ",", "sdec", ")", ")", "if", "self", ".", "pointing_format", ".", "get", "(", ")", "==", "'CFHT PH'", ":", "f", ".", "write", "(", "\"\"\"]]</CSV></DATA>\n</TABLE>\n</ASTRO>\n\"\"\"", ")", "f", ".", "close", "(", ")" ]
Print the currently defined FOVs
[ "Print", "the", "currently", "defined", "FOVs" ]
python
train
sdispater/cleo
cleo/commands/command.py
https://github.com/sdispater/cleo/blob/cf44ac2eba2d6435516501e47e5521ee2da9115a/cleo/commands/command.py#L112-L122
def call_silent(self, name, args=None): # type: (str, Optional[str]) -> int """ Call another command. """ if args is None: args = "" args = StringArgs(args) command = self.application.get_command(name) return command.run(args, NullIO())
[ "def", "call_silent", "(", "self", ",", "name", ",", "args", "=", "None", ")", ":", "# type: (str, Optional[str]) -> int", "if", "args", "is", "None", ":", "args", "=", "\"\"", "args", "=", "StringArgs", "(", "args", ")", "command", "=", "self", ".", "application", ".", "get_command", "(", "name", ")", "return", "command", ".", "run", "(", "args", ",", "NullIO", "(", ")", ")" ]
Call another command.
[ "Call", "another", "command", "." ]
python
train
databio/pypiper
pypiper/utils.py
https://github.com/databio/pypiper/blob/00e6c2b94033c4187d47ff14c5580bbfc2ff097f/pypiper/utils.py#L459-L480
def parse_cores(cores, pm, default): """ Framework to finalize number of cores for an operation. Some calls to a function may directly provide a desired number of cores, others may not. Similarly, some pipeline managers may define a cores count while others will not. This utility provides a single via which the count of cores to use for an operation may be determined. If a cores count is given explicitly, use that. Then try pipeline manager for cores. Finally, fall back to a default. Force default to be defined (this function is intended to be partially applied, then reused within a module, class, etc. to standardize the way in which this value is determined within a scope.) :param int | str cores: direct specification of cores count :param pypiper.PipelineManager pm: pipeline manager perhaps defining cores :param int | str default: default number of cores, used if a value isn't directly given and the pipeline manager doesn't define core count. :return int: number of cores """ cores = cores or getattr(pm, "cores", default) return int(cores)
[ "def", "parse_cores", "(", "cores", ",", "pm", ",", "default", ")", ":", "cores", "=", "cores", "or", "getattr", "(", "pm", ",", "\"cores\"", ",", "default", ")", "return", "int", "(", "cores", ")" ]
Framework to finalize number of cores for an operation. Some calls to a function may directly provide a desired number of cores, others may not. Similarly, some pipeline managers may define a cores count while others will not. This utility provides a single route via which the count of cores to use for an operation may be determined. If a cores count is given explicitly, use that. Then try pipeline manager for cores. Finally, fall back to a default. Force default to be defined (this function is intended to be partially applied, then reused within a module, class, etc. to standardize the way in which this value is determined within a scope.) :param int | str cores: direct specification of cores count :param pypiper.PipelineManager pm: pipeline manager perhaps defining cores :param int | str default: default number of cores, used if a value isn't directly given and the pipeline manager doesn't define core count. :return int: number of cores
[ "Framework", "to", "finalize", "number", "of", "cores", "for", "an", "operation", "." ]
python
train
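The whole fallback chain fits in one line; a sketch with a dummy manager class standing in for pypiper.PipelineManager:

class DummyPM:
    cores = 8

def parse_cores(cores, pm, default):
    cores = cores or getattr(pm, "cores", default)
    return int(cores)

print(parse_cores(None, DummyPM(), 1))  # 8: manager attribute wins over default
print(parse_cores("4", DummyPM(), 1))   # 4: explicit argument wins over everything
print(parse_cores(None, object(), 1))   # 1: default as the last resort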
geomet/geomet
geomet/wkt.py
https://github.com/geomet/geomet/blob/b82d7118113ab723751eba3de5df98c368423c2b/geomet/wkt.py#L236-L250
def _dump_polygon(obj, decimals): """ Dump a GeoJSON-like Polygon object to WKT. Input parameters and return value are the POLYGON equivalent to :func:`_dump_point`. """ coords = obj['coordinates'] poly = 'POLYGON (%s)' rings = (', '.join(' '.join(_round_and_pad(c, decimals) for c in pt) for pt in ring) for ring in coords) rings = ('(%s)' % r for r in rings) poly %= ', '.join(rings) return poly
[ "def", "_dump_polygon", "(", "obj", ",", "decimals", ")", ":", "coords", "=", "obj", "[", "'coordinates'", "]", "poly", "=", "'POLYGON (%s)'", "rings", "=", "(", "', '", ".", "join", "(", "' '", ".", "join", "(", "_round_and_pad", "(", "c", ",", "decimals", ")", "for", "c", "in", "pt", ")", "for", "pt", "in", "ring", ")", "for", "ring", "in", "coords", ")", "rings", "=", "(", "'(%s)'", "%", "r", "for", "r", "in", "rings", ")", "poly", "%=", "', '", ".", "join", "(", "rings", ")", "return", "poly" ]
Dump a GeoJSON-like Polygon object to WKT. Input parameters and return value are the POLYGON equivalent to :func:`_dump_point`.
[ "Dump", "a", "GeoJSON", "-", "like", "Polygon", "object", "to", "WKT", "." ]
python
train
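A standalone sketch of the nested-generator dump; geomet's _round_and_pad is replaced by a plain round-and-str helper, so the decimal padding differs from the real output:

def dump_polygon(obj, decimals):
    fmt = lambda c: str(round(c, decimals))
    rings = (", ".join(" ".join(fmt(c) for c in pt) for pt in ring)
             for ring in obj["coordinates"])
    return "POLYGON (%s)" % ", ".join("(%s)" % r for r in rings)

poly = {"coordinates": [[(0, 0), (4, 0), (4, 4), (0, 0)]]}
print(dump_polygon(poly, 1))  # POLYGON ((0 0, 4 0, 4 4, 0 0))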
resync/resync
resync/client.py
https://github.com/resync/resync/blob/98292c17b2c00f2d6f5191c6ab51fef8c292a018/resync/client.py#L74-L84
def sitemap_uri(self, basename): """Get full URI (filepath) for sitemap based on basename.""" if (re.match(r"\w+:", basename)): # looks like URI return(basename) elif (re.match(r"/", basename)): # looks like full path return(basename) else: # build from mapping with name appended return(self.mapper.default_src_uri() + '/' + basename)
[ "def", "sitemap_uri", "(", "self", ",", "basename", ")", ":", "if", "(", "re", ".", "match", "(", "r\"\\w+:\"", ",", "basename", ")", ")", ":", "# looks like URI", "return", "(", "basename", ")", "elif", "(", "re", ".", "match", "(", "r\"/\"", ",", "basename", ")", ")", ":", "# looks like full path", "return", "(", "basename", ")", "else", ":", "# build from mapping with name appended", "return", "(", "self", ".", "mapper", ".", "default_src_uri", "(", ")", "+", "'/'", "+", "basename", ")" ]
Get full URI (filepath) for sitemap based on basename.
[ "Get", "full", "URI", "(", "filepath", ")", "for", "sitemap", "based", "on", "basename", "." ]
python
train
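A sketch of the three-way classification with the mapper collapsed to a fixed base URI (the base is invented):

import re

def sitemap_uri(basename, default_src_uri="http://example.org/rs"):
    if re.match(r"\w+:", basename):    # looks like a URI with a scheme
        return basename
    elif re.match(r"/", basename):     # looks like a full path
        return basename
    return default_src_uri + "/" + basename   # relative name: append to base

print(sitemap_uri("resourcelist.xml"))
# http://example.org/rs/resourcelist.xml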
dhylands/rshell
rshell/main.py
https://github.com/dhylands/rshell/blob/a92a8fa8074ac792241c83c640a51b394667c324/rshell/main.py#L1489-L1496
def remote_eval_last(self, func, *args, **kwargs): """Calls func with the indicated args on the micropython board, and converts the response back into python by using eval. """ result = self.remote(func, *args, **kwargs).split(b'\r\n') messages = result[0:-2] messages = b'\n'.join(messages).decode('utf-8') return (eval(result[-2]), messages)
[ "def", "remote_eval_last", "(", "self", ",", "func", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "result", "=", "self", ".", "remote", "(", "func", ",", "*", "args", ",", "*", "*", "kwargs", ")", ".", "split", "(", "b'\\r\\n'", ")", "messages", "=", "result", "[", "0", ":", "-", "2", "]", "messages", "=", "b'\\n'", ".", "join", "(", "messages", ")", ".", "decode", "(", "'utf-8'", ")", "return", "(", "eval", "(", "result", "[", "-", "2", "]", ")", ",", "messages", ")" ]
Calls func with the indicated args on the micropython board, and converts the response back into python by using eval.
[ "Calls", "func", "with", "the", "indicated", "args", "on", "the", "micropython", "board", "and", "converts", "the", "response", "back", "into", "python", "by", "using", "eval", "." ]
python
train
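A sketch of just the reply parsing: the CRLF-separated chunks before the penultimate one are log output, and the penultimate chunk is the repr of the return value (the reply bytes below are fabricated):

reply = b"booting...\r\n[1, 2, 3]\r\n"
result = reply.split(b"\r\n")
messages = b"\n".join(result[0:-2]).decode("utf-8")
value = eval(result[-2])          # eval of a repr, as in remote_eval_last
print(value, "|", messages)       # [1, 2, 3] | booting...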
graphql-python/graphql-core-next
graphql/execution/execute.py
https://github.com/graphql-python/graphql-core-next/blob/073dce3f002f897d40f9348ffd8f107815160540/graphql/execution/execute.py#L306-L327
def build_response( self, data: AwaitableOrValue[Optional[Dict[str, Any]]] ) -> AwaitableOrValue[ExecutionResult]: """Build response. Given a completed execution context and data, build the (data, errors) response defined by the "Response" section of the GraphQL spec. """ if isawaitable(data): async def build_response_async(): return self.build_response(await data) return build_response_async() data = cast(Optional[Dict[str, Any]], data) errors = self.errors if not errors: return ExecutionResult(data, None) # Sort the error list in order to make it deterministic, since we might have # been using parallel execution. errors.sort(key=lambda error: (error.locations, error.path, error.message)) return ExecutionResult(data, errors)
[ "def", "build_response", "(", "self", ",", "data", ":", "AwaitableOrValue", "[", "Optional", "[", "Dict", "[", "str", ",", "Any", "]", "]", "]", ")", "->", "AwaitableOrValue", "[", "ExecutionResult", "]", ":", "if", "isawaitable", "(", "data", ")", ":", "async", "def", "build_response_async", "(", ")", ":", "return", "self", ".", "build_response", "(", "await", "data", ")", "return", "build_response_async", "(", ")", "data", "=", "cast", "(", "Optional", "[", "Dict", "[", "str", ",", "Any", "]", "]", ",", "data", ")", "errors", "=", "self", ".", "errors", "if", "not", "errors", ":", "return", "ExecutionResult", "(", "data", ",", "None", ")", "# Sort the error list in order to make it deterministic, since we might have", "# been using parallel execution.", "errors", ".", "sort", "(", "key", "=", "lambda", "error", ":", "(", "error", ".", "locations", ",", "error", ".", "path", ",", "error", ".", "message", ")", ")", "return", "ExecutionResult", "(", "data", ",", "errors", ")" ]
Build response. Given a completed execution context and data, build the (data, errors) response defined by the "Response" section of the GraphQL spec.
[ "Build", "response", "." ]
python
train
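A sketch of the deterministic ordering step alone, with dicts standing in for GraphQLError objects: sorting by (locations, path, message) makes the reported error list independent of parallel execution order:

errors = [
    {"locations": [(2, 3)], "path": ["b"], "message": "second"},
    {"locations": [(1, 1)], "path": ["a"], "message": "first"},
]
errors.sort(key=lambda e: (e["locations"], e["path"], e["message"]))
print([e["message"] for e in errors])  # ['first', 'second']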
raiden-network/raiden
raiden/transfer/node.py
https://github.com/raiden-network/raiden/blob/407ba15c72074e9de88771d6b9661ff4dc36bef5/raiden/transfer/node.py#L1183-L1232
def is_transaction_invalidated(transaction, state_change): """ True if the `transaction` is made invalid by `state_change`. Some transactions will fail due to race conditions. The races are: - Another transaction which has the same side effect is executed before. - Another transaction which *invalidates* the state of the smart contract required by the local transaction is executed before it. The first case is handled by the predicate `is_transaction_effect_satisfied`, where a transaction from a different source which does the same thing is considered. This predicate handles the second scenario. A transaction can **only** invalidate another iff both share a valid initial state but a different end state. Valid example: A close can invalidate a deposit, because both a close and a deposit can be executed from an opened state (same initial state), but a close transaction will transition the channel to a closed state which doesn't allow for deposits (different end state). Invalid example: A settle transaction cannot invalidate a deposit because a settle is only allowed for the closed state and deposits are only allowed for the open state. In such a case a deposit should never have been sent. The deposit transaction for an invalid state is a bug and not a transaction which was invalidated. """ # Most transactions cannot be invalidated by others. These are: # # - close transactions # - settle transactions # - batch unlocks # # Deposits and withdraws are invalidated by the close, but these are not # made atomic through the WAL. is_our_failed_update_transfer = ( isinstance(state_change, ContractReceiveChannelSettled) and isinstance(transaction, ContractSendChannelUpdateTransfer) and state_change.token_network_identifier == transaction.token_network_identifier and state_change.channel_identifier == transaction.channel_identifier ) if is_our_failed_update_transfer: return True return False
[ "def", "is_transaction_invalidated", "(", "transaction", ",", "state_change", ")", ":", "# Most transactions cannot be invalidated by others. These are:", "#", "# - close transactions", "# - settle transactions", "# - batch unlocks", "#", "# Deposits and withdraws are invalidated by the close, but these are not", "# made atomic through the WAL.", "is_our_failed_update_transfer", "=", "(", "isinstance", "(", "state_change", ",", "ContractReceiveChannelSettled", ")", "and", "isinstance", "(", "transaction", ",", "ContractSendChannelUpdateTransfer", ")", "and", "state_change", ".", "token_network_identifier", "==", "transaction", ".", "token_network_identifier", "and", "state_change", ".", "channel_identifier", "==", "transaction", ".", "channel_identifier", ")", "if", "is_our_failed_update_transfer", ":", "return", "True", "return", "False" ]
True if the `transaction` is made invalid by `state_change`. Some transactions will fail due to race conditions. The races are: - Another transaction which has the same side effect is executed before. - Another transaction which *invalidates* the state of the smart contract required by the local transaction is executed before it. The first case is handled by the predicate `is_transaction_effect_satisfied`, where a transaction from a different source which does the same thing is considered. This predicate handles the second scenario. A transaction can **only** invalidate another iff both share a valid initial state but a different end state. Valid example: A close can invalidate a deposit, because both a close and a deposit can be executed from an opened state (same initial state), but a close transaction will transition the channel to a closed state which doesn't allow for deposits (different end state). Invalid example: A settle transaction cannot invalidate a deposit because a settle is only allowed for the closed state and deposits are only allowed for the open state. In such a case a deposit should never have been sent. The deposit transaction for an invalid state is a bug and not a transaction which was invalidated.
[ "True", "if", "the", "transaction", "is", "made", "invalid", "by", "state_change", "." ]
python
train
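A self-contained sketch of the same settle-invalidates-update rule; the dataclasses below are hypothetical stand-ins for raiden's ContractReceiveChannelSettled and ContractSendChannelUpdateTransfer, not the real types:

from dataclasses import dataclass

@dataclass
class SettleReceived:        # stand-in for ContractReceiveChannelSettled
    token_network_identifier: str
    channel_identifier: int

@dataclass
class UpdateTransferSend:    # stand-in for ContractSendChannelUpdateTransfer
    token_network_identifier: str
    channel_identifier: int

def is_invalidated(transaction, state_change):
    # Same shape as the predicate above: a settle on the same channel
    # invalidates a pending update-transfer transaction.
    return (
        isinstance(state_change, SettleReceived)
        and isinstance(transaction, UpdateTransferSend)
        and state_change.token_network_identifier == transaction.token_network_identifier
        and state_change.channel_identifier == transaction.channel_identifier
    )

print(is_invalidated(UpdateTransferSend("tn", 7), SettleReceived("tn", 7)))  # True
print(is_invalidated(UpdateTransferSend("tn", 7), SettleReceived("tn", 8)))  # False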
mitsei/dlkit
dlkit/json_/assessment/sessions.py
https://github.com/mitsei/dlkit/blob/445f968a175d61c8d92c0f617a3c17dc1dc7c584/dlkit/json_/assessment/sessions.py#L9180-L9196
def get_child_bank_ids(self, bank_id): """Gets the child ``Ids`` of the given bank. arg: bank_id (osid.id.Id): the ``Id`` to query return: (osid.id.IdList) - the children of the bank raise: NotFound - ``bank_id`` is not found raise: NullArgument - ``bank_id`` is ``null`` raise: OperationFailed - unable to complete request raise: PermissionDenied - authorization failure *compliance: mandatory -- This method must be implemented.* """ # Implemented from template for # osid.resource.BinHierarchySession.get_child_bin_ids if self._catalog_session is not None: return self._catalog_session.get_child_catalog_ids(catalog_id=bank_id) return self._hierarchy_session.get_children(id_=bank_id)
[ "def", "get_child_bank_ids", "(", "self", ",", "bank_id", ")", ":", "# Implemented from template for", "# osid.resource.BinHierarchySession.get_child_bin_ids", "if", "self", ".", "_catalog_session", "is", "not", "None", ":", "return", "self", ".", "_catalog_session", ".", "get_child_catalog_ids", "(", "catalog_id", "=", "bank_id", ")", "return", "self", ".", "_hierarchy_session", ".", "get_children", "(", "id_", "=", "bank_id", ")" ]
Gets the child ``Ids`` of the given bank. arg: bank_id (osid.id.Id): the ``Id`` to query return: (osid.id.IdList) - the children of the bank raise: NotFound - ``bank_id`` is not found raise: NullArgument - ``bank_id`` is ``null`` raise: OperationFailed - unable to complete request raise: PermissionDenied - authorization failure *compliance: mandatory -- This method must be implemented.*
[ "Gets", "the", "child", "Ids", "of", "the", "given", "bank", "." ]
python
train
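A runnable stand-in showing the call shape; the stub below replaces the real session, which is acquired through the dlkit runtime and enforces the documented NotFound/PermissionDenied behaviour:

class StubHierarchySession:
    # Hypothetical stub; a real BankHierarchySession comes from the dlkit
    # runtime, and the ids below are illustrative only.
    def get_child_bank_ids(self, bank_id):
        return ['bank.Bank%3A2%40ODL.MIT.EDU', 'bank.Bank%3A3%40ODL.MIT.EDU']

session = StubHierarchySession()
for child_id in session.get_child_bank_ids('bank.Bank%3A1%40ODL.MIT.EDU'):
    print(child_id)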
datasift/datasift-python
datasift/historics.py
https://github.com/datasift/datasift-python/blob/bfaca1a47501a18e11065ecf630d9c31df818f65/datasift/historics.py#L160-L176
def pause(self, historics_id, reason=""): """ Pause an existing Historics query. Uses API documented at http://dev.datasift.com/docs/api/rest-api/endpoints/historicspause :param historics_id: id of the job to pause :type historics_id: str :param reason: optional reason for pausing it :type reason: str :return: dict of REST API output with headers attached :rtype: :class:`~datasift.request.DictResponse` :raises: :class:`~datasift.exceptions.DataSiftApiException`, :class:`requests.exceptions.HTTPError` """ params = {"id": historics_id} if reason != "": params["reason"] = reason return self.request.post('pause', data=params)
[ "def", "pause", "(", "self", ",", "historics_id", ",", "reason", "=", "\"\"", ")", ":", "params", "=", "{", "\"id\"", ":", "historics_id", "}", "if", "reason", "!=", "\"\"", ":", "params", "[", "\"reason\"", "]", "=", "reason", "return", "self", ".", "request", ".", "post", "(", "'pause'", ",", "data", "=", "params", ")" ]
Pause an existing Historics query. Uses API documented at http://dev.datasift.com/docs/api/rest-api/endpoints/historicspause :param historics_id: id of the job to pause :type historics_id: str :param reason: optional reason for pausing it :type reason: str :return: dict of REST API output with headers attached :rtype: :class:`~datasift.request.DictResponse` :raises: :class:`~datasift.exceptions.DataSiftApiException`, :class:`requests.exceptions.HTTPError`
[ "Pause", "an", "existing", "Historics", "query", "." ]
python
train
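A hedged usage sketch; `Client` is datasift-python's documented entry point, while the credentials and historics id below are placeholders:

from datasift import Client

client = Client("your_username", "your_api_key")
response = client.historics.pause("2375a7a6eee988ae012f", reason="maintenance window")
print(response)   # DictResponse: REST output with headers attached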
rckclmbr/pyportify
pyportify/pkcs1/primitives.py
https://github.com/rckclmbr/pyportify/blob/696a1caad8a47b191f3bec44cc8fc3c437779512/pyportify/pkcs1/primitives.py#L121-L126
def constant_time_cmp(a, b):
    '''Compare two strings using constant time.'''
    # zip() truncates to the shorter input, so start from a length check
    # instead of letting a matching prefix pass for unequal-length inputs.
    result = len(a) == len(b)
    for x, y in zip(a, b):
        result &= (x == y)
    return result
[ "def", "constant_time_cmp", "(", "a", ",", "b", ")", ":", "# zip() truncates to the shorter input, so start from a length check", "# instead of letting a matching prefix pass for unequal-length inputs.", "result", "=", "len", "(", "a", ")", "==", "len", "(", "b", ")", "for", "x", ",", "y", "in", "zip", "(", "a", ",", "b", ")", ":", "result", "&=", "(", "x", "==", "y", ")", "return", "result" ]
Compare two strings using constant time.
[ "Compare", "two", "strings", "using", "constant", "time", "." ]
python
train
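For new code the standard library already ships a vetted primitive: hmac.compare_digest is the usual replacement and, like the guarded version above, rejects unequal-length inputs instead of comparing only a common prefix:

import hmac

expected = b"expected-token"
print(hmac.compare_digest(expected, b"expected-token"))   # True
print(hmac.compare_digest(expected, b"expected"))         # False: lengths differ
print(hmac.compare_digest(expected, b"expected-tokem"))   # False: last byte differs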
OzymandiasTheGreat/python-libinput
libinput/device.py
https://github.com/OzymandiasTheGreat/python-libinput/blob/1f477ee9f1d56b284b20e0317ea8967c64ef1218/libinput/device.py#L1800-L1811
def capabilities(self): """A tuple of capabilities this device supports. Returns: (~libinput.constant.DeviceCapability): Device capabilities. """ caps = [] for cap in DeviceCapability: if self._libinput.libinput_device_has_capability(self._handle, cap): caps.append(cap) return tuple(caps)
[ "def", "capabilities", "(", "self", ")", ":", "caps", "=", "[", "]", "for", "cap", "in", "DeviceCapability", ":", "if", "self", ".", "_libinput", ".", "libinput_device_has_capability", "(", "self", ".", "_handle", ",", "cap", ")", ":", "caps", ".", "append", "(", "cap", ")", "return", "tuple", "(", "caps", ")" ]
A tuple of capabilities this device supports. Returns: (~libinput.constant.DeviceCapability): Device capabilities.
[ "A", "tuple", "of", "capabilities", "this", "device", "supports", "." ]
python
train
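A usage sketch against python-libinput's path backend; treat the constructor default and path_add_device as assumptions taken from that package's README, and the device node as a placeholder:

import libinput

li = libinput.LibInput()                          # path backend
device = li.path_add_device('/dev/input/event0')  # placeholder node
print(device.name)
print(device.capabilities)   # e.g. (DeviceCapability.KEYBOARD,)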
ralphbean/taskw
taskw/task.py
https://github.com/ralphbean/taskw/blob/11e2f9132eaedd157f514538de9b5f3b69c30a52/taskw/task.py#L149-L191
def get_changes(self, serialized=False, keep=False):
        """ Get a journal of changes that have occurred

        :param `serialized`: Return changes in the serialized format used
            by TaskWarrior.
        :param `keep`: By default, the list of changes is reset
            after running ``.get_changes``; set this to `True` if you would
            like to keep the changes recorded following running this
            command.

        :returns: A dictionary of 2-tuples of changes, where the key is the
            name of the field that has changed, and the value is a 2-tuple
            containing the original value and the final value respectively.

        """
        results = {}

        # Check for explicitly-registered changes
        for k, f, t in self._changes:
            if k not in results:
                results[k] = [f, None]
            results[k][1] = (
                self._serialize(k, t, self._fields)
                if serialized
                else t
            )

        # Check for changes on subordinate items
        for k, v in six.iteritems(self):
            if isinstance(v, Dirtyable):
                result = v.get_changes(keep=keep)
                if result:
                    if not k in results:
                        results[k] = [result[0], None]
                    results[k][1] = (
                        self._serialize(k, result[1], self._fields)
                        if serialized
                        else result[1]
                    )

        # Clear out recorded changes
        if not keep:
            self._changes = []

        return results
[ "def", "get_changes", "(", "self", ",", "serialized", "=", "False", ",", "keep", "=", "False", ")", ":", "results", "=", "{", "}", "# Check for explicitly-registered changes", "for", "k", ",", "f", ",", "t", "in", "self", ".", "_changes", ":", "if", "k", "not", "in", "results", ":", "results", "[", "k", "]", "=", "[", "f", ",", "None", "]", "results", "[", "k", "]", "[", "1", "]", "=", "(", "self", ".", "_serialize", "(", "k", ",", "t", ",", "self", ".", "_fields", ")", "if", "serialized", "else", "t", ")", "# Check for changes on subordinate items", "for", "k", ",", "v", "in", "six", ".", "iteritems", "(", "self", ")", ":", "if", "isinstance", "(", "v", ",", "Dirtyable", ")", ":", "result", "=", "v", ".", "get_changes", "(", "keep", "=", "keep", ")", "if", "result", ":", "if", "not", "k", "in", "results", ":", "results", "[", "k", "]", "=", "[", "result", "[", "0", "]", ",", "None", "]", "results", "[", "k", "]", "[", "1", "]", "=", "(", "self", ".", "_serialize", "(", "k", ",", "result", "[", "1", "]", ",", "self", ".", "_fields", ")", "if", "serialized", "else", "result", "[", "1", "]", ")", "# Clear out recorded changes", "if", "not", "keep", ":", "self", ".", "_changes", "=", "[", "]", "return", "results" ]
Get a journal of changes that have occurred

        :param `serialized`: Return changes in the serialized format used
            by TaskWarrior.
        :param `keep`: By default, the list of changes is reset
            after running ``.get_changes``; set this to `True` if you would
            like to keep the changes recorded following running this
            command.

        :returns: A dictionary of 2-tuples of changes, where the key is the
            name of the field that has changed, and the value is a 2-tuple
            containing the original value and the final value respectively.
[ "Get", "a", "journal", "of", "changes", "that", "have", "occurred" ]
python
train
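A hedged sketch of the change journal; constructing Task from a plain dict follows taskw's own tests, and the {field: [old, new]} shape is what get_changes documents:

from taskw.task import Task

task = Task({'description': 'old text'})
task['description'] = 'new text'

print(task.get_changes(keep=True))   # roughly {'description': ['old text', 'new text']}
print(task.get_changes())            # default keep=False resets the journal after reading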
datastore/datastore
datastore/core/basic.py
https://github.com/datastore/datastore/blob/7ccf0cd4748001d3dbf5e6dda369b0f63e0269d3/datastore/core/basic.py#L932-L954
def delete(self, key): '''Removes the object named by `key`. DirectoryTreeDatastore removes the directory entry. ''' super(DirectoryTreeDatastore, self).delete(key) str_key = str(key) # ignore root if str_key == '/': return # retrieve directory, to remove entry dir_key = key.parent.instance('directory') directory = self.directory(dir_key) # ensure key is not in directory if directory and str_key in directory: directory.remove(str_key) if len(directory) > 0: super(DirectoryTreeDatastore, self).put(dir_key, directory) else: super(DirectoryTreeDatastore, self).delete(dir_key)
[ "def", "delete", "(", "self", ",", "key", ")", ":", "super", "(", "DirectoryTreeDatastore", ",", "self", ")", ".", "delete", "(", "key", ")", "str_key", "=", "str", "(", "key", ")", "# ignore root", "if", "str_key", "==", "'/'", ":", "return", "# retrieve directory, to remove entry", "dir_key", "=", "key", ".", "parent", ".", "instance", "(", "'directory'", ")", "directory", "=", "self", ".", "directory", "(", "dir_key", ")", "# ensure key is not in directory", "if", "directory", "and", "str_key", "in", "directory", ":", "directory", ".", "remove", "(", "str_key", ")", "if", "len", "(", "directory", ")", ">", "0", ":", "super", "(", "DirectoryTreeDatastore", ",", "self", ")", ".", "put", "(", "dir_key", ",", "directory", ")", "else", ":", "super", "(", "DirectoryTreeDatastore", ",", "self", ")", ".", "delete", "(", "dir_key", ")" ]
Removes the object named by `key`. DirectoryTreeDatastore removes the directory entry.
[ "Removes", "the", "object", "named", "by", "key", ".", "DirectoryTreeDatastore", "removes", "the", "directory", "entry", "." ]
python
train
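A runnable sketch with the package's in-memory store; the import surface and the DictDatastore-under-DirectoryTreeDatastore layering follow datastore.core.basic but should be treated as assumptions:

import datastore.core as datastore

ds = datastore.DirectoryTreeDatastore(datastore.DictDatastore())

key = datastore.Key('/users/alice')
ds.put(key, {'name': 'Alice'})
ds.delete(key)            # removes the value and drops its directory entry
print(ds.contains(key))   # False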
KelSolaar/Umbra
umbra/managers/layouts_manager.py
https://github.com/KelSolaar/Umbra/blob/66f45f08d9d723787f1191989f8b0dda84b412ce/umbra/managers/layouts_manager.py#L476-L489
def restore_startup_layout(self): """ Restores the startup layout. :return: Method success. :rtype: bool """ LOGGER.debug("> Restoring startup layout.") if self.restore_layout(UiConstants.startup_layout): not self.__restore_geometry_on_layout_change and self.__container.restoreGeometry( self.__settings.get_key("Layouts", "{0}_geometry".format(UiConstants.startup_layout)).toByteArray()) return True
[ "def", "restore_startup_layout", "(", "self", ")", ":", "LOGGER", ".", "debug", "(", "\"> Restoring startup layout.\"", ")", "if", "self", ".", "restore_layout", "(", "UiConstants", ".", "startup_layout", ")", ":", "not", "self", ".", "__restore_geometry_on_layout_change", "and", "self", ".", "__container", ".", "restoreGeometry", "(", "self", ".", "__settings", ".", "get_key", "(", "\"Layouts\"", ",", "\"{0}_geometry\"", ".", "format", "(", "UiConstants", ".", "startup_layout", ")", ")", ".", "toByteArray", "(", ")", ")", "return", "True" ]
Restores the startup layout. :return: Method success. :rtype: bool
[ "Restores", "the", "startup", "layout", "." ]
python
train
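The `not flag and call(...)` one-liner above relies on short-circuit evaluation for its side effect; a stand-alone demonstration of that idiom (all names hypothetical):

calls = []

def restore_geometry():
    calls.append("restored")
    return True

restore_on_layout_change = False
not restore_on_layout_change and restore_geometry()   # flag falsy -> call runs
restore_on_layout_change = True
not restore_on_layout_change and restore_geometry()   # flag truthy -> call skipped

print(calls)   # ['restored']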
pyroscope/pyrocore
src/pyrocore/torrent/queue.py
https://github.com/pyroscope/pyrocore/blob/89ad01346a570943d20311a0b488440975876612/src/pyrocore/torrent/queue.py#L63-L132
def _start(self, items): """ Start some items if conditions are met. """ # TODO: Filter by a custom date field, for scheduled downloads starting at a certain time, or after a given delay # TODO: Don't start anything more if download BW is used >= config threshold in % # Check if anything more is ready to start downloading startable = [i for i in items if self.config.startable.match(i)] if not startable: self.LOG.debug("Checked %d item(s), none startable according to [ %s ]", len(items), self.config.startable) return # Check intermission delay now = time.time() if now < self.last_start: # compensate for summer time and other oddities self.last_start = now delayed = int(self.last_start + self.config.intermission - now) if delayed > 0: self.LOG.debug("Delaying start of {:d} item(s)," " due to {:d}s intermission with {:d}s left" .format(len(startable), self.config.intermission, delayed)) return # TODO: sort by priority, then loaded time # Stick to "start_at_once" parameter, unless "downloading_min" is violated downloading = [i for i in items if self.config.downloading.match(i)] start_now = max(self.config.start_at_once, self.config.downloading_min - len(downloading)) start_now = min(start_now, len(startable)) #down_traffic = sum(i.down for i in downloading) ##self.LOG.info("%d downloading, down %d" % (len(downloading), down_traffic)) # Start eligible items for idx, item in enumerate(startable): # Check if we reached 'start_now' in this run if idx >= start_now: self.LOG.debug("Only starting %d item(s) in this run, %d more could be downloading" % ( start_now, len(startable)-idx,)) break # TODO: Prevent start of more torrents that can fit on the drive (taking "off" files into account) # (restarts items that were stopped due to the "low_diskspace" schedule, and also avoids triggering it at all) # Only check the other conditions when we have `downloading_min` covered if len(downloading) < self.config.downloading_min: self.LOG.debug("Catching up from %d to a minimum of %d downloading item(s)" % ( len(downloading), self.config.downloading_min)) else: # Limit to the given maximum of downloading items if len(downloading) >= self.config.downloading_max: self.LOG.debug("Already downloading %d item(s) out of %d max, %d more could be downloading" % ( len(downloading), self.config.downloading_max, len(startable)-idx,)) break # If we made it here, start it! self.last_start = now downloading.append(item) self.LOG.info("%s '%s' [%s, #%s]" % ( "WOULD start" if self.config.dry_run else "Starting", fmt.to_utf8(item.name), item.alias, item.hash)) if not self.config.dry_run: item.start() if not self.config.quiet: self.proxy.log(xmlrpc.NOHASH, "%s: Started '%s' {%s}" % ( self.__class__.__name__, fmt.to_utf8(item.name), item.alias, ))
[ "def", "_start", "(", "self", ",", "items", ")", ":", "# TODO: Filter by a custom date field, for scheduled downloads starting at a certain time, or after a given delay", "# TODO: Don't start anything more if download BW is used >= config threshold in %", "# Check if anything more is ready to start downloading", "startable", "=", "[", "i", "for", "i", "in", "items", "if", "self", ".", "config", ".", "startable", ".", "match", "(", "i", ")", "]", "if", "not", "startable", ":", "self", ".", "LOG", ".", "debug", "(", "\"Checked %d item(s), none startable according to [ %s ]\"", ",", "len", "(", "items", ")", ",", "self", ".", "config", ".", "startable", ")", "return", "# Check intermission delay", "now", "=", "time", ".", "time", "(", ")", "if", "now", "<", "self", ".", "last_start", ":", "# compensate for summer time and other oddities", "self", ".", "last_start", "=", "now", "delayed", "=", "int", "(", "self", ".", "last_start", "+", "self", ".", "config", ".", "intermission", "-", "now", ")", "if", "delayed", ">", "0", ":", "self", ".", "LOG", ".", "debug", "(", "\"Delaying start of {:d} item(s),\"", "\" due to {:d}s intermission with {:d}s left\"", ".", "format", "(", "len", "(", "startable", ")", ",", "self", ".", "config", ".", "intermission", ",", "delayed", ")", ")", "return", "# TODO: sort by priority, then loaded time", "# Stick to \"start_at_once\" parameter, unless \"downloading_min\" is violated", "downloading", "=", "[", "i", "for", "i", "in", "items", "if", "self", ".", "config", ".", "downloading", ".", "match", "(", "i", ")", "]", "start_now", "=", "max", "(", "self", ".", "config", ".", "start_at_once", ",", "self", ".", "config", ".", "downloading_min", "-", "len", "(", "downloading", ")", ")", "start_now", "=", "min", "(", "start_now", ",", "len", "(", "startable", ")", ")", "#down_traffic = sum(i.down for i in downloading)", "##self.LOG.info(\"%d downloading, down %d\" % (len(downloading), down_traffic))", "# Start eligible items", "for", "idx", ",", "item", "in", "enumerate", "(", "startable", ")", ":", "# Check if we reached 'start_now' in this run", "if", "idx", ">=", "start_now", ":", "self", ".", "LOG", ".", "debug", "(", "\"Only starting %d item(s) in this run, %d more could be downloading\"", "%", "(", "start_now", ",", "len", "(", "startable", ")", "-", "idx", ",", ")", ")", "break", "# TODO: Prevent start of more torrents that can fit on the drive (taking \"off\" files into account)", "# (restarts items that were stopped due to the \"low_diskspace\" schedule, and also avoids triggering it at all)", "# Only check the other conditions when we have `downloading_min` covered", "if", "len", "(", "downloading", ")", "<", "self", ".", "config", ".", "downloading_min", ":", "self", ".", "LOG", ".", "debug", "(", "\"Catching up from %d to a minimum of %d downloading item(s)\"", "%", "(", "len", "(", "downloading", ")", ",", "self", ".", "config", ".", "downloading_min", ")", ")", "else", ":", "# Limit to the given maximum of downloading items", "if", "len", "(", "downloading", ")", ">=", "self", ".", "config", ".", "downloading_max", ":", "self", ".", "LOG", ".", "debug", "(", "\"Already downloading %d item(s) out of %d max, %d more could be downloading\"", "%", "(", "len", "(", "downloading", ")", ",", "self", ".", "config", ".", "downloading_max", ",", "len", "(", "startable", ")", "-", "idx", ",", ")", ")", "break", "# If we made it here, start it!", "self", ".", "last_start", "=", "now", "downloading", ".", "append", "(", "item", ")", "self", ".", "LOG", ".", 
"info", "(", "\"%s '%s' [%s, #%s]\"", "%", "(", "\"WOULD start\"", "if", "self", ".", "config", ".", "dry_run", "else", "\"Starting\"", ",", "fmt", ".", "to_utf8", "(", "item", ".", "name", ")", ",", "item", ".", "alias", ",", "item", ".", "hash", ")", ")", "if", "not", "self", ".", "config", ".", "dry_run", ":", "item", ".", "start", "(", ")", "if", "not", "self", ".", "config", ".", "quiet", ":", "self", ".", "proxy", ".", "log", "(", "xmlrpc", ".", "NOHASH", ",", "\"%s: Started '%s' {%s}\"", "%", "(", "self", ".", "__class__", ".", "__name__", ",", "fmt", ".", "to_utf8", "(", "item", ".", "name", ")", ",", "item", ".", "alias", ",", ")", ")" ]
Start some items if conditions are met.
[ "Start", "some", "items", "if", "conditions", "are", "met", "." ]
python
train
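The start_now arithmetic is the core of the batching rule; a self-contained sketch with illustrative numbers:

start_at_once = 1
downloading_min = 3
downloading = ["a"]             # one item currently downloading
startable = ["b", "c", "d"]     # three items eligible to start

# Start enough to reach downloading_min, but never more than are startable.
start_now = max(start_at_once, downloading_min - len(downloading))
start_now = min(start_now, len(startable))
print(start_now)   # 2 -> catches up from 1 toward the minimum of 3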
iotile/coretools
transport_plugins/bled112/iotile_transport_bled112/bled112_cmd.py
https://github.com/iotile/coretools/blob/2d794f5f1346b841b0dcd16c9d284e9bf2f3c6ec/transport_plugins/bled112/iotile_transport_bled112/bled112_cmd.py#L685-L704
def _send_command(self, cmd_class, command, payload, timeout=3.0):
        """
        Send a BGAPI packet to the dongle and return the response
        """

        if len(payload) > 60:
            # Raise (not return) so callers cannot silently ignore the error;
            # ValueError only accepts positional arguments, so the details go into the message.
            raise ValueError("Attempting to send a BGAPI packet with length > 60 is not allowed "
                             "(actual_length=%d, command=%d, command_class=%d)"
                             % (len(payload), command, cmd_class))

        header = bytearray(4)
        header[0] = 0
        header[1] = len(payload)
        header[2] = cmd_class
        header[3] = command

        packet = header + bytearray(payload)
        self._stream.write(bytes(packet))

        #Every command has a response so wait for the response here
        response = self._receive_packet(timeout)
        return response
[ "def", "_send_command", "(", "self", ",", "cmd_class", ",", "command", ",", "payload", ",", "timeout", "=", "3.0", ")", ":", "if", "len", "(", "payload", ")", ">", "60", ":", "# Raise (not return) so callers cannot silently ignore the error;", "# ValueError only accepts positional arguments, so the details go into the message.", "raise", "ValueError", "(", "\"Attempting to send a BGAPI packet with length > 60 is not allowed \"", "\"(actual_length=%d, command=%d, command_class=%d)\"", "%", "(", "len", "(", "payload", ")", ",", "command", ",", "cmd_class", ")", ")", "header", "=", "bytearray", "(", "4", ")", "header", "[", "0", "]", "=", "0", "header", "[", "1", "]", "=", "len", "(", "payload", ")", "header", "[", "2", "]", "=", "cmd_class", "header", "[", "3", "]", "=", "command", "packet", "=", "header", "+", "bytearray", "(", "payload", ")", "self", ".", "_stream", ".", "write", "(", "bytes", "(", "packet", ")", ")", "#Every command has a response so wait for the response here", "response", "=", "self", ".", "_receive_packet", "(", "timeout", ")", "return", "response" ]
Send a BGAPI packet to the dongle and return the response
[ "Send", "a", "BGAPI", "packet", "to", "the", "dongle", "and", "return", "the", "response" ]
python
train
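A stand-alone sketch of the 4-byte BGAPI command header assembled above; the class/command numbers are placeholders, not a specific BLED112 command:

payload = b"\x01\x02\x03"
cmd_class, command = 6, 3       # placeholder BGAPI class/command ids

header = bytearray(4)
header[0] = 0                   # message type 0 = command
header[1] = len(payload)        # payload length, must stay <= 60
header[2] = cmd_class
header[3] = command

packet = header + bytearray(payload)
print(packet.hex())             # '00030603010203'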