Dataset schema (one record per function): repo (string, 7-54 chars), path (string, 4-192 chars), url (string, 87-284 chars), code (string, 78-104k chars), code_tokens (token sequence derived from code), docstring (string, 1-46.9k chars, extracted from code), docstring_tokens (token sequence derived from docstring), language (1 class), partition (3 classes).
openstates/billy
billy/importers/utils.py
https://github.com/openstates/billy/blob/5fc795347f12a949e410a8cfad0c911ea6bced67/billy/importers/utils.py#L199-L212
def split_name(obj):
    """
    If the supplied legislator/person object is missing 'first_name'
    or 'last_name' then use name_tools to split.
    """
    if obj['_type'] in ('person', 'legislator'):
        for key in ('first_name', 'last_name'):
            if key not in obj or not obj[key]:
                # Need to split
                (obj['first_name'], obj['last_name'],
                 obj['suffixes']) = name_tools.split(obj['full_name'])[1:]
                break
    return obj
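A minimal usage sketch (the input dict below is hypothetical; name_tools.split is assumed to return a (prefixes, first, last, suffixes) tuple, of which the trailing three parts are used):

# Hypothetical input: a legislator record with only 'full_name' set.
leg = {'_type': 'legislator', 'full_name': 'John A. Smith Jr.'}
leg = split_name(leg)
# leg['first_name'], leg['last_name'] and leg['suffixes'] are now
# populated from name_tools.split(leg['full_name'])[1:].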
[ "def", "split_name", "(", "obj", ")", ":", "if", "obj", "[", "'_type'", "]", "in", "(", "'person'", ",", "'legislator'", ")", ":", "for", "key", "in", "(", "'first_name'", ",", "'last_name'", ")", ":", "if", "key", "not", "in", "obj", "or", "not", "obj", "[", "key", "]", ":", "# Need to split", "(", "obj", "[", "'first_name'", "]", ",", "obj", "[", "'last_name'", "]", ",", "obj", "[", "'suffixes'", "]", ")", "=", "name_tools", ".", "split", "(", "obj", "[", "'full_name'", "]", ")", "[", "1", ":", "]", "break", "return", "obj" ]
If the supplied legislator/person object is missing 'first_name' or 'last_name' then use name_tools to split.
[ "If", "the", "supplied", "legislator", "/", "person", "object", "is", "missing", "first_name", "or", "last_name", "then", "use", "name_tools", "to", "split", "." ]
python
train
graphql-python/graphql-core
graphql/execution/executor.py
https://github.com/graphql-python/graphql-core/blob/d8e9d3abe7c209eb2f51cf001402783bfd480596/graphql/execution/executor.py#L630-L676
def complete_abstract_value(
    exe_context,  # type: ExecutionContext
    return_type,  # type: Union[GraphQLInterfaceType, GraphQLUnionType]
    field_asts,  # type: List[Field]
    info,  # type: ResolveInfo
    path,  # type: List[Union[int, str]]
    result,  # type: Any
):
    # type: (...) -> Dict[str, Any]
    """
    Complete a value of an abstract type by determining the runtime type of
    that value, then completing based on that type.
    """
    runtime_type = None  # type: Union[str, GraphQLObjectType, None]

    # Field type must be Object, Interface or Union and expect sub-selections.
    if isinstance(return_type, (GraphQLInterfaceType, GraphQLUnionType)):
        if return_type.resolve_type:
            runtime_type = return_type.resolve_type(result, info)
        else:
            runtime_type = get_default_resolve_type_fn(result, info, return_type)

    if isinstance(runtime_type, string_types):
        runtime_type = info.schema.get_type(runtime_type)  # type: ignore

    if not isinstance(runtime_type, GraphQLObjectType):
        raise GraphQLError(
            (
                "Abstract type {} must resolve to an Object type at runtime "
                + 'for field {}.{} with value "{}", received "{}".'
            ).format(
                return_type, info.parent_type, info.field_name, result, runtime_type
            ),
            field_asts,
        )

    if not exe_context.schema.is_possible_type(return_type, runtime_type):
        raise GraphQLError(
            u'Runtime Object type "{}" is not a possible type for "{}".'.format(
                runtime_type, return_type
            ),
            field_asts,
        )

    return complete_object_value(
        exe_context, runtime_type, field_asts, info, path, result
    )
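For context, a minimal sketch of how an abstract type reaches this code path (the Pet union, DogType/CatType, and the resolver logic are hypothetical; the function above accepts either a GraphQLObjectType or its name as a string from resolve_type, looking the string up via info.schema.get_type):

# Hypothetical union whose resolver returns a type *name*; the string is
# resolved to a GraphQLObjectType inside complete_abstract_value.
PetType = GraphQLUnionType(
    name='Pet',
    types=[DogType, CatType],  # assumed object types
    resolve_type=lambda value, info: 'Dog' if value.get('barks') else 'Cat')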
[ "def", "complete_abstract_value", "(", "exe_context", ",", "# type: ExecutionContext", "return_type", ",", "# type: Union[GraphQLInterfaceType, GraphQLUnionType]", "field_asts", ",", "# type: List[Field]", "info", ",", "# type: ResolveInfo", "path", ",", "# type: List[Union[int, str]]", "result", ",", "# type: Any", ")", ":", "# type: (...) -> Dict[str, Any]", "runtime_type", "=", "None", "# type: Union[str, GraphQLObjectType, None]", "# Field type must be Object, Interface or Union and expect sub-selections.", "if", "isinstance", "(", "return_type", ",", "(", "GraphQLInterfaceType", ",", "GraphQLUnionType", ")", ")", ":", "if", "return_type", ".", "resolve_type", ":", "runtime_type", "=", "return_type", ".", "resolve_type", "(", "result", ",", "info", ")", "else", ":", "runtime_type", "=", "get_default_resolve_type_fn", "(", "result", ",", "info", ",", "return_type", ")", "if", "isinstance", "(", "runtime_type", ",", "string_types", ")", ":", "runtime_type", "=", "info", ".", "schema", ".", "get_type", "(", "runtime_type", ")", "# type: ignore", "if", "not", "isinstance", "(", "runtime_type", ",", "GraphQLObjectType", ")", ":", "raise", "GraphQLError", "(", "(", "\"Abstract type {} must resolve to an Object type at runtime \"", "+", "'for field {}.{} with value \"{}\", received \"{}\".'", ")", ".", "format", "(", "return_type", ",", "info", ".", "parent_type", ",", "info", ".", "field_name", ",", "result", ",", "runtime_type", ")", ",", "field_asts", ",", ")", "if", "not", "exe_context", ".", "schema", ".", "is_possible_type", "(", "return_type", ",", "runtime_type", ")", ":", "raise", "GraphQLError", "(", "u'Runtime Object type \"{}\" is not a possible type for \"{}\".'", ".", "format", "(", "runtime_type", ",", "return_type", ")", ",", "field_asts", ",", ")", "return", "complete_object_value", "(", "exe_context", ",", "runtime_type", ",", "field_asts", ",", "info", ",", "path", ",", "result", ")" ]
Complete an value of an abstract type by determining the runtime type of that value, then completing based on that type.
[ "Complete", "an", "value", "of", "an", "abstract", "type", "by", "determining", "the", "runtime", "type", "of", "that", "value", "then", "completing", "based", "on", "that", "type", "." ]
python
train
mozilla-iot/webthing-python
webthing/thing.py
https://github.com/mozilla-iot/webthing-python/blob/65d467c89ed79d0bbc42b8b3c8f9e5a320edd237/webthing/thing.py#L442-L457
def action_notify(self, action):
    """
    Notify all subscribers of an action status change.

    action -- the action whose status changed
    """
    message = json.dumps({
        'messageType': 'actionStatus',
        'data': action.as_action_description(),
    })

    for subscriber in list(self.subscribers):
        try:
            subscriber.write_message(message)
        except tornado.websocket.WebSocketClosedError:
            pass
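A rough sketch of what a subscriber must provide (the stub below is hypothetical; the method only requires an object exposing write_message, like Tornado's WebSocket handlers):

class FakeSubscriber:
    """Hypothetical stand-in for a tornado.websocket.WebSocketHandler."""
    def write_message(self, message):
        print('would push over websocket:', message)

thing.subscribers.add(FakeSubscriber())  # assuming subscribers is a set
thing.action_notify(some_action)         # pushes the actionStatus JSON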
[ "def", "action_notify", "(", "self", ",", "action", ")", ":", "message", "=", "json", ".", "dumps", "(", "{", "'messageType'", ":", "'actionStatus'", ",", "'data'", ":", "action", ".", "as_action_description", "(", ")", ",", "}", ")", "for", "subscriber", "in", "list", "(", "self", ".", "subscribers", ")", ":", "try", ":", "subscriber", ".", "write_message", "(", "message", ")", "except", "tornado", ".", "websocket", ".", "WebSocketClosedError", ":", "pass" ]
Notify all subscribers of an action status change. action -- the action whose status changed
[ "Notify", "all", "subscribers", "of", "an", "action", "status", "change", "." ]
python
test
jlmadurga/permabots
permabots/views/api/handler.py
https://github.com/jlmadurga/permabots/blob/781a91702529a23fe7bc2aa84c5d88e961412466/permabots/views/api/handler.py#L159-L168
def get(self, request, bot_id, id, format=None):
    """
    Get list of header parameters of a handler
    ---
    serializer: AbsParamSerializer
    responseMessages:
        - code: 401
          message: Not authenticated
    """
    return super(HeaderParameterList, self).get(request, bot_id, id, format)
[ "def", "get", "(", "self", ",", "request", ",", "bot_id", ",", "id", ",", "format", "=", "None", ")", ":", "return", "super", "(", "HeaderParameterList", ",", "self", ")", ".", "get", "(", "request", ",", "bot_id", ",", "id", ",", "format", ")" ]
Get list of header parameters of a handler --- serializer: AbsParamSerializer responseMessages: - code: 401 message: Not authenticated
[ "Get", "list", "of", "header", "parameters", "of", "a", "handler", "---", "serializer", ":", "AbsParamSerializer", "responseMessages", ":", "-", "code", ":", "401", "message", ":", "Not", "authenticated" ]
python
train
nickoala/telepot
telepot/__init__.py
https://github.com/nickoala/telepot/blob/3792fde251d0f1d5a6ca16c8ad1a71f89360c41d/telepot/__init__.py#L961-L967
def addStickerToSet(self, user_id, name, png_sticker, emojis, mask_position=None):
    """ See: https://core.telegram.org/bots/api#addstickertoset """
    p = _strip(locals(), more=['png_sticker'])
    return self._api_request_with_file('addStickerToSet', _rectify(p),
                                       'png_sticker', png_sticker)
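Typical usage, assuming a telepot Bot instance and a sticker set previously created via createNewStickerSet (the token, user id, and set name below are placeholders):

import telepot

bot = telepot.Bot('BOT_TOKEN')  # placeholder token
with open('sticker.png', 'rb') as f:
    bot.addStickerToSet(user_id=123456789, name='mypack_by_mybot',
                        png_sticker=f, emojis='\U0001F600')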
[ "def", "addStickerToSet", "(", "self", ",", "user_id", ",", "name", ",", "png_sticker", ",", "emojis", ",", "mask_position", "=", "None", ")", ":", "p", "=", "_strip", "(", "locals", "(", ")", ",", "more", "=", "[", "'png_sticker'", "]", ")", "return", "self", ".", "_api_request_with_file", "(", "'addStickerToSet'", ",", "_rectify", "(", "p", ")", ",", "'png_sticker'", ",", "png_sticker", ")" ]
See: https://core.telegram.org/bots/api#addstickertoset
[ "See", ":", "https", ":", "//", "core", ".", "telegram", ".", "org", "/", "bots", "/", "api#addstickertoset" ]
python
train
google/prettytensor
prettytensor/functions.py
https://github.com/google/prettytensor/blob/75daa0b11252590f548da5647addc0ea610c4c45/prettytensor/functions.py#L129-L146
def cos_distance(t1, t2, epsilon=1e-12, name=None):
  """Cos distance between t1 and t2 and caps the gradient of the Square Root.

  Args:
    t1: A tensor
    t2: A tensor that can be multiplied by t1.
    epsilon: A lower bound value for the distance. The square root is used as
      the normalizer.
    name: Optional name for this op.
  Returns:
    The cos distance between t1 and t2.
  """
  with tf.name_scope(name, 'cos_distance', [t1, t2]) as scope:
    t1 = tf.convert_to_tensor(t1, name='t1')
    t2 = tf.convert_to_tensor(t2, name='t2')
    x_inv_norm = tf.rsqrt(tf.maximum(length_squared(t1) * length_squared(t2),
                                     epsilon))
    return tf.subtract(1.0, dot_product(t1, t2) * x_inv_norm, name=scope)
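Numerically this computes 1 - <t1, t2> / sqrt(max(|t1|^2 * |t2|^2, epsilon)); a NumPy sketch of the same quantity for flat vectors, assuming length_squared and dot_product reduce over all elements:

import numpy as np

def cos_distance_np(t1, t2, epsilon=1e-12):
    # 1 - <t1, t2> / sqrt(max(|t1|^2 * |t2|^2, epsilon))
    norm_prod = np.sum(t1 ** 2) * np.sum(t2 ** 2)
    return 1.0 - np.dot(t1, t2) / np.sqrt(np.maximum(norm_prod, epsilon))

cos_distance_np(np.array([1.0, 0.0]), np.array([1.0, 0.0]))  # -> 0.0 (parallel)
cos_distance_np(np.array([1.0, 0.0]), np.array([0.0, 1.0]))  # -> 1.0 (orthogonal)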
[ "def", "cos_distance", "(", "t1", ",", "t2", ",", "epsilon", "=", "1e-12", ",", "name", "=", "None", ")", ":", "with", "tf", ".", "name_scope", "(", "name", ",", "'cos_distance'", ",", "[", "t1", ",", "t2", "]", ")", "as", "scope", ":", "t1", "=", "tf", ".", "convert_to_tensor", "(", "t1", ",", "name", "=", "'t1'", ")", "t2", "=", "tf", ".", "convert_to_tensor", "(", "t2", ",", "name", "=", "'t2'", ")", "x_inv_norm", "=", "tf", ".", "rsqrt", "(", "tf", ".", "maximum", "(", "length_squared", "(", "t1", ")", "*", "length_squared", "(", "t2", ")", ",", "epsilon", ")", ")", "return", "tf", ".", "subtract", "(", "1.0", ",", "dot_product", "(", "t1", ",", "t2", ")", "*", "x_inv_norm", ",", "name", "=", "scope", ")" ]
Cos distance between t1 and t2 and caps the gradient of the Square Root. Args: t1: A tensor t2: A tensor that can be multiplied by t1. epsilon: A lower bound value for the distance. The square root is used as the normalizer. name: Optional name for this op. Returns: The cos distance between t1 and t2.
[ "Cos", "distance", "between", "t1", "and", "t2", "and", "caps", "the", "gradient", "of", "the", "Square", "Root", "." ]
python
train
cni/MRS
MRS/qc.py
https://github.com/cni/MRS/blob/16098b3cf4830780efd787fee9efa46513850283/MRS/qc.py#L11-L88
def motioncheck(ref_file, end_file, out_path=None, thres=5.0):
    """
    Checks motion between structural scans of the same modality.
    Ideally obtained at the beginning and end of a scanning session.

    Parameters
    ----------
    ref_file: nifti file
        Nifti file of first localizer acquired at the beginning of the session
    end_file: nifti
        nifti file of the localizer acquired at the end of the session
    thres: float
        threshold in mm of maximum allowed motion. Default 5mm

    Returns
    -------
    rms : float
        root mean square of xyz translation
    passed: boolean
        indicates if motion passed threshold: 1 if passed, 0 if failed.
    """
    ref = nib.load(ref_file)
    end = nib.load(end_file)
    ref_data = ref.get_data()
    end_data = end.get_data()

    # Check if same affine space. Modality must be the same to use realign,
    # and prescription must be the same to deduce motion.
    ref_aff = ref.get_affine()
    end_aff = end.get_affine()

    if np.array_equal(ref_aff, end_aff):
        print('affines match')
    else:
        raise ValueError("Affines of start and end images do not match")

    # Save only axials
    refax = ref_data[:, :, :, 0, np.newaxis]
    endax = end_data[:, :, :, 0, np.newaxis]

    if out_path is None:
        # Fall back to the directory of the reference file
        out_path = os.path.dirname(ref_file)

    refax_img = nib.Nifti1Image(refax, ref_aff)
    nib.save(refax_img, op.join(out_path, 'refax.nii.gz'))
    endax_img = nib.Nifti1Image(endax, ref_aff)
    nib.save(endax_img, op.join(out_path, 'endax.nii.gz'))

    # Realignment
    ref_file = op.join(out_path, 'refax.nii.gz')
    in_file = op.join(out_path, 'endax.nii.gz')
    mat_file = op.join(out_path, 'mat.nii.gz')
    mcflt = fsl.MCFLIRT(in_file=in_file, ref_file=ref_file,
                        save_mats=True, cost='mutualinfo')
    res = mcflt.run()
    print('realignment affine matrix saved in mat_file: %s'
          % res.outputs.mat_file)
    aff_file = res.outputs.mat_file
    aff = np.loadtxt(aff_file, dtype=float)

    # Compute RMS of the translation component as an indicator of motion
    rel = aff[0:3, 3]
    rms = np.sqrt(np.mean(rel ** 2))

    if rms >= thres:
        passed = False
    else:
        passed = True

    return rms, passed
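A usage sketch (file names and output directory are placeholders; the function relies on nibabel, numpy, and FSL's MCFLIRT via nipype):

rms, passed = motioncheck('localizer_start.nii.gz', 'localizer_end.nii.gz',
                          out_path='/tmp/qc', thres=5.0)
if not passed:
    print('motion of %.2f mm exceeds threshold' % rms)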
[ "def", "motioncheck", "(", "ref_file", ",", "end_file", ",", "out_path", "=", "None", ",", "thres", "=", "5.0", ")", ":", "ref", "=", "nib", ".", "load", "(", "ref_file", ")", "end", "=", "nib", ".", "load", "(", "end_file", ")", "ref_data", "=", "ref", ".", "get_data", "(", ")", "end_data", "=", "end", ".", "get_data", "(", ")", "# Check if same affine space. modality must be the same to use realign, ", "# and prescription must be the same to deduce motion", "ref_aff", "=", "ref", ".", "get_affine", "(", ")", "end_aff", "=", "end", ".", "get_affine", "(", ")", "if", "np", ".", "array_equal", "(", "ref_aff", ",", "end_aff", ")", ":", "print", "(", "'affines match'", ")", "else", ":", "raise", "ValueError", "(", "\"Affines of start and end images do not match\"", ")", "# save only axials", "refax", "=", "ref_data", "[", ":", ",", ":", ",", ":", ",", "0", ",", "np", ".", "newaxis", "]", "endax", "=", "end_data", "[", ":", ",", ":", ",", ":", ",", "0", ",", "np", ".", "newaxis", "]", "if", "out_path", "is", "None", ":", "path", "=", "os", ".", "path", ".", "dirname", "(", "ref_file", ")", "refax_img", "=", "nib", ".", "Nifti1Image", "(", "refax", ",", "ref_aff", ")", "nib", ".", "save", "(", "refax_img", ",", "op", ".", "join", "(", "out_path", ",", "'refax.nii.gz'", ")", ")", "endax_img", "=", "nib", ".", "Nifti1Image", "(", "endax", ",", "ref_aff", ")", "nib", ".", "save", "(", "endax_img", ",", "op", ".", "join", "(", "out_path", ",", "'endax.nii.gz'", ")", ")", "# realignment", "ref_file", "=", "op", ".", "join", "(", "out_path", ",", "'refax.nii.gz'", ")", "in_file", "=", "op", ".", "join", "(", "out_path", ",", "'endax.nii.gz'", ")", "mat_file", "=", "op", ".", "join", "(", "out_path", ",", "'mat.nii.gz'", ")", "mcflt", "=", "fsl", ".", "MCFLIRT", "(", "in_file", "=", "in_file", ",", "ref_file", "=", "ref_file", ",", "save_mats", "=", "True", ",", "cost", "=", "'mutualinfo'", ")", "res", "=", "mcflt", ".", "run", "(", ")", "print", "(", "'realignment affine matrix saved in mat_file: %s'", "%", "res", ".", "outputs", ".", "mat_file", ")", "aff_file", "=", "res", ".", "outputs", ".", "mat_file", "aff", "=", "np", ".", "loadtxt", "(", "aff_file", ",", "dtype", "=", "float", ")", "# compute RMS as indicator of motion", "rel", "=", "aff", "[", "0", ":", "3", ",", "3", "]", "rms", "=", "np", ".", "sqrt", "(", "np", ".", "mean", "(", "rel", "**", "2", ")", ")", "if", "rms", ">=", "thres", ":", "passed", "=", "False", "else", ":", "passed", "=", "True", "return", "rms", ",", "passed" ]
Checks motion between structural scans of the same modality. Ideally obtained at the beginning and end of a scanning session. Parameters ---------- ref_file: nifti file Nifti file of first localizer acquired at the beginning of the session end_file: nifti nifti file of the localizer acquired at the end of the session thres: float threshold in mm of maximum allowed motion. Default 5mm Returns ------- rms : float root mean square of xyz translation passed: boolean indicates if motion passed threshold: 1 if passed, 0 if failed.
[ "Checks", "motion", "between", "structural", "scans", "of", "the", "same", "modality", ".", "Ideally", "obtained", "at", "the", "beginning", "and", "end", "of", "a", "scanning", "session", "." ]
python
train
Riffstation/flask-philo
flask_philo/views.py
https://github.com/Riffstation/flask-philo/blob/76c9d562edb4a77010c8da6dfdb6489fa29cbc9e/flask_philo/views.py#L49-L59
def template_response(self, template_name, headers={}, **values):
    """
    Constructs a response, allowing custom template name and content_type
    """
    response = make_response(
        self.render_template(template_name, **values))

    for field, value in headers.items():
        response.headers.set(field, value)

    return response
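Inside a flask_philo view this might be called as follows (the handler, template name, and header below are illustrative):

# Hypothetical handler method on a flask_philo view subclass:
def get(self):
    return self.template_response(
        'report.html',
        headers={'Content-Type': 'text/html; charset=utf-8'},
        title='Monthly report')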
[ "def", "template_response", "(", "self", ",", "template_name", ",", "headers", "=", "{", "}", ",", "*", "*", "values", ")", ":", "response", "=", "make_response", "(", "self", ".", "render_template", "(", "template_name", ",", "*", "*", "values", ")", ")", "for", "field", ",", "value", "in", "headers", ".", "items", "(", ")", ":", "response", ".", "headers", ".", "set", "(", "field", ",", "value", ")", "return", "response" ]
Constructs a response, allowing custom template name and content_type
[ "Constructs", "a", "response", "allowing", "custom", "template", "name", "and", "content_type" ]
python
train
apache/incubator-heron
heron/tools/cli/src/python/submit.py
https://github.com/apache/incubator-heron/blob/ad10325a0febe89ad337e561ebcbe37ec5d9a5ac/heron/tools/cli/src/python/submit.py#L253-L291
def submit_fatjar(cl_args, unknown_args, tmp_dir):
  '''
  We use the packer to make a package for the jar and dump it
  to a well-known location. We then run the main method of class
  with the specified arguments. We pass arguments as an environment
  variable HERON_OPTIONS.

  This will run the jar file with the topology_class_name. The submitter
  inside will write out the topology defn file to a location that
  we specify. Then we write the topology defn file to a well-known
  location. We then write to appropriate places in zookeeper
  and launch the scheduler jobs.

  :param cl_args:
  :param unknown_args:
  :param tmp_dir:
  :return:
  '''
  # execute main of the topology to create the topology definition
  topology_file = cl_args['topology-file-name']
  main_class = cl_args['topology-class-name']

  res = execute.heron_class(
      class_name=main_class,
      lib_jars=config.get_heron_libs(jars.topology_jars()),
      extra_jars=[topology_file],
      args=tuple(unknown_args),
      java_defines=cl_args['topology_main_jvm_property'])

  result.render(res)

  if not result.is_successful(res):
    err_context = ("Failed to create topology definition "
                   "file when executing class '%s' of file '%s'") % (
                       main_class, topology_file)
    res.add_context(err_context)
    return res

  results = launch_topologies(cl_args, topology_file, tmp_dir)

  return results
[ "def", "submit_fatjar", "(", "cl_args", ",", "unknown_args", ",", "tmp_dir", ")", ":", "# execute main of the topology to create the topology definition", "topology_file", "=", "cl_args", "[", "'topology-file-name'", "]", "main_class", "=", "cl_args", "[", "'topology-class-name'", "]", "res", "=", "execute", ".", "heron_class", "(", "class_name", "=", "main_class", ",", "lib_jars", "=", "config", ".", "get_heron_libs", "(", "jars", ".", "topology_jars", "(", ")", ")", ",", "extra_jars", "=", "[", "topology_file", "]", ",", "args", "=", "tuple", "(", "unknown_args", ")", ",", "java_defines", "=", "cl_args", "[", "'topology_main_jvm_property'", "]", ")", "result", ".", "render", "(", "res", ")", "if", "not", "result", ".", "is_successful", "(", "res", ")", ":", "err_context", "=", "(", "\"Failed to create topology definition \"", "\"file when executing class '%s' of file '%s'\"", ")", "%", "(", "main_class", ",", "topology_file", ")", "res", ".", "add_context", "(", "err_context", ")", "return", "res", "results", "=", "launch_topologies", "(", "cl_args", ",", "topology_file", ",", "tmp_dir", ")", "return", "results" ]
We use the packer to make a package for the jar and dump it to a well-known location. We then run the main method of class with the specified arguments. We pass arguments as an environment variable HERON_OPTIONS. This will run the jar file with the topology_class_name. The submitter inside will write out the topology defn file to a location that we specify. Then we write the topology defn file to a well known location. We then write to appropriate places in zookeeper and launch the scheduler jobs :param cl_args: :param unknown_args: :param tmp_dir: :return:
[ "We", "use", "the", "packer", "to", "make", "a", "package", "for", "the", "jar", "and", "dump", "it", "to", "a", "well", "-", "known", "location", ".", "We", "then", "run", "the", "main", "method", "of", "class", "with", "the", "specified", "arguments", ".", "We", "pass", "arguments", "as", "an", "environment", "variable", "HERON_OPTIONS", "." ]
python
valid
apache/incubator-heron
heron/tools/ui/src/python/handlers/topology.py
https://github.com/apache/incubator-heron/blob/ad10325a0febe89ad337e561ebcbe37ec5d9a5ac/heron/tools/ui/src/python/handlers/topology.py#L99-L115
def get(self):
  '''
  :return:
  '''
  clusters = yield access.get_clusters()  # pylint: disable=no-member
  options = dict(
      topologies=[],  # no topologies
      clusters=[str(cluster) for cluster in clusters],
      active="topologies",  # active icon in the nav bar
      function=common.className,
      baseUrl=self.baseUrl
  )
  # send the all-topologies page
  self.render("topologies.html", **options)
[ "def", "get", "(", "self", ")", ":", "clusters", "=", "yield", "access", ".", "get_clusters", "(", ")", "# pylint: disable=no-member", "options", "=", "dict", "(", "topologies", "=", "[", "]", ",", "# no topologies", "clusters", "=", "[", "str", "(", "cluster", ")", "for", "cluster", "in", "clusters", "]", ",", "active", "=", "\"topologies\"", ",", "# active icon the nav bar", "function", "=", "common", ".", "className", ",", "baseUrl", "=", "self", ".", "baseUrl", ")", "# send the all topologies page", "self", ".", "render", "(", "\"topologies.html\"", ",", "*", "*", "options", ")" ]
:return:
[ ":", "return", ":" ]
python
valid
mitsei/dlkit
dlkit/handcar/osid/sessions.py
https://github.com/mitsei/dlkit/blob/445f968a175d61c8d92c0f617a3c17dc1dc7c584/dlkit/handcar/osid/sessions.py#L101-L109
def _init_object(self, catalog_id, proxy, runtime, cat_name, cat_class):
    """Initialize this object as an OsidObject....do we need this??

    From the Mongo learning impl, but seems unnecessary for Handcar
    """
    self._catalog_identifier = None
    self._init_proxy_and_runtime(proxy, runtime)
    self._catalog = cat_class(self._my_catalog_map)
    self._catalog._authority = self._authority  # there should be a better way...
    self._catalog_id = self._catalog.get_id()
    self._forms = dict()
[ "def", "_init_object", "(", "self", ",", "catalog_id", ",", "proxy", ",", "runtime", ",", "cat_name", ",", "cat_class", ")", ":", "self", ".", "_catalog_identifier", "=", "None", "self", ".", "_init_proxy_and_runtime", "(", "proxy", ",", "runtime", ")", "self", ".", "_catalog", "=", "cat_class", "(", "self", ".", "_my_catalog_map", ")", "self", ".", "_catalog", ".", "_authority", "=", "self", ".", "_authority", "# there should be a better way...", "self", ".", "_catalog_id", "=", "self", ".", "_catalog", ".", "get_id", "(", ")", "self", ".", "_forms", "=", "dict", "(", ")" ]
Initialize this object as an OsidObject....do we need this?? From the Mongo learning impl, but seems unnecessary for Handcar
[ "Initialize", "this", "object", "as", "an", "OsidObject", "....", "do", "we", "need", "this??", "From", "the", "Mongo", "learning", "impl", "but", "seems", "unnecessary", "for", "Handcar" ]
python
train
WebarchivCZ/WA-KAT
src/wa_kat/settings.py
https://github.com/WebarchivCZ/WA-KAT/blob/16d064a3a775dc1d2713debda7847ded52dd2a06/src/wa_kat/settings.py#L108-L123
def _get_all_constants():
    """
    Get list of all uppercase, non-private globals (doesn't start with ``_``).

    Returns:
        list: Uppercase names defined in `globals()` (variables from this \
              module).
    """
    return [
        key for key in globals().keys()
        if all([
            not key.startswith("_"),          # publicly accessible
            key.upper() == key,               # uppercase
            type(globals()[key]) in _ALLOWED  # and with type from _ALLOWED
        ])
    ]
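Illustration, assuming the settings module defines something like the following and that str and int are among the types in _ALLOWED:

# Hypothetical module-level settings:
API_URL = "http://example.com"   # kept: uppercase, public, allowed type
RETRIES = 3                      # kept
_SECRET = "hidden"               # skipped: private name
version = "1.0"                  # skipped: not uppercase

# _get_all_constants() would then return ['API_URL', 'RETRIES']
# (ordering follows globals(), so treat the result as unordered).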
[ "def", "_get_all_constants", "(", ")", ":", "return", "[", "key", "for", "key", "in", "globals", "(", ")", ".", "keys", "(", ")", "if", "all", "(", "[", "not", "key", ".", "startswith", "(", "\"_\"", ")", ",", "# publicly accesible", "key", ".", "upper", "(", ")", "==", "key", ",", "# uppercase", "type", "(", "globals", "(", ")", "[", "key", "]", ")", "in", "_ALLOWED", "# and with type from _ALLOWED", "]", ")", "]" ]
Get list of all uppercase, non-private globals (doesn't start with ``_``). Returns: list: Uppercase names defined in `globals()` (variables from this \ module).
[ "Get", "list", "of", "all", "uppercase", "non", "-", "private", "globals", "(", "doesn", "t", "start", "with", "_", ")", "." ]
python
train
ThreatConnect-Inc/tcex
tcex/tcex_ti/tcex_ti.py
https://github.com/ThreatConnect-Inc/tcex/blob/dd4d7a1ef723af1561687120191886b9a2fd4b47/tcex/tcex_ti/tcex_ti.py#L80-L92
def email_address(self, address, owner=None, **kwargs):
    """
    Create the Email Address TI object.

    Args:
        owner:
        address:
        **kwargs:

    Return:

    """
    return EmailAddress(self.tcex, address, owner=owner, **kwargs)
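A usage sketch, assuming an initialized TcEx app (the owner name is a placeholder, and the create() persistence call is an assumption about the TI object API):

ti = tcex.ti  # assuming an initialized TcEx instance
email = ti.email_address('phisher@example.com', owner='Example Org')
response = email.create()  # assumed persistence call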
[ "def", "email_address", "(", "self", ",", "address", ",", "owner", "=", "None", ",", "*", "*", "kwargs", ")", ":", "return", "EmailAddress", "(", "self", ".", "tcex", ",", "address", ",", "owner", "=", "owner", ",", "*", "*", "kwargs", ")" ]
Create the Email Address TI object. Args: owner: address: **kwargs: Return:
[ "Create", "the", "Email", "Address", "TI", "object", "." ]
python
train
StackStorm/pybind
pybind/slxos/v17r_1_01a/load_balance_lag/__init__.py
https://github.com/StackStorm/pybind/blob/44c467e71b2b425be63867aba6e6fa28b2cfe7fb/pybind/slxos/v17r_1_01a/load_balance_lag/__init__.py#L94-L115
def _set_load_balance(self, v, load=False):
    """
    Setter method for load_balance, mapped from YANG variable
    /load_balance_lag/load_balance (container)

    If this variable is read-only (config: false) in the source YANG file,
    then _set_load_balance is considered as a private method. Backends
    looking to populate this variable should do so via calling
    thisObj._set_load_balance() directly.
    """
    if hasattr(v, "_utype"):
        v = v._utype(v)
    try:
        t = YANGDynClass(
            v, base=load_balance.load_balance, is_container='container',
            presence=False, yang_name="load-balance", rest_name="load-balance",
            parent=self, path_helper=self._path_helper,
            extmethods=self._extmethods, register_paths=True,
            extensions={u'tailf-common': {u'info': u'load-balance'}},
            namespace='urn:brocade.com:mgmt:brocade-rbridge-lag',
            defining_module='brocade-rbridge-lag', yang_type='container',
            is_config=True)
    except (TypeError, ValueError):
        raise ValueError({
            'error-string': """load_balance must be of a type compatible with container""",
            'defined-type': "container",
            'generated-type': """YANGDynClass(base=load_balance.load_balance, is_container='container', presence=False, yang_name="load-balance", rest_name="load-balance", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'load-balance'}}, namespace='urn:brocade.com:mgmt:brocade-rbridge-lag', defining_module='brocade-rbridge-lag', yang_type='container', is_config=True)""",
        })

    self.__load_balance = t
    if hasattr(self, '_set'):
        self._set()
[ "def", "_set_load_balance", "(", "self", ",", "v", ",", "load", "=", "False", ")", ":", "if", "hasattr", "(", "v", ",", "\"_utype\"", ")", ":", "v", "=", "v", ".", "_utype", "(", "v", ")", "try", ":", "t", "=", "YANGDynClass", "(", "v", ",", "base", "=", "load_balance", ".", "load_balance", ",", "is_container", "=", "'container'", ",", "presence", "=", "False", ",", "yang_name", "=", "\"load-balance\"", ",", "rest_name", "=", "\"load-balance\"", ",", "parent", "=", "self", ",", "path_helper", "=", "self", ".", "_path_helper", ",", "extmethods", "=", "self", ".", "_extmethods", ",", "register_paths", "=", "True", ",", "extensions", "=", "{", "u'tailf-common'", ":", "{", "u'info'", ":", "u'load-balance'", "}", "}", ",", "namespace", "=", "'urn:brocade.com:mgmt:brocade-rbridge-lag'", ",", "defining_module", "=", "'brocade-rbridge-lag'", ",", "yang_type", "=", "'container'", ",", "is_config", "=", "True", ")", "except", "(", "TypeError", ",", "ValueError", ")", ":", "raise", "ValueError", "(", "{", "'error-string'", ":", "\"\"\"load_balance must be of a type compatible with container\"\"\"", ",", "'defined-type'", ":", "\"container\"", ",", "'generated-type'", ":", "\"\"\"YANGDynClass(base=load_balance.load_balance, is_container='container', presence=False, yang_name=\"load-balance\", rest_name=\"load-balance\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'load-balance'}}, namespace='urn:brocade.com:mgmt:brocade-rbridge-lag', defining_module='brocade-rbridge-lag', yang_type='container', is_config=True)\"\"\"", ",", "}", ")", "self", ".", "__load_balance", "=", "t", "if", "hasattr", "(", "self", ",", "'_set'", ")", ":", "self", ".", "_set", "(", ")" ]
Setter method for load_balance, mapped from YANG variable /load_balance_lag/load_balance (container) If this variable is read-only (config: false) in the source YANG file, then _set_load_balance is considered as a private method. Backends looking to populate this variable should do so via calling thisObj._set_load_balance() directly.
[ "Setter", "method", "for", "load_balance", "mapped", "from", "YANG", "variable", "/", "load_balance_lag", "/", "load_balance", "(", "container", ")", "If", "this", "variable", "is", "read", "-", "only", "(", "config", ":", "false", ")", "in", "the", "source", "YANG", "file", "then", "_set_load_balance", "is", "considered", "as", "a", "private", "method", ".", "Backends", "looking", "to", "populate", "this", "variable", "should", "do", "so", "via", "calling", "thisObj", ".", "_set_load_balance", "()", "directly", "." ]
python
train
RudolfCardinal/pythonlib
cardinal_pythonlib/rnc_db.py
https://github.com/RudolfCardinal/pythonlib/blob/0b84cb35f38bd7d8723958dae51b480a829b7227/cardinal_pythonlib/rnc_db.py#L1615-L1647
def connect(self,
            engine: str = None,
            interface: str = None,
            host: str = None,
            port: int = None,
            database: str = None,
            driver: str = None,
            dsn: str = None,
            odbc_connection_string: str = None,
            user: str = None,
            password: str = None,
            autocommit: bool = True,
            charset: str = "utf8",
            use_unicode: bool = True) -> bool:
    """
    engine: access, mysql, sqlserver
    interface: mysql, odbc, jdbc
    """
    # Catch all exceptions, so the error-catcher never shows a password.
    # Note also that higher-level things may catch exceptions, so use the
    # logger as well.
    try:
        return self._connect(
            engine=engine, interface=interface,
            host=host, port=port, database=database,
            driver=driver, dsn=dsn,
            odbc_connection_string=odbc_connection_string,
            user=user, password=password, autocommit=autocommit,
            charset=charset, use_unicode=use_unicode)
    except Exception as e:
        self.reraise_connection_exception(e)
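A usage sketch (the DatabaseSupporter class name is an assumption about rnc_db's public API; credentials are placeholders):

db = rnc_db.DatabaseSupporter()  # assumed class providing connect()
db.connect(engine='mysql', interface='mysql',
           host='localhost', port=3306, database='testdb',
           user='testuser', password='***', autocommit=True)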
[ "def", "connect", "(", "self", ",", "engine", ":", "str", "=", "None", ",", "interface", ":", "str", "=", "None", ",", "host", ":", "str", "=", "None", ",", "port", ":", "int", "=", "None", ",", "database", ":", "str", "=", "None", ",", "driver", ":", "str", "=", "None", ",", "dsn", ":", "str", "=", "None", ",", "odbc_connection_string", ":", "str", "=", "None", ",", "user", ":", "str", "=", "None", ",", "password", ":", "str", "=", "None", ",", "autocommit", ":", "bool", "=", "True", ",", "charset", ":", "str", "=", "\"utf8\"", ",", "use_unicode", ":", "bool", "=", "True", ")", "->", "bool", ":", "# Catch all exceptions, so the error-catcher never shows a password.", "# Note also that higher-level things may catch exceptions, so use the", "# logger as well.", "try", ":", "return", "self", ".", "_connect", "(", "engine", "=", "engine", ",", "interface", "=", "interface", ",", "host", "=", "host", ",", "port", "=", "port", ",", "database", "=", "database", ",", "driver", "=", "driver", ",", "dsn", "=", "dsn", ",", "odbc_connection_string", "=", "odbc_connection_string", ",", "user", "=", "user", ",", "password", "=", "password", ",", "autocommit", "=", "autocommit", ",", "charset", "=", "charset", ",", "use_unicode", "=", "use_unicode", ")", "except", "Exception", "as", "e", ":", "self", ".", "reraise_connection_exception", "(", "e", ")" ]
engine: access, mysql, sqlserver interface: mysql, odbc, jdbc
[ "engine", ":", "access", "mysql", "sqlserver", "interface", ":", "mysql", "odbc", "jdbc" ]
python
train
deepmipt/DeepPavlov
deeppavlov/core/common/params.py
https://github.com/deepmipt/DeepPavlov/blob/f3e4a69a3764d25d2f5bad4f1f1aebc872b00f9c/deeppavlov/core/common/params.py#L57-L115
def from_params(params: Dict, mode: str = 'infer', serialized: Any = None,
                **kwargs) -> Component:
    """Builds and returns the Component from corresponding dictionary of parameters."""
    # what is passed in json:
    config_params = {k: _resolve(v) for k, v in params.items()}

    # get component by reference (if any)
    if 'ref' in config_params:
        try:
            component = _refs[config_params['ref']]
            if serialized is not None:
                component.deserialize(serialized)
            return component
        except KeyError:
            e = ConfigError('Component with id "{id}" was referenced but not initialized'
                            .format(id=config_params['ref']))
            log.exception(e)
            raise e
    elif 'config_path' in config_params:
        from deeppavlov.core.commands.infer import build_model
        refs = _refs.copy()
        _refs.clear()
        config = parse_config(expand_path(config_params['config_path']))
        model = build_model(config, serialized=serialized)
        _refs.clear()
        _refs.update(refs)
        try:
            _refs[config_params['id']] = model
        except KeyError:
            pass
        return model

    cls_name = config_params.pop('class_name', None)
    if not cls_name:
        e = ConfigError('Component config has no `class_name` nor `ref` fields')
        log.exception(e)
        raise e
    cls = get_model(cls_name)

    # find the submodels params recursively
    config_params = {k: _init_param(v, mode) for k, v in config_params.items()}

    try:
        spec = inspect.getfullargspec(cls)
        if 'mode' in spec.args + spec.kwonlyargs or spec.varkw is not None:
            kwargs['mode'] = mode

        component = cls(**dict(config_params, **kwargs))

        try:
            _refs[config_params['id']] = component
        except KeyError:
            pass
    except Exception:
        log.exception("Exception in {}".format(cls))
        raise

    if serialized is not None:
        component.deserialize(serialized)

    return component
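A sketch of the three ways a component can be specified (the class name and paths below are hypothetical):

# 1. By a class_name registered in deeppavlov's model registry (hypothetical):
tok = from_params({'class_name': 'my_tokenizer', 'id': 'tok'}, mode='infer')

# 2. By reference to an already-built component:
same_tok = from_params({'ref': 'tok'})

# 3. By a nested config file:
model = from_params({'config_path': 'path/to/config.json', 'id': 'sub'})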
[ "def", "from_params", "(", "params", ":", "Dict", ",", "mode", ":", "str", "=", "'infer'", ",", "serialized", ":", "Any", "=", "None", ",", "*", "*", "kwargs", ")", "->", "Component", ":", "# what is passed in json:", "config_params", "=", "{", "k", ":", "_resolve", "(", "v", ")", "for", "k", ",", "v", "in", "params", ".", "items", "(", ")", "}", "# get component by reference (if any)", "if", "'ref'", "in", "config_params", ":", "try", ":", "component", "=", "_refs", "[", "config_params", "[", "'ref'", "]", "]", "if", "serialized", "is", "not", "None", ":", "component", ".", "deserialize", "(", "serialized", ")", "return", "component", "except", "KeyError", ":", "e", "=", "ConfigError", "(", "'Component with id \"{id}\" was referenced but not initialized'", ".", "format", "(", "id", "=", "config_params", "[", "'ref'", "]", ")", ")", "log", ".", "exception", "(", "e", ")", "raise", "e", "elif", "'config_path'", "in", "config_params", ":", "from", "deeppavlov", ".", "core", ".", "commands", ".", "infer", "import", "build_model", "refs", "=", "_refs", ".", "copy", "(", ")", "_refs", ".", "clear", "(", ")", "config", "=", "parse_config", "(", "expand_path", "(", "config_params", "[", "'config_path'", "]", ")", ")", "model", "=", "build_model", "(", "config", ",", "serialized", "=", "serialized", ")", "_refs", ".", "clear", "(", ")", "_refs", ".", "update", "(", "refs", ")", "try", ":", "_refs", "[", "config_params", "[", "'id'", "]", "]", "=", "model", "except", "KeyError", ":", "pass", "return", "model", "cls_name", "=", "config_params", ".", "pop", "(", "'class_name'", ",", "None", ")", "if", "not", "cls_name", ":", "e", "=", "ConfigError", "(", "'Component config has no `class_name` nor `ref` fields'", ")", "log", ".", "exception", "(", "e", ")", "raise", "e", "cls", "=", "get_model", "(", "cls_name", ")", "# find the submodels params recursively", "config_params", "=", "{", "k", ":", "_init_param", "(", "v", ",", "mode", ")", "for", "k", ",", "v", "in", "config_params", ".", "items", "(", ")", "}", "try", ":", "spec", "=", "inspect", ".", "getfullargspec", "(", "cls", ")", "if", "'mode'", "in", "spec", ".", "args", "+", "spec", ".", "kwonlyargs", "or", "spec", ".", "varkw", "is", "not", "None", ":", "kwargs", "[", "'mode'", "]", "=", "mode", "component", "=", "cls", "(", "*", "*", "dict", "(", "config_params", ",", "*", "*", "kwargs", ")", ")", "try", ":", "_refs", "[", "config_params", "[", "'id'", "]", "]", "=", "component", "except", "KeyError", ":", "pass", "except", "Exception", ":", "log", ".", "exception", "(", "\"Exception in {}\"", ".", "format", "(", "cls", ")", ")", "raise", "if", "serialized", "is", "not", "None", ":", "component", ".", "deserialize", "(", "serialized", ")", "return", "component" ]
Builds and returns the Component from corresponding dictionary of parameters.
[ "Builds", "and", "returns", "the", "Component", "from", "corresponding", "dictionary", "of", "parameters", "." ]
python
test
casouri/launchdman
launchdman/__init__.py
https://github.com/casouri/launchdman/blob/c83840e640cb075fab2534049f1e25fac6933c64/launchdman/__init__.py#L337-L347
def remove(self, *l):
    '''remove elements from self.value by matching.

    Create the exact same single(s) you want to delete and pass them in.
    Normally this method needs to be overridden by a subclass. It only
    looks inside the current instance's value, not recursively. There is
    no need for a recursive one anyway.

    Args:
        *l: a single element, a bunch of elements separated by commas,
            or a list of elements, or any combination. Element is what
            you match with.
    '''
    removeList = list(flatten(l))
    self._remove(removeList, self.value)
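For example (the instance s and its contents are hypothetical; flatten lets all of these call styles collapse into one flat list of things to match):

s.remove('a')            # a single element
s.remove('a', 'b')       # several elements
s.remove(['a', 'b'])     # a list of elements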
[ "def", "remove", "(", "self", ",", "*", "l", ")", ":", "removeList", "=", "list", "(", "flatten", "(", "l", ")", ")", "self", ".", "_remove", "(", "removeList", ",", "self", ".", "value", ")" ]
remove elements from self.value by matching. Create the exactly same single you want to delete and pass it(them) in. Normally this method needs to be overwrited by subclass. It only looks inside current instance's value, not recursive. There is no need for a recursive one anyway. Args: *l: a single element, a bunch of element seperated by comma, or a list of elements, or any combination. Element is what you match with.
[ "remove", "elements", "from", "self", ".", "value", "by", "matching", "." ]
python
train
inveniosoftware/invenio-config
invenio_config/utils.py
https://github.com/inveniosoftware/invenio-config/blob/8d1e63ac045cd9c58a3399c6b58845e6daa06102/invenio_config/utils.py#L20-L62
def create_config_loader(config=None, env_prefix='APP'):
    """Create a default configuration loader.

    A configuration loader takes a Flask application and keyword arguments
    and updates the Flask application's configuration as it sees fit.

    This default configuration loader will load configuration in the
    following order:

    1. Load configuration from ``invenio_config.module`` entry points group,
       following the alphabetical ascending order in case of multiple entry
       points defined. For example, the config of an app with entry point
       name ``10_app`` will be loaded after the config of an app with entry
       point name ``00_app``.
    2. Load configuration from ``config`` module if provided as argument.
    3. Load configuration from the instance folder:
       ``<app.instance_path>/<app.name>.cfg``.
    4. Load configuration keyword arguments provided.
    5. Load configuration from environment variables with the prefix
       ``env_prefix``.

    If no secret key has been set a warning will be issued.

    :param config: Either an import string to a module with configuration or
        alternatively the module itself.
    :param env_prefix: Environment variable prefix to import configuration
        from.
    :return: A callable with the method signature
        ``config_loader(app, **kwargs)``.

    .. versionadded:: 1.0.0
    """
    def _config_loader(app, **kwargs_config):
        InvenioConfigEntryPointModule(app=app)
        if config:
            InvenioConfigModule(app=app, module=config)
        InvenioConfigInstanceFolder(app=app)
        app.config.update(**kwargs_config)
        InvenioConfigEnvironment(app=app, prefix='{0}_'.format(env_prefix))
        InvenioConfigDefault(app=app)

    return _config_loader
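A usage sketch (module path and prefix are placeholders):

from flask import Flask

config_loader = create_config_loader(config='mysite.config',
                                     env_prefix='MYSITE')
app = Flask('mysite')
config_loader(app, DEBUG=True)  # kwargs are applied in step 4 above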
[ "def", "create_config_loader", "(", "config", "=", "None", ",", "env_prefix", "=", "'APP'", ")", ":", "def", "_config_loader", "(", "app", ",", "*", "*", "kwargs_config", ")", ":", "InvenioConfigEntryPointModule", "(", "app", "=", "app", ")", "if", "config", ":", "InvenioConfigModule", "(", "app", "=", "app", ",", "module", "=", "config", ")", "InvenioConfigInstanceFolder", "(", "app", "=", "app", ")", "app", ".", "config", ".", "update", "(", "*", "*", "kwargs_config", ")", "InvenioConfigEnvironment", "(", "app", "=", "app", ",", "prefix", "=", "'{0}_'", ".", "format", "(", "env_prefix", ")", ")", "InvenioConfigDefault", "(", "app", "=", "app", ")", "return", "_config_loader" ]
Create a default configuration loader. A configuration loader takes a Flask application and keyword arguments and updates the Flask application's configuration as it sees fit. This default configuration loader will load configuration in the following order: 1. Load configuration from ``invenio_config.module`` entry points group, following the alphabetical ascending order in case of multiple entry points defined. For example, the config of an app with entry point name ``10_app`` will be loaded after the config of an app with entry point name ``00_app``. 2. Load configuration from ``config`` module if provided as argument. 3. Load configuration from the instance folder: ``<app.instance_path>/<app.name>.cfg``. 4. Load configuration keyword arguments provided. 5. Load configuration from environment variables with the prefix ``env_prefix``. If no secret key has been set a warning will be issued. :param config: Either an import string to a module with configuration or alternatively the module itself. :param env_prefix: Environment variable prefix to import configuration from. :return: A callable with the method signature ``config_loader(app, **kwargs)``. .. versionadded:: 1.0.0
[ "Create", "a", "default", "configuration", "loader", "." ]
python
train
aliyun/aliyun-odps-python-sdk
odps/lib/tzlocal/win32.py
https://github.com/aliyun/aliyun-odps-python-sdk/blob/4b0de18f5864386df6068f26f026e62f932c41e4/odps/lib/tzlocal/win32.py#L99-L104
def reload_localzone():
    """Reload the cached localzone. You need to call this if the timezone has changed."""
    global _cache_tz
    _cache_tz = pytz.timezone(get_localzone_name())
    utils.assert_tz_offset(_cache_tz)
    return _cache_tz
[ "def", "reload_localzone", "(", ")", ":", "global", "_cache_tz", "_cache_tz", "=", "pytz", ".", "timezone", "(", "get_localzone_name", "(", ")", ")", "utils", ".", "assert_tz_offset", "(", "_cache_tz", ")", "return", "_cache_tz" ]
Reload the cached localzone. You need to call this if the timezone has changed.
[ "Reload", "the", "cached", "localzone", ".", "You", "need", "to", "call", "this", "if", "the", "timezone", "has", "changed", "." ]
python
train
DataBiosphere/dsub
dsub/providers/google_base.py
https://github.com/DataBiosphere/dsub/blob/443ce31daa6023dc2fd65ef2051796e19d18d5a7/dsub/providers/google_base.py#L577-L593
def execute(api):
  """Executes operation.

  Args:
    api: The base API object

  Returns:
    A response body object
  """
  try:
    return api.execute()
  except Exception as exception:
    now = datetime.now().strftime('%Y-%m-%d %H:%M:%S.%f')
    _print_error('%s: Exception %s: %s' %
                 (now, type(exception).__name__, str(exception)))

    # Re-raise exception to be handled by retry logic
    raise exception
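Callers pass a prepared request object from the Google API client, whose execute() method issues the HTTP call; a sketch (the service resource and method are hypothetical):

# Hypothetical: 'service' is a googleapiclient discovery resource.
request = service.operations().list(name='operations')
response = execute(request)  # failures are logged here, then re-raised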
[ "def", "execute", "(", "api", ")", ":", "try", ":", "return", "api", ".", "execute", "(", ")", "except", "Exception", "as", "exception", ":", "now", "=", "datetime", ".", "now", "(", ")", ".", "strftime", "(", "'%Y-%m-%d %H:%M:%S.%f'", ")", "_print_error", "(", "'%s: Exception %s: %s'", "%", "(", "now", ",", "type", "(", "exception", ")", ".", "__name__", ",", "str", "(", "exception", ")", ")", ")", "# Re-raise exception to be handled by retry logic", "raise", "exception" ]
Executes operation. Args: api: The base API object Returns: A response body object
[ "Executes", "operation", "." ]
python
valid
awslabs/sockeye
sockeye/decoder.py
https://github.com/awslabs/sockeye/blob/5d64a1ee1ef3cbba17c6d1d94bc061020c43f6ab/sockeye/decoder.py#L176-L191
def state_shapes(self,
                 batch_size: int,
                 target_max_length: int,
                 source_encoded_max_length: int,
                 source_encoded_depth: int) -> List[mx.io.DataDesc]:
    """
    Returns a list of shape descriptions given batch size, encoded source
    max length and encoded source depth. Used for inference.

    :param batch_size: Batch size during inference.
    :param target_max_length: Current target sequence length.
    :param source_encoded_max_length: Size of encoder time dimension.
    :param source_encoded_depth: Depth of encoded source.
    :return: List of shape descriptions.
    """
    pass
[ "def", "state_shapes", "(", "self", ",", "batch_size", ":", "int", ",", "target_max_length", ":", "int", ",", "source_encoded_max_length", ":", "int", ",", "source_encoded_depth", ":", "int", ")", "->", "List", "[", "mx", ".", "io", ".", "DataDesc", "]", ":", "pass" ]
Returns a list of shape descriptions given batch size, encoded source max length and encoded source depth. Used for inference. :param batch_size: Batch size during inference. :param target_max_length: Current target sequence length. :param source_encoded_max_length: Size of encoder time dimension. :param source_encoded_depth: Depth of encoded source. :return: List of shape descriptions.
[ "Returns", "a", "list", "of", "shape", "descriptions", "given", "batch", "size", "encoded", "source", "max", "length", "and", "encoded", "source", "depth", ".", "Used", "for", "inference", "." ]
python
train
FulcrumTechnologies/pyconfluence
pyconfluence/api.py
https://github.com/FulcrumTechnologies/pyconfluence/blob/a999726dbc1cbdd3d9062234698eeae799ce84ce/pyconfluence/api.py#L21-L36
def load_variables():
    """Load variables from environment variables."""
    if (not os.environ.get("PYCONFLUENCE_TOKEN") or
            not os.environ.get("PYCONFLUENCE_USER") or
            not os.environ.get("PYCONFLUENCE_ORG")):
        print("One or more pyconfluence environment variables are not set. "
              "See README for directions on how to resolve this.")
        sys.exit("Error")

    global token
    global user
    global base_url
    token = os.environ["PYCONFLUENCE_TOKEN"]
    user = os.environ["PYCONFLUENCE_USER"]
    base_url = ("https://" + os.environ["PYCONFLUENCE_ORG"] + ".atlassian"
                ".net/wiki/rest/api/content")
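A usage sketch (all values are placeholders):

import os

os.environ['PYCONFLUENCE_TOKEN'] = 'api-token'
os.environ['PYCONFLUENCE_USER'] = 'user@example.com'
os.environ['PYCONFLUENCE_ORG'] = 'myorg'
load_variables()  # the token, user and base_url module globals are now set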
[ "def", "load_variables", "(", ")", ":", "if", "(", "not", "os", ".", "environ", ".", "get", "(", "\"PYCONFLUENCE_TOKEN\"", ")", "or", "not", "os", ".", "environ", ".", "get", "(", "\"PYCONFLUENCE_USER\"", ")", "or", "not", "os", ".", "environ", ".", "get", "(", "\"PYCONFLUENCE_ORG\"", ")", ")", ":", "print", "(", "\"One or more pyconfluence environment variables are not set. \"", "\"See README for directions on how to resolve this.\"", ")", "sys", ".", "exit", "(", "\"Error\"", ")", "global", "token", "global", "user", "global", "base_url", "token", "=", "os", ".", "environ", "[", "\"PYCONFLUENCE_TOKEN\"", "]", "user", "=", "os", ".", "environ", "[", "\"PYCONFLUENCE_USER\"", "]", "base_url", "=", "(", "\"https://\"", "+", "os", ".", "environ", "[", "\"PYCONFLUENCE_ORG\"", "]", "+", "\".atlassian\"", "\".net/wiki/rest/api/content\"", ")" ]
Load variables from environment variables.
[ "Load", "variables", "from", "environment", "variables", "." ]
python
train
apache/incubator-heron
heron/tools/tracker/src/python/topology.py
https://github.com/apache/incubator-heron/blob/ad10325a0febe89ad337e561ebcbe37ec5d9a5ac/heron/tools/tracker/src/python/topology.py#L174-L191
def num_instances(self):
  """ Number of spouts + bolts """
  num = 0

  # Get all the components
  components = self.spouts() + self.bolts()

  # Get instances for each worker
  for component in components:
    config = component.comp.config
    for kvs in config.kvs:
      if kvs.key == api_constants.TOPOLOGY_COMPONENT_PARALLELISM:
        num += int(kvs.value)
        break

  return num
[ "def", "num_instances", "(", "self", ")", ":", "num", "=", "0", "# Get all the components", "components", "=", "self", ".", "spouts", "(", ")", "+", "self", ".", "bolts", "(", ")", "# Get instances for each worker", "for", "component", "in", "components", ":", "config", "=", "component", ".", "comp", ".", "config", "for", "kvs", "in", "config", ".", "kvs", ":", "if", "kvs", ".", "key", "==", "api_constants", ".", "TOPOLOGY_COMPONENT_PARALLELISM", ":", "num", "+=", "int", "(", "kvs", ".", "value", ")", "break", "return", "num" ]
Number of spouts + bolts
[ "Number", "of", "spouts", "+", "bolts" ]
python
valid
MacHu-GWU/crawlib-project
crawlib/pipeline/mongodb/query_builder.py
https://github.com/MacHu-GWU/crawlib-project/blob/241516f2a7a0a32c692f7af35a1f44064e8ce1ab/crawlib/pipeline/mongodb/query_builder.py#L18-L42
def finished(finished_status, update_interval, status_key, edit_at_key):
    """
    Create a dict query for pymongo that gets all finished tasks: the status
    code is greater than or equal to a given value, and the edit time falls
    within the most recent update interval.

    :param finished_status: int, status code that greater or equal than
        this will be considered as finished.
    :param update_interval: int, the record will be updated every x seconds.
    :param status_key: status code field key, support dot notation.
    :param edit_at_key: edit_at time field key, support dot notation.

    :return: dict, a pymongo filter.
    """
    return {
        status_key: {"$gte": finished_status},
        edit_at_key: {
            "$gte": x_seconds_before_now(update_interval),
        },
    }
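For example, a filter for records with status >= 50 that were touched within the last day (assuming x_seconds_before_now returns a datetime that far in the past):

query = finished(finished_status=50, update_interval=24 * 3600,
                 status_key='status', edit_at_key='edit_at')
# -> {'status': {'$gte': 50},
#     'edit_at': {'$gte': <datetime 24h before now>}}
# collection.find(query) then yields all finished, recently refreshed tasks.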
[ "def", "finished", "(", "finished_status", ",", "update_interval", ",", "status_key", ",", "edit_at_key", ")", ":", "return", "{", "status_key", ":", "{", "\"$gte\"", ":", "finished_status", "}", ",", "edit_at_key", ":", "{", "\"$gte\"", ":", "x_seconds_before_now", "(", "update_interval", ")", ",", "}", ",", "}" ]
Create dict query for pymongo that getting all finished task. :param finished_status: int, status code that greater or equal than this will be considered as finished. :param update_interval: int, the record will be updated every x seconds. :param status_key: status code field key, support dot notation. :param edit_at_key: edit_at time field key, support dot notation. :return: dict, a pymongo filter. **中文文档** 状态码大于某个值, 并且, 更新时间在最近一段时间以内.
[ "Create", "dict", "query", "for", "pymongo", "that", "getting", "all", "finished", "task", "." ]
python
train
sammchardy/python-binance
binance/client.py
https://github.com/sammchardy/python-binance/blob/31c0d0a32f9edd528c6c2c1dd3044d9a34ce43cc/binance/client.py#L1798-L1835
def get_asset_details(self, **params):
    """Fetch details on assets.

    https://github.com/binance-exchange/binance-official-api-docs/blob/master/wapi-api.md#asset-detail-user_data

    :param recvWindow: the number of milliseconds the request is valid for
    :type recvWindow: int

    :returns: API response

    .. code-block:: python

        {
            "success": true,
            "assetDetail": {
                "CTR": {
                    "minWithdrawAmount": "70.00000000", //min withdraw amount
                    "depositStatus": false,//deposit status
                    "withdrawFee": 35, // withdraw fee
                    "withdrawStatus": true, //withdraw status
                    "depositTip": "Delisted, Deposit Suspended" //reason
                },
                "SKY": {
                    "minWithdrawAmount": "0.02000000",
                    "depositStatus": true,
                    "withdrawFee": 0.01,
                    "withdrawStatus": true
                }
            }
        }

    :raises: BinanceWithdrawException

    """
    res = self._request_withdraw_api('get', 'assetDetail.html', True, data=params)
    if not res['success']:
        raise BinanceWithdrawException(res['msg'])
    return res
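A usage sketch (the API keys are placeholders):

from binance.client import Client

client = Client('api_key', 'api_secret')
details = client.get_asset_details()
print(details['assetDetail']['SKY']['withdrawFee'])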
[ "def", "get_asset_details", "(", "self", ",", "*", "*", "params", ")", ":", "res", "=", "self", ".", "_request_withdraw_api", "(", "'get'", ",", "'assetDetail.html'", ",", "True", ",", "data", "=", "params", ")", "if", "not", "res", "[", "'success'", "]", ":", "raise", "BinanceWithdrawException", "(", "res", "[", "'msg'", "]", ")", "return", "res" ]
Fetch details on assets. https://github.com/binance-exchange/binance-official-api-docs/blob/master/wapi-api.md#asset-detail-user_data :param recvWindow: the number of milliseconds the request is valid for :type recvWindow: int :returns: API response .. code-block:: python { "success": true, "assetDetail": { "CTR": { "minWithdrawAmount": "70.00000000", //min withdraw amount "depositStatus": false,//deposit status "withdrawFee": 35, // withdraw fee "withdrawStatus": true, //withdraw status "depositTip": "Delisted, Deposit Suspended" //reason }, "SKY": { "minWithdrawAmount": "0.02000000", "depositStatus": true, "withdrawFee": 0.01, "withdrawStatus": true } } } :raises: BinanceWithdrawException
[ "Fetch", "details", "on", "assets", "." ]
python
train
teepark/junction
junction/hub.py
https://github.com/teepark/junction/blob/481d135d9e53acb55c72686e2eb4483432f35fa6/junction/hub.py#L378-L383
def add_peer(self, peer_addr):
    "Build a connection to the Hub at a given ``(host, port)`` address"
    peer = connection.Peer(
        self._ident, self._dispatcher, peer_addr, backend.Socket())
    peer.start()
    self._started_peers[peer_addr] = peer
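A one-line usage sketch (address is a placeholder, and hub is assumed to be a started junction Hub):

hub.add_peer(('10.0.0.2', 4321))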
[ "def", "add_peer", "(", "self", ",", "peer_addr", ")", ":", "peer", "=", "connection", ".", "Peer", "(", "self", ".", "_ident", ",", "self", ".", "_dispatcher", ",", "peer_addr", ",", "backend", ".", "Socket", "(", ")", ")", "peer", ".", "start", "(", ")", "self", ".", "_started_peers", "[", "peer_addr", "]", "=", "peer" ]
Build a connection to the Hub at a given ``(host, port)`` address
[ "Build", "a", "connection", "to", "the", "Hub", "at", "a", "given", "(", "host", "port", ")", "address" ]
python
train
NASA-AMMOS/AIT-Core
ait/core/seq.py
https://github.com/NASA-AMMOS/AIT-Core/blob/9d85bd9c738e7a6a6fbdff672bea708238b02a3a/ait/core/seq.py#L720-L730
def location(self, pos):
    """Formats the location of the given SeqPos as: filename:line:col:"""
    result = ''

    if self.filename:
        result += self.filename + ':'

    if pos:
        result += str(pos)

    return result
[ "def", "location", "(", "self", ",", "pos", ")", ":", "result", "=", "''", "if", "self", ".", "filename", ":", "result", "+=", "self", ".", "filename", "+", "':'", "if", "pos", ":", "result", "+=", "str", "(", "pos", ")", "return", "result" ]
Formats the location of the given SeqPos as: filename:line:col:
[ "Formats", "the", "location", "of", "the", "given", "SeqPos", "as", ":" ]
python
train
MinchinWeb/colourettu
make_release.py
https://github.com/MinchinWeb/colourettu/blob/f0b2f6b1d44055f3ccee62ac2759829f1e16a252/make_release.py#L171-L196
def add_release_to_changelog(version):
    """Add a release line at the top of the first list it finds.

    Assumes your changelog is managed with `releases`."""
    temp_file = changelog_file().parent / ("~" + changelog_file().name)
    now = datetime.today()
    release_added = False
    with open(str(temp_file), 'w') as g:
        with open(str(changelog_file()), 'r') as f:
            for line in f:
                list_match = list_match_re.match(line)
                if list_match and not release_added:
                    release_line = "{}{} :release:`{} <{}-{:02}-{:02}>`".format(
                        list_match.group("leading"),
                        list_match.group("mark"),
                        version, now.year, now.month, now.day)
                    print(release_line, file=g)
                    release_added = True
                print(line, file=g, end="")
        if not release_added:
            release_line = "{}{} :release:`{} <{}-{:02}-{:02}>`".format(
                " ", "-", version, now.year, now.month, now.day)
            print(release_line, file=g)
            print('', file=g)  # add a blank line at the end of the file
    shutil.copyfile(str(temp_file), str(changelog_file()))
    os.remove(str(temp_file))
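Given an existing bullet list in the changelog, this prepends a releases-style line; for example (date below is illustrative):

add_release_to_changelog('1.2.3')
# inserts, above the first matching list item:
# - :release:`1.2.3 <2024-06-30>`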
[ "def", "add_release_to_changelog", "(", "version", ")", ":", "temp_file", "=", "changelog_file", "(", ")", ".", "parent", "/", "(", "\"~\"", "+", "changelog_file", "(", ")", ".", "name", ")", "now", "=", "datetime", ".", "today", "(", ")", "release_added", "=", "False", "with", "open", "(", "str", "(", "temp_file", ")", ",", "'w'", ")", "as", "g", ":", "with", "open", "(", "str", "(", "changelog_file", "(", ")", ")", ",", "'r'", ")", "as", "f", ":", "for", "line", "in", "f", ":", "list_match", "=", "list_match_re", ".", "match", "(", "line", ")", "if", "list_match", "and", "not", "release_added", ":", "release_line", "=", "\"{}{} :release:`{} <{}-{:02}-{:02}>`\"", ".", "format", "(", "list_match", ".", "group", "(", "\"leading\"", ")", ",", "list_match", ".", "group", "(", "\"mark\"", ")", ",", "version", ",", "now", ".", "year", ",", "now", ".", "month", ",", "now", ".", "day", ")", "print", "(", "release_line", ",", "file", "=", "g", ")", "release_added", "=", "True", "print", "(", "line", ",", "file", "=", "g", ",", "end", "=", "\"\"", ")", "if", "not", "release_added", ":", "release_line", "=", "\"{}{} :release:`{} <{}-{:02}-{:02}>`\"", ".", "format", "(", "\" \"", ",", "\"-\"", ",", "version", ",", "now", ".", "year", ",", "now", ".", "month", ",", "now", ".", "day", ")", "print", "(", "release_line", ",", "file", "=", "g", ")", "print", "(", "''", ",", "file", "=", "g", ")", "# add a blank line at the end of the file", "shutil", ".", "copyfile", "(", "str", "(", "temp_file", ")", ",", "str", "(", "changelog_file", "(", ")", ")", ")", "os", ".", "remove", "(", "str", "(", "temp_file", ")", ")" ]
Add release line at the top of the first list it finds

Assumes your changelog is managed with `releases`
[ "Add", "release", "line", "at", "the", "top", "of", "the", "first", "list", "it", "finds" ]
python
train
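list_match_re is referenced but not defined in the record above; judging by the group names "leading" and "mark", it captures indentation and the bullet character of a changelog entry. The pattern below is an illustrative assumption, not the project's actual regex.

import re

# Hypothetical stand-in for list_match_re; group names mirror the record's usage.
list_match_re = re.compile(r'^(?P<leading>\s*)(?P<mark>[-*+])\s')

match = list_match_re.match('  - :bug:`12` fixed a thing')
print(repr(match.group('leading')), match.group('mark'))  # '  ' -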
Legobot/Legobot
Legobot/Connectors/IRC.py
https://github.com/Legobot/Legobot/blob/d13da172960a149681cb5151ce34b2f3a58ad32b/Legobot/Connectors/IRC.py#L133-L152
def on_welcome(self, c, e): """ This function runs when the bot successfully connects to the IRC server """ self.backoff = 1 # Assume we had a good connection. Reset backoff. if self.nickserv: if Utilities.isNotEmpty(self.nickserv_pass): self.identify(c, e, self.nickserv_pass) time.sleep(3) # Make sure Nickserv really sees us else: logger.error('If nickserv is enabled, you must supply' ' a password') if self.nickserv is False and self.nickserv_pass is not None: logger.warn('It appears you provided a nickserv password but ' 'did not enable nickserv authentication') for channel in self.my_channels: logger.debug('Attempting to join {0!s}'.format(channel)) c.join(channel)
[ "def", "on_welcome", "(", "self", ",", "c", ",", "e", ")", ":", "self", ".", "backoff", "=", "1", "# Assume we had a good connection. Reset backoff.", "if", "self", ".", "nickserv", ":", "if", "Utilities", ".", "isNotEmpty", "(", "self", ".", "nickserv_pass", ")", ":", "self", ".", "identify", "(", "c", ",", "e", ",", "self", ".", "nickserv_pass", ")", "time", ".", "sleep", "(", "3", ")", "# Make sure Nickserv really sees us", "else", ":", "logger", ".", "error", "(", "'If nickserv is enabled, you must supply'", "' a password'", ")", "if", "self", ".", "nickserv", "is", "False", "and", "self", ".", "nickserv_pass", "is", "not", "None", ":", "logger", ".", "warn", "(", "'It appears you provided a nickserv password but '", "'did not enable nickserv authentication'", ")", "for", "channel", "in", "self", ".", "my_channels", ":", "logger", ".", "debug", "(", "'Attempting to join {0!s}'", ".", "format", "(", "channel", ")", ")", "c", ".", "join", "(", "channel", ")" ]
This function runs when the bot successfully connects to the IRC server
[ "This", "function", "runs", "when", "the", "bot", "successfully", "connects", "to", "the", "IRC", "server" ]
python
train
Blueqat/Blueqat
blueqat/gate.py
https://github.com/Blueqat/Blueqat/blob/2ac8592c79e7acf4f385d982af82fbd68dafa5cc/blueqat/gate.py#L354-L360
def slicing(args, length): """Internally used.""" if isinstance(args, tuple): for arg in args: yield from slicing_singlevalue(arg, length) else: yield from slicing_singlevalue(args, length)
[ "def", "slicing", "(", "args", ",", "length", ")", ":", "if", "isinstance", "(", "args", ",", "tuple", ")", ":", "for", "arg", "in", "args", ":", "yield", "from", "slicing_singlevalue", "(", "arg", ",", "length", ")", "else", ":", "yield", "from", "slicing_singlevalue", "(", "args", ",", "length", ")" ]
Internally used.
[ "Internally", "used", "." ]
python
train
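slicing delegates each argument to slicing_singlevalue, which is not included in the record above; a minimal sketch of a compatible helper makes the pair runnable. The negative-index wrapping is an assumption, and Blueqat's real helper may differ in detail.

# Hypothetical companion for the record's slicing().
def slicing_singlevalue(arg, length):
    if isinstance(arg, slice):
        yield from range(*arg.indices(length))  # expand start/stop/step within bounds
    else:
        yield arg % length  # wrap negative qubit indices

def slicing(args, length):
    if isinstance(args, tuple):
        for arg in args:
            yield from slicing_singlevalue(arg, length)
    else:
        yield from slicing_singlevalue(args, length)

print(list(slicing((0, slice(1, 3), -1), 4)))  # [0, 1, 2, 3]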
brocade/pynos
pynos/versions/ver_6/ver_6_0_1/yang/ietf_netconf_monitoring.py
https://github.com/brocade/pynos/blob/bd8a34e98f322de3fc06750827d8bbc3a0c00380/pynos/versions/ver_6/ver_6_0_1/yang/ietf_netconf_monitoring.py#L252-L265
def netconf_state_sessions_session_source_host(self, **kwargs): """Auto Generated Code """ config = ET.Element("config") netconf_state = ET.SubElement(config, "netconf-state", xmlns="urn:ietf:params:xml:ns:yang:ietf-netconf-monitoring") sessions = ET.SubElement(netconf_state, "sessions") session = ET.SubElement(sessions, "session") session_id_key = ET.SubElement(session, "session-id") session_id_key.text = kwargs.pop('session_id') source_host = ET.SubElement(session, "source-host") source_host.text = kwargs.pop('source_host') callback = kwargs.pop('callback', self._callback) return callback(config)
[ "def", "netconf_state_sessions_session_source_host", "(", "self", ",", "*", "*", "kwargs", ")", ":", "config", "=", "ET", ".", "Element", "(", "\"config\"", ")", "netconf_state", "=", "ET", ".", "SubElement", "(", "config", ",", "\"netconf-state\"", ",", "xmlns", "=", "\"urn:ietf:params:xml:ns:yang:ietf-netconf-monitoring\"", ")", "sessions", "=", "ET", ".", "SubElement", "(", "netconf_state", ",", "\"sessions\"", ")", "session", "=", "ET", ".", "SubElement", "(", "sessions", ",", "\"session\"", ")", "session_id_key", "=", "ET", ".", "SubElement", "(", "session", ",", "\"session-id\"", ")", "session_id_key", ".", "text", "=", "kwargs", ".", "pop", "(", "'session_id'", ")", "source_host", "=", "ET", ".", "SubElement", "(", "session", ",", "\"source-host\"", ")", "source_host", ".", "text", "=", "kwargs", ".", "pop", "(", "'source_host'", ")", "callback", "=", "kwargs", ".", "pop", "(", "'callback'", ",", "self", ".", "_callback", ")", "return", "callback", "(", "config", ")" ]
Auto Generated Code
[ "Auto", "Generated", "Code" ]
python
train
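Since the method's only work before the callback is building XML, its output is easy to inspect. The snippet below replays the element construction with the standard library and prints the document, with hard-coded example values standing in for the kwargs.

import xml.etree.ElementTree as ET

config = ET.Element("config")
netconf_state = ET.SubElement(
    config, "netconf-state",
    xmlns="urn:ietf:params:xml:ns:yang:ietf-netconf-monitoring")
sessions = ET.SubElement(netconf_state, "sessions")
session = ET.SubElement(sessions, "session")
ET.SubElement(session, "session-id").text = "42"            # example session_id
ET.SubElement(session, "source-host").text = "192.0.2.10"   # example source_host

print(ET.tostring(config).decode())
# <config><netconf-state xmlns="urn:ietf:params:xml:ns:yang:ietf-netconf-monitoring">
# <sessions><session><session-id>42</session-id><source-host>192.0.2.10</source-host>
# </session></sessions></netconf-state></config>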
apache/spark
python/pyspark/mllib/classification.py
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/mllib/classification.py#L657-L679
def train(cls, data, lambda_=1.0): """ Train a Naive Bayes model given an RDD of (label, features) vectors. This is the Multinomial NB (U{http://tinyurl.com/lsdw6p}) which can handle all kinds of discrete data. For example, by converting documents into TF-IDF vectors, it can be used for document classification. By making every vector a 0-1 vector, it can also be used as Bernoulli NB (U{http://tinyurl.com/p7c96j6}). The input feature values must be nonnegative. :param data: RDD of LabeledPoint. :param lambda_: The smoothing parameter. (default: 1.0) """ first = data.first() if not isinstance(first, LabeledPoint): raise ValueError("`data` should be an RDD of LabeledPoint") labels, pi, theta = callMLlibFunc("trainNaiveBayesModel", data, lambda_) return NaiveBayesModel(labels.toArray(), pi.toArray(), numpy.array(theta))
[ "def", "train", "(", "cls", ",", "data", ",", "lambda_", "=", "1.0", ")", ":", "first", "=", "data", ".", "first", "(", ")", "if", "not", "isinstance", "(", "first", ",", "LabeledPoint", ")", ":", "raise", "ValueError", "(", "\"`data` should be an RDD of LabeledPoint\"", ")", "labels", ",", "pi", ",", "theta", "=", "callMLlibFunc", "(", "\"trainNaiveBayesModel\"", ",", "data", ",", "lambda_", ")", "return", "NaiveBayesModel", "(", "labels", ".", "toArray", "(", ")", ",", "pi", ".", "toArray", "(", ")", ",", "numpy", ".", "array", "(", "theta", ")", ")" ]
Train a Naive Bayes model given an RDD of (label, features) vectors. This is the Multinomial NB (U{http://tinyurl.com/lsdw6p}) which can handle all kinds of discrete data. For example, by converting documents into TF-IDF vectors, it can be used for document classification. By making every vector a 0-1 vector, it can also be used as Bernoulli NB (U{http://tinyurl.com/p7c96j6}). The input feature values must be nonnegative. :param data: RDD of LabeledPoint. :param lambda_: The smoothing parameter. (default: 1.0)
[ "Train", "a", "Naive", "Bayes", "model", "given", "an", "RDD", "of", "(", "label", "features", ")", "vectors", "." ]
python
train
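A short usage sketch for the record above, assuming a live SparkContext bound to sc. Features are kept nonnegative as the docstring requires, and lambda_ is the Laplace smoothing term.

from pyspark.mllib.classification import NaiveBayes
from pyspark.mllib.linalg import Vectors
from pyspark.mllib.regression import LabeledPoint

# Two-class toy corpus as count vectors (nonnegative features only).
data = sc.parallelize([
    LabeledPoint(0.0, Vectors.dense([1.0, 0.0])),
    LabeledPoint(1.0, Vectors.dense([0.0, 2.0])),
])
model = NaiveBayes.train(data, lambda_=1.0)
print(model.predict(Vectors.dense([0.0, 1.0])))  # expected label: 1.0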
manns/pyspread
pyspread/src/gui/_chart_dialog.py
https://github.com/manns/pyspread/blob/0e2fd44c2e0f06605efc3058c20a43a8c1f9e7e0/pyspread/src/gui/_chart_dialog.py#L630-L637
def OnDirectionChoice(self, event): """Direction choice event handler""" label = self.direction_choicectrl.GetItems()[event.GetSelection()] param = self.choice_label2param[label] self.attrs["direction"] = param post_command_event(self, self.DrawChartMsg)
[ "def", "OnDirectionChoice", "(", "self", ",", "event", ")", ":", "label", "=", "self", ".", "direction_choicectrl", ".", "GetItems", "(", ")", "[", "event", ".", "GetSelection", "(", ")", "]", "param", "=", "self", ".", "choice_label2param", "[", "label", "]", "self", ".", "attrs", "[", "\"direction\"", "]", "=", "param", "post_command_event", "(", "self", ",", "self", ".", "DrawChartMsg", ")" ]
Direction choice event handler
[ "Direction", "choice", "event", "handler" ]
python
train
flowersteam/explauto
explauto/environment/poppy/poppy_env.py
https://github.com/flowersteam/explauto/blob/cf0f81ecb9f6412f7276a95bd27359000e1e26b6/explauto/environment/poppy/poppy_env.py#L42-L45
def compute_motor_command(self, m_ag): """ Compute the motor command by restricting it to the bounds. """ m_env = bounds_min_max(m_ag, self.conf.m_mins, self.conf.m_maxs) return m_env
[ "def", "compute_motor_command", "(", "self", ",", "m_ag", ")", ":", "m_env", "=", "bounds_min_max", "(", "m_ag", ",", "self", ".", "conf", ".", "m_mins", ",", "self", ".", "conf", ".", "m_maxs", ")", "return", "m_env" ]
Compute the motor command by restricting it to the bounds.
[ "Compute", "the", "motor", "command", "by", "restricting", "it", "to", "the", "bounds", "." ]
python
train
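bounds_min_max is imported from elsewhere in explauto; judging by its use here it is an element-wise clamp, so a drop-in sketch with numpy.clip behaves the same way. The per-dimension bounds arrays are invented for the demo.

import numpy as np

def bounds_min_max(values, mins, maxs):
    # Clamp each motor dimension into [mins[i], maxs[i]].
    return np.clip(values, mins, maxs)

m_ag = np.array([-2.0, 0.5, 3.0])
m_mins = np.array([-1.0, 0.0, 0.0])
m_maxs = np.array([1.0, 1.0, 2.0])
print(bounds_min_max(m_ag, m_mins, m_maxs))  # [-1.   0.5  2. ]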
cournape/audiolab
pavement.py
https://github.com/cournape/audiolab/blob/e4918832c1e52b56428c5f3535ddeb9d9daff9ac/pavement.py#L96-L103
def clean(): """Remove build, dist, egg-info garbage.""" d = ['build', 'dist', 'scikits.audiolab.egg-info', HTML_DESTDIR, PDF_DESTDIR] for i in d: paver.path.path(i).rmtree() (paver.path.path('docs') / options.sphinx.builddir).rmtree()
[ "def", "clean", "(", ")", ":", "d", "=", "[", "'build'", ",", "'dist'", ",", "'scikits.audiolab.egg-info'", ",", "HTML_DESTDIR", ",", "PDF_DESTDIR", "]", "for", "i", "in", "d", ":", "paver", ".", "path", ".", "path", "(", "i", ")", ".", "rmtree", "(", ")", "(", "paver", ".", "path", ".", "path", "(", "'docs'", ")", "/", "options", ".", "sphinx", ".", "builddir", ")", ".", "rmtree", "(", ")" ]
Remove build, dist, egg-info garbage.
[ "Remove", "build", "dist", "egg", "-", "info", "garbage", "." ]
python
train
quora/qcore
qcore/events.py
https://github.com/quora/qcore/blob/fa5cd438eea554db35fd29cbc8dfbde69f09961c/qcore/events.py#L46-L49
def subscribe(self, handler): """Adds a new event handler.""" assert callable(handler), "Invalid handler %s" % handler self.handlers.append(handler)
[ "def", "subscribe", "(", "self", ",", "handler", ")", ":", "assert", "callable", "(", "handler", ")", ",", "\"Invalid handler %s\"", "%", "handler", "self", ".", "handlers", ".", "append", "(", "handler", ")" ]
Adds a new event handler.
[ "Adds", "a", "new", "event", "handler", "." ]
python
train
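The record shows only the subscribe half of the hook; a self-contained sketch of the whole pattern follows, with a trigger method added by assumption (qcore's real class carries more machinery than this).

class EventHook:
    """Minimal publish/subscribe hook in the style of the record above."""

    def __init__(self):
        self.handlers = []

    def subscribe(self, handler):
        assert callable(handler), "Invalid handler %s" % handler
        self.handlers.append(handler)

    def trigger(self, *args):
        # Call every registered handler in subscription order.
        for handler in self.handlers:
            handler(*args)

hook = EventHook()
hook.subscribe(lambda msg: print('got:', msg))
hook.trigger('hello')  # got: hello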
svinota/mdns
mdns/zeroconf.py
https://github.com/svinota/mdns/blob/295f6407132616a0ff7401124b9057d89555f91d/mdns/zeroconf.py#L953-L957
def write_string(self, value, length): """Writes a string to the packet""" format = '!' + str(length) + 's' self.data.append(struct.pack(format, value)) self.size += length
[ "def", "write_string", "(", "self", ",", "value", ",", "length", ")", ":", "format", "=", "'!'", "+", "str", "(", "length", ")", "+", "'s'", "self", ".", "data", ".", "append", "(", "struct", ".", "pack", "(", "format", ",", "value", ")", ")", "self", ".", "size", "+=", "length" ]
Writes a string to the packet
[ "Writes", "a", "string", "to", "the", "packet" ]
python
train
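What write_string appends is fully determined by struct: '!Ns' emits exactly N bytes, NUL-padding shorter values (the '!' changes byte order only for numeric fields elsewhere in the packet). A standalone check:

import struct

length = 8
fmt = '!' + str(length) + 's'
packed = struct.pack(fmt, b'mdns')  # 's' requires bytes on Python 3
print(packed)               # b'mdns\x00\x00\x00\x00'
print(len(packed), length)  # 8 8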
cloud9ers/gurumate
environment/lib/python2.7/site-packages/IPython/frontend/qt/console/console_widget.py
https://github.com/cloud9ers/gurumate/blob/075dc74d1ee62a8c6b7a8bf2b271364f01629d1e/environment/lib/python2.7/site-packages/IPython/frontend/qt/console/console_widget.py#L1892-L1916
def _adjust_scrollbars(self): """ Expands the vertical scrollbar beyond the range set by Qt. """ # This code is adapted from _q_adjustScrollbars in qplaintextedit.cpp # and qtextedit.cpp. document = self._control.document() scrollbar = self._control.verticalScrollBar() viewport_height = self._control.viewport().height() if isinstance(self._control, QtGui.QPlainTextEdit): maximum = max(0, document.lineCount() - 1) step = viewport_height / self._control.fontMetrics().lineSpacing() else: # QTextEdit does not do line-based layout and blocks will not in # general have the same height. Therefore it does not make sense to # attempt to scroll in line height increments. maximum = document.size().height() step = viewport_height diff = maximum - scrollbar.maximum() scrollbar.setRange(0, maximum) scrollbar.setPageStep(step) # Compensate for undesirable scrolling that occurs automatically due to # maximumBlockCount() text truncation. if diff < 0 and document.blockCount() == document.maximumBlockCount(): scrollbar.setValue(scrollbar.value() + diff)
[ "def", "_adjust_scrollbars", "(", "self", ")", ":", "# This code is adapted from _q_adjustScrollbars in qplaintextedit.cpp", "# and qtextedit.cpp.", "document", "=", "self", ".", "_control", ".", "document", "(", ")", "scrollbar", "=", "self", ".", "_control", ".", "verticalScrollBar", "(", ")", "viewport_height", "=", "self", ".", "_control", ".", "viewport", "(", ")", ".", "height", "(", ")", "if", "isinstance", "(", "self", ".", "_control", ",", "QtGui", ".", "QPlainTextEdit", ")", ":", "maximum", "=", "max", "(", "0", ",", "document", ".", "lineCount", "(", ")", "-", "1", ")", "step", "=", "viewport_height", "/", "self", ".", "_control", ".", "fontMetrics", "(", ")", ".", "lineSpacing", "(", ")", "else", ":", "# QTextEdit does not do line-based layout and blocks will not in", "# general have the same height. Therefore it does not make sense to", "# attempt to scroll in line height increments.", "maximum", "=", "document", ".", "size", "(", ")", ".", "height", "(", ")", "step", "=", "viewport_height", "diff", "=", "maximum", "-", "scrollbar", ".", "maximum", "(", ")", "scrollbar", ".", "setRange", "(", "0", ",", "maximum", ")", "scrollbar", ".", "setPageStep", "(", "step", ")", "# Compensate for undesirable scrolling that occurs automatically due to", "# maximumBlockCount() text truncation.", "if", "diff", "<", "0", "and", "document", ".", "blockCount", "(", ")", "==", "document", ".", "maximumBlockCount", "(", ")", ":", "scrollbar", ".", "setValue", "(", "scrollbar", ".", "value", "(", ")", "+", "diff", ")" ]
Expands the vertical scrollbar beyond the range set by Qt.
[ "Expands", "the", "vertical", "scrollbar", "beyond", "the", "range", "set", "by", "Qt", "." ]
python
test
martijnvermaat/monoseq
monoseq/commands.py
https://github.com/martijnvermaat/monoseq/blob/02b92f6aa482ba169787a1a4bcad28372662dc36/monoseq/commands.py#L129-L162
def main(): """ Command line interface. """ parser = argparse.ArgumentParser( description='monoseq: pretty-printing DNA and protein sequences', epilog='If INPUT is in FASTA format, each record is pretty-printed ' 'after printing its name and ANNOTATION (if supplied) is used by ' 'matching chromosome/record name. If INPUT contains a raw sequence, ' 'only the first chromosome in ANNOTATION is used.') parser.add_argument( 'sequence_file', metavar='INPUT', nargs='?', default=sys.stdin, type=argparse.FileType('r'), help='file to read sequence(s) from, ' 'can be in FASTA format (default: standard input)') parser.add_argument( '-b', '--block-length', metavar='LENGTH', dest='block_length', type=int, default=10, help='block length in letters (default: 10)') parser.add_argument( '-l', '--blocks-per-line', metavar='BLOCKS', dest='blocks_per_line', type=int, default=6, help='blocks per line (default: 6)') parser.add_argument( '-a', '--annotation', metavar='POS', dest='annotation', nargs=2, action='append', type=int, help='first and last positions of ' 'subsequence to annotate (allowed more than once)') parser.add_argument( '-e', '--bed', metavar='ANNOTATION', dest='annotation_file', type=argparse.FileType('r'), help='file to read annotation from in ' 'BED format') args = parser.parse_args() pprint(_until_eof(args.sequence_file), annotation=args.annotation, annotation_file=args.annotation_file, block_length=args.block_length, blocks_per_line=args.blocks_per_line)
[ "def", "main", "(", ")", ":", "parser", "=", "argparse", ".", "ArgumentParser", "(", "description", "=", "'monoseq: pretty-printing DNA and protein sequences'", ",", "epilog", "=", "'If INPUT is in FASTA format, each record is pretty-printed '", "'after printing its name and ANNOTATION (if supplied) is used by '", "'matching chromosome/record name. If INPUT contains a raw sequence, '", "'only the first chromosome in ANNOTATION is used.'", ")", "parser", ".", "add_argument", "(", "'sequence_file'", ",", "metavar", "=", "'INPUT'", ",", "nargs", "=", "'?'", ",", "default", "=", "sys", ".", "stdin", ",", "type", "=", "argparse", ".", "FileType", "(", "'r'", ")", ",", "help", "=", "'file to read sequence(s) from, '", "'can be in FASTA format (default: standard input)'", ")", "parser", ".", "add_argument", "(", "'-b'", ",", "'--block-length'", ",", "metavar", "=", "'LENGTH'", ",", "dest", "=", "'block_length'", ",", "type", "=", "int", ",", "default", "=", "10", ",", "help", "=", "'block length in letters (default: 10)'", ")", "parser", ".", "add_argument", "(", "'-l'", ",", "'--blocks-per-line'", ",", "metavar", "=", "'BLOCKS'", ",", "dest", "=", "'blocks_per_line'", ",", "type", "=", "int", ",", "default", "=", "6", ",", "help", "=", "'blocks per line (default: 6)'", ")", "parser", ".", "add_argument", "(", "'-a'", ",", "'--annotation'", ",", "metavar", "=", "'POS'", ",", "dest", "=", "'annotation'", ",", "nargs", "=", "2", ",", "action", "=", "'append'", ",", "type", "=", "int", ",", "help", "=", "'first and last positions of '", "'subsequence to annotate (allowed more than once)'", ")", "parser", ".", "add_argument", "(", "'-e'", ",", "'--bed'", ",", "metavar", "=", "'ANNOTATION'", ",", "dest", "=", "'annotation_file'", ",", "type", "=", "argparse", ".", "FileType", "(", "'r'", ")", ",", "help", "=", "'file to read annotation from in '", "'BED format'", ")", "args", "=", "parser", ".", "parse_args", "(", ")", "pprint", "(", "_until_eof", "(", "args", ".", "sequence_file", ")", ",", "annotation", "=", "args", ".", "annotation", ",", "annotation_file", "=", "args", ".", "annotation_file", ",", "block_length", "=", "args", ".", "block_length", ",", "blocks_per_line", "=", "args", ".", "blocks_per_line", ")" ]
Command line interface.
[ "Command", "line", "interface", "." ]
python
train
lkreidberg/batman
batman/transitmodel.py
https://github.com/lkreidberg/batman/blob/77f3275c12a01ef5b7a89b1aeb6272a7c28452a1/batman/transitmodel.py#L284-L289
def get_t_periastron(self, params): """ Return the time of periastron passage (calculated using `params.t0`). """ phase = self._get_phase(params, "primary") return params.t0 - params.per*phase
[ "def", "get_t_periastron", "(", "self", ",", "params", ")", ":", "phase", "=", "self", ".", "_get_phase", "(", "params", ",", "\"primary\"", ")", "return", "params", ".", "t0", "-", "params", ".", "per", "*", "phase" ]
Return the time of periastron passage (calculated using `params.t0`).
[ "Return", "the", "time", "of", "periastron", "passage", "(", "calculated", "using", "params", ".", "t0", ")", "." ]
python
test
cakebread/yolk
yolk/pypi.py
https://github.com/cakebread/yolk/blob/ee8c9f529a542d9c5eff4fe69b9c7906c802e4d8/yolk/pypi.py#L95-L117
def check_proxy_setting():
    """
    If the environment variable 'HTTP_PROXY' is set, it will most likely be
    in one of these forms:

          proxyhost:8080
          http://proxyhost:8080

    urllib2 requires the proxy URL to start with 'http://'
    This routine does that, and returns the transport for xmlrpc.
    """
    try:
        http_proxy = os.environ['HTTP_PROXY']
    except KeyError:
        return

    if not http_proxy.startswith('http://'):
        match = re.match('(http://)?([-_\.A-Za-z]+):(\d+)', http_proxy)
        #if not match:
        #    raise Exception('Proxy format not recognised: [%s]' % http_proxy)
        os.environ['HTTP_PROXY'] = 'http://%s:%s' % (match.group(2),
                                                     match.group(3))
    return
[ "def", "check_proxy_setting", "(", ")", ":", "try", ":", "http_proxy", "=", "os", ".", "environ", "[", "'HTTP_PROXY'", "]", "except", "KeyError", ":", "return", "if", "not", "http_proxy", ".", "startswith", "(", "'http://'", ")", ":", "match", "=", "re", ".", "match", "(", "'(http://)?([-_\\.A-Za-z]+):(\\d+)'", ",", "http_proxy", ")", "#if not match:", "# raise Exception('Proxy format not recognised: [%s]' % http_proxy)", "os", ".", "environ", "[", "'HTTP_PROXY'", "]", "=", "'http://%s:%s'", "%", "(", "match", ".", "group", "(", "2", ")", ",", "match", ".", "group", "(", "3", ")", ")", "return" ]
If the environment variable 'HTTP_PROXY' is set, it will most likely be
in one of these forms:

proxyhost:8080
http://proxyhost:8080

urllib2 requires the proxy URL to start with 'http://'
This routine does that, and returns the transport for xmlrpc.
[ "If", "the", "environmental", "variable", "HTTP_PROXY", "is", "set", "it", "will", "most", "likely", "be", "in", "one", "of", "these", "forms", ":" ]
python
train
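The normalization hinges on the regex splitting a bare proxyhost:8080 value into host and port; replaying the record's own pattern shows both accepted forms collapsing to the same URL. Note the host character class contains no digits, a limitation of the original pattern.

import re

pattern = r'(http://)?([-_\.A-Za-z]+):(\d+)'

for raw in ('proxyhost:8080', 'http://proxyhost:8080'):
    match = re.match(pattern, raw)
    print(raw, '->', 'http://%s:%s' % (match.group(2), match.group(3)))
# proxyhost:8080 -> http://proxyhost:8080
# http://proxyhost:8080 -> http://proxyhost:8080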
EconForge/dolo
dolo/numeric/discretization/quadrature.py
https://github.com/EconForge/dolo/blob/d91ddf148b009bf79852d9aec70f3a1877e0f79a/dolo/numeric/discretization/quadrature.py#L59-L122
def gauss_hermite_nodes(orders, sigma, mu=None): ''' Computes the weights and nodes for Gauss Hermite quadrature. Parameters ---------- orders : int, list, array The order of integration used in the quadrature routine sigma : array-like If one dimensional, the variance of the normal distribution being approximated. If multidimensional, the variance-covariance matrix of the multivariate normal process being approximated. Returns ------- x : array Quadrature nodes w : array Quadrature weights ''' if isinstance(orders, int): orders = [orders] import numpy if mu is None: mu = numpy.array( [0]*sigma.shape[0] ) herms = [hermgauss(i) for i in orders] points = [ h[0]*numpy.sqrt(2) for h in herms] weights = [ h[1]/numpy.sqrt( numpy.pi) for h in herms] if len(orders) == 1: # Note: if sigma is 2D, x will always be 2D, even if sigma is only 1x1. # print(points.shape) x = numpy.array(points[0])*numpy.sqrt(float(sigma)) if sigma.ndim==2: x = x[:,None] w = weights[0] return [x,w] else: x = cartesian( points).T from functools import reduce w = reduce( numpy.kron, weights) zero_columns = numpy.where(sigma.sum(axis=0)==0)[0] for i in zero_columns: sigma[i,i] = 1.0 C = numpy.linalg.cholesky(sigma) x = numpy.dot(C, x) + mu[:,numpy.newaxis] x = numpy.ascontiguousarray(x.T) for i in zero_columns: x[:,i] =0 return [x,w]
[ "def", "gauss_hermite_nodes", "(", "orders", ",", "sigma", ",", "mu", "=", "None", ")", ":", "if", "isinstance", "(", "orders", ",", "int", ")", ":", "orders", "=", "[", "orders", "]", "import", "numpy", "if", "mu", "is", "None", ":", "mu", "=", "numpy", ".", "array", "(", "[", "0", "]", "*", "sigma", ".", "shape", "[", "0", "]", ")", "herms", "=", "[", "hermgauss", "(", "i", ")", "for", "i", "in", "orders", "]", "points", "=", "[", "h", "[", "0", "]", "*", "numpy", ".", "sqrt", "(", "2", ")", "for", "h", "in", "herms", "]", "weights", "=", "[", "h", "[", "1", "]", "/", "numpy", ".", "sqrt", "(", "numpy", ".", "pi", ")", "for", "h", "in", "herms", "]", "if", "len", "(", "orders", ")", "==", "1", ":", "# Note: if sigma is 2D, x will always be 2D, even if sigma is only 1x1.", "# print(points.shape)", "x", "=", "numpy", ".", "array", "(", "points", "[", "0", "]", ")", "*", "numpy", ".", "sqrt", "(", "float", "(", "sigma", ")", ")", "if", "sigma", ".", "ndim", "==", "2", ":", "x", "=", "x", "[", ":", ",", "None", "]", "w", "=", "weights", "[", "0", "]", "return", "[", "x", ",", "w", "]", "else", ":", "x", "=", "cartesian", "(", "points", ")", ".", "T", "from", "functools", "import", "reduce", "w", "=", "reduce", "(", "numpy", ".", "kron", ",", "weights", ")", "zero_columns", "=", "numpy", ".", "where", "(", "sigma", ".", "sum", "(", "axis", "=", "0", ")", "==", "0", ")", "[", "0", "]", "for", "i", "in", "zero_columns", ":", "sigma", "[", "i", ",", "i", "]", "=", "1.0", "C", "=", "numpy", ".", "linalg", ".", "cholesky", "(", "sigma", ")", "x", "=", "numpy", ".", "dot", "(", "C", ",", "x", ")", "+", "mu", "[", ":", ",", "numpy", ".", "newaxis", "]", "x", "=", "numpy", ".", "ascontiguousarray", "(", "x", ".", "T", ")", "for", "i", "in", "zero_columns", ":", "x", "[", ":", ",", "i", "]", "=", "0", "return", "[", "x", ",", "w", "]" ]
Computes the weights and nodes for Gauss Hermite quadrature. Parameters ---------- orders : int, list, array The order of integration used in the quadrature routine sigma : array-like If one dimensional, the variance of the normal distribution being approximated. If multidimensional, the variance-covariance matrix of the multivariate normal process being approximated. Returns ------- x : array Quadrature nodes w : array Quadrature weights
[ "Computes", "the", "weights", "and", "nodes", "for", "Gauss", "Hermite", "quadrature", "." ]
python
train
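The scaling in the record (nodes times sqrt(2), weights divided by sqrt(pi)) converts a physicists' Gauss-Hermite rule into expectations under a standard normal; a quick check that the rescaled one-dimensional rule reproduces the first two moments exactly:

import numpy as np
from numpy.polynomial.hermite import hermgauss

t, w = hermgauss(5)
x = t * np.sqrt(2)        # nodes for N(0, 1)
w = w / np.sqrt(np.pi)    # weights now sum to 1

print(w.sum())            # 1.0   (total probability)
print((w * x).sum())      # ~0.0  (mean)
print((w * x**2).sum())   # 1.0   (variance)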
brendonh/pyth
pyth/plugins/xhtml/css.py
https://github.com/brendonh/pyth/blob/f2a06fc8dc9b1cfc439ea14252d39b9845a7fa4b/pyth/plugins/xhtml/css.py#L73-L85
def parse_css(self, css):
    """
    Parse a css style sheet into the CSS object.

    For the moment this will only work for very simple css
    documents. It works by using regular expressions to match
    css syntax. This is not bullet proof.
    """
    rulesets = self.ruleset_re.findall(css)
    for (selector, declarations) in rulesets:
        rule = Rule(self.parse_selector(selector))
        rule.properties = self.parse_declarations(declarations)
        self.rules.append(rule)
[ "def", "parse_css", "(", "self", ",", "css", ")", ":", "rulesets", "=", "self", ".", "ruleset_re", ".", "findall", "(", "css", ")", "for", "(", "selector", ",", "declarations", ")", "in", "rulesets", ":", "rule", "=", "Rule", "(", "self", ".", "parse_selector", "(", "selector", ")", ")", "rule", ".", "properties", "=", "self", ".", "parse_declarations", "(", "declarations", ")", "self", ".", "rules", ".", "append", "(", "rule", ")" ]
Parse a css style sheet into the CSS object.

For the moment this will only work for very simple css
documents. It works by using regular expressions to match
css syntax. This is not bullet proof.
[ "Parse", "a", "css", "style", "sheet", "into", "the", "CSS", "object", "." ]
python
train
dyve/django-bootstrap3
bootstrap3/templatetags/bootstrap3.py
https://github.com/dyve/django-bootstrap3/blob/1d4095ba113a1faff228f9592bdad4f0b3aed653/bootstrap3/templatetags/bootstrap3.py#L63-L84
def bootstrap_message_classes(message): """ Return the message classes for a message """ extra_tags = None try: extra_tags = message.extra_tags except AttributeError: pass if not extra_tags: extra_tags = "" classes = [extra_tags] try: level = message.level except AttributeError: pass else: try: classes.append(MESSAGE_LEVEL_CLASSES[level]) except KeyError: classes.append("alert alert-danger") return " ".join(classes).strip()
[ "def", "bootstrap_message_classes", "(", "message", ")", ":", "extra_tags", "=", "None", "try", ":", "extra_tags", "=", "message", ".", "extra_tags", "except", "AttributeError", ":", "pass", "if", "not", "extra_tags", ":", "extra_tags", "=", "\"\"", "classes", "=", "[", "extra_tags", "]", "try", ":", "level", "=", "message", ".", "level", "except", "AttributeError", ":", "pass", "else", ":", "try", ":", "classes", ".", "append", "(", "MESSAGE_LEVEL_CLASSES", "[", "level", "]", ")", "except", "KeyError", ":", "classes", ".", "append", "(", "\"alert alert-danger\"", ")", "return", "\" \"", ".", "join", "(", "classes", ")", ".", "strip", "(", ")" ]
Return the message classes for a message
[ "Return", "the", "message", "classes", "for", "a", "message" ]
python
train
pypa/pipenv
pipenv/vendor/requirementslib/models/url.py
https://github.com/pypa/pipenv/blob/cae8d76c210b9777e90aab76e9c4b0e53bb19cde/pipenv/vendor/requirementslib/models/url.py#L47-L62
def remove_password_from_url(url): # type: (S) -> S """ Given a url, remove the password and insert 4 dashes :param url: The url to replace the authentication in :type url: S :return: The new URL without authentication :rtype: S """ parsed = _get_parsed_url(url) if parsed.auth: auth, _, _ = parsed.auth.partition(":") return parsed._replace(auth="{auth}:----".format(auth=auth)).url return parsed.url
[ "def", "remove_password_from_url", "(", "url", ")", ":", "# type: (S) -> S", "parsed", "=", "_get_parsed_url", "(", "url", ")", "if", "parsed", ".", "auth", ":", "auth", ",", "_", ",", "_", "=", "parsed", ".", "auth", ".", "partition", "(", "\":\"", ")", "return", "parsed", ".", "_replace", "(", "auth", "=", "\"{auth}:----\"", ".", "format", "(", "auth", "=", "auth", ")", ")", ".", "url", "return", "parsed", ".", "url" ]
Given a url, remove the password and insert 4 dashes :param url: The url to replace the authentication in :type url: S :return: The new URL without authentication :rtype: S
[ "Given", "a", "url", "remove", "the", "password", "and", "insert", "4", "dashes" ]
python
train
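_get_parsed_url is requirementslib-internal; the same masking can be sketched with only the standard library. This is an alternative illustration, not the project's implementation.

from urllib.parse import urlsplit, urlunsplit

def mask_password(url):
    parts = urlsplit(url)
    if parts.password is None:
        return url
    host = parts.hostname or ''
    if parts.port:
        host = '%s:%s' % (host, parts.port)
    netloc = '%s:----@%s' % (parts.username or '', host)
    return urlunsplit((parts.scheme, netloc, parts.path, parts.query, parts.fragment))

print(mask_password('https://user:s3cret@pypi.example.org/simple'))
# https://user:----@pypi.example.org/simple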
Alignak-monitoring/alignak
alignak/daemons/arbiterdaemon.py
https://github.com/Alignak-monitoring/alignak/blob/f3c145207e83159b799d3714e4241399c7740a64/alignak/daemons/arbiterdaemon.py#L327-L345
def get_broks_from_satellites(self):  # pragma: no cover - not used!
    """Get broks from all my internal satellite links

    The arbiter gets the broks from ALL the known satellites

    :return: None
    """
    for satellites in [self.conf.brokers, self.conf.schedulers,
                       self.conf.pollers, self.conf.reactionners, self.conf.receivers]:
        for satellite in satellites:
            # Get only if reachable...
            if not satellite.reachable:
                continue
            logger.debug("Getting broks from: %s", satellite.name)
            new_broks = satellite.get_and_clear_broks()
            if new_broks:
                logger.debug("Got %d broks from: %s", len(new_broks), satellite.name)
                for brok in new_broks:
                    self.add(brok)
[ "def", "get_broks_from_satellites", "(", "self", ")", ":", "# pragma: no cover - not used!", "for", "satellites", "in", "[", "self", ".", "conf", ".", "brokers", ",", "self", ".", "conf", ".", "schedulers", ",", "self", ".", "conf", ".", "pollers", ",", "self", ".", "conf", ".", "reactionners", ",", "self", ".", "conf", ".", "receivers", "]", ":", "for", "satellite", "in", "satellites", ":", "# Get only if reachable...", "if", "not", "satellite", ".", "reachable", ":", "continue", "logger", ".", "debug", "(", "\"Getting broks from: %s\"", ",", "satellite", ".", "name", ")", "new_broks", "=", "satellite", ".", "get_and_clear_broks", "(", ")", "if", "new_broks", ":", "logger", ".", "debug", "(", "\"Got %d broks from: %s\"", ",", "len", "(", "new_broks", ")", ",", "satellite", ".", "name", ")", "for", "brok", "in", "new_broks", ":", "self", ".", "add", "(", "brok", ")" ]
Get broks from all my internal satellite links

The arbiter gets the broks from ALL the known satellites

:return: None
[ "Get", "broks", "from", "my", "all", "internal", "satellite", "links" ]
python
train
PyGithub/PyGithub
github/Repository.py
https://github.com/PyGithub/PyGithub/blob/f716df86bbe7dc276c6596699fa9712b61ef974c/github/Repository.py#L1376-L1396
def get_collaborators(self, affiliation=github.GithubObject.NotSet): """ :calls: `GET /repos/:owner/:repo/collaborators <http://developer.github.com/v3/repos/collaborators>`_ :param affiliation: string :rtype: :class:`github.PaginatedList.PaginatedList` of :class:`github.NamedUser.NamedUser` """ url_parameters = dict() allowed_affiliations = ['outside', 'direct', 'all'] if affiliation is not github.GithubObject.NotSet: assert isinstance(affiliation, str), affiliation assert affiliation in allowed_affiliations, \ 'Affiliation can be one of ' + ', '.join(allowed_affiliations) url_parameters['affiliation'] = affiliation return github.PaginatedList.PaginatedList( github.NamedUser.NamedUser, self._requester, self.url + "/collaborators", url_parameters )
[ "def", "get_collaborators", "(", "self", ",", "affiliation", "=", "github", ".", "GithubObject", ".", "NotSet", ")", ":", "url_parameters", "=", "dict", "(", ")", "allowed_affiliations", "=", "[", "'outside'", ",", "'direct'", ",", "'all'", "]", "if", "affiliation", "is", "not", "github", ".", "GithubObject", ".", "NotSet", ":", "assert", "isinstance", "(", "affiliation", ",", "str", ")", ",", "affiliation", "assert", "affiliation", "in", "allowed_affiliations", ",", "'Affiliation can be one of '", "+", "', '", ".", "join", "(", "allowed_affiliations", ")", "url_parameters", "[", "'affiliation'", "]", "=", "affiliation", "return", "github", ".", "PaginatedList", ".", "PaginatedList", "(", "github", ".", "NamedUser", ".", "NamedUser", ",", "self", ".", "_requester", ",", "self", ".", "url", "+", "\"/collaborators\"", ",", "url_parameters", ")" ]
:calls: `GET /repos/:owner/:repo/collaborators <http://developer.github.com/v3/repos/collaborators>`_ :param affiliation: string :rtype: :class:`github.PaginatedList.PaginatedList` of :class:`github.NamedUser.NamedUser`
[ ":", "calls", ":", "GET", "/", "repos", "/", ":", "owner", "/", ":", "repo", "/", "collaborators", "<http", ":", "//", "developer", ".", "github", ".", "com", "/", "v3", "/", "repos", "/", "collaborators", ">", "_", ":", "param", "affiliation", ":", "string", ":", "rtype", ":", ":", "class", ":", "github", ".", "PaginatedList", ".", "PaginatedList", "of", ":", "class", ":", "github", ".", "NamedUser", ".", "NamedUser" ]
python
train
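A usage sketch for the record above; the access token and repository name are placeholders, and affiliation must be one of 'outside', 'direct', or 'all' or the assertion fires.

from github import Github

gh = Github("ghp_xxxxxxxxxxxx")             # placeholder access token
repo = gh.get_repo("octocat/Hello-World")   # placeholder repository

for user in repo.get_collaborators(affiliation="direct"):
    print(user.login)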
KeithSSmith/switcheo-python
switcheo/public_client.py
https://github.com/KeithSSmith/switcheo-python/blob/22f943dea1ad7d692b2bfcd9f0822ec80f4641a6/switcheo/public_client.py#L490-L585
def get_orders(self, address, chain_name='NEO', contract_version='V2', pair=None, from_epoch_time=None,
               order_status=None, before_id=None, limit=50):
    """
    Function to fetch the order history of the given address.
    Execution of this function is as follows::

        get_orders(address=neo_get_scripthash_from_address(address=address))

    The expected return result for this function is as follows::

        [{
            'id': '7cbdf481-6acf-4bf3-a1ed-4773f31e6931',
            'blockchain': 'neo',
            'contract_hash': 'a195c1549e7da61b8da315765a790ac7e7633b82',
            'address': 'fea2b883725ef2d194c9060f606cd0a0468a2c59',
            'side': 'buy',
            'offer_asset_id': 'c56f33fc6ecfcd0c225c4ab356fee59390af8560be0e930faebe74a6daff7c9b',
            'want_asset_id': 'ab38352559b8b203bde5fddfa0b07d8b2525e132',
            'offer_amount': '53718500',
            'want_amount': '110000000000',
            'transfer_amount': '0',
            'priority_gas_amount': '0',
            'use_native_token': True,
            'native_fee_transfer_amount': 0,
            'deposit_txn': None,
            'created_at': '2018-08-03T02:44:47.692Z',
            'status': 'processed',
            'fills': [{
                'id': 'b6f9e530-60ff-46ff-9a71-362097a2025e',
                'offer_hash': '95b3b03be0bff8f58aa86a8dd599700bbaeaffc05078329d5b726b6b995f4cda',
                'offer_asset_id': 'c56f33fc6ecfcd0c225c4ab356fee59390af8560be0e930faebe74a6daff7c9b',
                'want_asset_id': 'ab38352559b8b203bde5fddfa0b07d8b2525e132',
                'fill_amount': '47833882',
                'want_amount': '97950000000',
                'filled_amount': '',
                'fee_asset_id': 'ab38352559b8b203bde5fddfa0b07d8b2525e132',
                'fee_amount': '73462500',
                'price': '0.00048835',
                'txn': None,
                'status': 'success',
                'created_at': '2018-08-03T02:44:47.706Z',
                'transaction_hash': '694745a09e33845ec008cfb79c73986a556e619799ec73274f82b30d85bda13a'
            }],
            'makes': [{
                'id': '357088a0-cc80-49ab-acdd-980589c2d7d8',
                'offer_hash': '420cc85abf02feaceb1bcd91489a0c1949c972d2a9a05ae922fa15d79de80c00',
                'available_amount': '0',
                'offer_asset_id': 'c56f33fc6ecfcd0c225c4ab356fee59390af8560be0e930faebe74a6daff7c9b',
                'offer_amount': '5884618',
                'want_asset_id': 'ab38352559b8b203bde5fddfa0b07d8b2525e132',
                'want_amount': '12050000000',
                'filled_amount': '0.0',
                'txn': None,
                'cancel_txn': None,
                'price': '0.000488350041493775933609958506224066390041494',
                'status': 'cancelled',
                'created_at': '2018-08-03T02:44:47.708Z',
                'transaction_hash': '1afa946546550151bbbd19f197a87cec92e9be58c44ec431cae42076298548b7',
                'trades': []
            }]
        }, {
            ....
        }]

    :param address: The ScriptHash of the address to filter orders for.
    :type address: str
    :param pair: The trading pair to filter order requests on.
    :type pair: str
    :param chain_name: The name of the chain to find orders against.
    :type chain_name: str
    :param contract_version: The version of the contract to find orders against.
    :type contract_version: str
    :param from_epoch_time: Only return orders that are last updated at or after this time.
    :type from_epoch_time: int
    :param order_status: Only return orders that have this status. Possible values are open, cancelled, completed.
    :type order_status: str
    :param before_id: Only return orders that are created before the order with this id.
    :type before_id: str
    :param limit: Only return up to this number of orders (min: 1, max: 200, default: 50).
    :type limit: int
    :return: List of dictionaries containing the orders for the given NEO address and (optional) trading pair. 
""" api_params = { "address": address, "contract_hash": self.get_contracts()[chain_name.upper()][contract_version.upper()], "limit": limit } if pair is not None: api_params['pair'] = pair if from_epoch_time is not None: api_params['from_epoch_time'] = from_epoch_time if order_status is not None: api_params['order_status'] = order_status if before_id is not None: api_params['before_id'] = before_id return self.request.get(path='/orders', params=api_params)
[ "def", "get_orders", "(", "self", ",", "address", ",", "chain_name", "=", "'NEO'", ",", "contract_version", "=", "'V2'", ",", "pair", "=", "None", ",", "from_epoch_time", "=", "None", ",", "order_status", "=", "None", ",", "before_id", "=", "None", ",", "limit", "=", "50", ")", ":", "api_params", "=", "{", "\"address\"", ":", "address", ",", "\"contract_hash\"", ":", "self", ".", "get_contracts", "(", ")", "[", "chain_name", ".", "upper", "(", ")", "]", "[", "contract_version", ".", "upper", "(", ")", "]", ",", "\"limit\"", ":", "limit", "}", "if", "pair", "is", "not", "None", ":", "api_params", "[", "'pair'", "]", "=", "pair", "if", "from_epoch_time", "is", "not", "None", ":", "api_params", "[", "'from_epoch_time'", "]", "=", "from_epoch_time", "if", "order_status", "is", "not", "None", ":", "api_params", "[", "'order_status'", "]", "=", "order_status", "if", "before_id", "is", "not", "None", ":", "api_params", "[", "'before_id'", "]", "=", "before_id", "return", "self", ".", "request", ".", "get", "(", "path", "=", "'/orders'", ",", "params", "=", "api_params", ")" ]
Function to fetch the order history of the given address.
Execution of this function is as follows::

    get_orders(address=neo_get_scripthash_from_address(address=address))

The expected return result for this function is as follows::

    [{
        'id': '7cbdf481-6acf-4bf3-a1ed-4773f31e6931',
        'blockchain': 'neo',
        'contract_hash': 'a195c1549e7da61b8da315765a790ac7e7633b82',
        'address': 'fea2b883725ef2d194c9060f606cd0a0468a2c59',
        'side': 'buy',
        'offer_asset_id': 'c56f33fc6ecfcd0c225c4ab356fee59390af8560be0e930faebe74a6daff7c9b',
        'want_asset_id': 'ab38352559b8b203bde5fddfa0b07d8b2525e132',
        'offer_amount': '53718500',
        'want_amount': '110000000000',
        'transfer_amount': '0',
        'priority_gas_amount': '0',
        'use_native_token': True,
        'native_fee_transfer_amount': 0,
        'deposit_txn': None,
        'created_at': '2018-08-03T02:44:47.692Z',
        'status': 'processed',
        'fills': [{
            'id': 'b6f9e530-60ff-46ff-9a71-362097a2025e',
            'offer_hash': '95b3b03be0bff8f58aa86a8dd599700bbaeaffc05078329d5b726b6b995f4cda',
            'offer_asset_id': 'c56f33fc6ecfcd0c225c4ab356fee59390af8560be0e930faebe74a6daff7c9b',
            'want_asset_id': 'ab38352559b8b203bde5fddfa0b07d8b2525e132',
            'fill_amount': '47833882',
            'want_amount': '97950000000',
            'filled_amount': '',
            'fee_asset_id': 'ab38352559b8b203bde5fddfa0b07d8b2525e132',
            'fee_amount': '73462500',
            'price': '0.00048835',
            'txn': None,
            'status': 'success',
            'created_at': '2018-08-03T02:44:47.706Z',
            'transaction_hash': '694745a09e33845ec008cfb79c73986a556e619799ec73274f82b30d85bda13a'
        }],
        'makes': [{
            'id': '357088a0-cc80-49ab-acdd-980589c2d7d8',
            'offer_hash': '420cc85abf02feaceb1bcd91489a0c1949c972d2a9a05ae922fa15d79de80c00',
            'available_amount': '0',
            'offer_asset_id': 'c56f33fc6ecfcd0c225c4ab356fee59390af8560be0e930faebe74a6daff7c9b',
            'offer_amount': '5884618',
            'want_asset_id': 'ab38352559b8b203bde5fddfa0b07d8b2525e132',
            'want_amount': '12050000000',
            'filled_amount': '0.0',
            'txn': None,
            'cancel_txn': None,
            'price': '0.000488350041493775933609958506224066390041494',
            'status': 'cancelled',
            'created_at': '2018-08-03T02:44:47.708Z',
            'transaction_hash': '1afa946546550151bbbd19f197a87cec92e9be58c44ec431cae42076298548b7',
            'trades': []
        }]
    }, {
        ....
    }]

:param address: The ScriptHash of the address to filter orders for.
:type address: str
:param pair: The trading pair to filter order requests on.
:type pair: str
:param chain_name: The name of the chain to find orders against.
:type chain_name: str
:param contract_version: The version of the contract to find orders against.
:type contract_version: str
:param from_epoch_time: Only return orders that are last updated at or after this time.
:type from_epoch_time: int
:param order_status: Only return orders that have this status. Possible values are open, cancelled, completed.
:type order_status: str
:param before_id: Only return orders that are created before the order with this id.
:type before_id: str
:param limit: Only return up to this number of orders (min: 1, max: 200, default: 50).
:type limit: int
:return: List of dictionaries containing the orders for the given NEO address and (optional) trading pair.
[ "Function", "to", "fetch", "the", "order", "history", "of", "the", "given", "address", ".", "Execution", "of", "this", "function", "is", "as", "follows", "::" ]
python
train
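A usage sketch following the call in the record's own docstring. The import paths and the location of the scripthash helper are assumptions about the package layout rather than verified facts.

# Import paths below are assumed from the package layout, not verified.
from switcheo.public_client import PublicClient
from switcheo.neo.utils import neo_get_scripthash_from_address

client = PublicClient()  # assumed to default to the public Switcheo endpoint
script_hash = neo_get_scripthash_from_address(
    address='APuP9GsSCPJKrexPe49afDV8CQYubZGWd8')  # example NEO address
for order in client.get_orders(address=script_hash, order_status='open', limit=10):
    print(order['id'], order['side'], order['status'])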
geophysics-ubonn/crtomo_tools
lib/crtomo/eitManager.py
https://github.com/geophysics-ubonn/crtomo_tools/blob/27c3e21a557f8df1c12455b96c4c2e00e08a5b4a/lib/crtomo/eitManager.py#L502-L527
def get_measurement_responses(self): """Return a dictionary of sip_responses for the modeled SIP spectra Note that this function does NOT check that each frequency contains the same configurations! Returns ------- responses : dict Dictionary with configurations as keys """ # take configurations from first tomodir configs = self.tds[sorted(self.tds.keys())[0]].configs.configs measurements = self.measurements() responses = {} for config, sip_measurement in zip(configs, np.rollaxis(measurements, 1)): sip = sip_response( frequencies=self.frequencies, rmag=sip_measurement[:, 0], rpha=sip_measurement[:, 1] ) responses[tuple(config)] = sip return responses
[ "def", "get_measurement_responses", "(", "self", ")", ":", "# take configurations from first tomodir", "configs", "=", "self", ".", "tds", "[", "sorted", "(", "self", ".", "tds", ".", "keys", "(", ")", ")", "[", "0", "]", "]", ".", "configs", ".", "configs", "measurements", "=", "self", ".", "measurements", "(", ")", "responses", "=", "{", "}", "for", "config", ",", "sip_measurement", "in", "zip", "(", "configs", ",", "np", ".", "rollaxis", "(", "measurements", ",", "1", ")", ")", ":", "sip", "=", "sip_response", "(", "frequencies", "=", "self", ".", "frequencies", ",", "rmag", "=", "sip_measurement", "[", ":", ",", "0", "]", ",", "rpha", "=", "sip_measurement", "[", ":", ",", "1", "]", ")", "responses", "[", "tuple", "(", "config", ")", "]", "=", "sip", "return", "responses" ]
Return a dictionary of sip_responses for the modeled SIP spectra Note that this function does NOT check that each frequency contains the same configurations! Returns ------- responses : dict Dictionary with configurations as keys
[ "Return", "a", "dictionary", "of", "sip_responses", "for", "the", "modeled", "SIP", "spectra" ]
python
train
django-import-export/django-import-export
import_export/admin.py
https://github.com/django-import-export/django-import-export/blob/127f00d03fd0ad282615b064b7f444a639e6ff0c/import_export/admin.py#L546-L564
def export_admin_action(self, request, queryset): """ Exports the selected rows using file_format. """ export_format = request.POST.get('file_format') if not export_format: messages.warning(request, _('You must select an export format.')) else: formats = self.get_export_formats() file_format = formats[int(export_format)]() export_data = self.get_export_data(file_format, queryset, request=request) content_type = file_format.get_content_type() response = HttpResponse(export_data, content_type=content_type) response['Content-Disposition'] = 'attachment; filename=%s' % ( self.get_export_filename(file_format), ) return response
[ "def", "export_admin_action", "(", "self", ",", "request", ",", "queryset", ")", ":", "export_format", "=", "request", ".", "POST", ".", "get", "(", "'file_format'", ")", "if", "not", "export_format", ":", "messages", ".", "warning", "(", "request", ",", "_", "(", "'You must select an export format.'", ")", ")", "else", ":", "formats", "=", "self", ".", "get_export_formats", "(", ")", "file_format", "=", "formats", "[", "int", "(", "export_format", ")", "]", "(", ")", "export_data", "=", "self", ".", "get_export_data", "(", "file_format", ",", "queryset", ",", "request", "=", "request", ")", "content_type", "=", "file_format", ".", "get_content_type", "(", ")", "response", "=", "HttpResponse", "(", "export_data", ",", "content_type", "=", "content_type", ")", "response", "[", "'Content-Disposition'", "]", "=", "'attachment; filename=%s'", "%", "(", "self", ".", "get_export_filename", "(", "file_format", ")", ",", ")", "return", "response" ]
Exports the selected rows using file_format.
[ "Exports", "the", "selected", "rows", "using", "file_format", "." ]
python
train
telminov/sw-django-utils
djutils/views/helpers.py
https://github.com/telminov/sw-django-utils/blob/43b8491c87a5dd8fce145834c00198f4de14ceb9/djutils/views/helpers.py#L7-L26
def url_path(request, base_url=None, is_full=False, *args, **kwargs):
    """
    Join base_url and some GET parameters into one URL; optionally it can be an absolute URL.

    usage example:
        c['current_url'] = url_path(request, use_urllib=True, is_full=False)
        ...
        <a href="{{ current_url }}">Laboratory number</a>
    """
    if not base_url:
        base_url = request.path

    if is_full:
        protocol = 'https' if request.is_secure() else 'http'
        base_url = '%s://%s%s' % (protocol, request.get_host(), base_url)

    params = url_params(request, *args, **kwargs)
    url = '%s%s' % (base_url, params)

    return url
[ "def", "url_path", "(", "request", ",", "base_url", "=", "None", ",", "is_full", "=", "False", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "if", "not", "base_url", ":", "base_url", "=", "request", ".", "path", "if", "is_full", ":", "protocol", "=", "'https'", "if", "request", ".", "is_secure", "(", ")", "else", "'http'", "base_url", "=", "'%s://%s%s'", "%", "(", "protocol", ",", "request", ".", "get_host", "(", ")", ",", "base_url", ")", "params", "=", "url_params", "(", "request", ",", "*", "args", ",", "*", "*", "kwargs", ")", "url", "=", "'%s%s'", "%", "(", "base_url", ",", "params", ")", "return", "url" ]
Join base_url and some GET parameters into one URL; optionally it can be an absolute URL.

usage example:
    c['current_url'] = url_path(request, use_urllib=True, is_full=False)
    ...
    <a href="{{ current_url }}">Laboratory number</a>
[ "join", "base_url", "and", "some", "GET", "-", "parameters", "to", "one", ";", "it", "could", "be", "absolute", "url", "optionally" ]
python
train
lago-project/lago
lago/providers/libvirt/cpu.py
https://github.com/lago-project/lago/blob/5b8970f7687e063e4619066d5b8093ca997678c9/lago/providers/libvirt/cpu.py#L372-L391
def get_cpu_props(cls, family, arch='x86'): """ Get CPU info XML Args: family(str): CPU family arch(str): CPU arch Returns: lxml.etree.Element: CPU xml Raises: :exc:`~LagoException`: If no such CPU family exists """ cpus = cls.get_cpus_by_arch(arch) try: return cpus.xpath('model[@name="{0}"]'.format(family))[0] except IndexError: raise LagoException('No such CPU family: {0}'.format(family))
[ "def", "get_cpu_props", "(", "cls", ",", "family", ",", "arch", "=", "'x86'", ")", ":", "cpus", "=", "cls", ".", "get_cpus_by_arch", "(", "arch", ")", "try", ":", "return", "cpus", ".", "xpath", "(", "'model[@name=\"{0}\"]'", ".", "format", "(", "family", ")", ")", "[", "0", "]", "except", "IndexError", ":", "raise", "LagoException", "(", "'No such CPU family: {0}'", ".", "format", "(", "family", ")", ")" ]
Get CPU info XML Args: family(str): CPU family arch(str): CPU arch Returns: lxml.etree.Element: CPU xml Raises: :exc:`~LagoException`: If no such CPU family exists
[ "Get", "CPU", "info", "XML" ]
python
train
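The XPath lookup in the record is easy to exercise against a hand-written fragment standing in for libvirt's cpu_map.xml; the real get_cpus_by_arch result has more structure than this toy element.

from lxml import etree

arch = etree.fromstring(
    '<arch name="x86">'
    '<model name="Westmere"/>'
    '<model name="Haswell"/>'
    '</arch>')

family = 'Haswell'
matches = arch.xpath('model[@name="{0}"]'.format(family))
print(matches[0].get('name') if matches else 'No such CPU family: %s' % family)  # Haswell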
rbit/pydtls
dtls/sslconnection.py
https://github.com/rbit/pydtls/blob/41a71fccd990347d0de5f42418fea1e4e733359c/dtls/sslconnection.py#L267-L276
def set_ssl_logging(self, enable=False, func=_ssl_logging_cb): u''' Enable or disable SSL logging :param True | False enable: Enable or disable SSL logging :param func: Callback function for logging ''' if enable: SSL_CTX_set_info_callback(self._ctx, func) else: SSL_CTX_set_info_callback(self._ctx, 0)
[ "def", "set_ssl_logging", "(", "self", ",", "enable", "=", "False", ",", "func", "=", "_ssl_logging_cb", ")", ":", "if", "enable", ":", "SSL_CTX_set_info_callback", "(", "self", ".", "_ctx", ",", "func", ")", "else", ":", "SSL_CTX_set_info_callback", "(", "self", ".", "_ctx", ",", "0", ")" ]
Enable or disable SSL logging

:param True | False enable: Enable or disable SSL logging
:param func: Callback function for logging
[ "u", "Enable", "or", "disable", "SSL", "logging" ]
python
train
phoebe-project/phoebe2
phoebe/dependencies/autofig/axes.py
https://github.com/phoebe-project/phoebe2/blob/e64b8be683977064e2d55dd1b3ac400f64c3e379/phoebe/dependencies/autofig/axes.py#L1185-L1224
def lim(self, lim): """ set lim (limits) """ if lim is None: self._lim = lim return typeerror_msg = "lim must be of type tuple, float, None, or in ['fixed', 'symmetric', 'frame', 'sliding']" if isinstance(lim, str): if lim in ['fixed', 'symmetric', 'frame', 'sliding']: self._lim = lim return else: raise ValueError(typeerror_msg) if isinstance(lim, int): lim = float(lim) if isinstance(lim, float): if lim <= 0.0: raise ValueError("lim cannot be <= 0") self._lim = lim return if not isinstance(lim, tuple): try: lim = tuple(lim) except: raise TypeError(typeerror_msg) if not len(lim)==2: raise ValueError('lim must have length 2') for l in lim: if not (isinstance(l, float) or isinstance(l, int) or l is None): raise ValueError("each item in limit must be of type float, int, or None") self._lim = lim
[ "def", "lim", "(", "self", ",", "lim", ")", ":", "if", "lim", "is", "None", ":", "self", ".", "_lim", "=", "lim", "return", "typeerror_msg", "=", "\"lim must be of type tuple, float, None, or in ['fixed', 'symmetric', 'frame', 'sliding']\"", "if", "isinstance", "(", "lim", ",", "str", ")", ":", "if", "lim", "in", "[", "'fixed'", ",", "'symmetric'", ",", "'frame'", ",", "'sliding'", "]", ":", "self", ".", "_lim", "=", "lim", "return", "else", ":", "raise", "ValueError", "(", "typeerror_msg", ")", "if", "isinstance", "(", "lim", ",", "int", ")", ":", "lim", "=", "float", "(", "lim", ")", "if", "isinstance", "(", "lim", ",", "float", ")", ":", "if", "lim", "<=", "0.0", ":", "raise", "ValueError", "(", "\"lim cannot be <= 0\"", ")", "self", ".", "_lim", "=", "lim", "return", "if", "not", "isinstance", "(", "lim", ",", "tuple", ")", ":", "try", ":", "lim", "=", "tuple", "(", "lim", ")", "except", ":", "raise", "TypeError", "(", "typeerror_msg", ")", "if", "not", "len", "(", "lim", ")", "==", "2", ":", "raise", "ValueError", "(", "'lim must have length 2'", ")", "for", "l", "in", "lim", ":", "if", "not", "(", "isinstance", "(", "l", ",", "float", ")", "or", "isinstance", "(", "l", ",", "int", ")", "or", "l", "is", "None", ")", ":", "raise", "ValueError", "(", "\"each item in limit must be of type float, int, or None\"", ")", "self", ".", "_lim", "=", "lim" ]
set lim (limits)
[ "set", "lim", "(", "limits", ")" ]
python
train
scanny/python-pptx
pptx/text/layout.py
https://github.com/scanny/python-pptx/blob/d6ab8234f8b03953d2f831ff9394b1852db34130/pptx/text/layout.py#L141-L150
def from_ordered_sequence(cls, iseq): """ Return the root of a balanced binary search tree populated with the values in iterable *iseq*. """ seq = list(iseq) # optimize for usually all fits by making longest first bst = cls(seq.pop()) bst._insert_from_ordered_sequence(seq) return bst
[ "def", "from_ordered_sequence", "(", "cls", ",", "iseq", ")", ":", "seq", "=", "list", "(", "iseq", ")", "# optimize for usually all fits by making longest first", "bst", "=", "cls", "(", "seq", ".", "pop", "(", ")", ")", "bst", ".", "_insert_from_ordered_sequence", "(", "seq", ")", "return", "bst" ]
Return the root of a balanced binary search tree populated with the values in iterable *iseq*.
[ "Return", "the", "root", "of", "a", "balanced", "binary", "search", "tree", "populated", "with", "the", "values", "in", "iterable", "*", "iseq", "*", "." ]
python
train
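The record pops the longest value first and leaves the balancing to _insert_from_ordered_sequence, which is not shown; the classic midpoint-recursion version of the same idea follows as a standalone sketch, with a hypothetical node class rather than python-pptx internals.

class Node:
    def __init__(self, value):
        self.value = value
        self.left = None
        self.right = None

def from_ordered_sequence(seq):
    if not seq:
        return None
    mid = len(seq) // 2                          # middle element roots the subtree
    root = Node(seq[mid])
    root.left = from_ordered_sequence(seq[:mid])
    root.right = from_ordered_sequence(seq[mid + 1:])
    return root

tree = from_ordered_sequence(list(range(7)))
print(tree.value, tree.left.value, tree.right.value)  # 3 1 5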
inveniosoftware-contrib/invenio-workflows
invenio_workflows/ext.py
https://github.com/inveniosoftware-contrib/invenio-workflows/blob/9c09fd29509a3db975ac2aba337e6760d8cfd3c2/invenio_workflows/ext.py#L71-L83
def init_app(self, app, entry_point_group='invenio_workflows.workflows', **kwargs): """Flask application initialization.""" app.config.setdefault( "WORKFLOWS_OBJECT_CLASS", "invenio_workflows.api.WorkflowObject" ) state = _WorkflowState( app, entry_point_group=entry_point_group, **kwargs ) app.extensions['invenio-workflows'] = state return state
[ "def", "init_app", "(", "self", ",", "app", ",", "entry_point_group", "=", "'invenio_workflows.workflows'", ",", "*", "*", "kwargs", ")", ":", "app", ".", "config", ".", "setdefault", "(", "\"WORKFLOWS_OBJECT_CLASS\"", ",", "\"invenio_workflows.api.WorkflowObject\"", ")", "state", "=", "_WorkflowState", "(", "app", ",", "entry_point_group", "=", "entry_point_group", ",", "*", "*", "kwargs", ")", "app", ".", "extensions", "[", "'invenio-workflows'", "]", "=", "state", "return", "state" ]
Flask application initialization.
[ "Flask", "application", "initialization", "." ]
python
train
evhub/coconut
coconut/compiler/compiler.py
https://github.com/evhub/coconut/blob/ff97177344e7604e89a0a98a977a87ed2a56fc6d/coconut/compiler/compiler.py#L973-L997
def endline_repl(self, inputstring, reformatting=False, **kwargs): """Add end of line comments.""" out = [] ln = 1 # line number for line in inputstring.splitlines(): add_one_to_ln = False try: if line.endswith(lnwrapper): line, index = line[:-1].rsplit("#", 1) new_ln = self.get_ref("ln", index) if new_ln < ln: raise CoconutInternalException("line number decreased", (ln, new_ln)) ln = new_ln line = line.rstrip() add_one_to_ln = True if not reformatting or add_one_to_ln: # add_one_to_ln here is a proxy for whether there was a ln comment or not line += self.comments.get(ln, "") if not reformatting and line.rstrip() and not line.lstrip().startswith("#"): line += self.ln_comment(ln) except CoconutInternalException as err: complain(err) out.append(line) if add_one_to_ln: ln += 1 return "\n".join(out)
[ "def", "endline_repl", "(", "self", ",", "inputstring", ",", "reformatting", "=", "False", ",", "*", "*", "kwargs", ")", ":", "out", "=", "[", "]", "ln", "=", "1", "# line number", "for", "line", "in", "inputstring", ".", "splitlines", "(", ")", ":", "add_one_to_ln", "=", "False", "try", ":", "if", "line", ".", "endswith", "(", "lnwrapper", ")", ":", "line", ",", "index", "=", "line", "[", ":", "-", "1", "]", ".", "rsplit", "(", "\"#\"", ",", "1", ")", "new_ln", "=", "self", ".", "get_ref", "(", "\"ln\"", ",", "index", ")", "if", "new_ln", "<", "ln", ":", "raise", "CoconutInternalException", "(", "\"line number decreased\"", ",", "(", "ln", ",", "new_ln", ")", ")", "ln", "=", "new_ln", "line", "=", "line", ".", "rstrip", "(", ")", "add_one_to_ln", "=", "True", "if", "not", "reformatting", "or", "add_one_to_ln", ":", "# add_one_to_ln here is a proxy for whether there was a ln comment or not", "line", "+=", "self", ".", "comments", ".", "get", "(", "ln", ",", "\"\"", ")", "if", "not", "reformatting", "and", "line", ".", "rstrip", "(", ")", "and", "not", "line", ".", "lstrip", "(", ")", ".", "startswith", "(", "\"#\"", ")", ":", "line", "+=", "self", ".", "ln_comment", "(", "ln", ")", "except", "CoconutInternalException", "as", "err", ":", "complain", "(", "err", ")", "out", ".", "append", "(", "line", ")", "if", "add_one_to_ln", ":", "ln", "+=", "1", "return", "\"\\n\"", ".", "join", "(", "out", ")" ]
Add end of line comments.
[ "Add", "end", "of", "line", "comments", "." ]
python
train
opendns/pyinvestigate
investigate/investigate.py
https://github.com/opendns/pyinvestigate/blob/a182e73a750f03e906d9b25842d556db8d2fd54f/investigate/investigate.py#L255-L261
def samples(self, anystring, limit=None, offset=None, sortby=None): '''Return an object representing the samples identified by the input domain, IP, or URL''' uri = self._uris['samples'].format(anystring) params = {'limit': limit, 'offset': offset, 'sortby': sortby} return self.get_parse(uri, params)
[ "def", "samples", "(", "self", ",", "anystring", ",", "limit", "=", "None", ",", "offset", "=", "None", ",", "sortby", "=", "None", ")", ":", "uri", "=", "self", ".", "_uris", "[", "'samples'", "]", ".", "format", "(", "anystring", ")", "params", "=", "{", "'limit'", ":", "limit", ",", "'offset'", ":", "offset", ",", "'sortby'", ":", "sortby", "}", "return", "self", ".", "get_parse", "(", "uri", ",", "params", ")" ]
Return an object representing the samples identified by the input domain, IP, or URL
[ "Return", "an", "object", "representing", "the", "samples", "identified", "by", "the", "input", "domain", "IP", "or", "URL" ]
python
train
danielperna84/pyhomematic
pyhomematic/_hm.py
https://github.com/danielperna84/pyhomematic/blob/8b91f3e84c83f05d289c740d507293a0d6759d8e/pyhomematic/_hm.py#L583-L607
def proxyInit(self): """ To receive events the proxy has to tell the CCU / Homegear where to send the events. For that we call the init-method. """ # Call init() with local XML RPC config and interface_id (the name of # the receiver) to receive events. XML RPC server has to be running. for interface_id, proxy in self.proxies.items(): if proxy._skipinit: continue if proxy._callbackip and proxy._callbackport: callbackip = proxy._callbackip callbackport = proxy._callbackport else: callbackip = proxy._localip callbackport = self._localport LOG.debug("ServerThread.proxyInit: init('http://%s:%i', '%s')" % (callbackip, callbackport, interface_id)) try: proxy.init("http://%s:%i" % (callbackip, callbackport), interface_id) LOG.info("Proxy initialized") except Exception as err: LOG.debug("proxyInit: Exception: %s" % str(err)) LOG.warning("Failed to initialize proxy") self.failed_inits.append(interface_id)
[ "def", "proxyInit", "(", "self", ")", ":", "# Call init() with local XML RPC config and interface_id (the name of", "# the receiver) to receive events. XML RPC server has to be running.", "for", "interface_id", ",", "proxy", "in", "self", ".", "proxies", ".", "items", "(", ")", ":", "if", "proxy", ".", "_skipinit", ":", "continue", "if", "proxy", ".", "_callbackip", "and", "proxy", ".", "_callbackport", ":", "callbackip", "=", "proxy", ".", "_callbackip", "callbackport", "=", "proxy", ".", "_callbackport", "else", ":", "callbackip", "=", "proxy", ".", "_localip", "callbackport", "=", "self", ".", "_localport", "LOG", ".", "debug", "(", "\"ServerThread.proxyInit: init('http://%s:%i', '%s')\"", "%", "(", "callbackip", ",", "callbackport", ",", "interface_id", ")", ")", "try", ":", "proxy", ".", "init", "(", "\"http://%s:%i\"", "%", "(", "callbackip", ",", "callbackport", ")", ",", "interface_id", ")", "LOG", ".", "info", "(", "\"Proxy initialized\"", ")", "except", "Exception", "as", "err", ":", "LOG", ".", "debug", "(", "\"proxyInit: Exception: %s\"", "%", "str", "(", "err", ")", ")", "LOG", ".", "warning", "(", "\"Failed to initialize proxy\"", ")", "self", ".", "failed_inits", ".", "append", "(", "interface_id", ")" ]
To receive events the proxy has to tell the CCU / Homegear where to send the events. For that we call the init-method.
[ "To", "receive", "events", "the", "proxy", "has", "to", "tell", "the", "CCU", "/", "Homegear", "where", "to", "send", "the", "events", ".", "For", "that", "we", "call", "the", "init", "-", "method", "." ]
python
train
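The init handshake performed by the loop above is plain XML-RPC, so it can be sketched with the standard library alone (endpoint, callback URL and interface id are placeholder assumptions):

import xmlrpc.client

# Hypothetical CCU/Homegear XML-RPC endpoint.
proxy = xmlrpc.client.ServerProxy('http://ccu.local:2001')

# Register our own XML-RPC server as the event sink; a listener must
# already be running at this callback URL.
proxy.init('http://192.168.1.50:7080', 'my-interface-id')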
saltstack/salt
salt/states/timezone.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/states/timezone.py#L43-L127
def system(name, utc=True): ''' Set the timezone for the system. name The name of the timezone to use (e.g.: America/Denver) utc Whether or not to set the hardware clock to UTC (default is True) ''' ret = {'name': name, 'changes': {}, 'result': None, 'comment': ''} # Set up metadata do_utc = False do_zone = False try: compzone = __salt__['timezone.zone_compare'](name) except (SaltInvocationError, CommandExecutionError) as exc: ret['result'] = False ret['comment'] = ( 'Unable to compare desired timezone \'{0}\' to system timezone: {1}' .format(name, exc) ) return ret myutc = True messages = [] if __salt__['timezone.get_hwclock']() == 'localtime': myutc = False # Check the time zone if compzone is True: ret['result'] = True messages.append('Timezone {0} already set'.format(name)) else: do_zone = True # If the user passed in utc, do a check if utc and utc != myutc: ret['result'] = None do_utc = True elif utc and utc == myutc: messages.append('UTC already set to {0}'.format(name)) if ret['result'] is True: ret['comment'] = ', '.join(messages) return ret if __opts__['test']: messages = [] if compzone is False: messages.append('Timezone {0} needs to be set'.format(name)) if utc and myutc != utc: messages.append('UTC needs to be set to {0}'.format(utc)) ret['comment'] = ', '.join(messages) return ret messages = [] if do_zone: if __salt__['timezone.set_zone'](name): ret['changes']['timezone'] = name messages.append('Set timezone {0}'.format(name)) ret['result'] = True else: messages.append('Failed to set timezone') ret['result'] = False if do_utc: clock = 'localtime' if utc: clock = 'UTC' if __salt__['timezone.set_hwclock'](clock): ret['changes']['utc'] = utc messages.append('Set UTC to {0}'.format(utc)) ret['result'] = True else: messages.append('Failed to set UTC to {0}'.format(utc)) ret['result'] = False ret['comment'] = ', '.join(messages) return ret
[ "def", "system", "(", "name", ",", "utc", "=", "True", ")", ":", "ret", "=", "{", "'name'", ":", "name", ",", "'changes'", ":", "{", "}", ",", "'result'", ":", "None", ",", "'comment'", ":", "''", "}", "# Set up metadata", "do_utc", "=", "False", "do_zone", "=", "False", "try", ":", "compzone", "=", "__salt__", "[", "'timezone.zone_compare'", "]", "(", "name", ")", "except", "(", "SaltInvocationError", ",", "CommandExecutionError", ")", "as", "exc", ":", "ret", "[", "'result'", "]", "=", "False", "ret", "[", "'comment'", "]", "=", "(", "'Unable to compare desired timezone \\'{0}\\' to system timezone: {1}'", ".", "format", "(", "name", ",", "exc", ")", ")", "return", "ret", "myutc", "=", "True", "messages", "=", "[", "]", "if", "__salt__", "[", "'timezone.get_hwclock'", "]", "(", ")", "==", "'localtime'", ":", "myutc", "=", "False", "# Check the time zone", "if", "compzone", "is", "True", ":", "ret", "[", "'result'", "]", "=", "True", "messages", ".", "append", "(", "'Timezone {0} already set'", ".", "format", "(", "name", ")", ")", "else", ":", "do_zone", "=", "True", "# If the user passed in utc, do a check", "if", "utc", "and", "utc", "!=", "myutc", ":", "ret", "[", "'result'", "]", "=", "None", "do_utc", "=", "True", "elif", "utc", "and", "utc", "==", "myutc", ":", "messages", ".", "append", "(", "'UTC already set to {0}'", ".", "format", "(", "name", ")", ")", "if", "ret", "[", "'result'", "]", "is", "True", ":", "ret", "[", "'comment'", "]", "=", "', '", ".", "join", "(", "messages", ")", "return", "ret", "if", "__opts__", "[", "'test'", "]", ":", "messages", "=", "[", "]", "if", "compzone", "is", "False", ":", "messages", ".", "append", "(", "'Timezone {0} needs to be set'", ".", "format", "(", "name", ")", ")", "if", "utc", "and", "myutc", "!=", "utc", ":", "messages", ".", "append", "(", "'UTC needs to be set to {0}'", ".", "format", "(", "utc", ")", ")", "ret", "[", "'comment'", "]", "=", "', '", ".", "join", "(", "messages", ")", "return", "ret", "messages", "=", "[", "]", "if", "do_zone", ":", "if", "__salt__", "[", "'timezone.set_zone'", "]", "(", "name", ")", ":", "ret", "[", "'changes'", "]", "[", "'timezone'", "]", "=", "name", "messages", ".", "append", "(", "'Set timezone {0}'", ".", "format", "(", "name", ")", ")", "ret", "[", "'result'", "]", "=", "True", "else", ":", "messages", ".", "append", "(", "'Failed to set timezone'", ")", "ret", "[", "'result'", "]", "=", "False", "if", "do_utc", ":", "clock", "=", "'localtime'", "if", "utc", ":", "clock", "=", "'UTC'", "if", "__salt__", "[", "'timezone.set_hwclock'", "]", "(", "clock", ")", ":", "ret", "[", "'changes'", "]", "[", "'utc'", "]", "=", "utc", "messages", ".", "append", "(", "'Set UTC to {0}'", ".", "format", "(", "utc", ")", ")", "ret", "[", "'result'", "]", "=", "True", "else", ":", "messages", ".", "append", "(", "'Failed to set UTC to {0}'", ".", "format", "(", "utc", ")", ")", "ret", "[", "'result'", "]", "=", "False", "ret", "[", "'comment'", "]", "=", "', '", ".", "join", "(", "messages", ")", "return", "ret" ]
Set the timezone for the system. name The name of the timezone to use (e.g.: America/Denver) utc Whether or not to set the hardware clock to UTC (default is True)
[ "Set", "the", "timezone", "for", "the", "system", "." ]
python
train
saltstack/salt
salt/cloud/clouds/packet.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/cloud/clouds/packet.py#L464-L489
def list_nodes_min(call=None): ''' Return a list of the VMs that are on the provider. Only a list of VM names and their state is returned. This is the minimum amount of information needed to check for existing VMs. .. versionadded:: 2015.8.0 CLI Example: .. code-block:: bash salt-cloud -f list_nodes_min packet-provider salt-cloud --function list_nodes_min packet-provider ''' if call == 'action': raise SaltCloudSystemExit( 'The list_nodes_min function must be called with -f or --function.' ) ret = {} for device in get_devices_by_token(): ret[device.hostname] = {'id': device.id, 'state': device.state} return ret
[ "def", "list_nodes_min", "(", "call", "=", "None", ")", ":", "if", "call", "==", "'action'", ":", "raise", "SaltCloudSystemExit", "(", "'The list_nodes_min function must be called with -f or --function.'", ")", "ret", "=", "{", "}", "for", "device", "in", "get_devices_by_token", "(", ")", ":", "ret", "[", "device", ".", "hostname", "]", "=", "{", "'id'", ":", "device", ".", "id", ",", "'state'", ":", "device", ".", "state", "}", "return", "ret" ]
Return a list of the VMs that are on the provider. Only a list of VM names and their state is returned. This is the minimum amount of information needed to check for existing VMs. .. versionadded:: 2015.8.0 CLI Example: .. code-block:: bash salt-cloud -f list_nodes_min packet-provider salt-cloud --function list_nodes_min packet-provider
[ "Return", "a", "list", "of", "the", "VMs", "that", "are", "on", "the", "provider", ".", "Only", "a", "list", "of", "VM", "names", "and", "their", "state", "is", "returned", ".", "This", "is", "the", "minimum", "amount", "of", "information", "needed", "to", "check", "for", "existing", "VMs", "." ]
python
train
serge-sans-paille/pythran
pythran/passmanager.py
https://github.com/serge-sans-paille/pythran/blob/7e1b5af2dddfabc50bd2a977f0178be269b349b5/pythran/passmanager.py#L213-L219
def dump(self, backend, node):
    '''High-level function to call a `backend' on a `node'
    to generate the corresponding code.'''
    assert issubclass(backend, Backend)
    b = backend()
    b.attach(self)
    return b.run(node)
[ "def", "dump", "(", "self", ",", "backend", ",", "node", ")", ":", "assert", "issubclass", "(", "backend", ",", "Backend", ")", "b", "=", "backend", "(", ")", "b", ".", "attach", "(", "self", ")", "return", "b", ".", "run", "(", "node", ")" ]
High-level function to call a `backend' on a `node' to generate the corresponding code.
[ "High", "-", "level", "function", "to", "call", "a", "backend", "on", "a", "node", "to", "generate", "code", "for", "module", "module_name", "." ]
python
train
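A hedged usage sketch for dump(), assuming pythran's PassManager and Python backend keep their usual import paths:

import gast
from pythran.passmanager import PassManager
from pythran.backend import Python

# Parse a trivial module and regenerate Python source from its AST.
node = gast.parse("def add(x, y): return x + y")
pm = PassManager("example_module")
print(pm.dump(Python, node))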
PyHDI/Pyverilog
pyverilog/vparser/parser.py
https://github.com/PyHDI/Pyverilog/blob/b852cc5ed6a7a2712e33639f9d9782d0d1587a53/pyverilog/vparser/parser.py#L2177-L2180
def p_single_statement_systemcall(self, p): 'single_statement : systemcall SEMICOLON' p[0] = SingleStatement(p[1], lineno=p.lineno(1)) p.set_lineno(0, p.lineno(1))
[ "def", "p_single_statement_systemcall", "(", "self", ",", "p", ")", ":", "p", "[", "0", "]", "=", "SingleStatement", "(", "p", "[", "1", "]", ",", "lineno", "=", "p", ".", "lineno", "(", "1", ")", ")", "p", ".", "set_lineno", "(", "0", ",", "p", ".", "lineno", "(", "1", ")", ")" ]
single_statement : systemcall SEMICOLON
[ "single_statement", ":", "systemcall", "SEMICOLON" ]
python
train
ocaballeror/LyricFetch
lyricfetch/scraping.py
https://github.com/ocaballeror/LyricFetch/blob/86e62fb39c4c413ad7e1acf5bf0d28c9ed7c8fcb/lyricfetch/scraping.py#L71-L103
def normalize(string, chars_to_remove=None, replacement=''): """ Remove accented characters and such. The argument chars_to_remove is a dictionary that maps a string of chars to a single character. Every occurrence of every character in the first string will be replaced by that second character passed as value. If only one mapping is desired, chars_to_remove may be a single string, but a third parameter, replacement, must be provided to complete the translation. """ ret = string.translate(str.maketrans({ 'á': 'a', 'ä': 'a', 'æ': 'ae', 'é': 'e', 'í': 'i', 'ó': 'o', 'ö': 'o', 'ú': 'u', 'ü': 'u', 'ñ': 'n', })) if isinstance(chars_to_remove, dict): for chars, replace in chars_to_remove.items(): reg = '[' + re.escape(chars) + ']' ret = re.sub(reg, replace, ret) elif isinstance(chars_to_remove, str): reg = '[' + re.escape(chars_to_remove) + ']' ret = re.sub(reg, replacement, ret) return ret
[ "def", "normalize", "(", "string", ",", "chars_to_remove", "=", "None", ",", "replacement", "=", "''", ")", ":", "ret", "=", "string", ".", "translate", "(", "str", ".", "maketrans", "(", "{", "'á':", " ", "a',", "", "'ä':", " ", "a',", "", "'æ':", " ", "ae',", "", "'é':", " ", "e',", "", "'í':", " ", "i',", "", "'ó':", " ", "o',", "", "'ö':", " ", "o',", "", "'ú':", " ", "u',", "", "'ü':", " ", "u',", "", "'ñ':", " ", "n',", "", "}", ")", ")", "if", "isinstance", "(", "chars_to_remove", ",", "dict", ")", ":", "for", "chars", ",", "replace", "in", "chars_to_remove", ".", "items", "(", ")", ":", "reg", "=", "'['", "+", "re", ".", "escape", "(", "chars", ")", "+", "']'", "ret", "=", "re", ".", "sub", "(", "reg", ",", "replace", ",", "ret", ")", "elif", "isinstance", "(", "chars_to_remove", ",", "str", ")", ":", "reg", "=", "'['", "+", "re", ".", "escape", "(", "chars_to_remove", ")", "+", "']'", "ret", "=", "re", ".", "sub", "(", "reg", ",", "replacement", ",", "ret", ")", "return", "ret" ]
Remove accented characters and such. The argument chars_to_remove is a dictionary that maps a string of chars to a single character. Every occurrence of every character in the first string will be replaced by that second character passed as value. If only one mapping is desired, chars_to_remove may be a single string, but a third parameter, replacement, must be provided to complete the translation.
[ "Remove", "accented", "characters", "and", "such", "." ]
python
train
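Because normalize() above is a pure function, both call styles from its docstring are easy to demonstrate:

# Dictionary form: each character of a key string maps to the paired value.
print(normalize('¿héllo, wörld?', {'¿?': '', ',': ';'}))
# -> 'hello; world' (accents folded first, then the removal rules applied)

# String form: one set of characters, one shared replacement.
print(normalize('tic-tac-toe', '-', ' '))
# -> 'tic tac toe'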
tsnaomi/finnsyll
ez_setup.py
https://github.com/tsnaomi/finnsyll/blob/6a42740311688c946a636a3e2304866c7aa041b3/ez_setup.py#L162-L173
def download_file_powershell(url, target): ''' Download the file at url to target using Powershell (which will validate trust). Raise an exception if the command cannot complete. ''' target = os.path.abspath(target) cmd = [ 'powershell', '-Command', '(new-object System.Net.WebClient).DownloadFile(%(url)r, %(target)r)' % vars(), ] subprocess.check_call(cmd)
[ "def", "download_file_powershell", "(", "url", ",", "target", ")", ":", "target", "=", "os", ".", "path", ".", "abspath", "(", "target", ")", "cmd", "=", "[", "'powershell'", ",", "'-Command'", ",", "'(new-object System.Net.WebClient).DownloadFile(%(url)r, %(target)r)'", "%", "vars", "(", ")", ",", "]", "subprocess", ".", "check_call", "(", "cmd", ")" ]
Download the file at url to target using Powershell (which will validate trust). Raise an exception if the command cannot complete.
[ "Download", "the", "file", "at", "url", "to", "target", "using", "Powershell", "(", "which", "will", "validate", "trust", ")", ".", "Raise", "an", "exception", "if", "the", "command", "cannot", "complete", "." ]
python
train
BDNYC/astrodbkit
astrodbkit/astrodb.py
https://github.com/BDNYC/astrodbkit/blob/02c03c5e91aa7c7b0f3b5fa95bcf71e33ffcee09/astrodbkit/astrodb.py#L1277-L1299
def modify(self, SQL, params='', verbose=True): """ Wrapper for CRUD operations to make them distinct from queries and automatically pass commit() method to cursor. Parameters ---------- SQL: str The SQL query to execute params: sequence Mimics the native parameter substitution of sqlite3 verbose: bool Prints the number of modified records """ # Make sure the database isn't locked self.conn.commit() if SQL.lower().startswith('select'): print('Use self.query method for queries.') else: self.list(SQL, params) self.conn.commit() if verbose: print('Number of records modified: {}'.format(self.list("SELECT changes()").fetchone()[0] or '0'))
[ "def", "modify", "(", "self", ",", "SQL", ",", "params", "=", "''", ",", "verbose", "=", "True", ")", ":", "# Make sure the database isn't locked", "self", ".", "conn", ".", "commit", "(", ")", "if", "SQL", ".", "lower", "(", ")", ".", "startswith", "(", "'select'", ")", ":", "print", "(", "'Use self.query method for queries.'", ")", "else", ":", "self", ".", "list", "(", "SQL", ",", "params", ")", "self", ".", "conn", ".", "commit", "(", ")", "if", "verbose", ":", "print", "(", "'Number of records modified: {}'", ".", "format", "(", "self", ".", "list", "(", "\"SELECT changes()\"", ")", ".", "fetchone", "(", ")", "[", "0", "]", "or", "'0'", ")", ")" ]
Wrapper for CRUD operations to make them distinct from queries and automatically pass commit() method to cursor. Parameters ---------- SQL: str The SQL query to execute params: sequence Mimics the native parameter substitution of sqlite3 verbose: bool Prints the number of modified records
[ "Wrapper", "for", "CRUD", "operations", "to", "make", "them", "distinct", "from", "queries", "and", "automatically", "pass", "commit", "()", "method", "to", "cursor", "." ]
python
train
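A usage sketch for modify(), assuming an astrodbkit database file and a sources table (both names are placeholders for your own schema):

from astrodbkit import astrodb

db = astrodb.Database('my_database.db')  # hypothetical path
db.modify("UPDATE sources SET comments=? WHERE id=?", params=('checked', 42))
# prints e.g. "Number of records modified: 1"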
arne-cl/discoursegraphs
src/discoursegraphs/readwrite/paulaxml/paula.py
https://github.com/arne-cl/discoursegraphs/blob/842f0068a3190be2c75905754521b176b25a54fb/src/discoursegraphs/readwrite/paulaxml/paula.py#L588-L595
def gen_paula_etree(paula_id): """ creates an element tree representation of an empty PAULA XML file. """ E = ElementMaker(nsmap=NSMAP) tree = E('paula', version='1.1') tree.append(E('header', paula_id=paula_id)) return E, tree
[ "def", "gen_paula_etree", "(", "paula_id", ")", ":", "E", "=", "ElementMaker", "(", "nsmap", "=", "NSMAP", ")", "tree", "=", "E", "(", "'paula'", ",", "version", "=", "'1.1'", ")", "tree", ".", "append", "(", "E", "(", "'header'", ",", "paula_id", "=", "paula_id", ")", ")", "return", "E", ",", "tree" ]
creates an element tree representation of an empty PAULA XML file.
[ "creates", "an", "element", "tree", "representation", "of", "an", "empty", "PAULA", "XML", "file", "." ]
python
train
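A short sketch of how the (ElementMaker, tree) pair returned above might be used, serializing with lxml (which the module already depends on):

from lxml import etree

E, tree = gen_paula_etree('mydoc.text')
# The same ElementMaker builds further namespaced elements for the tree.
tree.append(E('body', 'Hello PAULA'))
print(etree.tostring(tree, pretty_print=True).decode())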
cloud9ers/gurumate
environment/lib/python2.7/site-packages/IPython/core/completer.py
https://github.com/cloud9ers/gurumate/blob/075dc74d1ee62a8c6b7a8bf2b271364f01629d1e/environment/lib/python2.7/site-packages/IPython/core/completer.py#L873-L933
def rlcomplete(self, text, state): """Return the state-th possible completion for 'text'. This is called successively with state == 0, 1, 2, ... until it returns None. The completion should begin with 'text'. Parameters ---------- text : string Text to perform the completion on. state : int Counter used by readline. """ if state==0: self.line_buffer = line_buffer = self.readline.get_line_buffer() cursor_pos = self.readline.get_endidx() #io.rprint("\nRLCOMPLETE: %r %r %r" % # (text, line_buffer, cursor_pos) ) # dbg # if there is only a tab on a line with only whitespace, instead of # the mostly useless 'do you want to see all million completions' # message, just do the right thing and give the user his tab! # Incidentally, this enables pasting of tabbed text from an editor # (as long as autoindent is off). # It should be noted that at least pyreadline still shows file # completions - is there a way around it? # don't apply this on 'dumb' terminals, such as emacs buffers, so # we don't interfere with their own tab-completion mechanism. if not (self.dumb_terminal or line_buffer.strip()): self.readline.insert_text('\t') sys.stdout.flush() return None # Note: debugging exceptions that may occur in completion is very # tricky, because readline unconditionally silences them. So if # during development you suspect a bug in the completion code, turn # this flag on temporarily by uncommenting the second form (don't # flip the value in the first line, as the '# dbg' marker can be # automatically detected and is used elsewhere). DEBUG = False #DEBUG = True # dbg if DEBUG: try: self.complete(text, line_buffer, cursor_pos) except: import traceback; traceback.print_exc() else: # The normal production version is here # This method computes the self.matches array self.complete(text, line_buffer, cursor_pos) try: return self.matches[state] except IndexError: return None
[ "def", "rlcomplete", "(", "self", ",", "text", ",", "state", ")", ":", "if", "state", "==", "0", ":", "self", ".", "line_buffer", "=", "line_buffer", "=", "self", ".", "readline", ".", "get_line_buffer", "(", ")", "cursor_pos", "=", "self", ".", "readline", ".", "get_endidx", "(", ")", "#io.rprint(\"\\nRLCOMPLETE: %r %r %r\" %", "# (text, line_buffer, cursor_pos) ) # dbg", "# if there is only a tab on a line with only whitespace, instead of", "# the mostly useless 'do you want to see all million completions'", "# message, just do the right thing and give the user his tab!", "# Incidentally, this enables pasting of tabbed text from an editor", "# (as long as autoindent is off).", "# It should be noted that at least pyreadline still shows file", "# completions - is there a way around it?", "# don't apply this on 'dumb' terminals, such as emacs buffers, so", "# we don't interfere with their own tab-completion mechanism.", "if", "not", "(", "self", ".", "dumb_terminal", "or", "line_buffer", ".", "strip", "(", ")", ")", ":", "self", ".", "readline", ".", "insert_text", "(", "'\\t'", ")", "sys", ".", "stdout", ".", "flush", "(", ")", "return", "None", "# Note: debugging exceptions that may occur in completion is very", "# tricky, because readline unconditionally silences them. So if", "# during development you suspect a bug in the completion code, turn", "# this flag on temporarily by uncommenting the second form (don't", "# flip the value in the first line, as the '# dbg' marker can be", "# automatically detected and is used elsewhere).", "DEBUG", "=", "False", "#DEBUG = True # dbg", "if", "DEBUG", ":", "try", ":", "self", ".", "complete", "(", "text", ",", "line_buffer", ",", "cursor_pos", ")", "except", ":", "import", "traceback", "traceback", ".", "print_exc", "(", ")", "else", ":", "# The normal production version is here", "# This method computes the self.matches array", "self", ".", "complete", "(", "text", ",", "line_buffer", ",", "cursor_pos", ")", "try", ":", "return", "self", ".", "matches", "[", "state", "]", "except", "IndexError", ":", "return", "None" ]
Return the state-th possible completion for 'text'. This is called successively with state == 0, 1, 2, ... until it returns None. The completion should begin with 'text'. Parameters ---------- text : string Text to perform the completion on. state : int Counter used by readline.
[ "Return", "the", "state", "-", "th", "possible", "completion", "for", "text", "." ]
python
test
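The state protocol implemented above is readline's standard completer contract; a minimal standalone completer shows the same state == 0 / index pattern:

import readline

WORDS = ['import', 'input', 'int', 'isinstance']

def complete(text, state):
    # readline calls this with state 0, 1, 2, ... until it returns None.
    if state == 0:
        complete.matches = [w for w in WORDS if w.startswith(text)]
    try:
        return complete.matches[state]
    except IndexError:
        return None

readline.set_completer(complete)
readline.parse_and_bind('tab: complete')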
odlgroup/odl
odl/discr/discretization.py
https://github.com/odlgroup/odl/blob/b8443f6aca90e191ba36c91d32253c5a36249a6c/odl/discr/discretization.py#L483-L530
def tspace_type(space, impl, dtype=None): """Select the correct corresponding tensor space. Parameters ---------- space : `LinearSpace` Template space from which to infer an adequate tensor space. If it has a ``field`` attribute, ``dtype`` must be consistent with it. impl : string Implementation backend for the tensor space. dtype : optional Data type which the space is supposed to use. If ``None`` is given, the space type is purely determined from ``space`` and ``impl``. Otherwise, it must be compatible with the field of ``space``. Returns ------- stype : type Space type selected after the space's field, the backend and the data type. """ field_type = type(getattr(space, 'field', None)) if dtype is None: pass elif is_real_floating_dtype(dtype): if field_type is None or field_type == ComplexNumbers: raise TypeError('real floating data type {!r} requires space ' 'field to be of type RealNumbers, got {}' ''.format(dtype, field_type)) elif is_complex_floating_dtype(dtype): if field_type is None or field_type == RealNumbers: raise TypeError('complex floating data type {!r} requires space ' 'field to be of type ComplexNumbers, got {!r}' ''.format(dtype, field_type)) elif is_numeric_dtype(dtype): if field_type == ComplexNumbers: raise TypeError('non-floating data type {!r} requires space field ' 'to be of type RealNumbers, got {!r}' .format(dtype, field_type)) try: return tensor_space_impl(impl) except ValueError: raise NotImplementedError('no corresponding tensor space available ' 'for space {!r} and implementation {!r}' ''.format(space, impl))
[ "def", "tspace_type", "(", "space", ",", "impl", ",", "dtype", "=", "None", ")", ":", "field_type", "=", "type", "(", "getattr", "(", "space", ",", "'field'", ",", "None", ")", ")", "if", "dtype", "is", "None", ":", "pass", "elif", "is_real_floating_dtype", "(", "dtype", ")", ":", "if", "field_type", "is", "None", "or", "field_type", "==", "ComplexNumbers", ":", "raise", "TypeError", "(", "'real floating data type {!r} requires space '", "'field to be of type RealNumbers, got {}'", "''", ".", "format", "(", "dtype", ",", "field_type", ")", ")", "elif", "is_complex_floating_dtype", "(", "dtype", ")", ":", "if", "field_type", "is", "None", "or", "field_type", "==", "RealNumbers", ":", "raise", "TypeError", "(", "'complex floating data type {!r} requires space '", "'field to be of type ComplexNumbers, got {!r}'", "''", ".", "format", "(", "dtype", ",", "field_type", ")", ")", "elif", "is_numeric_dtype", "(", "dtype", ")", ":", "if", "field_type", "==", "ComplexNumbers", ":", "raise", "TypeError", "(", "'non-floating data type {!r} requires space field '", "'to be of type RealNumbers, got {!r}'", ".", "format", "(", "dtype", ",", "field_type", ")", ")", "try", ":", "return", "tensor_space_impl", "(", "impl", ")", "except", "ValueError", ":", "raise", "NotImplementedError", "(", "'no corresponding tensor space available '", "'for space {!r} and implementation {!r}'", "''", ".", "format", "(", "space", ",", "impl", ")", ")" ]
Select the correct corresponding tensor space. Parameters ---------- space : `LinearSpace` Template space from which to infer an adequate tensor space. If it has a ``field`` attribute, ``dtype`` must be consistent with it. impl : string Implementation backend for the tensor space. dtype : optional Data type which the space is supposed to use. If ``None`` is given, the space type is purely determined from ``space`` and ``impl``. Otherwise, it must be compatible with the field of ``space``. Returns ------- stype : type Space type selected after the space's field, the backend and the data type.
[ "Select", "the", "correct", "corresponding", "tensor", "space", "." ]
python
train
openpermissions/perch
perch/user.py
https://github.com/openpermissions/perch/blob/36d78994133918f3c52c187f19e50132960a0156/perch/user.py#L320-L332
def valid(cls, token, **kwargs): """ Check if a token exists and has not expired :param token: the token :return: bool """ try: token = yield cls.get(token) except couch.NotFound: raise Return(False) raise Return(token.ttl >= datetime.utcnow())
[ "def", "valid", "(", "cls", ",", "token", ",", "*", "*", "kwargs", ")", ":", "try", ":", "token", "=", "yield", "cls", ".", "get", "(", "token", ")", "except", "couch", ".", "NotFound", ":", "raise", "Return", "(", "False", ")", "raise", "Return", "(", "token", ".", "ttl", ">=", "datetime", ".", "utcnow", "(", ")", ")" ]
Check if a token exists and has not expired :param token: the token :return: bool
[ "Check", "if", "a", "token", "exists", "and", "has", "not", "expired" ]
python
train
TheHive-Project/TheHive4py
thehive4py/models.py
https://github.com/TheHive-Project/TheHive4py/blob/35762bbd50d8376943268464326b59c752d6241b/thehive4py/models.py#L154-L174
def create(self, title, description, **kwargs): """ Create an instance of the Case class. :param title: Case title. :param description: Case description. :param kwargs: Additional arguments. :return: The created instance. """ case = Case(title=title, description=description, **kwargs) response = self._thehive.create_case(case) # Check for failed authentication if response.status_code == requests.codes.unauthorized: raise TheHiveException("Authentication failed") if self.status_ok(response.status_code): return self(response.json()['id']) else: raise CaseException("Server returned {}: {}".format(response.status_code, response.text))
[ "def", "create", "(", "self", ",", "title", ",", "description", ",", "*", "*", "kwargs", ")", ":", "case", "=", "Case", "(", "title", "=", "title", ",", "description", "=", "description", ",", "*", "*", "kwargs", ")", "response", "=", "self", ".", "_thehive", ".", "create_case", "(", "case", ")", "# Check for failed authentication", "if", "response", ".", "status_code", "==", "requests", ".", "codes", ".", "unauthorized", ":", "raise", "TheHiveException", "(", "\"Authentication failed\"", ")", "if", "self", ".", "status_ok", "(", "response", ".", "status_code", ")", ":", "return", "self", "(", "response", ".", "json", "(", ")", "[", "'id'", "]", ")", "else", ":", "raise", "CaseException", "(", "\"Server returned {}: {}\"", ".", "format", "(", "response", ".", "status_code", ",", "response", ".", "text", ")", ")" ]
Create an instance of the Case class. :param title: Case title. :param description: Case description. :param kwargs: Additional arguments. :return: The created instance.
[ "Create", "an", "instance", "of", "the", "Case", "class", ".", ":", "param", "title", ":", "Case", "title", ".", ":", "param", "description", ":", "Case", "description", ".", ":", "param", "kwargs", ":", "Additional", "arguments", "." ]
python
train
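A hedged end-to-end sketch using TheHive4py's client, which the self._thehive.create_case call above relies on (URL and key are placeholders):

from thehive4py.api import TheHiveApi
from thehive4py.models import Case

api = TheHiveApi('http://localhost:9000', 'api-key-placeholder')
case = Case(title='Suspicious login', description='Several failed logins observed')
response = api.create_case(case)
print(response.status_code, response.json())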
f3at/feat
src/feat/agencies/net/broker.py
https://github.com/f3at/feat/blob/15da93fc9d6ec8154f52a9172824e25821195ef8/src/feat/agencies/net/broker.py#L280-L285
def has_slave(self):
    '''Returns True/False whether we have a slave agency
    which is not running standalone.'''
    slave = first(x for x in self.slaves.itervalues()
                  if not x.is_standalone)
    return slave is not None
[ "def", "has_slave", "(", "self", ")", ":", "slave", "=", "first", "(", "x", "for", "x", "in", "self", ".", "slaves", ".", "itervalues", "(", ")", "if", "not", "x", ".", "is_standalone", ")", "return", "slave", "is", "not", "None" ]
Returns True/False whether we have a slave agency which is not running standalone.
[ "Returns", "True", "/", "False", "wether", "we", "have", "a", "slave", "agency", "which", "is", "not", "standalone", "running", "." ]
python
train
osrg/ryu
ryu/lib/packet/vlan.py
https://github.com/osrg/ryu/blob/6f906e72c92e10bd0264c9b91a2f7bb85b97780c/ryu/lib/packet/vlan.py#L79-L88
def get_packet_type(cls, type_):
    """Override method for the Length/Type field (self.ethertype).
    The Length/Type field means Length or Type interpretation,
    same as ethernet IEEE802.3.
    If the value of the Length/Type field is less than or equal to
    1500 decimal (05DC hexadecimal), it means Length interpretation
    and the frame is passed to the LLC sublayer."""
    if type_ <= ether.ETH_TYPE_IEEE802_3:
        type_ = ether.ETH_TYPE_IEEE802_3
    return cls._TYPES.get(type_)
[ "def", "get_packet_type", "(", "cls", ",", "type_", ")", ":", "if", "type_", "<=", "ether", ".", "ETH_TYPE_IEEE802_3", ":", "type_", "=", "ether", ".", "ETH_TYPE_IEEE802_3", "return", "cls", ".", "_TYPES", ".", "get", "(", "type_", ")" ]
Override method for the Length/Type field (self.ethertype). The Length/Type field means Length or Type interpretation, same as ethernet IEEE802.3. If the value of the Length/Type field is less than or equal to 1500 decimal (05DC hexadecimal), it means Length interpretation and the frame is passed to the LLC sublayer.
[ "Override", "method", "for", "the", "Length", "/", "Type", "field", "(", "self", ".", "ethertype", ")", ".", "The", "Length", "/", "Type", "field", "means", "Length", "or", "Type", "interpretation", "same", "as", "ethernet", "IEEE802", ".", "3", ".", "If", "the", "value", "of", "Length", "/", "Type", "field", "is", "less", "than", "or", "equal", "to", "1500", "decimal", "(", "05DC", "hexadecimal", ")", "it", "means", "Length", "interpretation", "and", "be", "passed", "to", "the", "LLC", "sublayer", "." ]
python
train
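The 1500/0x05DC threshold is easy to exercise in isolation; a sketch of the same dispatch with plain constants (the type table is illustrative, not Ryu's real registry):

ETH_TYPE_IEEE802_3 = 0x05dc  # 1500: at or below, the field is a length

TYPES = {0x0800: 'ipv4', 0x86dd: 'ipv6', ETH_TYPE_IEEE802_3: 'llc'}

def packet_type(type_):
    # Values <= 1500 mean IEEE 802.3 length, so hand off to LLC parsing.
    if type_ <= ETH_TYPE_IEEE802_3:
        type_ = ETH_TYPE_IEEE802_3
    return TYPES.get(type_)

print(packet_type(0x0800), packet_type(46))  # ipv4 llc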
BerkeleyAutomation/perception
perception/kinect2_sensor.py
https://github.com/BerkeleyAutomation/perception/blob/03d9b37dd6b66896cdfe173905c9413c8c3c5df6/perception/kinect2_sensor.py#L673-L709
def frames(self):
    """Retrieve the next frame from the image directory and convert it to a ColorImage,
    a DepthImage, and an IrImage.

    Returns
    -------
    :obj:`tuple` of :obj:`ColorImage`, :obj:`DepthImage`, :obj:`IrImage`, :obj:`numpy.ndarray`
        The ColorImage, DepthImage, and IrImage of the current frame.

    Raises
    ------
    RuntimeError
        If the Kinect stream is not running or if all images in the directory have been used.
    """
    if not self._running:
        raise RuntimeError('VirtualKinect2 device pointing to %s not running. Cannot read frames' %(self._path_to_images))
    if self._im_index > self._num_images:
        raise RuntimeError('VirtualKinect2 device is out of images')

    # read images
    color_filename = os.path.join(self._path_to_images, 'color_%d.png' %(self._im_index))
    color_im = ColorImage.open(color_filename, frame=self._frame)
    depth_filename = os.path.join(self._path_to_images, 'depth_%d.npy' %(self._im_index))
    depth_im = DepthImage.open(depth_filename, frame=self._frame)
    ir_filename = os.path.join(self._path_to_images, 'ir_%d.npy' %(self._im_index))
    ir_im = None
    if os.path.exists(ir_filename):
        ir_im = IrImage.open(ir_filename, frame=self._frame)
    self._im_index += 1
    return color_im, depth_im, ir_im
[ "def", "frames", "(", "self", ")", ":", "if", "not", "self", ".", "_running", ":", "raise", "RuntimeError", "(", "'VirtualKinect2 device pointing to %s not runnning. Cannot read frames'", "%", "(", "self", ".", "_path_to_images", ")", ")", "if", "self", ".", "_im_index", ">", "self", ".", "_num_images", ":", "raise", "RuntimeError", "(", "'VirtualKinect2 device is out of images'", ")", "# read images", "color_filename", "=", "os", ".", "path", ".", "join", "(", "self", ".", "_path_to_images", ",", "'color_%d.png'", "%", "(", "self", ".", "_im_index", ")", ")", "color_im", "=", "ColorImage", ".", "open", "(", "color_filename", ",", "frame", "=", "self", ".", "_frame", ")", "depth_filename", "=", "os", ".", "path", ".", "join", "(", "self", ".", "_path_to_images", ",", "'depth_%d.npy'", "%", "(", "self", ".", "_im_index", ")", ")", "depth_im", "=", "DepthImage", ".", "open", "(", "depth_filename", ",", "frame", "=", "self", ".", "_frame", ")", "ir_filename", "=", "os", ".", "path", ".", "join", "(", "self", ".", "_path_to_images", ",", "'ir_%d.npy'", "%", "(", "self", ".", "_im_index", ")", ")", "ir_im", "=", "None", "if", "os", ".", "path", ".", "exists", "(", "ir_filename", ")", ":", "ir_im", "=", "IrImage", ".", "open", "(", "ir_filename", ",", "frame", "=", "self", ".", "_frame", ")", "self", ".", "_im_index", "+=", "1", "return", "color_im", ",", "depth_im", ",", "ir_im" ]
Retrieve the next frame from the image directory and convert it to a ColorImage, a DepthImage, and an IrImage.

Returns
-------
:obj:`tuple` of :obj:`ColorImage`, :obj:`DepthImage`, :obj:`IrImage`, :obj:`numpy.ndarray`
    The ColorImage, DepthImage, and IrImage of the current frame.

Raises
------
RuntimeError
    If the Kinect stream is not running or if all images in the directory have been used.
[ "Retrieve", "the", "next", "frame", "from", "the", "image", "directory", "and", "convert", "it", "to", "a", "ColorImage", "a", "DepthImage", "and", "an", "IrImage", "." ]
python
train
python-visualization/branca
branca/element.py
https://github.com/python-visualization/branca/blob/4e89e88a5a7ff3586f0852249c2c125f72316da8/branca/element.py#L79-L97
def get_bounds(self):
    """Computes the bounds of the object and all its children
    in the form [[lat_min, lon_min], [lat_max, lon_max]].
    """
    bounds = self._get_self_bounds()

    for child in self._children.values():
        child_bounds = child.get_bounds()
        bounds = [
            [
                none_min(bounds[0][0], child_bounds[0][0]),
                none_min(bounds[0][1], child_bounds[0][1]),
            ],
            [
                none_max(bounds[1][0], child_bounds[1][0]),
                none_max(bounds[1][1], child_bounds[1][1]),
            ],
        ]
    return bounds
[ "def", "get_bounds", "(", "self", ")", ":", "bounds", "=", "self", ".", "_get_self_bounds", "(", ")", "for", "child", "in", "self", ".", "_children", ".", "values", "(", ")", ":", "child_bounds", "=", "child", ".", "get_bounds", "(", ")", "bounds", "=", "[", "[", "none_min", "(", "bounds", "[", "0", "]", "[", "0", "]", ",", "child_bounds", "[", "0", "]", "[", "0", "]", ")", ",", "none_min", "(", "bounds", "[", "0", "]", "[", "1", "]", ",", "child_bounds", "[", "0", "]", "[", "1", "]", ")", ",", "]", ",", "[", "none_max", "(", "bounds", "[", "1", "]", "[", "0", "]", ",", "child_bounds", "[", "1", "]", "[", "0", "]", ")", ",", "none_max", "(", "bounds", "[", "1", "]", "[", "1", "]", ",", "child_bounds", "[", "1", "]", "[", "1", "]", ")", ",", "]", ",", "]", "return", "bounds" ]
Computes the bounds of the object and all its children in the form [[lat_min, lon_min], [lat_max, lon_max]].
[ "Computes", "the", "bounds", "of", "the", "object", "and", "all", "it", "s", "children", "in", "the", "form", "[[", "lat_min", "lon_min", "]", "[", "lat_max", "lon_max", "]]", "." ]
python
train
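The recursive merge above relies on None-tolerant min/max helpers; a self-contained sketch of the same fold over child bounds:

def none_min(a, b):
    return b if a is None else a if b is None else min(a, b)

def none_max(a, b):
    return b if a is None else a if b is None else max(a, b)

def merge_bounds(boxes):
    # Fold [[lat_min, lon_min], [lat_max, lon_max]] boxes, ignoring Nones.
    bounds = [[None, None], [None, None]]
    for lo, hi in boxes:
        bounds = [[none_min(bounds[0][0], lo[0]), none_min(bounds[0][1], lo[1])],
                  [none_max(bounds[1][0], hi[0]), none_max(bounds[1][1], hi[1])]]
    return bounds

print(merge_bounds([[[0, 1], [2, 3]], [[-1, None], [5, None]]]))
# [[-1, 1], [5, 3]]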
cs50/check50
check50/__main__.py
https://github.com/cs50/check50/blob/42c1f0c36baa6a24f69742d74551a9ea7a5ceb33/check50/__main__.py#L134-L144
def install_translations(config): """Add check translations according to ``config`` as a fallback to existing translations""" if not config: return from . import _translation checks_translation = gettext.translation(domain=config["domain"], localedir=internal.check_dir / config["localedir"], fallback=True) _translation.add_fallback(checks_translation)
[ "def", "install_translations", "(", "config", ")", ":", "if", "not", "config", ":", "return", "from", ".", "import", "_translation", "checks_translation", "=", "gettext", ".", "translation", "(", "domain", "=", "config", "[", "\"domain\"", "]", ",", "localedir", "=", "internal", ".", "check_dir", "/", "config", "[", "\"localedir\"", "]", ",", "fallback", "=", "True", ")", "_translation", ".", "add_fallback", "(", "checks_translation", ")" ]
Add check translations according to ``config`` as a fallback to existing translations
[ "Add", "check", "translations", "according", "to", "config", "as", "a", "fallback", "to", "existing", "translations" ]
python
train
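The fallback chaining used above is standard gettext; a minimal sketch of the same pattern with placeholder domain and locale directories:

import gettext

# Primary catalog for the application itself.
base = gettext.translation('check50', localedir='locale', fallback=True)

# Checks-specific catalog, consulted only when the primary lacks an entry.
extra = gettext.translation('checks', localedir='checks/locale', fallback=True)
base.add_fallback(extra)

_ = base.gettext
print(_('some message'))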
fedora-infra/fedmsg_meta_fedora_infrastructure
fedmsg_meta_fedora_infrastructure/conglomerators/bodhi/overrides.py
https://github.com/fedora-infra/fedmsg_meta_fedora_infrastructure/blob/85bf4162692e3042c7dbcc12dfafaca4764b4ae6/fedmsg_meta_fedora_infrastructure/conglomerators/bodhi/overrides.py#L11-L17
def matches(self, a, b, **config): """ The message must match by username """ submitter_a = a['msg']['override']['submitter']['name'] submitter_b = b['msg']['override']['submitter']['name'] if submitter_a != submitter_b: return False return True
[ "def", "matches", "(", "self", ",", "a", ",", "b", ",", "*", "*", "config", ")", ":", "submitter_a", "=", "a", "[", "'msg'", "]", "[", "'override'", "]", "[", "'submitter'", "]", "[", "'name'", "]", "submitter_b", "=", "b", "[", "'msg'", "]", "[", "'override'", "]", "[", "'submitter'", "]", "[", "'name'", "]", "if", "submitter_a", "!=", "submitter_b", ":", "return", "False", "return", "True" ]
The message must match by username
[ "The", "message", "must", "match", "by", "username" ]
python
train
Unidata/siphon
siphon/catalog.py
https://github.com/Unidata/siphon/blob/53fb0d84fbce1c18c8e81c9e68bc81620ee0a6ac/siphon/catalog.py#L652-L705
def access_with_service(self, service, use_xarray=None): """Access the dataset using a particular service. Return an Python object capable of communicating with the server using the particular service. For instance, for 'HTTPServer' this is a file-like object capable of HTTP communication; for OPENDAP this is a netCDF4 dataset. Parameters ---------- service : str The name of the service for accessing the dataset Returns ------- An instance appropriate for communicating using ``service``. """ service = CaseInsensitiveStr(service) if service == 'CdmRemote': if use_xarray: from .cdmr.xarray_support import CDMRemoteStore try: import xarray as xr provider = lambda url: xr.open_dataset(CDMRemoteStore(url)) # noqa: E731 except ImportError: raise ImportError('CdmRemote access needs xarray to be installed.') else: from .cdmr import Dataset as CDMRDataset provider = CDMRDataset elif service == 'OPENDAP': if use_xarray: try: import xarray as xr provider = xr.open_dataset except ImportError: raise ImportError('xarray to be installed if `use_xarray` is True.') else: try: from netCDF4 import Dataset as NC4Dataset provider = NC4Dataset except ImportError: raise ImportError('OPENDAP access needs netCDF4-python to be installed.') elif service in self.ncssServiceNames: from .ncss import NCSS provider = NCSS elif service == 'HTTPServer': provider = session_manager.urlopen else: raise ValueError(service + ' is not an access method supported by Siphon') try: return provider(self.access_urls[service]) except KeyError: raise ValueError(service + ' is not available for this dataset')
[ "def", "access_with_service", "(", "self", ",", "service", ",", "use_xarray", "=", "None", ")", ":", "service", "=", "CaseInsensitiveStr", "(", "service", ")", "if", "service", "==", "'CdmRemote'", ":", "if", "use_xarray", ":", "from", ".", "cdmr", ".", "xarray_support", "import", "CDMRemoteStore", "try", ":", "import", "xarray", "as", "xr", "provider", "=", "lambda", "url", ":", "xr", ".", "open_dataset", "(", "CDMRemoteStore", "(", "url", ")", ")", "# noqa: E731", "except", "ImportError", ":", "raise", "ImportError", "(", "'CdmRemote access needs xarray to be installed.'", ")", "else", ":", "from", ".", "cdmr", "import", "Dataset", "as", "CDMRDataset", "provider", "=", "CDMRDataset", "elif", "service", "==", "'OPENDAP'", ":", "if", "use_xarray", ":", "try", ":", "import", "xarray", "as", "xr", "provider", "=", "xr", ".", "open_dataset", "except", "ImportError", ":", "raise", "ImportError", "(", "'xarray to be installed if `use_xarray` is True.'", ")", "else", ":", "try", ":", "from", "netCDF4", "import", "Dataset", "as", "NC4Dataset", "provider", "=", "NC4Dataset", "except", "ImportError", ":", "raise", "ImportError", "(", "'OPENDAP access needs netCDF4-python to be installed.'", ")", "elif", "service", "in", "self", ".", "ncssServiceNames", ":", "from", ".", "ncss", "import", "NCSS", "provider", "=", "NCSS", "elif", "service", "==", "'HTTPServer'", ":", "provider", "=", "session_manager", ".", "urlopen", "else", ":", "raise", "ValueError", "(", "service", "+", "' is not an access method supported by Siphon'", ")", "try", ":", "return", "provider", "(", "self", ".", "access_urls", "[", "service", "]", ")", "except", "KeyError", ":", "raise", "ValueError", "(", "service", "+", "' is not available for this dataset'", ")" ]
Access the dataset using a particular service.

Return a Python object capable of communicating with the server using the particular service. For instance, for 'HTTPServer' this is a file-like object capable of HTTP communication; for OPENDAP this is a netCDF4 dataset.

Parameters
----------
service : str
    The name of the service for accessing the dataset

Returns
-------
An instance appropriate for communicating using ``service``.
[ "Access", "the", "dataset", "using", "a", "particular", "service", "." ]
python
train
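A hedged end-to-end sketch with siphon's TDSCatalog (the catalog URL is a placeholder, and OPENDAP access additionally needs netCDF4-python installed):

from siphon.catalog import TDSCatalog

cat = TDSCatalog('https://thredds.example.org/thredds/catalog.xml')
dataset = list(cat.datasets.values())[0]

# Returns a netCDF4.Dataset for this service name.
nc = dataset.access_with_service('OPENDAP')
print(list(nc.variables))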
mikedh/trimesh
trimesh/transformations.py
https://github.com/mikedh/trimesh/blob/25e059bf6d4caa74f62ffd58ce4f61a90ee4e518/trimesh/transformations.py#L2033-L2052
def spherical_matrix(theta, phi, axes='sxyz'):
    """
    Given a spherical coordinate vector, find the rotation that will
    transform a [0,0,1] vector to those coordinates

    Parameters
    -----------
    theta: float, rotation angle in radians
    phi:   float, rotation angle in radians

    Returns
    ----------
    matrix: (4,4) rotation matrix where the following will
            be a cartesian vector in the direction of the
            input spherical coordinates:
                np.dot(matrix, [0,0,1,0])
    """
    result = euler_matrix(0.0, phi, theta, axes=axes)
    return result
[ "def", "spherical_matrix", "(", "theta", ",", "phi", ",", "axes", "=", "'sxyz'", ")", ":", "result", "=", "euler_matrix", "(", "0.0", ",", "phi", ",", "theta", ",", "axes", "=", "axes", ")", "return", "result" ]
Given a spherical coordinate vector, find the rotation that will transform a [0,0,1] vector to those coordinates

Parameters
-----------
theta: float, rotation angle in radians
phi: float, rotation angle in radians

Returns
----------
matrix: (4,4) rotation matrix where the following will be a cartesian vector in the direction of the input spherical coordinates: np.dot(matrix, [0,0,1,0])
[ "Give", "a", "spherical", "coordinate", "vector", "find", "the", "rotation", "that", "will", "transform", "a", "[", "0", "0", "1", "]", "vector", "to", "those", "coordinates" ]
python
train
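A quick numerical check of the property stated in the docstring above:

import numpy as np
from trimesh.transformations import spherical_matrix

theta, phi = 0.3, 1.1
matrix = spherical_matrix(theta, phi)

# Rotating +Z should yield a unit vector pointing at (theta, phi).
direction = np.dot(matrix, [0, 0, 1, 0])
print(direction[:3], np.linalg.norm(direction[:3]))  # norm is 1.0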
polyaxon/polyaxon
polyaxon/query/parser.py
https://github.com/polyaxon/polyaxon/blob/e1724f0756b1a42f9e7aa08a976584a84ef7f016/polyaxon/query/parser.py#L180-L199
def parse_expression(expression: str) -> Tuple[str, str]:
    """Base parsing for expressions.

    Every expression must follow a basic format:

        `name:[modifier|operator]operation[*[operator]operation]`

    So this parser just splits the expression into: field name, operation.
    """
    try:
        _expression = expression.strip()
        name, operation = _expression.split(':')
        name = name.strip()
        operation = operation.strip()
        if not name or not operation:
            raise ValueError
    except (ValueError, AttributeError):
        raise QueryParserException('Expression is not valid, it must be formatted as '
                                   'name:operation, '
                                   'Expression: {}'.format(expression))
    return name, operation
[ "def", "parse_expression", "(", "expression", ":", "str", ")", "->", "Tuple", "[", "str", ",", "str", "]", ":", "try", ":", "_expression", "=", "expression", ".", "strip", "(", ")", "name", ",", "operation", "=", "_expression", ".", "split", "(", "':'", ")", "name", "=", "name", ".", "strip", "(", ")", "operation", "=", "operation", ".", "strip", "(", ")", "if", "not", "name", "or", "not", "operation", ":", "raise", "ValueError", "except", "(", "ValueError", ",", "AttributeError", ")", ":", "raise", "QueryParserException", "(", "'Expression is not valid, it must be formatted as '", "'name:operation, '", "'Expression: {}'", ".", "format", "(", "expression", ")", ")", "return", "name", ",", "operation" ]
Base parsing for expressions.

Every expression must follow a basic format:

    `name:[modifier|operator]operation[*[operator]operation]`

So this parser just splits the expression into: field name, operation.
[ "Base", "parsing", "for", "expressions", "." ]
python
train
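Two quick calls against the parser above, one valid and one that takes the error path:

name, operation = parse_expression('metric.loss: <0.3 ')
print(name, '|', operation)  # metric.loss | <0.3

try:
    parse_expression('no_colon_here')
except QueryParserException as err:
    print(err)  # Expression is not valid, it must be formatted as name:operation, ...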
gbiggs/rtsprofile
rtsprofile/targets.py
https://github.com/gbiggs/rtsprofile/blob/fded6eddcb0b25fe9808b1b12336a4413ea00905/rtsprofile/targets.py#L232-L236
def to_dict(self): '''Save this target port into a dictionary.''' d = super(TargetPort, self).to_dict() d['portName'] = self.port_name return d
[ "def", "to_dict", "(", "self", ")", ":", "d", "=", "super", "(", "TargetPort", ",", "self", ")", ".", "to_dict", "(", ")", "d", "[", "'portName'", "]", "=", "self", ".", "port_name", "return", "d" ]
Save this target port into a dictionary.
[ "Save", "this", "target", "port", "into", "a", "dictionary", "." ]
python
train
shaypal5/pdutil
pdutil/display/display.py
https://github.com/shaypal5/pdutil/blob/231059634643af2558d22070f89767410978cf56/pdutil/display/display.py#L80-L106
def df_to_html(df, percentage_columns=None):  # pragma: no cover
    """Return a nicely formatted HTML code string for the given dataframe.

    Arguments
    ---------
    df : pandas.DataFrame
        A dataframe object.
    percentage_columns : iterable
        A list of column names to be displayed with a percentage sign.

    Returns
    -------
    str
        A nicely formatted string for the given dataframe.
    """
    big_dataframe_setup()
    try:
        res = '<br><h2> {} </h2>'.format(df.name)
    except AttributeError:
        res = ''
    df.style.set_properties(**{'text-align': 'center'})
    res += df.to_html(formatters=_formatters_dict(
        input_df=df,
        percentage_columns=percentage_columns
    ))
    res += '<br>'
    return res
[ "def", "df_to_html", "(", "df", ",", "percentage_columns", "=", "None", ")", ":", "# pragma: no cover", "big_dataframe_setup", "(", ")", "try", ":", "res", "=", "'<br><h2> {} </h2>'", ".", "format", "(", "df", ".", "name", ")", "except", "AttributeError", ":", "res", "=", "''", "df", ".", "style", ".", "set_properties", "(", "*", "*", "{", "'text-align'", ":", "'center'", "}", ")", "res", "+=", "df", ".", "to_html", "(", "formatters", "=", "_formatters_dict", "(", "input_df", "=", "df", ",", "percentage_columns", "=", "percentage_columns", ")", ")", "res", "+=", "'<br>'", "return", "res" ]
Return a nicely formatted HTML code string for the given dataframe.

Arguments
---------
df : pandas.DataFrame
    A dataframe object.
percentage_columns : iterable
    A list of column names to be displayed with a percentage sign.

Returns
-------
str
    A nicely formatted string for the given dataframe.
[ "Return", "a", "nicely", "formatted", "HTML", "code", "string", "for", "the", "given", "dataframe", "." ]
python
train
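A usage sketch for df_to_html(), assuming pandas and a frame with a ratio column to be rendered with a percentage sign:

import pandas as pd

df = pd.DataFrame({'model': ['a', 'b'], 'accuracy': [0.91, 0.874]})
df.name = 'Results'  # picked up for the <h2> heading

html = df_to_html(df, percentage_columns=['accuracy'])
print(html[:60])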
jazzband/django-widget-tweaks
widget_tweaks/templatetags/widget_tweaks.py
https://github.com/jazzband/django-widget-tweaks/blob/f50ee92410d68e81528a7643a10544e7331af8fb/widget_tweaks/templatetags/widget_tweaks.py#L138-L172
def render_field(parser, token): """ Render a form field using given attribute-value pairs Takes form field as first argument and list of attribute-value pairs for all other arguments. Attribute-value pairs should be in the form of attribute=value or attribute="a value" for assignment and attribute+=value or attribute+="value" for appending. """ error_msg = '%r tag requires a form field followed by a list of attributes and values in the form attr="value"' % token.split_contents()[0] try: bits = token.split_contents() tag_name = bits[0] form_field = bits[1] attr_list = bits[2:] except ValueError: raise TemplateSyntaxError(error_msg) form_field = parser.compile_filter(form_field) set_attrs = [] append_attrs = [] for pair in attr_list: match = ATTRIBUTE_RE.match(pair) if not match: raise TemplateSyntaxError(error_msg + ": %s" % pair) dct = match.groupdict() attr, sign, value = \ dct['attr'], dct['sign'], parser.compile_filter(dct['value']) if sign == "=": set_attrs.append((attr, value)) else: append_attrs.append((attr, value)) return FieldAttributeNode(form_field, set_attrs, append_attrs)
[ "def", "render_field", "(", "parser", ",", "token", ")", ":", "error_msg", "=", "'%r tag requires a form field followed by a list of attributes and values in the form attr=\"value\"'", "%", "token", ".", "split_contents", "(", ")", "[", "0", "]", "try", ":", "bits", "=", "token", ".", "split_contents", "(", ")", "tag_name", "=", "bits", "[", "0", "]", "form_field", "=", "bits", "[", "1", "]", "attr_list", "=", "bits", "[", "2", ":", "]", "except", "ValueError", ":", "raise", "TemplateSyntaxError", "(", "error_msg", ")", "form_field", "=", "parser", ".", "compile_filter", "(", "form_field", ")", "set_attrs", "=", "[", "]", "append_attrs", "=", "[", "]", "for", "pair", "in", "attr_list", ":", "match", "=", "ATTRIBUTE_RE", ".", "match", "(", "pair", ")", "if", "not", "match", ":", "raise", "TemplateSyntaxError", "(", "error_msg", "+", "\": %s\"", "%", "pair", ")", "dct", "=", "match", ".", "groupdict", "(", ")", "attr", ",", "sign", ",", "value", "=", "dct", "[", "'attr'", "]", ",", "dct", "[", "'sign'", "]", ",", "parser", ".", "compile_filter", "(", "dct", "[", "'value'", "]", ")", "if", "sign", "==", "\"=\"", ":", "set_attrs", ".", "append", "(", "(", "attr", ",", "value", ")", ")", "else", ":", "append_attrs", ".", "append", "(", "(", "attr", ",", "value", ")", ")", "return", "FieldAttributeNode", "(", "form_field", ",", "set_attrs", ",", "append_attrs", ")" ]
Render a form field using given attribute-value pairs Takes form field as first argument and list of attribute-value pairs for all other arguments. Attribute-value pairs should be in the form of attribute=value or attribute="a value" for assignment and attribute+=value or attribute+="value" for appending.
[ "Render", "a", "form", "field", "using", "given", "attribute", "-", "value", "pairs" ]
python
train
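In a configured Django project the tag is driven from a template; a hedged sketch (assumes a settings module with 'widget_tweaks' in INSTALLED_APPS):

from django import forms
from django.template import Context, Template

class ContactForm(forms.Form):
    email = forms.EmailField()

tpl = Template(
    '{% load widget_tweaks %}'
    '{% render_field form.email class="input" placeholder+="hint" %}'
)
print(tpl.render(Context({'form': ContactForm()})))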
ocaballeror/LyricFetch
lyricfetch/scraping.py
https://github.com/ocaballeror/LyricFetch/blob/86e62fb39c4c413ad7e1acf5bf0d28c9ed7c8fcb/lyricfetch/scraping.py#L215-L244
def metalarchives(song): """ Returns the lyrics found in MetalArchives for the specified mp3 file or an empty string if not found. """ artist = normalize(song.artist) title = normalize(song.title) url = 'https://www.metal-archives.com/search/ajax-advanced/searching/songs' url += f'/?songTitle={title}&bandName={artist}&ExactBandMatch=1' soup = get_url(url, parser='json') if not soup: return '' song_id_re = re.compile(r'lyricsLink_([0-9]*)') ids = set(re.search(song_id_re, a) for sub in soup['aaData'] for a in sub) if not ids: return '' if None in ids: ids.remove(None) ids = map(lambda a: a.group(1), ids) for song_id in ids: url = 'https://www.metal-archives.com/release/ajax-view-lyrics/id/{}' lyrics = get_url(url.format(song_id), parser='html') lyrics = lyrics.get_text().strip() if not re.search('lyrics not available', lyrics): return lyrics return ''
[ "def", "metalarchives", "(", "song", ")", ":", "artist", "=", "normalize", "(", "song", ".", "artist", ")", "title", "=", "normalize", "(", "song", ".", "title", ")", "url", "=", "'https://www.metal-archives.com/search/ajax-advanced/searching/songs'", "url", "+=", "f'/?songTitle={title}&bandName={artist}&ExactBandMatch=1'", "soup", "=", "get_url", "(", "url", ",", "parser", "=", "'json'", ")", "if", "not", "soup", ":", "return", "''", "song_id_re", "=", "re", ".", "compile", "(", "r'lyricsLink_([0-9]*)'", ")", "ids", "=", "set", "(", "re", ".", "search", "(", "song_id_re", ",", "a", ")", "for", "sub", "in", "soup", "[", "'aaData'", "]", "for", "a", "in", "sub", ")", "if", "not", "ids", ":", "return", "''", "if", "None", "in", "ids", ":", "ids", ".", "remove", "(", "None", ")", "ids", "=", "map", "(", "lambda", "a", ":", "a", ".", "group", "(", "1", ")", ",", "ids", ")", "for", "song_id", "in", "ids", ":", "url", "=", "'https://www.metal-archives.com/release/ajax-view-lyrics/id/{}'", "lyrics", "=", "get_url", "(", "url", ".", "format", "(", "song_id", ")", ",", "parser", "=", "'html'", ")", "lyrics", "=", "lyrics", ".", "get_text", "(", ")", ".", "strip", "(", ")", "if", "not", "re", ".", "search", "(", "'lyrics not available'", ",", "lyrics", ")", ":", "return", "lyrics", "return", "''" ]
Returns the lyrics found in MetalArchives for the specified mp3 file or an empty string if not found.
[ "Returns", "the", "lyrics", "found", "in", "MetalArchives", "for", "the", "specified", "mp3", "file", "or", "an", "empty", "string", "if", "not", "found", "." ]
python
train
a1ezzz/wasp-general
wasp_general/os/linux/lvm.py
https://github.com/a1ezzz/wasp-general/blob/1029839d33eb663f8dec76c1c46754d53c1de4a9/wasp_general/os/linux/lvm.py#L492-L499
def remove_volume(self): """ Remove this volume :return: None """ lvremove_cmd = ['sudo'] if self.lvm_command().sudo() is True else [] lvremove_cmd.extend(['lvremove', '-f', self.volume_path()]) subprocess.check_output(lvremove_cmd, timeout=self.__class__.__lvm_snapshot_remove_cmd_timeout__)
[ "def", "remove_volume", "(", "self", ")", ":", "lvremove_cmd", "=", "[", "'sudo'", "]", "if", "self", ".", "lvm_command", "(", ")", ".", "sudo", "(", ")", "is", "True", "else", "[", "]", "lvremove_cmd", ".", "extend", "(", "[", "'lvremove'", ",", "'-f'", ",", "self", ".", "volume_path", "(", ")", "]", ")", "subprocess", ".", "check_output", "(", "lvremove_cmd", ",", "timeout", "=", "self", ".", "__class__", ".", "__lvm_snapshot_remove_cmd_timeout__", ")" ]
Remove this volume :return: None
[ "Remove", "this", "volume" ]
python
train
mdsol/rwslib
rwslib/builders/metadata.py
https://github.com/mdsol/rwslib/blob/1a86bc072d408c009ed1de8bf6e98a1769f54d18/rwslib/builders/metadata.py#L250-L257
def build(self, builder): """Build XML by appending to builder""" params = {} if self.lang is not None: params["xml:lang"] = self.lang builder.start("TranslatedText", params) builder.data(self.text) builder.end("TranslatedText")
[ "def", "build", "(", "self", ",", "builder", ")", ":", "params", "=", "{", "}", "if", "self", ".", "lang", "is", "not", "None", ":", "params", "[", "\"xml:lang\"", "]", "=", "self", ".", "lang", "builder", ".", "start", "(", "\"TranslatedText\"", ",", "params", ")", "builder", ".", "data", "(", "self", ".", "text", ")", "builder", ".", "end", "(", "\"TranslatedText\"", ")" ]
Build XML by appending to builder
[ "Build", "XML", "by", "appending", "to", "builder" ]
python
train
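The builder argument above follows the xml.etree TreeBuilder protocol (start/data/end), so a hedged sketch:

import xml.etree.ElementTree as ET
from rwslib.builders.metadata import TranslatedText

builder = ET.TreeBuilder()
TranslatedText('Date of birth', lang='en').build(builder)
element = builder.close()
print(ET.tostring(element).decode())
# <TranslatedText xml:lang="en">Date of birth</TranslatedText>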
PyProphet/pyprophet
pyprophet/main.py
https://github.com/PyProphet/pyprophet/blob/f546ad171750cd7685afbde6785fe71f82cadb35/pyprophet/main.py#L304-L319
def export_compound(infile, outfile, format, outcsv, max_rs_peakgroup_qvalue): """ Export Compound TSV/CSV tables """ if format == "score_plots": export_score_plots(infile) else: if outfile is None: if outcsv: outfile = infile.split(".osw")[0] + ".csv" else: outfile = infile.split(".osw")[0] + ".tsv" else: outfile = outfile export_compound_tsv(infile, outfile, format, outcsv, max_rs_peakgroup_qvalue)
[ "def", "export_compound", "(", "infile", ",", "outfile", ",", "format", ",", "outcsv", ",", "max_rs_peakgroup_qvalue", ")", ":", "if", "format", "==", "\"score_plots\"", ":", "export_score_plots", "(", "infile", ")", "else", ":", "if", "outfile", "is", "None", ":", "if", "outcsv", ":", "outfile", "=", "infile", ".", "split", "(", "\".osw\"", ")", "[", "0", "]", "+", "\".csv\"", "else", ":", "outfile", "=", "infile", ".", "split", "(", "\".osw\"", ")", "[", "0", "]", "+", "\".tsv\"", "else", ":", "outfile", "=", "outfile", "export_compound_tsv", "(", "infile", ",", "outfile", ",", "format", ",", "outcsv", ",", "max_rs_peakgroup_qvalue", ")" ]
Export Compound TSV/CSV tables
[ "Export", "Compound", "TSV", "/", "CSV", "tables" ]
python
test
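The interesting branch is the default output naming; a self-contained sketch of just that rule (pure stdlib, no PyProphet required):

def default_outfile(infile, outcsv):
    # Mirrors the fallback naming: replace the .osw suffix by .csv/.tsv.
    ext = '.csv' if outcsv else '.tsv'
    return infile.split('.osw')[0] + ext

assert default_outfile('run1.osw', outcsv=False) == 'run1.tsv'
assert default_outfile('run1.osw', outcsv=True) == 'run1.csv'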
mbedmicro/pyOCD
pyocd/target/pack/cmsis_pack.py
https://github.com/mbedmicro/pyOCD/blob/41a174718a9739f3cbe785c2ba21cb7fd1310c6f/pyocd/target/pack/cmsis_pack.py#L462-L475
def memory_map(self):
    """! @brief MemoryMap object."""
    # Lazily construct the memory map.
    if self._memory_map is None:
        self._build_memory_regions()
        self._build_flash_regions()

        # Warn if there was no boot memory.
        if not self._saw_startup:
            LOG.warning("CMSIS-Pack device %s has no identifiable boot memory", self.part_number)

        self._memory_map = MemoryMap(self._regions)

    return self._memory_map
[ "def", "memory_map", "(", "self", ")", ":", "# Lazily construct the memory map.", "if", "self", ".", "_memory_map", "is", "None", ":", "self", ".", "_build_memory_regions", "(", ")", "self", ".", "_build_flash_regions", "(", ")", "# Warn if there was no boot memory.", "if", "not", "self", ".", "_saw_startup", ":", "LOG", ".", "warning", "(", "\"CMSIS-Pack device %s has no identifiable boot memory\"", ",", "self", ".", "part_number", ")", "self", ".", "_memory_map", "=", "MemoryMap", "(", "self", ".", "_regions", ")", "return", "self", ".", "_memory_map" ]
! @brief MemoryMap object.
[ "!" ]
python
train
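memory_map is a lazily-built cached property; a reduced skeleton of the same pattern (names below are illustrative, not pyOCD's API):

class LazyMapHolder:
    def __init__(self):
        self._memory_map = None  # built on first access

    @property
    def memory_map(self):
        if self._memory_map is None:
            self._memory_map = self._build()  # expensive work happens once
        return self._memory_map

    def _build(self):
        return {'flash': (0x0, 0x40000)}  # stand-in for MemoryMap(regions)

holder = LazyMapHolder()
assert holder.memory_map is holder.memory_map  # cached after the first build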
ioos/compliance-checker
compliance_checker/suite.py
https://github.com/ioos/compliance-checker/blob/ee89c27b0daade58812489a2da3aa3b6859eafd9/compliance_checker/suite.py#L216-L243
def _process_skip_checks(cls, skip_checks):
    """
    Processes an iterable of skip_checks with strings and returns
    a dict with <check_name>: <max_skip_level> pairs
    """
    check_dict = defaultdict(lambda: None)
    # A is for "all", "M" is for medium, "L" is for low
    check_lookup = {'A': BaseCheck.HIGH,
                    'M': BaseCheck.MEDIUM,
                    'L': BaseCheck.LOW}

    for skip_check_spec in skip_checks:
        split_check_spec = skip_check_spec.split(':')
        check_name = split_check_spec[0]
        if len(split_check_spec) < 2:
            check_max_level = BaseCheck.HIGH
        else:
            try:
                check_max_level = check_lookup[split_check_spec[1]]
            except KeyError:
                warnings.warn("Skip specifier '{}' on check '{}' not found,"
                              " defaulting to skip entire check".format(split_check_spec[1], check_name))
                check_max_level = BaseCheck.HIGH

        check_dict[check_name] = check_max_level

    return check_dict
[ "def", "_process_skip_checks", "(", "cls", ",", "skip_checks", ")", ":", "check_dict", "=", "defaultdict", "(", "lambda", ":", "None", ")", "# A is for \"all\", \"M\" is for medium, \"L\" is for low", "check_lookup", "=", "{", "'A'", ":", "BaseCheck", ".", "HIGH", ",", "'M'", ":", "BaseCheck", ".", "MEDIUM", ",", "'L'", ":", "BaseCheck", ".", "LOW", "}", "for", "skip_check_spec", "in", "skip_checks", ":", "split_check_spec", "=", "skip_check_spec", ".", "split", "(", "':'", ")", "check_name", "=", "split_check_spec", "[", "0", "]", "if", "len", "(", "split_check_spec", ")", "<", "2", ":", "check_max_level", "=", "BaseCheck", ".", "HIGH", "else", ":", "try", ":", "check_max_level", "=", "check_lookup", "[", "split_check_spec", "[", "1", "]", "]", "except", "KeyError", ":", "warnings", ".", "warn", "(", "\"Skip specifier '{}' on check '{}' not found,\"", "\" defaulting to skip entire check\"", ".", "format", "(", "split_check_spec", "[", "1", "]", ",", "check_name", ")", ")", "check_max_level", "=", "BaseCheck", ".", "HIGH", "check_dict", "[", "check_name", "]", "=", "check_max_level", "return", "check_dict" ]
Processes an iterable of skip_checks with strings and returns a dict with <check_name>: <max_skip_level> pairs
[ "Processes", "an", "iterable", "of", "skip_checks", "with", "strings", "and", "returns", "a", "dict", "with", "<check_name", ">", ":", "<max_skip_level", ">", "pairs" ]
python
train
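The accepted skip specifiers are 'name' or 'name:LEVEL' with LEVEL in {A, M, L}; a simplified standalone re-implementation of the parsing rule (integers stand in for the BaseCheck severity constants, and the warning branch is folded into a default):

HIGH, MEDIUM, LOW = 3, 2, 1  # stand-ins for BaseCheck.HIGH/MEDIUM/LOW

def parse_skip_spec(spec):
    name, _, level = spec.partition(':')
    # Missing or unknown level defaults to skipping the entire check.
    return name, {'A': HIGH, 'M': MEDIUM, 'L': LOW}.get(level, HIGH)

assert parse_skip_spec('check_lat') == ('check_lat', HIGH)
assert parse_skip_spec('check_lat:M') == ('check_lat', MEDIUM)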
annoviko/pyclustering
pyclustering/container/cftree.py
https://github.com/annoviko/pyclustering/blob/98aa0dd89fd36f701668fb1eb29c8fb5662bf7d0/pyclustering/container/cftree.py#L668-L677
def remove_entry(self, entry):
    """!
    @brief Remove clustering feature from the leaf node.

    @param[in] entry (cfentry): Clustering feature.

    """

    self.feature -= entry;
    self.entries.remove(entry);
[ "def", "remove_entry", "(", "self", ",", "entry", ")", ":", "self", ".", "feature", "-=", "entry", "self", ".", "entries", ".", "remove", "(", "entry", ")" ]
!
@brief Remove clustering feature from the leaf node.

@param[in] entry (cfentry): Clustering feature.
[ "!" ]
python
valid
unistra/django-rest-framework-fine-permissions
rest_framework_fine_permissions/utils.py
https://github.com/unistra/django-rest-framework-fine-permissions/blob/71af5953648ef9f9bdfb64a4c0ed0ea62661fa61/rest_framework_fine_permissions/utils.py#L28-L39
def get_serializer(serializer):
    """ Load a serializer. """
    if isinstance(serializer, string_types):
        try:
            app_label, serializer_name = serializer.split('.')
            app_package = get_application(app_label)
            serializer_module = import_module('%s.serializers' % app_package)
            serializer = getattr(serializer_module, serializer_name)
        except Exception as e:
            logger.error('Serializer %s not found: %s' % (serializer, e))
            return None
    return serializer
[ "def", "get_serializer", "(", "serializer", ")", ":", "if", "isinstance", "(", "serializer", ",", "string_types", ")", ":", "try", ":", "app_label", ",", "serializer_name", "=", "serializer", ".", "split", "(", "'.'", ")", "app_package", "=", "get_application", "(", "app_label", ")", "serializer_module", "=", "import_module", "(", "'%s.serializers'", "%", "app_package", ")", "serializer", "=", "getattr", "(", "serializer_module", ",", "serializer_name", ")", "except", "Exception", "as", "e", ":", "logger", ".", "error", "(", "'Serializer %s not found: %s'", "%", "(", "serializer", ",", "e", ")", ")", "return", "None", "return", "serializer" ]
Load a serializer.
[ "Load", "a", "serializer", "." ]
python
train
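The loader expects a dotted 'app_label.SerializerName' string and resolves it against '<app_package>.serializers'; a hedged example (the app and class names are hypothetical):

# Assumes an installed app `profiles` whose profiles/serializers.py
# defines `UserSerializer`; returns None (and logs) on any failure.
serializer_cls = get_serializer('profiles.UserSerializer')
if serializer_cls is None:
    print('lookup failed; see the error log for details')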
shexSpec/grammar
parsers/python/pyshexc/parser_impl/shex_shape_expression_parser.py
https://github.com/shexSpec/grammar/blob/4497cd1f73fa6703bca6e2cb53ba9c120f22e48c/parsers/python/pyshexc/parser_impl/shex_shape_expression_parser.py#L139-L152
def visitInlineShapeAtomNodeConstraint(self, ctx: ShExDocParser.InlineShapeAtomNodeConstraintContext):
    """ inlineShapeAtomNodeConstraint: nodeConstraint inlineShapeOrRef?  # inlineShapeAtomShapeOrRef """
    nc = ShexNodeExpressionParser(self.context, self.label)
    nc.visit(ctx.nodeConstraint())
    if ctx.inlineShapeOrRef():
        self.expr = ShapeAnd(shapeExprs=[nc.nodeconstraint])
        sorref_parser = ShexShapeExpressionParser(self.context)
        sorref_parser.visit(ctx.inlineShapeOrRef())
        # if isinstance(sorref_parser.expr, Shape) and self.context.is_empty_shape(sorref_parser.expr):
        #     self.expr = nc.nodeconstraint
        # else:
        self.expr.shapeExprs.append(sorref_parser.expr)
    else:
        self.expr = nc.nodeconstraint
[ "def", "visitInlineShapeAtomNodeConstraint", "(", "self", ",", "ctx", ":", "ShExDocParser", ".", "InlineShapeAtomNodeConstraintContext", ")", ":", "nc", "=", "ShexNodeExpressionParser", "(", "self", ".", "context", ",", "self", ".", "label", ")", "nc", ".", "visit", "(", "ctx", ".", "nodeConstraint", "(", ")", ")", "if", "ctx", ".", "inlineShapeOrRef", "(", ")", ":", "self", ".", "expr", "=", "ShapeAnd", "(", "shapeExprs", "=", "[", "nc", ".", "nodeconstraint", "]", ")", "sorref_parser", "=", "ShexShapeExpressionParser", "(", "self", ".", "context", ")", "sorref_parser", ".", "visit", "(", "ctx", ".", "inlineShapeOrRef", "(", ")", ")", "# if isinstance(sorref_parser.expr, Shape) and self.context.is_empty_shape(sorref_parser.expr):", "# self.expr = nc.nodeconstraint", "# else:", "self", ".", "expr", ".", "shapeExprs", ".", "append", "(", "sorref_parser", ".", "expr", ")", "else", ":", "self", ".", "expr", "=", "nc", ".", "nodeconstraint" ]
inlineShapeAtomNodeConstraint: nodeConstraint inlineShapeOrRef? # inlineShapeAtomShapeOrRef
[ "inlineShapeAtomNodeConstraint", ":", "nodeConstraint", "inlineShapeOrRef?", "#", "inlineShapeAtomShapeOrRef" ]
python
train
Danielhiversen/pymill
mill/__init__.py
https://github.com/Danielhiversen/pymill/blob/f091385914b53682012d0948c549beb4a5a96794/mill/__init__.py#L281-L308
async def update_heaters(self):
    """Request data."""
    homes = await self.get_home_list()
    for home in homes:
        payload = {"homeId": home.get("homeId")}
        data = await self.request("getIndependentDevices", payload)
        if data is None:
            continue
        heater_data = data.get('deviceInfo', [])
        if not heater_data:
            continue
        for _heater in heater_data:
            _id = _heater.get('deviceId')
            heater = self.heaters.get(_id, Heater())
            heater.device_id = _id
            await set_heater_values(_heater, heater)
            self.heaters[_id] = heater
    for _id, heater in self.heaters.items():
        if heater.independent_device:
            continue
        payload = {"deviceId": _id}
        _heater = await self.request("selectDevice", payload)
        if _heater is None:
            self.heaters[_id].available = False
            continue
        await set_heater_values(_heater, heater)
        self.heaters[_id] = heater
[ "async", "def", "update_heaters", "(", "self", ")", ":", "homes", "=", "await", "self", ".", "get_home_list", "(", ")", "for", "home", "in", "homes", ":", "payload", "=", "{", "\"homeId\"", ":", "home", ".", "get", "(", "\"homeId\"", ")", "}", "data", "=", "await", "self", ".", "request", "(", "\"getIndependentDevices\"", ",", "payload", ")", "if", "data", "is", "None", ":", "continue", "heater_data", "=", "data", ".", "get", "(", "'deviceInfo'", ",", "[", "]", ")", "if", "not", "heater_data", ":", "continue", "for", "_heater", "in", "heater_data", ":", "_id", "=", "_heater", ".", "get", "(", "'deviceId'", ")", "heater", "=", "self", ".", "heaters", ".", "get", "(", "_id", ",", "Heater", "(", ")", ")", "heater", ".", "device_id", "=", "_id", "await", "set_heater_values", "(", "_heater", ",", "heater", ")", "self", ".", "heaters", "[", "_id", "]", "=", "heater", "for", "_id", ",", "heater", "in", "self", ".", "heaters", ".", "items", "(", ")", ":", "if", "heater", ".", "independent_device", ":", "continue", "payload", "=", "{", "\"deviceId\"", ":", "_id", "}", "_heater", "=", "await", "self", ".", "request", "(", "\"selectDevice\"", ",", "payload", ")", "if", "_heater", "is", "None", ":", "self", ".", "heaters", "[", "_id", "]", ".", "available", "=", "False", "continue", "await", "set_heater_values", "(", "_heater", ",", "heater", ")", "self", ".", "heaters", "[", "_id", "]", "=", "heater" ]
Request data.
[ "Request", "data", "." ]
python
train
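update_heaters is a coroutine, so it needs an event loop; a minimal driver sketch (the Mill constructor arguments and the connect step are assumptions -- check pymill for the real signatures):

import asyncio

async def poll_once():
    mill = Mill('user@example.com', 'password')  # hypothetical credentials
    await mill.connect()  # assumed login step
    await mill.update_heaters()
    for device_id, heater in mill.heaters.items():
        print(device_id, heater.available)

asyncio.run(poll_once())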
pahaz/sshtunnel
sshtunnel.py
https://github.com/pahaz/sshtunnel/blob/66a923e4c6c8e41b8348420523fbf5ddfd53176c/sshtunnel.py#L1198-L1241
def read_private_key_file(pkey_file,
                          pkey_password=None,
                          key_type=None,
                          logger=None):
    """
    Get SSH private key from a private key file, given an optional password

    Arguments:
        pkey_file (str):
            File containing a private key (RSA, DSS or ECDSA)
    Keyword Arguments:
        pkey_password (Optional[str]):
            Password to decrypt the private key
        logger (Optional[logging.Logger])
    Return:
        paramiko.Pkey
    """
    ssh_pkey = None
    for pkey_class in (key_type,) if key_type else (
            paramiko.RSAKey,
            paramiko.DSSKey,
            paramiko.ECDSAKey,
            paramiko.Ed25519Key
    ):
        try:
            ssh_pkey = pkey_class.from_private_key_file(
                pkey_file,
                password=pkey_password
            )
            if logger:
                logger.debug('Private key file ({0}, {1}) successfully '
                             'loaded'.format(pkey_file, pkey_class))
            break
        except paramiko.PasswordRequiredException:
            if logger:
                logger.error('Password is required for key {0}'
                             .format(pkey_file))
            break
        except paramiko.SSHException:
            if logger:
                logger.debug('Private key file ({0}) could not be loaded '
                             'as type {1} or bad password'
                             .format(pkey_file, pkey_class))
    return ssh_pkey
[ "def", "read_private_key_file", "(", "pkey_file", ",", "pkey_password", "=", "None", ",", "key_type", "=", "None", ",", "logger", "=", "None", ")", ":", "ssh_pkey", "=", "None", "for", "pkey_class", "in", "(", "key_type", ",", ")", "if", "key_type", "else", "(", "paramiko", ".", "RSAKey", ",", "paramiko", ".", "DSSKey", ",", "paramiko", ".", "ECDSAKey", ",", "paramiko", ".", "Ed25519Key", ")", ":", "try", ":", "ssh_pkey", "=", "pkey_class", ".", "from_private_key_file", "(", "pkey_file", ",", "password", "=", "pkey_password", ")", "if", "logger", ":", "logger", ".", "debug", "(", "'Private key file ({0}, {1}) successfully '", "'loaded'", ".", "format", "(", "pkey_file", ",", "pkey_class", ")", ")", "break", "except", "paramiko", ".", "PasswordRequiredException", ":", "if", "logger", ":", "logger", ".", "error", "(", "'Password is required for key {0}'", ".", "format", "(", "pkey_file", ")", ")", "break", "except", "paramiko", ".", "SSHException", ":", "if", "logger", ":", "logger", ".", "debug", "(", "'Private key file ({0}) could not be loaded '", "'as type {1} or bad password'", ".", "format", "(", "pkey_file", ",", "pkey_class", ")", ")", "return", "ssh_pkey" ]
Get SSH private key from a private key file, given an optional password

Arguments:
    pkey_file (str):
        File containing a private key (RSA, DSS or ECDSA)
Keyword Arguments:
    pkey_password (Optional[str]):
        Password to decrypt the private key
    logger (Optional[logging.Logger])
Return:
    paramiko.Pkey
[ "Get", "SSH", "Public", "key", "from", "a", "private", "key", "file", "given", "an", "optional", "password" ]
python
train
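A small usage sketch; the key path is a placeholder and the password argument only matters for encrypted keys:

import logging

logging.basicConfig(level=logging.DEBUG)
pkey = read_private_key_file(
    '/home/user/.ssh/id_rsa',  # placeholder path
    pkey_password=None,        # set this for encrypted keys
    logger=logging.getLogger(__name__),
)
if pkey is None:
    raise SystemExit('key could not be loaded as RSA/DSS/ECDSA/Ed25519')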
angr/angr
angr/sim_manager.py
https://github.com/angr/angr/blob/4e2f97d56af5419ee73bdb30482c8dd8ff5f3e40/angr/sim_manager.py#L217-L241
def explore(self, stash='active', n=None, find=None, avoid=None, find_stash='found', avoid_stash='avoid', cfg=None,
            num_find=1, **kwargs):
    """
    Tick stash "stash" forward (up to "n" times or until "num_find" states are found), looking for condition "find",
    avoiding condition "avoid". Stores found states into "find_stash" and avoided states into "avoid_stash".

    The "find" and "avoid" parameters may be any of:

    - An address to find
    - A set or list of addresses to find
    - A function that takes a state and returns whether or not it matches.

    If an angr CFG is passed in as the "cfg" parameter and "find" is either a number or a list or a set, then
    any states which cannot possibly reach a success state without going through a failure state will be
    preemptively avoided.
    """
    num_find += len(self._stashes[find_stash]) if find_stash in self._stashes else 0
    tech = self.use_technique(Explorer(find, avoid, find_stash, avoid_stash, cfg, num_find))

    try:
        self.run(stash=stash, n=n, **kwargs)
    finally:
        self.remove_technique(tech)

    return self
[ "def", "explore", "(", "self", ",", "stash", "=", "'active'", ",", "n", "=", "None", ",", "find", "=", "None", ",", "avoid", "=", "None", ",", "find_stash", "=", "'found'", ",", "avoid_stash", "=", "'avoid'", ",", "cfg", "=", "None", ",", "num_find", "=", "1", ",", "*", "*", "kwargs", ")", ":", "num_find", "+=", "len", "(", "self", ".", "_stashes", "[", "find_stash", "]", ")", "if", "find_stash", "in", "self", ".", "_stashes", "else", "0", "tech", "=", "self", ".", "use_technique", "(", "Explorer", "(", "find", ",", "avoid", ",", "find_stash", ",", "avoid_stash", ",", "cfg", ",", "num_find", ")", ")", "try", ":", "self", ".", "run", "(", "stash", "=", "stash", ",", "n", "=", "n", ",", "*", "*", "kwargs", ")", "finally", ":", "self", ".", "remove_technique", "(", "tech", ")", "return", "self" ]
Tick stash "stash" forward (up to "n" times or until "num_find" states are found), looking for condition "find", avoiding condition "avoid". Stores found states into "find_stash' and avoided states into "avoid_stash". The "find" and "avoid" parameters may be any of: - An address to find - A set or list of addresses to find - A function that takes a state and returns whether or not it matches. If an angr CFG is passed in as the "cfg" parameter and "find" is either a number or a list or a set, then any states which cannot possibly reach a success state without going through a failure state will be preemptively avoided.
[ "Tick", "stash", "stash", "forward", "(", "up", "to", "n", "times", "or", "until", "num_find", "states", "are", "found", ")", "looking", "for", "condition", "find", "avoiding", "condition", "avoid", ".", "Stores", "found", "states", "into", "find_stash", "and", "avoided", "states", "into", "avoid_stash", "." ]
python
train
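A typical call, sketched from angr's documented workflow (binary path and addresses are placeholders):

import angr

proj = angr.Project('./a.out', auto_load_libs=False)
simgr = proj.factory.simulation_manager(proj.factory.entry_state())
# find/avoid accept an address, a collection of addresses, or a predicate.
simgr.explore(find=0x400abc, avoid={0x400def})
if simgr.found:
    print(simgr.found[0].posix.dumps(0))  # stdin that reaches the target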
brocade/pynos
pynos/versions/ver_6/ver_6_0_1/yang/brocade_common_def.py
https://github.com/brocade/pynos/blob/bd8a34e98f322de3fc06750827d8bbc3a0c00380/pynos/versions/ver_6/ver_6_0_1/yang/brocade_common_def.py#L736-L752
def ip_rtm_config_route_static_route_oif_static_route_dest(self, **kwargs):
    """Auto Generated Code
    """
    config = ET.Element("config")
    ip = ET.SubElement(config, "ip", xmlns="urn:brocade.com:mgmt:brocade-common-def")
    rtm_config = ET.SubElement(ip, "rtm-config", xmlns="urn:brocade.com:mgmt:brocade-rtm")
    route = ET.SubElement(rtm_config, "route")
    static_route_oif = ET.SubElement(route, "static-route-oif")
    static_route_oif_type_key = ET.SubElement(static_route_oif, "static-route-oif-type")
    static_route_oif_type_key.text = kwargs.pop('static_route_oif_type')
    static_route_oif_name_key = ET.SubElement(static_route_oif, "static-route-oif-name")
    static_route_oif_name_key.text = kwargs.pop('static_route_oif_name')
    static_route_dest = ET.SubElement(static_route_oif, "static-route-dest")
    static_route_dest.text = kwargs.pop('static_route_dest')

    callback = kwargs.pop('callback', self._callback)
    return callback(config)
[ "def", "ip_rtm_config_route_static_route_oif_static_route_dest", "(", "self", ",", "*", "*", "kwargs", ")", ":", "config", "=", "ET", ".", "Element", "(", "\"config\"", ")", "ip", "=", "ET", ".", "SubElement", "(", "config", ",", "\"ip\"", ",", "xmlns", "=", "\"urn:brocade.com:mgmt:brocade-common-def\"", ")", "rtm_config", "=", "ET", ".", "SubElement", "(", "ip", ",", "\"rtm-config\"", ",", "xmlns", "=", "\"urn:brocade.com:mgmt:brocade-rtm\"", ")", "route", "=", "ET", ".", "SubElement", "(", "rtm_config", ",", "\"route\"", ")", "static_route_oif", "=", "ET", ".", "SubElement", "(", "route", ",", "\"static-route-oif\"", ")", "static_route_oif_type_key", "=", "ET", ".", "SubElement", "(", "static_route_oif", ",", "\"static-route-oif-type\"", ")", "static_route_oif_type_key", ".", "text", "=", "kwargs", ".", "pop", "(", "'static_route_oif_type'", ")", "static_route_oif_name_key", "=", "ET", ".", "SubElement", "(", "static_route_oif", ",", "\"static-route-oif-name\"", ")", "static_route_oif_name_key", ".", "text", "=", "kwargs", ".", "pop", "(", "'static_route_oif_name'", ")", "static_route_dest", "=", "ET", ".", "SubElement", "(", "static_route_oif", ",", "\"static-route-dest\"", ")", "static_route_dest", ".", "text", "=", "kwargs", ".", "pop", "(", "'static_route_dest'", ")", "callback", "=", "kwargs", ".", "pop", "(", "'callback'", ",", "self", ".", "_callback", ")", "return", "callback", "(", "config", ")" ]
Auto Generated Code
[ "Auto", "Generated", "Code" ]
python
train
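A hedged invocation sketch; only the keyword shape is shown, since the device/session plumbing is pynos-specific (the device handle and all values below are placeholders):

output = device.ip_rtm_config_route_static_route_oif_static_route_dest(
    static_route_oif_type='tengigabitethernet',  # placeholder type
    static_route_oif_name='1/0/1',               # placeholder interface
    static_route_dest='10.0.0.0/24',             # placeholder destination
)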
allenai/allennlp
allennlp/data/instance.py
https://github.com/allenai/allennlp/blob/648a36f77db7e45784c047176074f98534c76636/allennlp/data/instance.py#L74-L82
def get_padding_lengths(self) -> Dict[str, Dict[str, int]]:
    """
    Returns a dictionary of padding lengths, keyed by field name.  Each ``Field`` returns a
    mapping from padding keys to actual lengths, and we just key that dictionary by field name.
    """
    lengths = {}
    for field_name, field in self.fields.items():
        lengths[field_name] = field.get_padding_lengths()
    return lengths
[ "def", "get_padding_lengths", "(", "self", ")", "->", "Dict", "[", "str", ",", "Dict", "[", "str", ",", "int", "]", "]", ":", "lengths", "=", "{", "}", "for", "field_name", ",", "field", "in", "self", ".", "fields", ".", "items", "(", ")", ":", "lengths", "[", "field_name", "]", "=", "field", ".", "get_padding_lengths", "(", ")", "return", "lengths" ]
Returns a dictionary of padding lengths, keyed by field name. Each ``Field`` returns a mapping from padding keys to actual lengths, and we just key that dictionary by field name.
[ "Returns", "a", "dictionary", "of", "padding", "lengths", "keyed", "by", "field", "name", ".", "Each", "Field", "returns", "a", "mapping", "from", "padding", "keys", "to", "actual", "lengths", "and", "we", "just", "key", "that", "dictionary", "by", "field", "name", "." ]
python
train
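The result is a two-level dict keyed by field name; a sketch of how a caller might consume it (field and key names are illustrative):

# For an Instance with a 'tokens' TextField, the shape is roughly
# {'tokens': {'num_tokens': 12}, 'label': {}}.
lengths = instance.get_padding_lengths()  # assumes `instance` already exists
max_tokens = max(l.get('num_tokens', 0) for l in lengths.values())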
iopipe/iopipe-python
iopipe/report.py
https://github.com/iopipe/iopipe-python/blob/4eb653977341bc67f8b1b87aedb3aaaefc25af61/iopipe/report.py#L82-L117
def extract_context_data(self):
    """
    Returns the contents of an AWS Lambda context.

    :returns: A dict of relevant context data.
    :rtype: dict
    """
    data = {}
    for k, v in {
        # camel case names in the report to align with AWS standards
        "functionName": "function_name",
        "functionVersion": "function_version",
        "memoryLimitInMB": "memory_limit_in_mb",
        "invokedFunctionArn": "invoked_function_arn",
        "awsRequestId": "aws_request_id",
        "logGroupName": "log_group_name",
        "logStreamName": "log_stream_name",
    }.items():
        if hasattr(self.context, v):
            data[k] = getattr(self.context, v)
    if (
        hasattr(self.context, "invoked_function_arn")
        and "AWS_SAM_LOCAL" in os.environ
    ):
        data["invokedFunctionArn"] = (
            "arn:aws:lambda:local:0:function:%s"
            % data.get("functionName", "unknown")
        )
    if hasattr(self.context, "get_remaining_time_in_millis") and callable(
        self.context.get_remaining_time_in_millis
    ):
        data[
            "getRemainingTimeInMillis"
        ] = self.context.get_remaining_time_in_millis()
    data["traceId"] = os.getenv("_X_AMZN_TRACE_ID", "")
    return data
[ "def", "extract_context_data", "(", "self", ")", ":", "data", "=", "{", "}", "for", "k", ",", "v", "in", "{", "# camel case names in the report to align with AWS standards", "\"functionName\"", ":", "\"function_name\"", ",", "\"functionVersion\"", ":", "\"function_version\"", ",", "\"memoryLimitInMB\"", ":", "\"memory_limit_in_mb\"", ",", "\"invokedFunctionArn\"", ":", "\"invoked_function_arn\"", ",", "\"awsRequestId\"", ":", "\"aws_request_id\"", ",", "\"logGroupName\"", ":", "\"log_group_name\"", ",", "\"logStreamName\"", ":", "\"log_stream_name\"", ",", "}", ".", "items", "(", ")", ":", "if", "hasattr", "(", "self", ".", "context", ",", "v", ")", ":", "data", "[", "k", "]", "=", "getattr", "(", "self", ".", "context", ",", "v", ")", "if", "(", "hasattr", "(", "self", ".", "context", ",", "\"invoked_function_arn\"", ")", "and", "\"AWS_SAM_LOCAL\"", "in", "os", ".", "environ", ")", ":", "data", "[", "\"invokedFunctionArn\"", "]", "=", "(", "\"arn:aws:lambda:local:0:function:%s\"", "%", "data", ".", "get", "(", "\"functionName\"", ",", "\"unknown\"", ")", ")", "if", "hasattr", "(", "self", ".", "context", ",", "\"get_remaining_time_in_millis\"", ")", "and", "callable", "(", "self", ".", "context", ".", "get_remaining_time_in_millis", ")", ":", "data", "[", "\"getRemainingTimeInMillis\"", "]", "=", "self", ".", "context", ".", "get_remaining_time_in_millis", "(", ")", "data", "[", "\"traceId\"", "]", "=", "os", ".", "getenv", "(", "\"_X_AMZN_TRACE_ID\"", ",", "\"\"", ")", "return", "data" ]
Returns the contents of an AWS Lambda context.

:returns: A dict of relevant context data.
:rtype: dict
[ "Returns", "the", "contents", "of", "a", "AWS", "Lambda", "context", "." ]
python
train
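A quick sketch with a stand-in context object, since real Lambda contexts only exist inside AWS (the report variable is assumed to be an existing iopipe Report instance):

from types import SimpleNamespace

report.context = SimpleNamespace(  # minimal fake; real contexts have more
    function_name='my-fn',
    memory_limit_in_mb='128',
    aws_request_id='00000000-0000-0000-0000-000000000000',
)
print(report.extract_context_data())  # {'functionName': 'my-fn', ...}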
benfred/implicit
implicit/datasets/_download.py
https://github.com/benfred/implicit/blob/6b16c50d1d514a814f2e5b8cf2a829ff23dbba63/implicit/datasets/_download.py#L13-L25
def download_file(url, local_filename):
    """ Simple wrapper around urlretrieve that uses tqdm to display a progress
    bar of download progress """
    local_filename = os.path.abspath(local_filename)
    path = os.path.dirname(local_filename)
    if not os.path.isdir(path):
        os.makedirs(path)

    with tqdm(unit='B', unit_scale=True) as progress:
        def report(chunk, chunksize, total):
            progress.total = total
            progress.update(chunksize)
        return urlretrieve(url, local_filename, reporthook=report)
[ "def", "download_file", "(", "url", ",", "local_filename", ")", ":", "local_filename", "=", "os", ".", "path", ".", "abspath", "(", "local_filename", ")", "path", "=", "os", ".", "path", ".", "dirname", "(", "local_filename", ")", "if", "not", "os", ".", "path", ".", "isdir", "(", "path", ")", ":", "os", ".", "makedirs", "(", "path", ")", "with", "tqdm", "(", "unit", "=", "'B'", ",", "unit_scale", "=", "True", ")", "as", "progress", ":", "def", "report", "(", "chunk", ",", "chunksize", ",", "total", ")", ":", "progress", ".", "total", "=", "total", "progress", ".", "update", "(", "chunksize", ")", "return", "urlretrieve", "(", "url", ",", "local_filename", ",", "reporthook", "=", "report", ")" ]
Simple wrapper around urlretrieve that uses tqdm to display a progress bar of download progress
[ "Simple", "wrapper", "around", "urlretrieve", "that", "uses", "tqdm", "to", "display", "a", "progress", "bar", "of", "download", "progress" ]
python
train
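Usage is a single call; parent directories are created and tqdm renders the progress bar (URL and path are placeholders):

download_file(
    'https://example.com/dataset.zip',  # placeholder URL
    'data/dataset.zip',
)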
dpkp/kafka-python
kafka/consumer/fetcher.py
https://github.com/dpkp/kafka-python/blob/f6a8a38937688ea2cc5dc13d3d1039493be5c9b5/kafka/consumer/fetcher.py#L247-L293
def _retrieve_offsets(self, timestamps, timeout_ms=float("inf")):
    """Fetch offset for each partition passed in ``timestamps`` map.

    Blocks until offsets are obtained, a non-retriable exception is raised
    or ``timeout_ms`` passed.

    Arguments:
        timestamps: {TopicPartition: int} dict with timestamps to fetch
            offsets by. -1 for the latest available, -2 for the earliest
            available. Otherwise timestamp is treated as epoch milliseconds.

    Returns:
        {TopicPartition: (int, int)}: Mapping of partition to
            retrieved offset and timestamp. If offset does not exist for
            the provided timestamp, that partition will be missing from
            this mapping.
    """
    if not timestamps:
        return {}

    start_time = time.time()
    remaining_ms = timeout_ms
    while remaining_ms > 0:
        future = self._send_offset_requests(timestamps)
        self._client.poll(future=future, timeout_ms=remaining_ms)

        if future.succeeded():
            return future.value
        if not future.retriable():
            raise future.exception  # pylint: disable-msg=raising-bad-type

        elapsed_ms = (time.time() - start_time) * 1000
        remaining_ms = timeout_ms - elapsed_ms
        if remaining_ms < 0:
            break

        if future.exception.invalid_metadata:
            refresh_future = self._client.cluster.request_update()
            self._client.poll(future=refresh_future, timeout_ms=remaining_ms)
        else:
            time.sleep(self.config['retry_backoff_ms'] / 1000.0)

        elapsed_ms = (time.time() - start_time) * 1000
        remaining_ms = timeout_ms - elapsed_ms

    raise Errors.KafkaTimeoutError(
        "Failed to get offsets by timestamps in %s ms" % (timeout_ms,))
[ "def", "_retrieve_offsets", "(", "self", ",", "timestamps", ",", "timeout_ms", "=", "float", "(", "\"inf\"", ")", ")", ":", "if", "not", "timestamps", ":", "return", "{", "}", "start_time", "=", "time", ".", "time", "(", ")", "remaining_ms", "=", "timeout_ms", "while", "remaining_ms", ">", "0", ":", "future", "=", "self", ".", "_send_offset_requests", "(", "timestamps", ")", "self", ".", "_client", ".", "poll", "(", "future", "=", "future", ",", "timeout_ms", "=", "remaining_ms", ")", "if", "future", ".", "succeeded", "(", ")", ":", "return", "future", ".", "value", "if", "not", "future", ".", "retriable", "(", ")", ":", "raise", "future", ".", "exception", "# pylint: disable-msg=raising-bad-type", "elapsed_ms", "=", "(", "time", ".", "time", "(", ")", "-", "start_time", ")", "*", "1000", "remaining_ms", "=", "timeout_ms", "-", "elapsed_ms", "if", "remaining_ms", "<", "0", ":", "break", "if", "future", ".", "exception", ".", "invalid_metadata", ":", "refresh_future", "=", "self", ".", "_client", ".", "cluster", ".", "request_update", "(", ")", "self", ".", "_client", ".", "poll", "(", "future", "=", "refresh_future", ",", "timeout_ms", "=", "remaining_ms", ")", "else", ":", "time", ".", "sleep", "(", "self", ".", "config", "[", "'retry_backoff_ms'", "]", "/", "1000.0", ")", "elapsed_ms", "=", "(", "time", ".", "time", "(", ")", "-", "start_time", ")", "*", "1000", "remaining_ms", "=", "timeout_ms", "-", "elapsed_ms", "raise", "Errors", ".", "KafkaTimeoutError", "(", "\"Failed to get offsets by timestamps in %s ms\"", "%", "(", "timeout_ms", ",", ")", ")" ]
Fetch offset for each partition passed in ``timestamps`` map.

Blocks until offsets are obtained, a non-retriable exception is raised
or ``timeout_ms`` passed.

Arguments:
    timestamps: {TopicPartition: int} dict with timestamps to fetch
        offsets by. -1 for the latest available, -2 for the earliest
        available. Otherwise timestamp is treated as epoch milliseconds.

Returns:
    {TopicPartition: (int, int)}: Mapping of partition to
        retrieved offset and timestamp. If offset does not exist for
        the provided timestamp, that partition will be missing from
        this mapping.
[ "Fetch", "offset", "for", "each", "partition", "passed", "in", "timestamps", "map", "." ]
python
train
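This private helper backs the consumer's public timestamp APIs; a sketch of the convention from the caller's side (broker and topic are placeholders):

from kafka import KafkaConsumer, TopicPartition

consumer = KafkaConsumer(bootstrap_servers='localhost:9092')  # placeholder
tp = TopicPartition('events', 0)
# Epoch-millisecond timestamps resolve to the first offset at/after that
# time; the -1/-2 sentinels from the docstring are what end_offsets()/
# beginning_offsets() pass through internally.
offsets = consumer.offsets_for_times({tp: 1_500_000_000_000})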