Dataset columns:
- repo: string (7 to 54 chars)
- path: string (4 to 192 chars)
- url: string (87 to 284 chars)
- code: string (78 to 104k chars)
- code_tokens: sequence
- docstring: string (1 to 46.9k chars)
- docstring_tokens: sequence
- language: string (1 class)
- partition: string (3 classes)
COLORFULBOARD/revision
revision/orchestrator.py
https://github.com/COLORFULBOARD/revision/blob/2f22e72cce5b60032a80c002ac45c2ecef0ed987/revision/orchestrator.py#L111-L136
def has_commit(self, client_key=None):
    """
    Return True if client has new commit.

    :param client_key: The client key
    :type client_key: str
    :return:
    :rtype: boolean
    """
    if client_key is None and self.current_client is None:
        raise ClientNotExist()
    if client_key:
        if not self.clients.has_client(client_key):
            raise ClientNotExist()
        client = self.clients.get_client(client_key)
        return client.has_commit()
    if self.current_client:
        client = self.current_client
        return client.has_commit()
    return False
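A minimal usage sketch; the `Orchestrator` construction and the client key are hypothetical, assumed only for illustration:

# Hypothetical usage; 'my-client' and the constructor call are illustrative.
orchestrator = Orchestrator()  # assumed constructor from revision/orchestrator.py
if orchestrator.has_commit('my-client'):
    print('client has a new commit')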
[ "def", "has_commit", "(", "self", ",", "client_key", "=", "None", ")", ":", "if", "client_key", "is", "None", "and", "self", ".", "current_client", "is", "None", ":", "raise", "ClientNotExist", "(", ")", "if", "client_key", ":", "if", "not", "self", ".", "clients", ".", "has_client", "(", "client_key", ")", ":", "raise", "ClientNotExist", "(", ")", "client", "=", "self", ".", "clients", ".", "get_client", "(", "client_key", ")", "return", "client", ".", "has_commit", "(", ")", "if", "self", ".", "current_client", ":", "client", "=", "self", ".", "current_client", "return", "client", ".", "has_commit", "(", ")", "return", "False" ]
Return True if client has new commit. :param client_key: The client key :type client_key: str :return: :rtype: boolean
[ "Return", "True", "if", "client", "has", "new", "commit", "." ]
python
train
richardchien/nonebot
nonebot/command/__init__.py
https://github.com/richardchien/nonebot/blob/13ed9e4e87d9824b61592520aabda6d2737c8848/nonebot/command/__init__.py#L408-L412
def finish(self, message: Optional[Message_T] = None, **kwargs) -> None:
    """Finish the session."""
    if message:
        asyncio.ensure_future(self.send(message, **kwargs))
    raise _FinishException
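Since the docstring is terse, a short sketch of how finish() is used in practice may help; this assumes nonebot v1's documented on_command / CommandSession API, and the 'echo' command is illustrative:

# Sketch of a nonebot v1 command handler; 'echo' is an illustrative command.
from nonebot import on_command, CommandSession

@on_command('echo')
async def echo(session: CommandSession):
    # Sends the reply (if any) and raises _FinishException to end the session.
    session.finish(session.current_arg)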
[ "def", "finish", "(", "self", ",", "message", ":", "Optional", "[", "Message_T", "]", "=", "None", ",", "*", "*", "kwargs", ")", "->", "None", ":", "if", "message", ":", "asyncio", ".", "ensure_future", "(", "self", ".", "send", "(", "message", ",", "*", "*", "kwargs", ")", ")", "raise", "_FinishException" ]
Finish the session.
[ "Finish", "the", "session", "." ]
python
train
numenta/htmresearch
projects/sdr_paper/poirazi_neuron_model/run_dim_classification_experiment.py
https://github.com/numenta/htmresearch/blob/70c096b09a577ea0432c3f3bfff4442d4871b7aa/projects/sdr_paper/poirazi_neuron_model/run_dim_classification_experiment.py#L32-L113
def run_false_positive_experiment_dim(numActive=128,
                                      dim=500,
                                      numSamples=1000,
                                      numDendrites=500,
                                      synapses=24,
                                      numTrials=10000,
                                      seed=42,
                                      nonlinearity=sigmoid_nonlinearity(11.5, 5)):
  """
  Run an experiment to test the false positive rate based on number of
  synapses per dendrite, dimension and sparsity.  Uses two competing neurons,
  along the lines of the P&M model.

  Based on figure 5B in the original SDR paper.
  """
  numpy.random.seed(seed)
  fps = []
  fns = []
  totalUnclassified = 0

  # Note: this is Python 2 code (print statement, integer division below).
  for trial in range(numTrials):
    negData = generate_evenly_distributed_data_sparse(dim=dim,
                                                      num_active=numActive,
                                                      num_samples=numSamples / 2)
    posData = generate_evenly_distributed_data_sparse(dim=dim,
                                                      num_active=numActive,
                                                      num_samples=numSamples / 2)
    halfLabels = numpy.asarray([1 for _ in range(numSamples / 2)])
    flippedHalfLabels = halfLabels * -1

    neuron = Neuron(size=synapses * numDendrites,
                    num_dendrites=numDendrites,
                    dendrite_length=synapses,
                    dim=dim,
                    nonlinearity=nonlinearity)
    neg_neuron = Neuron(size=synapses * numDendrites,
                        num_dendrites=numDendrites,
                        dendrite_length=synapses,
                        dim=dim,
                        nonlinearity=nonlinearity)
    neuron.HTM_style_initialize_on_positive_data(posData)
    neg_neuron.HTM_style_initialize_on_positive_data(negData)

    # Get error for positively labeled data
    fp, fn, uc = get_error(posData, halfLabels, [neuron], [neg_neuron])
    totalUnclassified += uc
    fps.append(fp)
    fns.append(fn)

    # Get error for negatively labeled data
    fp, fn, uc = get_error(negData, flippedHalfLabels, [neuron], [neg_neuron])
    totalUnclassified += uc
    fps.append(fp)
    fns.append(fn)

  print "Error with n = {} : {} FP, {} FN, {} unclassified".format(
      dim, sum(fps), sum(fns), totalUnclassified)

  result = {
    "dim": dim,
    "totalFP": sum(fps),
    "totalFN": sum(fns),
    "total mistakes": sum(fns + fps) + totalUnclassified,
    "error": float(sum(fns + fps) + totalUnclassified) / (numTrials * numSamples),
    "totalSamples": numTrials * numSamples,
    "a": numActive,
    "num_dendrites": numDendrites,
    "totalUnclassified": totalUnclassified,
    "synapses": synapses,  # was hardcoded to 24, which ignored the parameter
    "seed": seed,
  }
  return result
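A quick illustrative call; the helpers (Neuron, get_error, the data generator) come from the surrounding htmresearch module, and the small trial count here is only to keep the sketch cheap:

# Python 2 sketch; parameter values are illustrative, not from the paper.
result = run_false_positive_experiment_dim(dim=2000, numTrials=100)
print result["error"]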
[ "def", "run_false_positive_experiment_dim", "(", "numActive", "=", "128", ",", "dim", "=", "500", ",", "numSamples", "=", "1000", ",", "numDendrites", "=", "500", ",", "synapses", "=", "24", ",", "numTrials", "=", "10000", ",", "seed", "=", "42", ",", "nonlinearity", "=", "sigmoid_nonlinearity", "(", "11.5", ",", "5", ")", ")", ":", "numpy", ".", "random", ".", "seed", "(", "seed", ")", "fps", "=", "[", "]", "fns", "=", "[", "]", "totalUnclassified", "=", "0", "for", "trial", "in", "range", "(", "numTrials", ")", ":", "# data = generate_evenly_distributed_data_sparse(dim = dim,", "# num_active = numActive,", "# num_samples = numSamples)", "# labels = numpy.asarray([1 for i in range(numSamples / 2)] +", "# [-1 for i in range(numSamples / 2)])", "# flipped_labels = labels * -1", "negData", "=", "generate_evenly_distributed_data_sparse", "(", "dim", "=", "dim", ",", "num_active", "=", "numActive", ",", "num_samples", "=", "numSamples", "/", "2", ")", "posData", "=", "generate_evenly_distributed_data_sparse", "(", "dim", "=", "dim", ",", "num_active", "=", "numActive", ",", "num_samples", "=", "numSamples", "/", "2", ")", "halfLabels", "=", "numpy", ".", "asarray", "(", "[", "1", "for", "_", "in", "range", "(", "numSamples", "/", "2", ")", "]", ")", "flippedHalfLabels", "=", "halfLabels", "*", "-", "1", "neuron", "=", "Neuron", "(", "size", "=", "synapses", "*", "numDendrites", ",", "num_dendrites", "=", "numDendrites", ",", "dendrite_length", "=", "synapses", ",", "dim", "=", "dim", ",", "nonlinearity", "=", "nonlinearity", ")", "neg_neuron", "=", "Neuron", "(", "size", "=", "synapses", "*", "numDendrites", ",", "num_dendrites", "=", "numDendrites", ",", "dendrite_length", "=", "synapses", ",", "dim", "=", "dim", ",", "nonlinearity", "=", "nonlinearity", ")", "neuron", ".", "HTM_style_initialize_on_positive_data", "(", "posData", ")", "neg_neuron", ".", "HTM_style_initialize_on_positive_data", "(", "negData", ")", "# Get error for positively labeled data", "fp", ",", "fn", ",", "uc", "=", "get_error", "(", "posData", ",", "halfLabels", ",", "[", "neuron", "]", ",", "[", "neg_neuron", "]", ")", "totalUnclassified", "+=", "uc", "fps", ".", "append", "(", "fp", ")", "fns", ".", "append", "(", "fn", ")", "# Get error for negatively labeled data", "fp", ",", "fn", ",", "uc", "=", "get_error", "(", "negData", ",", "flippedHalfLabels", ",", "[", "neuron", "]", ",", "[", "neg_neuron", "]", ")", "totalUnclassified", "+=", "uc", "fps", ".", "append", "(", "fp", ")", "fns", ".", "append", "(", "fn", ")", "print", "\"Error with n = {} : {} FP, {} FN, {} unclassified\"", ".", "format", "(", "dim", ",", "sum", "(", "fps", ")", ",", "sum", "(", "fns", ")", ",", "totalUnclassified", ")", "result", "=", "{", "\"dim\"", ":", "dim", ",", "\"totalFP\"", ":", "sum", "(", "fps", ")", ",", "\"totalFN\"", ":", "sum", "(", "fns", ")", ",", "\"total mistakes\"", ":", "sum", "(", "fns", "+", "fps", ")", "+", "totalUnclassified", ",", "\"error\"", ":", "float", "(", "sum", "(", "fns", "+", "fps", ")", "+", "totalUnclassified", ")", "/", "(", "numTrials", "*", "numSamples", ")", ",", "\"totalSamples\"", ":", "numTrials", "*", "numSamples", ",", "\"a\"", ":", "numActive", ",", "\"num_dendrites\"", ":", "numDendrites", ",", "\"totalUnclassified\"", ":", "totalUnclassified", ",", "\"synapses\"", ":", "24", ",", "\"seed\"", ":", "seed", ",", "}", "return", "result" ]
Run an experiment to test the false positive rate based on number of synapses per dendrite, dimension and sparsity. Uses two competing neurons, along the P&M model. Based on figure 5B in the original SDR paper.
[ "Run", "an", "experiment", "to", "test", "the", "false", "positive", "rate", "based", "on", "number", "of", "synapses", "per", "dendrite", "dimension", "and", "sparsity", ".", "Uses", "two", "competing", "neurons", "along", "the", "P&M", "model", "." ]
python
train
brocade/pynos
pynos/versions/ver_7/ver_7_1_0/yang/brocade_lldp_ext.py
https://github.com/brocade/pynos/blob/bd8a34e98f322de3fc06750827d8bbc3a0c00380/pynos/versions/ver_7/ver_7_1_0/yang/brocade_lldp_ext.py#L320-L336
def get_lldp_neighbor_detail_output_lldp_neighbor_detail_lldp_pdu_transmitted(self, **kwargs):
    """Auto Generated Code
    """
    config = ET.Element("config")
    get_lldp_neighbor_detail = ET.Element("get_lldp_neighbor_detail")
    # Auto-generated quirk: the initial "config" element is immediately replaced.
    config = get_lldp_neighbor_detail
    output = ET.SubElement(get_lldp_neighbor_detail, "output")
    lldp_neighbor_detail = ET.SubElement(output, "lldp-neighbor-detail")
    local_interface_name_key = ET.SubElement(lldp_neighbor_detail, "local-interface-name")
    local_interface_name_key.text = kwargs.pop('local_interface_name')
    remote_interface_name_key = ET.SubElement(lldp_neighbor_detail, "remote-interface-name")
    remote_interface_name_key.text = kwargs.pop('remote_interface_name')
    lldp_pdu_transmitted = ET.SubElement(lldp_neighbor_detail, "lldp-pdu-transmitted")
    lldp_pdu_transmitted.text = kwargs.pop('lldp_pdu_transmitted')
    callback = kwargs.pop('callback', self._callback)
    return callback(config)
[ "def", "get_lldp_neighbor_detail_output_lldp_neighbor_detail_lldp_pdu_transmitted", "(", "self", ",", "*", "*", "kwargs", ")", ":", "config", "=", "ET", ".", "Element", "(", "\"config\"", ")", "get_lldp_neighbor_detail", "=", "ET", ".", "Element", "(", "\"get_lldp_neighbor_detail\"", ")", "config", "=", "get_lldp_neighbor_detail", "output", "=", "ET", ".", "SubElement", "(", "get_lldp_neighbor_detail", ",", "\"output\"", ")", "lldp_neighbor_detail", "=", "ET", ".", "SubElement", "(", "output", ",", "\"lldp-neighbor-detail\"", ")", "local_interface_name_key", "=", "ET", ".", "SubElement", "(", "lldp_neighbor_detail", ",", "\"local-interface-name\"", ")", "local_interface_name_key", ".", "text", "=", "kwargs", ".", "pop", "(", "'local_interface_name'", ")", "remote_interface_name_key", "=", "ET", ".", "SubElement", "(", "lldp_neighbor_detail", ",", "\"remote-interface-name\"", ")", "remote_interface_name_key", ".", "text", "=", "kwargs", ".", "pop", "(", "'remote_interface_name'", ")", "lldp_pdu_transmitted", "=", "ET", ".", "SubElement", "(", "lldp_neighbor_detail", ",", "\"lldp-pdu-transmitted\"", ")", "lldp_pdu_transmitted", ".", "text", "=", "kwargs", ".", "pop", "(", "'lldp_pdu_transmitted'", ")", "callback", "=", "kwargs", ".", "pop", "(", "'callback'", ",", "self", ".", "_callback", ")", "return", "callback", "(", "config", ")" ]
Auto Generated Code
[ "Auto", "Generated", "Code" ]
python
train
jbittel/django-mama-cas
mama_cas/services/__init__.py
https://github.com/jbittel/django-mama-cas/blob/03935d97442b46d8127ab9e1cd8deb96953fe156/mama_cas/services/__init__.py#L8-L18
def _get_backends():
    """Retrieve the list of configured service backends."""
    backends = []
    backend_paths = getattr(
        settings,
        'MAMA_CAS_SERVICE_BACKENDS',
        ['mama_cas.services.backends.SettingsBackend']
    )
    for backend_path in backend_paths:
        backend = import_string(backend_path)()
        backends.append(backend)
    return backends
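The setting it reads is a plain list of dotted import paths; a minimal Django settings sketch, where the second backend path is hypothetical:

# settings.py -- the custom backend below is illustrative, not part of mama-cas.
MAMA_CAS_SERVICE_BACKENDS = [
    'mama_cas.services.backends.SettingsBackend',
    'myproject.cas.DatabaseBackend',  # hypothetical custom backend
]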
[ "def", "_get_backends", "(", ")", ":", "backends", "=", "[", "]", "backend_paths", "=", "getattr", "(", "settings", ",", "'MAMA_CAS_SERVICE_BACKENDS'", ",", "[", "'mama_cas.services.backends.SettingsBackend'", "]", ")", "for", "backend_path", "in", "backend_paths", ":", "backend", "=", "import_string", "(", "backend_path", ")", "(", ")", "backends", ".", "append", "(", "backend", ")", "return", "backends" ]
Retrieve the list of configured service backends.
[ "Retrieve", "the", "list", "of", "configured", "service", "backends", "." ]
python
train
yamcs/yamcs-python
yamcs-client/yamcs/tmtc/client.py
https://github.com/yamcs/yamcs-python/blob/1082fee8a299010cc44416bbb7518fac0ef08b48/yamcs-client/yamcs/tmtc/client.py#L219-L232
def get_command_history(self, issued_command):
    """
    Gets locally cached CommandHistory for the specified command.

    :param .IssuedCommand issued_command: object representing a
                                          previously issued command.
    :rtype: .CommandHistory
    """
    # pylint: disable=protected-access
    entry = issued_command._proto.commandQueueEntry
    key = self._cache_key(entry.cmdId)
    if key in self._cache:
        return self._cache[key]
    return None
[ "def", "get_command_history", "(", "self", ",", "issued_command", ")", ":", "#pylint: disable=protected-access", "entry", "=", "issued_command", ".", "_proto", ".", "commandQueueEntry", "key", "=", "self", ".", "_cache_key", "(", "entry", ".", "cmdId", ")", "if", "key", "in", "self", ".", "_cache", ":", "return", "self", ".", "_cache", "[", "key", "]", "return", "None" ]
Gets locally cached CommandHistory for the specified command. :param .IssuedCommand issued_command: object representing a previously issued command. :rtype: .CommandHistory
[ "Gets", "locally", "cached", "CommandHistory", "for", "the", "specified", "command", "." ]
python
train
readbeyond/aeneas
aeneas/container.py
https://github.com/readbeyond/aeneas/blob/9d95535ad63eef4a98530cfdff033b8c35315ee1/aeneas/container.py#L322-L341
def compress(self, input_path):
    """
    Compress the contents of the given directory.

    :param string input_path: path of the input directory
    :raises: TypeError: if the container path has not been set
    :raises: ValueError: if ``input_path`` is not an existing directory
    :raises: OSError: if an error occurred compressing the given container
                      (e.g., empty file, damaged file, etc.)
    """
    self.log([u"Compressing '%s' into this container", input_path])
    if self.file_path is None:
        self.log_exc(u"The container path has not been set", None, True, TypeError)
    if self.actual_container is None:
        self.log_exc(u"The actual container object has not been set", None, True, TypeError)
    if not gf.directory_exists(input_path):
        self.log_exc(u"The input path is not an existing directory", None, True, ValueError)
    gf.ensure_parent_directory(input_path)
    self.actual_container.compress(input_path)
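A hedged usage sketch; the Container constructor argument shown is an assumption about the aeneas API, not verified against it:

# Sketch only: the constructor signature is assumed, paths are illustrative.
from aeneas.container import Container

container = Container("/tmp/output.zip")   # target archive path (assumed arg)
container.compress("/tmp/job_directory")   # directory whose contents are archived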
[ "def", "compress", "(", "self", ",", "input_path", ")", ":", "self", ".", "log", "(", "[", "u\"Compressing '%s' into this container\"", ",", "input_path", "]", ")", "if", "self", ".", "file_path", "is", "None", ":", "self", ".", "log_exc", "(", "u\"The container path has not been set\"", ",", "None", ",", "True", ",", "TypeError", ")", "if", "self", ".", "actual_container", "is", "None", ":", "self", ".", "log_exc", "(", "u\"The actual container object has not been set\"", ",", "None", ",", "True", ",", "TypeError", ")", "if", "not", "gf", ".", "directory_exists", "(", "input_path", ")", ":", "self", ".", "log_exc", "(", "u\"The input path is not an existing directory\"", ",", "None", ",", "True", ",", "ValueError", ")", "gf", ".", "ensure_parent_directory", "(", "input_path", ")", "self", ".", "actual_container", ".", "compress", "(", "input_path", ")" ]
Compress the contents of the given directory. :param string input_path: path of the input directory :raises: TypeError: if the container path has not been set :raises: ValueError: if ``input_path`` is not an existing directory :raises: OSError: if an error occurred compressing the given container (e.g., empty file, damaged file, etc.)
[ "Compress", "the", "contents", "of", "the", "given", "directory", "." ]
python
train
sys-git/certifiable
certifiable/utils.py
https://github.com/sys-git/certifiable/blob/a3c33c0d4f3ac2c53be9eded3fae633fa5f697f8/certifiable/utils.py#L123-L146
def certify_parameter(certifier, name, value, kwargs=None):
    """
    Internal certifier for kwargs passed to Certifiable public methods.

    :param callable certifier:
        The certifier to use.
    :param str name:
        The name of the kwarg.
    :param object value:
        The value of the kwarg.
    :param dict kwargs:
        Extra keyword arguments to pass to the certifier.
    :raises CertifierParamError:
        A parameter failed internal certification.
    """
    try:
        certifier(value, **kwargs or {})
    except CertifierError as err:
        six.raise_from(
            CertifierParamError(
                name,
                value,
            ),
            err)
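An illustrative call; it assumes `certify_int` is importable from the package top level and accepts a `min_value` keyword, and the parameter name and bound are made up for the example:

# Sketch: validates an internal parameter, wrapping failures in CertifierParamError.
from certifiable import certify_int  # assumed top-level export

certify_parameter(certify_int, 'timeout', 30, {'min_value': 0})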
[ "def", "certify_parameter", "(", "certifier", ",", "name", ",", "value", ",", "kwargs", "=", "None", ")", ":", "try", ":", "certifier", "(", "value", ",", "*", "*", "kwargs", "or", "{", "}", ")", "except", "CertifierError", "as", "err", ":", "six", ".", "raise_from", "(", "CertifierParamError", "(", "name", ",", "value", ",", ")", ",", "err", ")" ]
Internal certifier for kwargs passed to Certifiable public methods. :param callable certifier: The certifier to use :param str name: The name of the kwargs :param object value: The value of the kwarg. :param bool required: Is the param required. Default=False. :raises CertifierParamError: A parameter failed internal certification.
[ "Internal", "certifier", "for", "kwargs", "passed", "to", "Certifiable", "public", "methods", "." ]
python
train
inasafe/inasafe
safe/gui/tools/multi_exposure_dialog.py
https://github.com/inasafe/inasafe/blob/831d60abba919f6d481dc94a8d988cc205130724/safe/gui/tools/multi_exposure_dialog.py#L360-L468
def _create_exposure_combos(self):
    """Create one combobox for each exposure and insert them in the UI."""
    # Map registry may be invalid if QGIS is shutting down
    project = QgsProject.instance()
    canvas_layers = self.iface.mapCanvas().layers()
    # MapLayers returns a QMap<QString id, QgsMapLayer layer>
    layers = list(project.mapLayers().values())

    # Sort by name for tests
    layers.sort(key=lambda x: x.name())

    show_only_visible_layers = setting(
        'visibleLayersOnlyFlag', expected_type=bool)

    # For issue #618
    if len(layers) == 0:
        # self.message_viewer.setHtml(getting_started_message())
        return

    for one_exposure in exposure_all:
        label = QLabel(one_exposure['name'])
        combo = QComboBox()
        combo.setSizePolicy(QSizePolicy.Expanding, QSizePolicy.Fixed)
        combo.addItem(tr('Do not use'), None)
        self.form_layout.addRow(label, combo)
        self.combos_exposures[one_exposure['key']] = combo

    for layer in layers:
        if (show_only_visible_layers and (layer not in canvas_layers)):
            continue
        try:
            layer_purpose = self.keyword_io.read_keywords(
                layer, 'layer_purpose')
            keyword_version = str(self.keyword_io.read_keywords(
                layer, inasafe_keyword_version_key))
            if not is_keyword_version_supported(keyword_version):
                continue
        except BaseException:  # pylint: disable=W0702
            # continue ignoring this layer
            continue

        # See if there is a title for this layer, if not,
        # fallback to the layer's filename
        # noinspection PyBroadException
        try:
            title = self.keyword_io.read_keywords(layer, 'title')
        except (NoKeywordsFoundError, KeywordNotFoundError,
                MetadataReadError):
            # Skip if there are no keywords at all, or missing keyword
            continue
        except BaseException:  # pylint: disable=W0702
            pass
        else:
            # Lookup internationalised title if available
            title = self.tr(title)

        # Register title with layer
        set_layer_from_title = setting(
            'set_layer_from_title_flag', True, bool)
        if title and set_layer_from_title:
            if qgis_version() >= 21800:
                layer.setName(title)
            else:
                # QGIS 2.14
                layer.setLayerName(title)

        source = layer.id()
        icon = layer_icon(layer)

        if layer_purpose == layer_purpose_hazard['key']:
            add_ordered_combo_item(
                self.cbx_hazard, title, source, icon=icon)
        elif layer_purpose == layer_purpose_aggregation['key']:
            if self.use_selected_only:
                count_selected = layer.selectedFeatureCount()
                if count_selected > 0:
                    add_ordered_combo_item(
                        self.cbx_aggregation,
                        title,
                        source,
                        count_selected,
                        icon=icon
                    )
                else:
                    add_ordered_combo_item(
                        self.cbx_aggregation, title, source, None, icon)
            else:
                add_ordered_combo_item(
                    self.cbx_aggregation, title, source, None, icon)
        elif layer_purpose == layer_purpose_exposure['key']:
            # fetching the exposure
            try:
                exposure_type = self.keyword_io.read_keywords(
                    layer, layer_purpose_exposure['key'])
            except BaseException:  # pylint: disable=W0702
                # continue ignoring this layer
                continue
            for key, combo in list(self.combos_exposures.items()):
                if key == exposure_type:
                    add_ordered_combo_item(
                        combo, title, source, icon=icon)

    self.cbx_aggregation.addItem(entire_area_item_aggregation, None)
    for combo in list(self.combos_exposures.values()):
        combo.currentIndexChanged.connect(self.validate_impact_function)
[ "def", "_create_exposure_combos", "(", "self", ")", ":", "# Map registry may be invalid if QGIS is shutting down", "project", "=", "QgsProject", ".", "instance", "(", ")", "canvas_layers", "=", "self", ".", "iface", ".", "mapCanvas", "(", ")", ".", "layers", "(", ")", "# MapLayers returns a QMap<QString id, QgsMapLayer layer>", "layers", "=", "list", "(", "project", ".", "mapLayers", "(", ")", ".", "values", "(", ")", ")", "# Sort by name for tests", "layers", ".", "sort", "(", "key", "=", "lambda", "x", ":", "x", ".", "name", "(", ")", ")", "show_only_visible_layers", "=", "setting", "(", "'visibleLayersOnlyFlag'", ",", "expected_type", "=", "bool", ")", "# For issue #618", "if", "len", "(", "layers", ")", "==", "0", ":", "# self.message_viewer.setHtml(getting_started_message())", "return", "for", "one_exposure", "in", "exposure_all", ":", "label", "=", "QLabel", "(", "one_exposure", "[", "'name'", "]", ")", "combo", "=", "QComboBox", "(", ")", "combo", ".", "setSizePolicy", "(", "QSizePolicy", ".", "Expanding", ",", "QSizePolicy", ".", "Fixed", ")", "combo", ".", "addItem", "(", "tr", "(", "'Do not use'", ")", ",", "None", ")", "self", ".", "form_layout", ".", "addRow", "(", "label", ",", "combo", ")", "self", ".", "combos_exposures", "[", "one_exposure", "[", "'key'", "]", "]", "=", "combo", "for", "layer", "in", "layers", ":", "if", "(", "show_only_visible_layers", "and", "(", "layer", "not", "in", "canvas_layers", ")", ")", ":", "continue", "try", ":", "layer_purpose", "=", "self", ".", "keyword_io", ".", "read_keywords", "(", "layer", ",", "'layer_purpose'", ")", "keyword_version", "=", "str", "(", "self", ".", "keyword_io", ".", "read_keywords", "(", "layer", ",", "inasafe_keyword_version_key", ")", ")", "if", "not", "is_keyword_version_supported", "(", "keyword_version", ")", ":", "continue", "except", "BaseException", ":", "# pylint: disable=W0702", "# continue ignoring this layer", "continue", "# See if there is a title for this layer, if not,", "# fallback to the layer's filename", "# noinspection PyBroadException", "try", ":", "title", "=", "self", ".", "keyword_io", ".", "read_keywords", "(", "layer", ",", "'title'", ")", "except", "(", "NoKeywordsFoundError", ",", "KeywordNotFoundError", ",", "MetadataReadError", ")", ":", "# Skip if there are no keywords at all, or missing keyword", "continue", "except", "BaseException", ":", "# pylint: disable=W0702", "pass", "else", ":", "# Lookup internationalised title if available", "title", "=", "self", ".", "tr", "(", "title", ")", "# Register title with layer", "set_layer_from_title", "=", "setting", "(", "'set_layer_from_title_flag'", ",", "True", ",", "bool", ")", "if", "title", "and", "set_layer_from_title", ":", "if", "qgis_version", "(", ")", ">=", "21800", ":", "layer", ".", "setName", "(", "title", ")", "else", ":", "# QGIS 2.14", "layer", ".", "setLayerName", "(", "title", ")", "source", "=", "layer", ".", "id", "(", ")", "icon", "=", "layer_icon", "(", "layer", ")", "if", "layer_purpose", "==", "layer_purpose_hazard", "[", "'key'", "]", ":", "add_ordered_combo_item", "(", "self", ".", "cbx_hazard", ",", "title", ",", "source", ",", "icon", "=", "icon", ")", "elif", "layer_purpose", "==", "layer_purpose_aggregation", "[", "'key'", "]", ":", "if", "self", ".", "use_selected_only", ":", "count_selected", "=", "layer", ".", "selectedFeatureCount", "(", ")", "if", "count_selected", ">", "0", ":", "add_ordered_combo_item", "(", "self", ".", "cbx_aggregation", ",", "title", ",", "source", ",", "count_selected", ",", "icon", "=", 
"icon", ")", "else", ":", "add_ordered_combo_item", "(", "self", ".", "cbx_aggregation", ",", "title", ",", "source", ",", "None", ",", "icon", ")", "else", ":", "add_ordered_combo_item", "(", "self", ".", "cbx_aggregation", ",", "title", ",", "source", ",", "None", ",", "icon", ")", "elif", "layer_purpose", "==", "layer_purpose_exposure", "[", "'key'", "]", ":", "# fetching the exposure", "try", ":", "exposure_type", "=", "self", ".", "keyword_io", ".", "read_keywords", "(", "layer", ",", "layer_purpose_exposure", "[", "'key'", "]", ")", "except", "BaseException", ":", "# pylint: disable=W0702", "# continue ignoring this layer", "continue", "for", "key", ",", "combo", "in", "list", "(", "self", ".", "combos_exposures", ".", "items", "(", ")", ")", ":", "if", "key", "==", "exposure_type", ":", "add_ordered_combo_item", "(", "combo", ",", "title", ",", "source", ",", "icon", "=", "icon", ")", "self", ".", "cbx_aggregation", ".", "addItem", "(", "entire_area_item_aggregation", ",", "None", ")", "for", "combo", "in", "list", "(", "self", ".", "combos_exposures", ".", "values", "(", ")", ")", ":", "combo", ".", "currentIndexChanged", ".", "connect", "(", "self", ".", "validate_impact_function", ")" ]
Create one combobox for each exposure and insert them in the UI.
[ "Create", "one", "combobox", "for", "each", "exposure", "and", "insert", "them", "in", "the", "UI", "." ]
python
train
Othernet-Project/bottle-fdsend
setup.py
https://github.com/Othernet-Project/bottle-fdsend/blob/5ff27e605e8cf878e24c71c1446dcf5c8caf4898/setup.py#L12-L21
def read(fname):
    """ Return content of specified file """
    path = os.path.join(SCRIPTDIR, fname)
    if PY3:
        f = open(path, 'r', encoding='utf8')
    else:
        f = open(path, 'r')
    content = f.read()
    f.close()
    return content
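This is the usual setup.py pattern for pulling a long description from disk; a sketch of how the helper is typically used, where the file name is assumed:

# Typical use inside the same setup.py; README.rst is an assumed file name.
from setuptools import setup

setup(
    name='bottle-fdsend',
    long_description=read('README.rst'),
)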
[ "def", "read", "(", "fname", ")", ":", "path", "=", "os", ".", "path", ".", "join", "(", "SCRIPTDIR", ",", "fname", ")", "if", "PY3", ":", "f", "=", "open", "(", "path", ",", "'r'", ",", "encoding", "=", "'utf8'", ")", "else", ":", "f", "=", "open", "(", "path", ",", "'r'", ")", "content", "=", "f", ".", "read", "(", ")", "f", ".", "close", "(", ")", "return", "content" ]
Return content of specified file
[ "Return", "content", "of", "specified", "file" ]
python
train
AutomatedTester/Bugsy
bugsy/search.py
https://github.com/AutomatedTester/Bugsy/blob/ac14df84e744a148e81aeaae20a144bc5f3cebf1/bugsy/search.py#L48-L57
def component(self, *components):
    r"""
    When search() is called it will limit results to items in a component.

    :param component: items passed in will be turned into a list
    :returns: :class:`Search`
    """
    for component in components:
        self._component.append(component)
    return self
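Because the method returns the Search object, calls chain; a hedged sketch against the Bugsy API, with illustrative component names:

# Sketch: anonymous connection and component names are illustrative.
import bugsy

bugzilla = bugsy.Bugsy()
bugs = bugzilla.search_for.component('General', 'Bookmarks').search()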
[ "def", "component", "(", "self", ",", "*", "components", ")", ":", "for", "component", "in", "components", ":", "self", ".", "_component", ".", "append", "(", "component", ")", "return", "self" ]
r""" When search() is called it will limit results to items in a component. :param component: items passed in will be turned into a list :returns: :class:`Search`
[ "r", "When", "search", "()", "is", "called", "it", "will", "limit", "results", "to", "items", "in", "a", "component", "." ]
python
train
stuaxo/vext
vext/env/__init__.py
https://github.com/stuaxo/vext/blob/fa98a21ecfbbc1c3d1b84085d69ec42defdd2f69/vext/env/__init__.py#L49-L81
def in_venv():
    """
    :return: True if running from a virtualenv

    Has to detect the case where the python binary is run
    directly, so VIRTUAL_ENV may not be set
    """
    global _in_venv
    if _in_venv is not None:
        return _in_venv

    if not (os.path.isfile(ORIG_PREFIX_TXT) or os.path.isfile(PY_VENV_CFG)):
        logger.debug("in_venv no orig_prefix_txt [%s]", ORIG_PREFIX_TXT)
        logger.debug("in_venv no py_venv_cfg [%s]", PY_VENV_CFG)
        # TODO - check this is actually valid !
        _in_venv = False
        return _in_venv

    if 'VIRTUAL_ENV' in os.environ:
        logger.debug("in_venv VIRTUAL_ENV set.")
        _in_venv = True
    else:
        # Find first python in path ... if its not this one,
        # ...we are in a different python
        python = basename(sys.executable)
        for p in os.environ['PATH'].split(os.pathsep):
            py_path = join(p, python)
            if isfile(py_path):
                # Pass the format args separately; the original wrapped them in
                # a single tuple, which broke the two "%s" placeholders.
                logger.debug("in_venv py_at [%s] return: %s",
                             py_path, sys.executable != py_path)
                _in_venv = sys.executable != py_path
                break

    return _in_venv
[ "def", "in_venv", "(", ")", ":", "global", "_in_venv", "if", "_in_venv", "is", "not", "None", ":", "return", "_in_venv", "if", "not", "(", "os", ".", "path", ".", "isfile", "(", "ORIG_PREFIX_TXT", ")", "or", "os", ".", "path", ".", "isfile", "(", "PY_VENV_CFG", ")", ")", ":", "logger", ".", "debug", "(", "\"in_venv no orig_prefix_txt [%s]\"", ",", "ORIG_PREFIX_TXT", ")", "logger", ".", "debug", "(", "\"in_venv no py_venv_cfg [%s]\"", ",", "PY_VENV_CFG", ")", "# TODO - check this is actually valid !", "_in_venv", "=", "False", "return", "_in_venv", "if", "'VIRTUAL_ENV'", "in", "os", ".", "environ", ":", "logger", ".", "debug", "(", "\"in_venv VIRTUAL_ENV set.\"", ")", "_in_venv", "=", "True", "else", ":", "# Find first python in path ... if its not this one,", "# ...we are in a different python", "python", "=", "basename", "(", "sys", ".", "executable", ")", "for", "p", "in", "os", ".", "environ", "[", "'PATH'", "]", ".", "split", "(", "os", ".", "pathsep", ")", ":", "py_path", "=", "join", "(", "p", ",", "python", ")", "if", "isfile", "(", "py_path", ")", ":", "logger", ".", "debug", "(", "\"in_venv py_at [%s] return: %s\"", ",", "(", "py_path", ",", "sys", ".", "executable", "!=", "py_path", ")", ")", "_in_venv", "=", "sys", ".", "executable", "!=", "py_path", "break", "return", "_in_venv" ]
:return: True if in running from a virtualenv Has to detect the case where the python binary is run directly, so VIRTUAL_ENV may not be set
[ ":", "return", ":", "True", "if", "in", "running", "from", "a", "virtualenv" ]
python
train
goshuirc/irc
girc/events.py
https://github.com/goshuirc/irc/blob/d6a5e3e04d337566c009b087f108cd76f9e122cc/girc/events.py#L186-L386
def message_to_event(direction, message):
    """Prepare an ``RFC1459Message`` for event dispatch.

    We do this because we have to handle special things as well, such as
    CTCP and deconstructing verbs properly.
    """
    server = message.server

    # change numerics into nice names
    if message.verb in numerics:
        message.verb = numerics[message.verb]
    verb = message.verb.lower()

    # modify public/private verbs
    if verb == 'privmsg':
        if server.is_channel(message.params[0]):
            verb = 'pubmsg'
    if verb == 'notice':
        verb = 'privnotice'
        if server.is_channel(message.params[0]):
            verb = 'pubnotice'
    elif verb == 'mode':
        verb = 'umode'
        if server.is_channel(message.params[0]):
            verb = 'cmode'

    # this is the same as ircreactor does
    info = message.__dict__
    info['direction'] = direction
    info['verb'] = verb

    if 'time' in info['tags']:
        info['server_time'] = time.strptime(info['tags']['time'],
                                            '%Y-%m-%dT%H:%M:%S.%fZ')

    infos = [[verb, info], ]

    # handle shitty ctcp
    if verb in ('privmsg', 'pubmsg', 'privnotice', 'pubnotice'):
        infos = ctcp_unpack_message(info)

    # work on each info object separately
    i = -1
    while i < (len(infos) - 1):
        i += 1
        name = infos[i][NAME_ATTR]

        # standard message attributes
        for attr, param_map in _verb_param_map.items():
            # escaping
            escaped = False
            if attr.startswith('escaped_'):
                # strip the literal prefix; the original used str.lstrip(),
                # which strips a character set and could eat leading letters
                attr = attr[len('escaped_'):]
                escaped = True
            for param_number, verbs in param_map.items():
                if len(infos[i][INFO_ATTR]['params']) > param_number and name in verbs:
                    value = infos[i][INFO_ATTR]['params'][param_number]
                    if escaped:
                        value = escape(value)
                    infos[i][INFO_ATTR][attr] = value

        # custom processing
        if name == 'welcome':
            # for servers where a low nicklen makes them silently truncate our nick
            server.nick = server.istring(infos[i][INFO_ATTR]['nick'])

        # custom message attributes
        if name == 'ctcp':
            if infos[i][INFO_ATTR]['ctcp_verb'] == 'action':
                info = dict(infos[i][INFO_ATTR])
                info['message'] = info['ctcp_text']
                if server.is_channel(info['target']):
                    name = 'pubaction'
                    info['channel'] = info['target']
                else:
                    name = 'privaction'
                infos.append([name, info])

        if name == 'umode' and len(infos[i][INFO_ATTR]['params']) > 1:
            modestring = infos[i][INFO_ATTR]['params'][1:]
            modes = parse_modes(modestring)
            infos[i][INFO_ATTR]['modestring'] = ' '.join(modestring).strip()
            infos[i][INFO_ATTR]['modes'] = modes

        if name == 'cmode' and len(infos[i][INFO_ATTR]['params']) > 1:
            modestring = infos[i][INFO_ATTR]['params'][1:]
            chanmodes = server.features.get('chanmodes')
            prefixes = list(server.features.get('prefix').keys())
            modes = parse_modes(modestring, chanmodes, prefixes)
            infos[i][INFO_ATTR]['modestring'] = ' '.join(modestring).strip()
            infos[i][INFO_ATTR]['modes'] = modes

        if name == 'cmodeis':
            if len(infos[i][INFO_ATTR]['params']) > 2:
                modestring = infos[i][INFO_ATTR]['params'][2:]
                chanmodes = server.features.get('chanmodes')
                modes = parse_modes(modestring, chanmodes)
                infos[i][INFO_ATTR]['modestring'] = ' '.join(modestring).strip()
                infos[i][INFO_ATTR]['modes'] = modes
            else:
                infos[i][INFO_ATTR]['modestring'] = ''
                infos[i][INFO_ATTR]['modes'] = []

        if name == 'namreply':
            channel_name = infos[i][INFO_ATTR]['params'][2]
            server.info.create_channel(channel_name)
            channel = server.info.channels.get(channel_name)

            nice_names = []
            channel_prefixes = {}

            if len(infos[i][INFO_ATTR]['params']) > 3:
                raw_names = infos[i][INFO_ATTR]['params'][3].split(' ')
            else:
                raw_names = []

            for name in raw_names:
                # InspIRCd sends us an empty last param because they are cool
                if not len(name):
                    continue

                prefixes = ''
                while name[0] in server.features.available['prefix'].values():
                    prefixes += name[0]
                    name = name[1:]

                nick = NickMask(name).nick
                server.info.create_user(nick)

                nice_names.append(name)
                server.info.create_user(name)
                user = server.info.users.get(nick)
                channel_prefixes[user] = prefixes

                channel.add_user(nick, prefixes=prefixes)

            infos[i][INFO_ATTR]['users'] = ','.join(nice_names)
            infos[i][INFO_ATTR]['prefixes'] = channel_prefixes

        # source / target mapping
        for attr in ('source', 'target', 'channel'):
            if attr in infos[i][INFO_ATTR] and infos[i][INFO_ATTR][attr]:
                source = infos[i][INFO_ATTR][attr]
                if server.is_channel(source):
                    server.info.create_channel(source)
                    infos[i][INFO_ATTR][attr] = server.info.channels.get(source)
                elif '.' in source and server.is_server(source):
                    server.info.create_server(source)
                    infos[i][INFO_ATTR][attr] = server.info.servers.get(source)
                elif server.is_nick(source):
                    server.info.create_user(source)
                    infos[i][INFO_ATTR][attr] = server.info.users.get(NickMask(source).nick)
                else:
                    # we assume this is a user with messed up characters
                    server.info.create_user(source)
                    infos[i][INFO_ATTR][attr] = server.info.users.get(NickMask(source).nick)

        if 'channels' in infos[i][INFO_ATTR] and infos[i][INFO_ATTR]['channels']:
            channels = []
            for chan in infos[i][INFO_ATTR]['channels'].split(','):
                server.info.create_channel(chan)
                channels.append(server.info.channels.get(chan))
            infos[i][INFO_ATTR]['channels'] = channels

        if 'users' in infos[i][INFO_ATTR] and infos[i][INFO_ATTR]['users']:
            users = []
            for user in infos[i][INFO_ATTR]['users'].split(','):
                server.info.create_user(user)
                users.append(server.info.users.get(NickMask(user).nick))
            infos[i][INFO_ATTR]['users'] = users

        # custom from_to attribute for ease in bots
        verb = infos[i][INFO_ATTR]['verb']
        dir = infos[i][INFO_ATTR]['direction']
        source = infos[i][INFO_ATTR].get('source')
        target = infos[i][INFO_ATTR].get('target')

        if verb in ['pubmsg', 'pubnotice', 'pubaction']:
            infos[i][INFO_ATTR]['from_to'] = target
        elif verb in ['privmsg', 'privnotice', 'privaction']:
            if dir == 'out':
                infos[i][INFO_ATTR]['from_to'] = target
            elif dir == 'in':
                if 'echo-message' in server.capabilities.enabled:
                    infos[i][INFO_ATTR]['from_to'] = target
                else:
                    infos[i][INFO_ATTR]['from_to'] = source

        if 'from_to' in infos[i][INFO_ATTR] and infos[i][INFO_ATTR]['from_to'] is None:
            del infos[i][INFO_ATTR]['from_to']

        # convenience function so unnecessary messages can get ignored easily
        infos[i][INFO_ATTR]['will_be_echod'] = False
        if verb in ['pubmsg', 'pubnotice', 'privmsg', 'privnotice']:
            if dir == 'out' and 'echo-message' in server.capabilities.enabled:
                infos[i][INFO_ATTR]['will_be_echod'] = True

        if 'from_to' in infos[i][INFO_ATTR] and infos[i][INFO_ATTR]['from_to'].is_server:
            del infos[i][INFO_ATTR]['from_to']

    return infos
[ "def", "message_to_event", "(", "direction", ",", "message", ")", ":", "server", "=", "message", ".", "server", "# change numerics into nice names", "if", "message", ".", "verb", "in", "numerics", ":", "message", ".", "verb", "=", "numerics", "[", "message", ".", "verb", "]", "verb", "=", "message", ".", "verb", ".", "lower", "(", ")", "# modify public/private verbs", "if", "verb", "==", "'privmsg'", ":", "if", "server", ".", "is_channel", "(", "message", ".", "params", "[", "0", "]", ")", ":", "verb", "=", "'pubmsg'", "if", "verb", "==", "'notice'", ":", "verb", "=", "'privnotice'", "if", "server", ".", "is_channel", "(", "message", ".", "params", "[", "0", "]", ")", ":", "verb", "=", "'pubnotice'", "elif", "verb", "==", "'mode'", ":", "verb", "=", "'umode'", "if", "server", ".", "is_channel", "(", "message", ".", "params", "[", "0", "]", ")", ":", "verb", "=", "'cmode'", "# this is the same as ircreactor does", "info", "=", "message", ".", "__dict__", "info", "[", "'direction'", "]", "=", "direction", "info", "[", "'verb'", "]", "=", "verb", "if", "'time'", "in", "info", "[", "'tags'", "]", ":", "info", "[", "'server_time'", "]", "=", "time", ".", "strptime", "(", "info", "[", "'tags'", "]", "[", "'time'", "]", ",", "'%Y-%m-%dT%H:%M:%S.%fZ'", ")", "infos", "=", "[", "[", "verb", ",", "info", "]", ",", "]", "# handle shitty ctcp", "if", "verb", "in", "(", "'privmsg'", ",", "'pubmsg'", ",", "'privnotice'", ",", "'pubnotice'", ")", ":", "infos", "=", "ctcp_unpack_message", "(", "info", ")", "# work on each info object separately", "i", "=", "-", "1", "while", "i", "<", "(", "len", "(", "infos", ")", "-", "1", ")", ":", "i", "+=", "1", "name", "=", "infos", "[", "i", "]", "[", "NAME_ATTR", "]", "# standard message attributes", "for", "attr", ",", "param_map", "in", "_verb_param_map", ".", "items", "(", ")", ":", "# escaping", "escaped", "=", "False", "if", "attr", ".", "startswith", "(", "'escaped_'", ")", ":", "attr", "=", "attr", ".", "lstrip", "(", "'escaped_'", ")", "escaped", "=", "True", "for", "param_number", ",", "verbs", "in", "param_map", ".", "items", "(", ")", ":", "if", "len", "(", "infos", "[", "i", "]", "[", "INFO_ATTR", "]", "[", "'params'", "]", ")", ">", "param_number", "and", "name", "in", "verbs", ":", "value", "=", "infos", "[", "i", "]", "[", "INFO_ATTR", "]", "[", "'params'", "]", "[", "param_number", "]", "if", "escaped", ":", "value", "=", "escape", "(", "value", ")", "infos", "[", "i", "]", "[", "INFO_ATTR", "]", "[", "attr", "]", "=", "value", "# custom processing", "if", "name", "==", "'welcome'", ":", "# for servers where a low nicklen makes them silently truncate our nick", "server", ".", "nick", "=", "server", ".", "istring", "(", "infos", "[", "i", "]", "[", "INFO_ATTR", "]", "[", "'nick'", "]", ")", "# custom message attributes", "if", "name", "==", "'ctcp'", ":", "if", "infos", "[", "i", "]", "[", "INFO_ATTR", "]", "[", "'ctcp_verb'", "]", "==", "'action'", ":", "info", "=", "dict", "(", "infos", "[", "i", "]", "[", "INFO_ATTR", "]", ")", "info", "[", "'message'", "]", "=", "info", "[", "'ctcp_text'", "]", "if", "server", ".", "is_channel", "(", "info", "[", "'target'", "]", ")", ":", "name", "=", "'pubaction'", "info", "[", "'channel'", "]", "=", "info", "[", "'target'", "]", "else", ":", "name", "=", "'privaction'", "infos", ".", "append", "(", "[", "name", ",", "info", "]", ")", "if", "name", "==", "'umode'", "and", "len", "(", "infos", "[", "i", "]", "[", "INFO_ATTR", "]", "[", "'params'", "]", ")", ">", "1", ":", "modestring", "=", "infos", "[", "i", 
"]", "[", "INFO_ATTR", "]", "[", "'params'", "]", "[", "1", ":", "]", "modes", "=", "parse_modes", "(", "modestring", ")", "infos", "[", "i", "]", "[", "INFO_ATTR", "]", "[", "'modestring'", "]", "=", "' '", ".", "join", "(", "modestring", ")", ".", "strip", "(", ")", "infos", "[", "i", "]", "[", "INFO_ATTR", "]", "[", "'modes'", "]", "=", "modes", "if", "name", "==", "'cmode'", "and", "len", "(", "infos", "[", "i", "]", "[", "INFO_ATTR", "]", "[", "'params'", "]", ")", ">", "1", ":", "modestring", "=", "infos", "[", "i", "]", "[", "INFO_ATTR", "]", "[", "'params'", "]", "[", "1", ":", "]", "chanmodes", "=", "server", ".", "features", ".", "get", "(", "'chanmodes'", ")", "prefixes", "=", "list", "(", "server", ".", "features", ".", "get", "(", "'prefix'", ")", ".", "keys", "(", ")", ")", "modes", "=", "parse_modes", "(", "modestring", ",", "chanmodes", ",", "prefixes", ")", "infos", "[", "i", "]", "[", "INFO_ATTR", "]", "[", "'modestring'", "]", "=", "' '", ".", "join", "(", "modestring", ")", ".", "strip", "(", ")", "infos", "[", "i", "]", "[", "INFO_ATTR", "]", "[", "'modes'", "]", "=", "modes", "if", "name", "==", "'cmodeis'", ":", "if", "len", "(", "infos", "[", "i", "]", "[", "INFO_ATTR", "]", "[", "'params'", "]", ")", ">", "2", ":", "modestring", "=", "infos", "[", "i", "]", "[", "INFO_ATTR", "]", "[", "'params'", "]", "[", "2", ":", "]", "chanmodes", "=", "server", ".", "features", ".", "get", "(", "'chanmodes'", ")", "modes", "=", "parse_modes", "(", "modestring", ",", "chanmodes", ")", "infos", "[", "i", "]", "[", "INFO_ATTR", "]", "[", "'modestring'", "]", "=", "' '", ".", "join", "(", "modestring", ")", ".", "strip", "(", ")", "infos", "[", "i", "]", "[", "INFO_ATTR", "]", "[", "'modes'", "]", "=", "modes", "else", ":", "infos", "[", "i", "]", "[", "INFO_ATTR", "]", "[", "'modestring'", "]", "=", "''", "infos", "[", "i", "]", "[", "INFO_ATTR", "]", "[", "'modes'", "]", "=", "[", "]", "if", "name", "==", "'namreply'", ":", "channel_name", "=", "infos", "[", "i", "]", "[", "INFO_ATTR", "]", "[", "'params'", "]", "[", "2", "]", "server", ".", "info", ".", "create_channel", "(", "channel_name", ")", "channel", "=", "server", ".", "info", ".", "channels", ".", "get", "(", "channel_name", ")", "nice_names", "=", "[", "]", "channel_prefixes", "=", "{", "}", "if", "len", "(", "infos", "[", "i", "]", "[", "INFO_ATTR", "]", "[", "'params'", "]", ")", ">", "3", ":", "raw_names", "=", "infos", "[", "i", "]", "[", "INFO_ATTR", "]", "[", "'params'", "]", "[", "3", "]", ".", "split", "(", "' '", ")", "else", ":", "raw_names", "=", "[", "]", "for", "name", "in", "raw_names", ":", "# InspIRCd sends us an empty last param because they are cool", "if", "not", "len", "(", "name", ")", ":", "continue", "prefixes", "=", "''", "while", "name", "[", "0", "]", "in", "server", ".", "features", ".", "available", "[", "'prefix'", "]", ".", "values", "(", ")", ":", "prefixes", "+=", "name", "[", "0", "]", "name", "=", "name", "[", "1", ":", "]", "nick", "=", "NickMask", "(", "name", ")", ".", "nick", "server", ".", "info", ".", "create_user", "(", "nick", ")", "nice_names", ".", "append", "(", "name", ")", "server", ".", "info", ".", "create_user", "(", "name", ")", "user", "=", "server", ".", "info", ".", "users", ".", "get", "(", "nick", ")", "channel_prefixes", "[", "user", "]", "=", "prefixes", "channel", ".", "add_user", "(", "nick", ",", "prefixes", "=", "prefixes", ")", "infos", "[", "i", "]", "[", "INFO_ATTR", "]", "[", "'users'", "]", "=", "','", ".", "join", "(", "nice_names", ")", "infos", "[", 
"i", "]", "[", "INFO_ATTR", "]", "[", "'prefixes'", "]", "=", "channel_prefixes", "# source / target mapping", "for", "attr", "in", "(", "'source'", ",", "'target'", ",", "'channel'", ")", ":", "if", "attr", "in", "infos", "[", "i", "]", "[", "INFO_ATTR", "]", "and", "infos", "[", "i", "]", "[", "INFO_ATTR", "]", "[", "attr", "]", ":", "source", "=", "infos", "[", "i", "]", "[", "INFO_ATTR", "]", "[", "attr", "]", "if", "server", ".", "is_channel", "(", "source", ")", ":", "server", ".", "info", ".", "create_channel", "(", "source", ")", "infos", "[", "i", "]", "[", "INFO_ATTR", "]", "[", "attr", "]", "=", "server", ".", "info", ".", "channels", ".", "get", "(", "source", ")", "elif", "'.'", "in", "source", "and", "server", ".", "is_server", "(", "source", ")", ":", "server", ".", "info", ".", "create_server", "(", "source", ")", "infos", "[", "i", "]", "[", "INFO_ATTR", "]", "[", "attr", "]", "=", "server", ".", "info", ".", "servers", ".", "get", "(", "source", ")", "elif", "server", ".", "is_nick", "(", "source", ")", ":", "server", ".", "info", ".", "create_user", "(", "source", ")", "infos", "[", "i", "]", "[", "INFO_ATTR", "]", "[", "attr", "]", "=", "server", ".", "info", ".", "users", ".", "get", "(", "NickMask", "(", "source", ")", ".", "nick", ")", "else", ":", "# we assume this is a user with messed up characters", "server", ".", "info", ".", "create_user", "(", "source", ")", "infos", "[", "i", "]", "[", "INFO_ATTR", "]", "[", "attr", "]", "=", "server", ".", "info", ".", "users", ".", "get", "(", "NickMask", "(", "source", ")", ".", "nick", ")", "if", "'channels'", "in", "infos", "[", "i", "]", "[", "INFO_ATTR", "]", "and", "infos", "[", "i", "]", "[", "INFO_ATTR", "]", "[", "'channels'", "]", ":", "channels", "=", "[", "]", "for", "chan", "in", "infos", "[", "i", "]", "[", "INFO_ATTR", "]", "[", "'channels'", "]", ".", "split", "(", "','", ")", ":", "server", ".", "info", ".", "create_channel", "(", "chan", ")", "channels", ".", "append", "(", "server", ".", "info", ".", "channels", ".", "get", "(", "chan", ")", ")", "infos", "[", "i", "]", "[", "INFO_ATTR", "]", "[", "'channels'", "]", "=", "channels", "if", "'users'", "in", "infos", "[", "i", "]", "[", "INFO_ATTR", "]", "and", "infos", "[", "i", "]", "[", "INFO_ATTR", "]", "[", "'users'", "]", ":", "users", "=", "[", "]", "for", "user", "in", "infos", "[", "i", "]", "[", "INFO_ATTR", "]", "[", "'users'", "]", ".", "split", "(", "','", ")", ":", "server", ".", "info", ".", "create_user", "(", "user", ")", "users", ".", "append", "(", "server", ".", "info", ".", "users", ".", "get", "(", "NickMask", "(", "user", ")", ".", "nick", ")", ")", "infos", "[", "i", "]", "[", "INFO_ATTR", "]", "[", "'users'", "]", "=", "users", "# custom from_to attribute for ease in bots", "verb", "=", "infos", "[", "i", "]", "[", "INFO_ATTR", "]", "[", "'verb'", "]", "dir", "=", "infos", "[", "i", "]", "[", "INFO_ATTR", "]", "[", "'direction'", "]", "source", "=", "infos", "[", "i", "]", "[", "INFO_ATTR", "]", ".", "get", "(", "'source'", ")", "target", "=", "infos", "[", "i", "]", "[", "INFO_ATTR", "]", ".", "get", "(", "'target'", ")", "if", "verb", "in", "[", "'pubmsg'", ",", "'pubnotice'", ",", "'pubaction'", "]", ":", "infos", "[", "i", "]", "[", "INFO_ATTR", "]", "[", "'from_to'", "]", "=", "target", "elif", "verb", "in", "[", "'privmsg'", ",", "'privnotice'", ",", "'privaction'", "]", ":", "if", "dir", "==", "'out'", ":", "infos", "[", "i", "]", "[", "INFO_ATTR", "]", "[", "'from_to'", "]", "=", "target", "elif", "dir", "==", "'in'", ":", 
"if", "'echo-message'", "in", "server", ".", "capabilities", ".", "enabled", ":", "infos", "[", "i", "]", "[", "INFO_ATTR", "]", "[", "'from_to'", "]", "=", "target", "else", ":", "infos", "[", "i", "]", "[", "INFO_ATTR", "]", "[", "'from_to'", "]", "=", "source", "if", "'from_to'", "in", "infos", "[", "i", "]", "[", "INFO_ATTR", "]", "and", "infos", "[", "i", "]", "[", "INFO_ATTR", "]", "[", "'from_to'", "]", "is", "None", ":", "del", "infos", "[", "i", "]", "[", "INFO_ATTR", "]", "[", "'from_to'", "]", "# convenience function so unnecessary messages can get ignored easily", "infos", "[", "i", "]", "[", "INFO_ATTR", "]", "[", "'will_be_echod'", "]", "=", "False", "if", "verb", "in", "[", "'pubmsg'", ",", "'pubnotice'", ",", "'privmsg'", ",", "'privnotice'", "]", ":", "if", "dir", "==", "'out'", "and", "'echo-message'", "in", "server", ".", "capabilities", ".", "enabled", ":", "infos", "[", "i", "]", "[", "INFO_ATTR", "]", "[", "'will_be_echod'", "]", "=", "True", "if", "'from_to'", "in", "infos", "[", "i", "]", "[", "INFO_ATTR", "]", "and", "infos", "[", "i", "]", "[", "INFO_ATTR", "]", "[", "'from_to'", "]", ".", "is_server", ":", "del", "infos", "[", "i", "]", "[", "INFO_ATTR", "]", "[", "'from_to'", "]", "return", "infos" ]
Prepare an ``RFC1459Message`` for event dispatch. We do this because we have to handle special things as well, such as CTCP and deconstructing verbs properly.
[ "Prepare", "an", "RFC1459Message", "for", "event", "dispatch", "." ]
python
train
apple/turicreate
src/external/coremltools_wrap/coremltools/coremltools/models/neural_network.py
https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/external/coremltools_wrap/coremltools/coremltools/models/neural_network.py#L723-L764
def add_upsample(self, name, scaling_factor_h, scaling_factor_w, input_name, output_name, mode='NN'):
    """
    Add upsample layer to the model.

    Parameters
    ----------
    name: str
        The name of this layer.
    scaling_factor_h: int
        Scaling factor on the vertical direction.
    scaling_factor_w: int
        Scaling factor on the horizontal direction.
    input_name: str
        The input blob name of this layer.
    output_name: str
        The output blob name of this layer.
    mode: str
        Following values are supported:
        'NN': nearest neighbour
        'BILINEAR': bilinear interpolation

    See Also
    --------
    add_sequence_repeat, add_elementwise
    """
    spec = self.spec
    nn_spec = self.nn_spec

    # Add a new upsample layer (the original comment said "inner-product",
    # a copy-paste leftover from another builder method)
    spec_layer = nn_spec.layers.add()
    spec_layer.name = name
    spec_layer.input.append(input_name)
    spec_layer.output.append(output_name)
    spec_layer_params = spec_layer.upsample
    spec_layer_params.scalingFactor.append(scaling_factor_h)
    spec_layer_params.scalingFactor.append(scaling_factor_w)
    if mode == 'NN':
        spec_layer_params.mode = _NeuralNetwork_pb2.UpsampleLayerParams.InterpolationMode.Value('NN')
    elif mode == 'BILINEAR':
        spec_layer_params.mode = _NeuralNetwork_pb2.UpsampleLayerParams.InterpolationMode.Value('BILINEAR')
    else:
        raise ValueError("Unsupported upsampling mode %s" % mode)
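A hedged sketch of calling this on a builder; the blob names are illustrative and the builder construction is elided:

# Sketch: doubles spatial resolution with nearest-neighbour upsampling.
# 'builder' is assumed to be an existing NeuralNetworkBuilder instance.
builder.add_upsample(name='upsample_1',
                     scaling_factor_h=2,
                     scaling_factor_w=2,
                     input_name='features',
                     output_name='features_upsampled',
                     mode='NN')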
[ "def", "add_upsample", "(", "self", ",", "name", ",", "scaling_factor_h", ",", "scaling_factor_w", ",", "input_name", ",", "output_name", ",", "mode", "=", "'NN'", ")", ":", "spec", "=", "self", ".", "spec", "nn_spec", "=", "self", ".", "nn_spec", "# Add a new inner-product layer", "spec_layer", "=", "nn_spec", ".", "layers", ".", "add", "(", ")", "spec_layer", ".", "name", "=", "name", "spec_layer", ".", "input", ".", "append", "(", "input_name", ")", "spec_layer", ".", "output", ".", "append", "(", "output_name", ")", "spec_layer_params", "=", "spec_layer", ".", "upsample", "spec_layer_params", ".", "scalingFactor", ".", "append", "(", "scaling_factor_h", ")", "spec_layer_params", ".", "scalingFactor", ".", "append", "(", "scaling_factor_w", ")", "if", "mode", "==", "'NN'", ":", "spec_layer_params", ".", "mode", "=", "_NeuralNetwork_pb2", ".", "UpsampleLayerParams", ".", "InterpolationMode", ".", "Value", "(", "'NN'", ")", "elif", "mode", "==", "'BILINEAR'", ":", "spec_layer_params", ".", "mode", "=", "_NeuralNetwork_pb2", ".", "UpsampleLayerParams", ".", "InterpolationMode", ".", "Value", "(", "'BILINEAR'", ")", "else", ":", "raise", "ValueError", "(", "\"Unsupported upsampling mode %s\"", "%", "mode", ")" ]
Add upsample layer to the model. Parameters ---------- name: str The name of this layer. scaling_factor_h: int Scaling factor on the vertical direction. scaling_factor_w: int Scaling factor on the horizontal direction. input_name: str The input blob name of this layer. output_name: str The output blob name of this layer. mode: str Following values are supported: 'NN': nearest neighbour 'BILINEAR' : bilinear interpolation See Also -------- add_sequence_repeat, add_elementwise
[ "Add", "upsample", "layer", "to", "the", "model", "." ]
python
train
PixxxeL/python-html-purifier
purifier/purifier.py
https://github.com/PixxxeL/python-html-purifier/blob/00f38b6e7f66be02aa21100949f9ffa5be661e8d/purifier/purifier.py#L105-L120
def __set_whitelist(self, whitelist=None):
    """
    Update the default white list with the customer's white list
    """
    # tag names as keys and lists of enabled attributes as values
    self.whitelist = {}
    # tags that are removed together with their contents
    self.sanitizelist = ['script', 'style']

    if isinstance(whitelist, dict) and '*' in whitelist.keys():
        self.isNotPurify = True
        self.whitelist_keys = []
        return
    else:
        self.isNotPurify = False
        self.whitelist.update(whitelist or {})
        self.whitelist_keys = self.whitelist.keys()
[ "def", "__set_whitelist", "(", "self", ",", "whitelist", "=", "None", ")", ":", "# add tag's names as key and list of enabled attributes as value for defaults", "self", ".", "whitelist", "=", "{", "}", "# tags that removed with contents", "self", ".", "sanitizelist", "=", "[", "'script'", ",", "'style'", "]", "if", "isinstance", "(", "whitelist", ",", "dict", ")", "and", "'*'", "in", "whitelist", ".", "keys", "(", ")", ":", "self", ".", "isNotPurify", "=", "True", "self", ".", "whitelist_keys", "=", "[", "]", "return", "else", ":", "self", ".", "isNotPurify", "=", "False", "self", ".", "whitelist", ".", "update", "(", "whitelist", "or", "{", "}", ")", "self", ".", "whitelist_keys", "=", "self", ".", "whitelist", ".", "keys", "(", ")" ]
Update default white list by customer white list
[ "Update", "default", "white", "list", "by", "customer", "white", "list" ]
python
train
annoviko/pyclustering
pyclustering/container/kdtree.py
https://github.com/annoviko/pyclustering/blob/98aa0dd89fd36f701668fb1eb29c8fb5662bf7d0/pyclustering/container/kdtree.py#L520-L544
def __recursive_nearest_nodes(self, point, distance, sqrt_distance, node_head, best_nodes):
    """!
    @brief Returns list of neighbors as tuples (distance, node) located in the area covered by distance.

    @param[in] point (list): Coordinates that are considered as the centroid for searching.
    @param[in] distance (double): Distance from the center where searching is performed.
    @param[in] sqrt_distance (double): Square distance from the center where searching is performed.
    @param[in] node_head (node): Node from which searching is performed.
    @param[in|out] best_nodes (list): List of found nodes.
    """
    if node_head.right is not None:
        minimum = node_head.data[node_head.disc] - distance
        if point[node_head.disc] >= minimum:
            self.__recursive_nearest_nodes(point, distance, sqrt_distance, node_head.right, best_nodes)

    if node_head.left is not None:
        maximum = node_head.data[node_head.disc] + distance
        if point[node_head.disc] < maximum:
            self.__recursive_nearest_nodes(point, distance, sqrt_distance, node_head.left, best_nodes)

    candidate_distance = euclidean_distance_square(point, node_head.data)
    if candidate_distance <= sqrt_distance:
        best_nodes.append((candidate_distance, node_head))
[ "def", "__recursive_nearest_nodes", "(", "self", ",", "point", ",", "distance", ",", "sqrt_distance", ",", "node_head", ",", "best_nodes", ")", ":", "if", "node_head", ".", "right", "is", "not", "None", ":", "minimum", "=", "node_head", ".", "data", "[", "node_head", ".", "disc", "]", "-", "distance", "if", "point", "[", "node_head", ".", "disc", "]", ">=", "minimum", ":", "self", ".", "__recursive_nearest_nodes", "(", "point", ",", "distance", ",", "sqrt_distance", ",", "node_head", ".", "right", ",", "best_nodes", ")", "if", "node_head", ".", "left", "is", "not", "None", ":", "maximum", "=", "node_head", ".", "data", "[", "node_head", ".", "disc", "]", "+", "distance", "if", "point", "[", "node_head", ".", "disc", "]", "<", "maximum", ":", "self", ".", "__recursive_nearest_nodes", "(", "point", ",", "distance", ",", "sqrt_distance", ",", "node_head", ".", "left", ",", "best_nodes", ")", "candidate_distance", "=", "euclidean_distance_square", "(", "point", ",", "node_head", ".", "data", ")", "if", "candidate_distance", "<=", "sqrt_distance", ":", "best_nodes", ".", "append", "(", "(", "candidate_distance", ",", "node_head", ")", ")" ]
! @brief Returns list of neighbors such as tuple (distance, node) that is located in area that is covered by distance. @param[in] point (list): Coordinates that is considered as centroind for searching @param[in] distance (double): Distance from the center where seaching is performed. @param[in] sqrt_distance (double): Square distance from the center where searching is performed. @param[in] node_head (node): Node from that searching is performed. @param[in|out] best_nodes (list): List of founded nodes.
[ "!" ]
python
valid
dbcli/athenacli
athenacli/packages/parseutils.py
https://github.com/dbcli/athenacli/blob/bcab59e4953145866430083e902ed4d042d4ebba/athenacli/packages/parseutils.py#L137-L151
def extract_tables(sql):
    """Extract the table names from an SQL statement.

    Returns a list of (schema, table, alias) tuples
    """
    parsed = sqlparse.parse(sql)
    if not parsed:
        return []

    # INSERT statements must stop looking for tables at the sign of first
    # Punctuation. eg: INSERT INTO abc (col1, col2) VALUES (1, 2)
    # abc is the table name, but if we don't stop at the first lparen, then
    # we'll identify abc, col1 and col2 as table names.
    insert_stmt = parsed[0].token_first().value.lower() == 'insert'
    stream = extract_from_part(parsed[0], stop_at_punctuation=insert_stmt)
    return list(extract_table_identifiers(stream))
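A worked example of the return shape; the output shown is an assumption based on the docstring's (schema, table, alias) contract, not a verified run:

# Sketch: expected to yield one tuple per referenced table.
tables = extract_tables('SELECT * FROM events e JOIN users u ON u.id = e.user_id')
# tables == [(None, 'events', 'e'), (None, 'users', 'u')]  (assumed ordering)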
[ "def", "extract_tables", "(", "sql", ")", ":", "parsed", "=", "sqlparse", ".", "parse", "(", "sql", ")", "if", "not", "parsed", ":", "return", "[", "]", "# INSERT statements must stop looking for tables at the sign of first", "# Punctuation. eg: INSERT INTO abc (col1, col2) VALUES (1, 2)", "# abc is the table name, but if we don't stop at the first lparen, then", "# we'll identify abc, col1 and col2 as table names.", "insert_stmt", "=", "parsed", "[", "0", "]", ".", "token_first", "(", ")", ".", "value", ".", "lower", "(", ")", "==", "'insert'", "stream", "=", "extract_from_part", "(", "parsed", "[", "0", "]", ",", "stop_at_punctuation", "=", "insert_stmt", ")", "return", "list", "(", "extract_table_identifiers", "(", "stream", ")", ")" ]
Extract the table names from an SQL statement. Returns a list of (schema, table, alias) tuples
[ "Extract", "the", "table", "names", "from", "an", "SQL", "statment", ".", "Returns", "a", "list", "of", "(", "schema", "table", "alias", ")", "tuples" ]
python
train
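A hedged usage sketch of the function above; the import path follows the entry's own module path, and the printed tuples illustrate the documented (schema, table, alias) shape rather than guaranteed output.

from athenacli.packages.parseutils import extract_tables

print(extract_tables('SELECT * FROM abc a JOIN def.ghi g ON a.id = g.id'))
# expected shape: [(None, 'abc', 'a'), ('def', 'ghi', 'g')]
print(extract_tables('INSERT INTO abc (col1, col2) VALUES (1, 2)'))
# stopping at the first parenthesis keeps col1/col2 out: [(None, 'abc', None)]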
sebp/scikit-survival
sksurv/kernels/clinical.py
https://github.com/sebp/scikit-survival/blob/cfc99fd20454cdd6f4f20fe331b39f2191ccaabc/sksurv/kernels/clinical.py#L61-L107
def clinical_kernel(x, y=None): """Computes clinical kernel The clinical kernel distinguishes between continuous, ordinal, and nominal variables. Parameters ---------- x : pandas.DataFrame, shape = (n_samples_x, n_features) Training data y : pandas.DataFrame, shape = (n_samples_y, n_features) Testing data Returns ------- kernel : array, shape = (n_samples_x, n_samples_y) Kernel matrix. Values are normalized to lie within [0, 1]. References ---------- .. [1] Daemen, A., De Moor, B., "Development of a kernel function for clinical data". Annual International Conference of the IEEE Engineering in Medicine and Biology Society, 5913-7, 2009 """ if y is not None: if x.shape[1] != y.shape[1]: raise ValueError('x and y have different number of features') if not x.columns.equals(y.columns): raise ValueError('columns do not match') else: y = x mat = numpy.zeros((x.shape[0], y.shape[0]), dtype=float) x_numeric, nominal_columns = _get_continuous_and_ordinal_array(x) if id(x) != id(y): y_numeric, _ = _get_continuous_and_ordinal_array(y) else: y_numeric = x_numeric continuous_ordinal_kernel(x_numeric, y_numeric, mat) _nominal_kernel(x.loc[:, nominal_columns].values, y.loc[:, nominal_columns].values, mat) mat /= x.shape[1] return mat
[ "def", "clinical_kernel", "(", "x", ",", "y", "=", "None", ")", ":", "if", "y", "is", "not", "None", ":", "if", "x", ".", "shape", "[", "1", "]", "!=", "y", ".", "shape", "[", "1", "]", ":", "raise", "ValueError", "(", "'x and y have different number of features'", ")", "if", "not", "x", ".", "columns", ".", "equals", "(", "y", ".", "columns", ")", ":", "raise", "ValueError", "(", "'columns do not match'", ")", "else", ":", "y", "=", "x", "mat", "=", "numpy", ".", "zeros", "(", "(", "x", ".", "shape", "[", "0", "]", ",", "y", ".", "shape", "[", "0", "]", ")", ",", "dtype", "=", "float", ")", "x_numeric", ",", "nominal_columns", "=", "_get_continuous_and_ordinal_array", "(", "x", ")", "if", "id", "(", "x", ")", "!=", "id", "(", "y", ")", ":", "y_numeric", ",", "_", "=", "_get_continuous_and_ordinal_array", "(", "y", ")", "else", ":", "y_numeric", "=", "x_numeric", "continuous_ordinal_kernel", "(", "x_numeric", ",", "y_numeric", ",", "mat", ")", "_nominal_kernel", "(", "x", ".", "loc", "[", ":", ",", "nominal_columns", "]", ".", "values", ",", "y", ".", "loc", "[", ":", ",", "nominal_columns", "]", ".", "values", ",", "mat", ")", "mat", "/=", "x", ".", "shape", "[", "1", "]", "return", "mat" ]
Computes clinical kernel The clinical kernel distinguishes between continuous, ordinal, and nominal variables. Parameters ---------- x : pandas.DataFrame, shape = (n_samples_x, n_features) Training data y : pandas.DataFrame, shape = (n_samples_y, n_features) Testing data Returns ------- kernel : array, shape = (n_samples_x, n_samples_y) Kernel matrix. Values are normalized to lie within [0, 1]. References ---------- .. [1] Daemen, A., De Moor, B., "Development of a kernel function for clinical data". Annual International Conference of the IEEE Engineering in Medicine and Biology Society, 5913-7, 2009
[ "Computes", "clinical", "kernel" ]
python
train
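A minimal usage sketch, assuming scikit-survival is installed. Treating the unordered categorical column as nominal reflects how the helper appears to infer variable types from pandas dtypes; that inference is an assumption here.

import pandas
from sksurv.kernels import clinical_kernel

x = pandas.DataFrame({'age': [61.0, 49.0, 70.0],
                      'stage': pandas.Series(['I', 'II', 'II'], dtype='category')})
mat = clinical_kernel(x)       # shape (3, 3), entries normalized to [0, 1]
print(mat.shape, mat[0, 0])    # each sample is maximally similar (1.0) to itself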
glormph/msstitch
src/app/actions/mzidtsv/percolator.py
https://github.com/glormph/msstitch/blob/ded7e5cbd813d7797dc9d42805778266e59ff042/src/app/actions/mzidtsv/percolator.py#L6-L39
def add_percolator_to_mzidtsv(mzidfn, tsvfn, multipsm, oldheader): """Takes an MSGF+ tsv and corresponding mzId, adds percolator data to tsv lines. Generator yields the lines. Multiple PSMs per scan can be delivered, in which case rank is also reported. """ namespace = readers.get_mzid_namespace(mzidfn) try: xmlns = '{%s}' % namespace['xmlns'] except TypeError: xmlns = '' specfnids = readers.get_mzid_specfile_ids(mzidfn, namespace) mzidpepmap = {} for peptide in readers.generate_mzid_peptides(mzidfn, namespace): pep_id, seq = readers.get_mzid_peptidedata(peptide, xmlns) mzidpepmap[pep_id] = seq mzidpercomap = {} for specid_data in readers.generate_mzid_spec_id_items(mzidfn, namespace, xmlns, specfnids): scan, fn, pepid, spec_id = specid_data percodata = readers.get_specidentitem_percolator_data(spec_id, xmlns) try: mzidpercomap[fn][scan][mzidpepmap[pepid]] = percodata except KeyError: try: mzidpercomap[fn][scan] = {mzidpepmap[pepid]: percodata} except KeyError: mzidpercomap[fn] = {scan: {mzidpepmap[pepid]: percodata}} for line in tsvreader.generate_tsv_psms(tsvfn, oldheader): outline = {k: v for k, v in line.items()} fn = line[mzidtsvdata.HEADER_SPECFILE] scan = line[mzidtsvdata.HEADER_SCANNR] seq = line[mzidtsvdata.HEADER_PEPTIDE] outline.update(mzidpercomap[fn][scan][seq]) yield outline
[ "def", "add_percolator_to_mzidtsv", "(", "mzidfn", ",", "tsvfn", ",", "multipsm", ",", "oldheader", ")", ":", "namespace", "=", "readers", ".", "get_mzid_namespace", "(", "mzidfn", ")", "try", ":", "xmlns", "=", "'{%s}'", "%", "namespace", "[", "'xmlns'", "]", "except", "TypeError", ":", "xmlns", "=", "''", "specfnids", "=", "readers", ".", "get_mzid_specfile_ids", "(", "mzidfn", ",", "namespace", ")", "mzidpepmap", "=", "{", "}", "for", "peptide", "in", "readers", ".", "generate_mzid_peptides", "(", "mzidfn", ",", "namespace", ")", ":", "pep_id", ",", "seq", "=", "readers", ".", "get_mzid_peptidedata", "(", "peptide", ",", "xmlns", ")", "mzidpepmap", "[", "pep_id", "]", "=", "seq", "mzidpercomap", "=", "{", "}", "for", "specid_data", "in", "readers", ".", "generate_mzid_spec_id_items", "(", "mzidfn", ",", "namespace", ",", "xmlns", ",", "specfnids", ")", ":", "scan", ",", "fn", ",", "pepid", ",", "spec_id", "=", "specid_data", "percodata", "=", "readers", ".", "get_specidentitem_percolator_data", "(", "spec_id", ",", "xmlns", ")", "try", ":", "mzidpercomap", "[", "fn", "]", "[", "scan", "]", "[", "mzidpepmap", "[", "pepid", "]", "]", "=", "percodata", "except", "KeyError", ":", "try", ":", "mzidpercomap", "[", "fn", "]", "[", "scan", "]", "=", "{", "mzidpepmap", "[", "pepid", "]", ":", "percodata", "}", "except", "KeyError", ":", "mzidpercomap", "[", "fn", "]", "=", "{", "scan", ":", "{", "mzidpepmap", "[", "pepid", "]", ":", "percodata", "}", "}", "for", "line", "in", "tsvreader", ".", "generate_tsv_psms", "(", "tsvfn", ",", "oldheader", ")", ":", "outline", "=", "{", "k", ":", "v", "for", "k", ",", "v", "in", "line", ".", "items", "(", ")", "}", "fn", "=", "line", "[", "mzidtsvdata", ".", "HEADER_SPECFILE", "]", "scan", "=", "line", "[", "mzidtsvdata", ".", "HEADER_SCANNR", "]", "seq", "=", "line", "[", "mzidtsvdata", ".", "HEADER_PEPTIDE", "]", "outline", ".", "update", "(", "mzidpercomap", "[", "fn", "]", "[", "scan", "]", "[", "seq", "]", ")", "yield", "outline" ]
Takes an MSGF+ tsv and corresponding mzId, adds percolator data to tsv lines. Generator yields the lines. Multiple PSMs per scan can be delivered, in which case rank is also reported.
[ "Takes", "a", "MSGF", "+", "tsv", "and", "corresponding", "mzId", "adds", "percolatordata", "to", "tsv", "lines", ".", "Generator", "yields", "the", "lines", ".", "Multiple", "PSMs", "per", "scan", "can", "be", "delivered", "in", "which", "case", "rank", "is", "also", "reported", "." ]
python
train
tanghaibao/goatools
goatools/grouper/sorter.py
https://github.com/tanghaibao/goatools/blob/407682e573a108864a79031f8ca19ee3bf377626/goatools/grouper/sorter.py#L178-L187
def get_fields(desc2nts): """Return grouped, sorted namedtuples in either format: flat, sections.""" if 'flat' in desc2nts: nts_flat = desc2nts.get('flat') if nts_flat: return nts_flat[0]._fields if 'sections' in desc2nts: nts_sections = desc2nts.get('sections') if nts_sections: return nts_sections[0][1][0]._fields
[ "def", "get_fields", "(", "desc2nts", ")", ":", "if", "'flat'", "in", "desc2nts", ":", "nts_flat", "=", "desc2nts", ".", "get", "(", "'flat'", ")", "if", "nts_flat", ":", "return", "nts_flat", "[", "0", "]", ".", "_fields", "if", "'sections'", "in", "desc2nts", ":", "nts_sections", "=", "desc2nts", ".", "get", "(", "'sections'", ")", "if", "nts_sections", ":", "return", "nts_sections", "[", "0", "]", "[", "1", "]", "[", "0", "]", ".", "_fields" ]
Return grouped, sorted namedtuples in either format: flat, sections.
[ "Return", "grouped", "sorted", "namedtuples", "in", "either", "format", ":", "flat", "sections", "." ]
python
train
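A self-contained sketch of the two dictionary shapes the function accepts. The function body is copied from the entry above so the demo runs standalone; the namedtuple fields are illustrative.

from collections import namedtuple

def get_fields(desc2nts):
    # copy of the entry above, included only to make this sketch runnable
    if 'flat' in desc2nts:
        nts_flat = desc2nts.get('flat')
        if nts_flat:
            return nts_flat[0]._fields
    if 'sections' in desc2nts:
        nts_sections = desc2nts.get('sections')
        if nts_sections:
            return nts_sections[0][1][0]._fields

Nt = namedtuple('Nt', ['GO', 'name'])
print(get_fields({'flat': [Nt('GO:0008150', 'biological_process')]}))
print(get_fields({'sections': [('Section A', [Nt('GO:0003674', 'molecular_function')])]}))
# both print ('GO', 'name')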
janpipek/physt
physt/histogram1d.py
https://github.com/janpipek/physt/blob/6dd441b073514e7728235f50b2352d56aacf38d4/physt/histogram1d.py#L222-L236
def std(self) -> Optional[float]: #, ddof=0): """Standard deviation of all values entered into histogram. This number is precise, because we keep the necessary data separate from bin contents. Returns ------- float """ # TODO: Add DOF if self._stats: return np.sqrt(self.variance()) else: return None
[ "def", "std", "(", "self", ")", "->", "Optional", "[", "float", "]", ":", "#, ddof=0):", "# TODO: Add DOF", "if", "self", ".", "_stats", ":", "return", "np", ".", "sqrt", "(", "self", ".", "variance", "(", ")", ")", "else", ":", "return", "None" ]
Standard deviation of all values entered into histogram. This number is precise, because we keep the necessary data separate from bin contents. Returns ------- float
[ "Standard", "deviation", "of", "all", "values", "entered", "into", "histogram", "." ]
python
train
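A short usage sketch, assuming physt is installed; h1 is physt's 1-D histogram constructor, and std() reads the exact accumulated statistics rather than re-deriving them from bin contents.

import numpy as np
import physt

values = np.random.normal(0.0, 2.0, 10000)
h = physt.h1(values, 50)    # 50 bins over the data range
print(h.std())              # close to 2.0, the true standard deviation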
Erotemic/utool
utool/util_numpy.py
https://github.com/Erotemic/utool/blob/3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a/utool/util_numpy.py#L368-L372
def deterministic_sample(list_, nSample, seed=0, rng=None, strict=False): """ Grabs data randomly, but in a repeatable way """ rng = ensure_rng(seed if rng is None else rng) sample_list = random_sample(list_, nSample, strict=strict, rng=rng) return sample_list
[ "def", "deterministic_sample", "(", "list_", ",", "nSample", ",", "seed", "=", "0", ",", "rng", "=", "None", ",", "strict", "=", "False", ")", ":", "rng", "=", "ensure_rng", "(", "seed", "if", "rng", "is", "None", "else", "rng", ")", "sample_list", "=", "random_sample", "(", "list_", ",", "nSample", ",", "strict", "=", "strict", ",", "rng", "=", "rng", ")", "return", "sample_list" ]
Grabs data randomly, but in a repeatable way
[ "Grabs", "data", "randomly", "but", "in", "a", "repeatable", "way" ]
python
train
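A repeatability sketch, assuming utool is installed and re-exports its util_numpy helpers at the top level.

import utool as ut

first = ut.deterministic_sample(list(range(100)), 5, seed=42)
second = ut.deterministic_sample(list(range(100)), 5, seed=42)
assert first == second   # the same seed always yields the same sample
print(first)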
gmr/rejected
rejected/mcp.py
https://github.com/gmr/rejected/blob/610a3e1401122ecb98d891b6795cca0255e5b044/rejected/mcp.py#L196-L217
def collect_results(self, data_values): """Receive the data from the consumers polled and process it. :param dict data_values: The poll data returned from the consumer :type data_values: dict """ self.last_poll_results['timestamp'] = self.poll_data['timestamp'] # Get the name and consumer name and remove it from what is reported consumer_name = data_values['consumer_name'] del data_values['consumer_name'] process_name = data_values['name'] del data_values['name'] # Add it to our last poll global data if consumer_name not in self.last_poll_results: self.last_poll_results[consumer_name] = dict() self.last_poll_results[consumer_name][process_name] = data_values # Calculate the stats self.stats = self.calculate_stats(self.last_poll_results)
[ "def", "collect_results", "(", "self", ",", "data_values", ")", ":", "self", ".", "last_poll_results", "[", "'timestamp'", "]", "=", "self", ".", "poll_data", "[", "'timestamp'", "]", "# Get the name and consumer name and remove it from what is reported", "consumer_name", "=", "data_values", "[", "'consumer_name'", "]", "del", "data_values", "[", "'consumer_name'", "]", "process_name", "=", "data_values", "[", "'name'", "]", "del", "data_values", "[", "'name'", "]", "# Add it to our last poll global data", "if", "consumer_name", "not", "in", "self", ".", "last_poll_results", ":", "self", ".", "last_poll_results", "[", "consumer_name", "]", "=", "dict", "(", ")", "self", ".", "last_poll_results", "[", "consumer_name", "]", "[", "process_name", "]", "=", "data_values", "# Calculate the stats", "self", ".", "stats", "=", "self", ".", "calculate_stats", "(", "self", ".", "last_poll_results", ")" ]
Receive the data from the consumers polled and process it. :param dict data_values: The poll data returned from the consumer :type data_values: dict
[ "Receive", "the", "data", "from", "the", "consumers", "polled", "and", "process", "it", "." ]
python
train
peerplays-network/python-peerplays
peerplays/cli/info.py
https://github.com/peerplays-network/python-peerplays/blob/188f04238e7e21d5f73e9b01099eea44289ef6b7/peerplays/cli/info.py#L19-L98
def info(ctx, objects): """ Obtain all kinds of information """ if not objects: t = PrettyTable(["Key", "Value"]) t.align = "l" info = ctx.peerplays.rpc.get_dynamic_global_properties() for key in info: t.add_row([key, info[key]]) click.echo(t.get_string(sortby="Key")) for obj in objects: # Block if re.match("^[0-9]*$", obj): block = Block(obj, peerplays_instance=ctx.peerplays) if block: t = PrettyTable(["Key", "Value"]) t.align = "l" for key in sorted(block): value = block[key] if key == "transactions": value = json.dumps(value, indent=4) t.add_row([key, value]) click.echo(t) else: click.echo("Block number %s unknown" % obj) # Object Id elif len(obj.split(".")) == 3: data = ctx.peerplays.rpc.get_object(obj) if data: t = PrettyTable(["Key", "Value"]) t.align = "l" for key in sorted(data): value = data[key] if isinstance(value, dict) or isinstance(value, list): value = json.dumps(value, indent=4) t.add_row([key, value]) click.echo(t) else: click.echo("Object %s unknown" % obj) # Asset elif obj.upper() == obj: data = Asset(obj) t = PrettyTable(["Key", "Value"]) t.align = "l" for key in sorted(data): value = data[key] if isinstance(value, dict): value = json.dumps(value, indent=4) t.add_row([key, value]) click.echo(t) # Public Key elif re.match("^PPY.{48,55}$", obj): account = ctx.peerplays.wallet.getAccountFromPublicKey(obj) if account: t = PrettyTable(["Account"]) t.align = "l" t.add_row([account]) click.echo(t) else: click.echo("Public Key %s not known" % obj) # Account name elif re.match("^[a-zA-Z0-9\-\._]{2,64}$", obj): account = Account(obj, full=True) if account: t = PrettyTable(["Key", "Value"]) t.align = "l" for key in sorted(account): value = account[key] if isinstance(value, dict) or isinstance(value, list): value = json.dumps(value, indent=4) t.add_row([key, value]) click.echo(t) else: click.echo("Account %s unknown" % obj) else: click.echo("Couldn't identify object to read")
[ "def", "info", "(", "ctx", ",", "objects", ")", ":", "if", "not", "objects", ":", "t", "=", "PrettyTable", "(", "[", "\"Key\"", ",", "\"Value\"", "]", ")", "t", ".", "align", "=", "\"l\"", "info", "=", "ctx", ".", "peerplays", ".", "rpc", ".", "get_dynamic_global_properties", "(", ")", "for", "key", "in", "info", ":", "t", ".", "add_row", "(", "[", "key", ",", "info", "[", "key", "]", "]", ")", "click", ".", "echo", "(", "t", ".", "get_string", "(", "sortby", "=", "\"Key\"", ")", ")", "for", "obj", "in", "objects", ":", "# Block", "if", "re", ".", "match", "(", "\"^[0-9]*$\"", ",", "obj", ")", ":", "block", "=", "Block", "(", "obj", ",", "peerplays_instance", "=", "ctx", ".", "peerplays", ")", "if", "block", ":", "t", "=", "PrettyTable", "(", "[", "\"Key\"", ",", "\"Value\"", "]", ")", "t", ".", "align", "=", "\"l\"", "for", "key", "in", "sorted", "(", "block", ")", ":", "value", "=", "block", "[", "key", "]", "if", "key", "==", "\"transactions\"", ":", "value", "=", "json", ".", "dumps", "(", "value", ",", "indent", "=", "4", ")", "t", ".", "add_row", "(", "[", "key", ",", "value", "]", ")", "click", ".", "echo", "(", "t", ")", "else", ":", "click", ".", "echo", "(", "\"Block number %s unknown\"", "%", "obj", ")", "# Object Id", "elif", "len", "(", "obj", ".", "split", "(", "\".\"", ")", ")", "==", "3", ":", "data", "=", "ctx", ".", "peerplays", ".", "rpc", ".", "get_object", "(", "obj", ")", "if", "data", ":", "t", "=", "PrettyTable", "(", "[", "\"Key\"", ",", "\"Value\"", "]", ")", "t", ".", "align", "=", "\"l\"", "for", "key", "in", "sorted", "(", "data", ")", ":", "value", "=", "data", "[", "key", "]", "if", "isinstance", "(", "value", ",", "dict", ")", "or", "isinstance", "(", "value", ",", "list", ")", ":", "value", "=", "json", ".", "dumps", "(", "value", ",", "indent", "=", "4", ")", "t", ".", "add_row", "(", "[", "key", ",", "value", "]", ")", "click", ".", "echo", "(", "t", ")", "else", ":", "click", ".", "echo", "(", "\"Object %s unknown\"", "%", "obj", ")", "# Asset", "elif", "obj", ".", "upper", "(", ")", "==", "obj", ":", "data", "=", "Asset", "(", "obj", ")", "t", "=", "PrettyTable", "(", "[", "\"Key\"", ",", "\"Value\"", "]", ")", "t", ".", "align", "=", "\"l\"", "for", "key", "in", "sorted", "(", "data", ")", ":", "value", "=", "data", "[", "key", "]", "if", "isinstance", "(", "value", ",", "dict", ")", ":", "value", "=", "json", ".", "dumps", "(", "value", ",", "indent", "=", "4", ")", "t", ".", "add_row", "(", "[", "key", ",", "value", "]", ")", "click", ".", "echo", "(", "t", ")", "# Public Key", "elif", "re", ".", "match", "(", "\"^PPY.{48,55}$\"", ",", "obj", ")", ":", "account", "=", "ctx", ".", "peerplays", ".", "wallet", ".", "getAccountFromPublicKey", "(", "obj", ")", "if", "account", ":", "t", "=", "PrettyTable", "(", "[", "\"Account\"", "]", ")", "t", ".", "align", "=", "\"l\"", "t", ".", "add_row", "(", "[", "account", "]", ")", "click", ".", "echo", "(", "t", ")", "else", ":", "click", ".", "echo", "(", "\"Public Key not known\"", "%", "obj", ")", "# Account name", "elif", "re", ".", "match", "(", "\"^[a-zA-Z0-9\\-\\._]{2,64}$\"", ",", "obj", ")", ":", "account", "=", "Account", "(", "obj", ",", "full", "=", "True", ")", "if", "account", ":", "t", "=", "PrettyTable", "(", "[", "\"Key\"", ",", "\"Value\"", "]", ")", "t", ".", "align", "=", "\"l\"", "for", "key", "in", "sorted", "(", "account", ")", ":", "value", "=", "account", "[", "key", "]", "if", "isinstance", "(", "value", ",", "dict", ")", "or", "isinstance", "(", "value", ",", "list", ")", ":", 
"value", "=", "json", ".", "dumps", "(", "value", ",", "indent", "=", "4", ")", "t", ".", "add_row", "(", "[", "key", ",", "value", "]", ")", "click", ".", "echo", "(", "t", ")", "else", ":", "click", ".", "echo", "(", "\"Account %s unknown\"", "%", "obj", ")", "else", ":", "click", ".", "echo", "(", "\"Couldn't identify object to read\"", ")" ]
Obtain all kinds of information
[ "Obtain", "all", "kinds", "of", "information" ]
python
train
hellock/icrawler
icrawler/utils/proxy_pool.py
https://github.com/hellock/icrawler/blob/38c925758fd3d3e568d3ecc993f77bc0acfa4788/icrawler/utils/proxy_pool.py#L468-L479
def scan_cnproxy(self): """Scan candidate (mainland) proxies from http://cn-proxy.com""" self.logger.info( 'start scanning http://cn-proxy.com for proxy list...') response = requests.get('http://cn-proxy.com') soup = BeautifulSoup(response.content, 'lxml') tables = soup.find_all('table', class_='sortable') for table in tables: for tr in table.tbody.find_all('tr'): info = tr.find_all('td') addr = '{}:{}'.format(info[0].string, info[1].string) self.proxy_queue.put({'addr': addr, 'protocol': 'http'})
[ "def", "scan_cnproxy", "(", "self", ")", ":", "self", ".", "logger", ".", "info", "(", "'start scanning http://cn-proxy.com for proxy list...'", ")", "response", "=", "requests", ".", "get", "(", "'http://cn-proxy.com'", ")", "soup", "=", "BeautifulSoup", "(", "response", ".", "content", ",", "'lxml'", ")", "tables", "=", "soup", ".", "find_all", "(", "'table'", ",", "class_", "=", "'sortable'", ")", "for", "table", "in", "tables", ":", "for", "tr", "in", "table", ".", "tbody", ".", "find_all", "(", "'tr'", ")", ":", "info", "=", "tr", ".", "find_all", "(", "'td'", ")", "addr", "=", "'{}:{}'", ".", "format", "(", "info", "[", "0", "]", ".", "string", ",", "info", "[", "1", "]", ".", "string", ")", "self", ".", "proxy_queue", ".", "put", "(", "{", "'addr'", ":", "addr", ",", "'protocol'", ":", "'http'", "}", ")" ]
Scan candidate (mainland) proxies from http://cn-proxy.com
[ "Scan", "candidate", "(", "mainland", ")", "proxies", "from", "http", ":", "//", "cn", "-", "proxy", ".", "com" ]
python
train
peri-source/peri
peri/comp/exactpsf.py
https://github.com/peri-source/peri/blob/61beed5deaaf978ab31ed716e8470d86ba639867/peri/comp/exactpsf.py#L321-L323
def _p2k(self, v): """ Convert from pixel to 1/k_incoming (laser_wavelength/(2\pi)) units """ return 2*np.pi*self.pxsize*v/self.param_dict['psf-laser-wavelength']
[ "def", "_p2k", "(", "self", ",", "v", ")", ":", "return", "2", "*", "np", ".", "pi", "*", "self", ".", "pxsize", "*", "v", "/", "self", ".", "param_dict", "[", "'psf-laser-wavelength'", "]" ]
Convert from pixel to 1/k_incoming (laser_wavelength/(2\pi)) units
[ "Convert", "from", "pixel", "to", "1", "/", "k_incoming", "(", "laser_wavelength", "/", "(", "2", "\\", "pi", "))", "units" ]
python
valid
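A worked instance of the conversion above, with illustrative numbers (0.1 um pixels, a 0.53 um laser wavelength): the result is 2*pi * pxsize * v / wavelength.

import numpy as np

pxsize = 0.1        # illustrative pixel size, um
wavelength = 0.53   # illustrative laser wavelength, um
v = 5.0             # a length measured in pixels
print(2 * np.pi * pxsize * v / wavelength)   # ~5.93 in 1/k_incoming units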
saltstack/salt
salt/platform/win.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/platform/win.py#L1131-L1144
def dup_token(th): ''' duplicate the access token ''' # TODO: is `duplicate_token` the same? sec_attr = win32security.SECURITY_ATTRIBUTES() sec_attr.bInheritHandle = True return win32security.DuplicateTokenEx( th, win32security.SecurityImpersonation, win32con.MAXIMUM_ALLOWED, win32security.TokenPrimary, sec_attr, )
[ "def", "dup_token", "(", "th", ")", ":", "# TODO: is `duplicate_token` the same?", "sec_attr", "=", "win32security", ".", "SECURITY_ATTRIBUTES", "(", ")", "sec_attr", ".", "bInheritHandle", "=", "True", "return", "win32security", ".", "DuplicateTokenEx", "(", "th", ",", "win32security", ".", "SecurityImpersonation", ",", "win32con", ".", "MAXIMUM_ALLOWED", ",", "win32security", ".", "TokenPrimary", ",", "sec_attr", ",", ")" ]
duplicate the access token
[ "duplicate", "the", "access", "token" ]
python
train
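A hedged, Windows-only sketch of feeding the helper a real token; it assumes dup_token from the entry is in scope and uses only documented pywin32 calls.

import win32api
import win32con
import win32security

process = win32api.GetCurrentProcess()
th = win32security.OpenProcessToken(process, win32con.MAXIMUM_ALLOWED)
primary = dup_token(th)   # a primary token, e.g. for CreateProcessAsUser
print(primary)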
StanfordBioinformatics/loom
server/loomengine_server/api/models/data_nodes.py
https://github.com/StanfordBioinformatics/loom/blob/db2031a1a87124fee1aeb7414a668c03d774a698/server/loomengine_server/api/models/data_nodes.py#L328-L339
def _check_index(self, index): """Verify that the given index is consistent with the degree of the node. """ if self.degree is None: raise UnknownDegreeError( 'Cannot access child DataNode on a parent with degree of None. '\ 'Set the degree on the parent first.') if index < 0 or index >= self.degree: raise IndexOutOfRangeError( 'Out of range index %s. DataNode parent has degree %s, so index '\ 'should be in the range 0 to %s' % ( index, self.degree, self.degree-1))
[ "def", "_check_index", "(", "self", ",", "index", ")", ":", "if", "self", ".", "degree", "is", "None", ":", "raise", "UnknownDegreeError", "(", "'Cannot access child DataNode on a parent with degree of None. '", "'Set the degree on the parent first.'", ")", "if", "index", "<", "0", "or", "index", ">=", "self", ".", "degree", ":", "raise", "IndexOutOfRangeError", "(", "'Out of range index %s. DataNode parent has degree %s, so index '", "'should be in the range 0 to %s'", "%", "(", "index", ",", "self", ".", "degree", ",", "self", ".", "degree", "-", "1", ")", ")" ]
Verify that the given index is consistent with the degree of the node.
[ "Verify", "that", "the", "given", "index", "is", "consistent", "with", "the", "degree", "of", "the", "node", "." ]
python
train
myaooo/pysbrl
pysbrl/rule_list.py
https://github.com/myaooo/pysbrl/blob/74bba8c6913a7f82e32313108f8c3e025b89d9c7/pysbrl/rule_list.py#L227-L242
def from_raw(self, rule_ids, outputs, raw_rules): """ A helper function that converts the results returned from C function :param rule_ids: :param outputs: :param raw_rules: :return: """ self._rule_pool = [([], [])] + raw_rules self._rule_list = [] for i, idx in enumerate(rule_ids): rule = Rule([Clause(f, c) for f, c in zip(*self._rule_pool[idx])], outputs[i]) self._rule_list.append(rule) # self._rule_list.append(rule_str2rule(_rule_name, outputs[i])) self._rule_ids = rule_ids self._rule_outputs = outputs
[ "def", "from_raw", "(", "self", ",", "rule_ids", ",", "outputs", ",", "raw_rules", ")", ":", "self", ".", "_rule_pool", "=", "[", "(", "[", "]", ",", "[", "]", ")", "]", "+", "raw_rules", "self", ".", "_rule_list", "=", "[", "]", "for", "i", ",", "idx", "in", "enumerate", "(", "rule_ids", ")", ":", "rule", "=", "Rule", "(", "[", "Clause", "(", "f", ",", "c", ")", "for", "f", ",", "c", "in", "zip", "(", "*", "self", ".", "_rule_pool", "[", "idx", "]", ")", "]", ",", "outputs", "[", "i", "]", ")", "self", ".", "_rule_list", ".", "append", "(", "rule", ")", "# self._rule_list.append(rule_str2rule(_rule_name, outputs[i]))", "self", ".", "_rule_ids", "=", "rule_ids", "self", ".", "_rule_outputs", "=", "outputs" ]
A helper function that converts the results returned from C function :param rule_ids: :param outputs: :param raw_rules: :return:
[ "A", "helper", "function", "that", "converts", "the", "results", "returned", "from", "C", "function", ":", "param", "rule_ids", ":", ":", "param", "outputs", ":", ":", "param", "raw_rules", ":", ":", "return", ":" ]
python
train
django-py/django-doberman
doberman/auth.py
https://github.com/django-py/django-doberman/blob/2e5959737a1b64234ed5a179c93f96a0de1c3e5c/doberman/auth.py#L97-L137
def inspect(self): """ Inspect access attempt, used for captcha flow :return: """ last_attempt = self.get_last_failed_access_attempt( ip_address=self.ip, captcha_enabled=True, captcha_passed=False, is_expired=False ) if last_attempt is None and not self.request.user.is_authenticated(): # create a new entry user_access = self._FailedAccessAttemptModel( ip_address=self.ip, username=self.username, captcha_enabled=True, captcha_passed=False, is_expired=False ) elif last_attempt: user_access = last_attempt if self.request.method == 'POST': if not self.request.user.is_authenticated(): user_access.user_agent = self.request.META.get('HTTP_USER_AGENT', '<unknown user agent>')[:255] user_access.username = self.username user_access.failed_attempts += 1 user_access.params_get = self.request.GET user_access.params_post = self.request.POST if user_access.failed_attempts >= self.max_failed_attempts: user_access.is_locked = True user_access.save() elif self.request.user.is_authenticated() and last_attempt: last_attempt.is_expired = True last_attempt.save()
[ "def", "inspect", "(", "self", ")", ":", "last_attempt", "=", "self", ".", "get_last_failed_access_attempt", "(", "ip_address", "=", "self", ".", "ip", ",", "captcha_enabled", "=", "True", ",", "captcha_passed", "=", "False", ",", "is_expired", "=", "False", ")", "if", "last_attempt", "is", "None", "and", "not", "self", ".", "request", ".", "user", ".", "is_authenticated", "(", ")", ":", "# create a new entry", "user_access", "=", "self", ".", "_FailedAccessAttemptModel", "(", "ip_address", "=", "self", ".", "ip", ",", "username", "=", "self", ".", "username", ",", "captcha_enabled", "=", "True", ",", "captcha_passed", "=", "False", ",", "is_expired", "=", "False", ")", "elif", "last_attempt", ":", "user_access", "=", "last_attempt", "if", "self", ".", "request", ".", "method", "==", "'POST'", ":", "if", "not", "self", ".", "request", ".", "user", ".", "is_authenticated", "(", ")", ":", "user_access", ".", "user_agent", "=", "self", ".", "request", ".", "META", ".", "get", "(", "'HTTP_USER_AGENT'", ",", "'<unknown user agent>'", ")", "[", ":", "255", "]", "user_access", ".", "username", "=", "self", ".", "username", "user_access", ".", "failed_attempts", "+=", "1", "user_access", ".", "params_get", "=", "self", ".", "request", ".", "GET", "user_access", ".", "params_post", "=", "self", ".", "request", ".", "POST", "if", "user_access", ".", "failed_attempts", ">=", "self", ".", "max_failed_attempts", ":", "user_access", ".", "is_locked", "=", "True", "user_access", ".", "save", "(", ")", "elif", "self", ".", "request", ".", "user", ".", "is_authenticated", "(", ")", "and", "last_attempt", ":", "last_attempt", ".", "is_expired", "=", "True", "last_attempt", ".", "save", "(", ")" ]
Inspect access attempt, used for captcha flow :return:
[ "Inspect", "access", "attempt", "used", "for", "catpcha", "flow", ":", "return", ":" ]
python
train
sdispater/orator
orator/dbal/platforms/platform.py
https://github.com/sdispater/orator/blob/bd90bf198ee897751848f9a92e49d18e60a74136/orator/dbal/platforms/platform.py#L515-L527
def get_drop_table_sql(self, table): """ Returns the SQL snippet to drop an existing table. :param table: The table :type table: Table or str :rtype: str """ if isinstance(table, Table): table = table.get_quoted_name(self) return "DROP TABLE %s" % table
[ "def", "get_drop_table_sql", "(", "self", ",", "table", ")", ":", "if", "isinstance", "(", "table", ",", "Table", ")", ":", "table", "=", "table", ".", "get_quoted_name", "(", "self", ")", "return", "\"DROP TABLE %s\"", "%", "table" ]
Returns the SQL snippet to drop an existing table. :param table: The table :type table: Table or str :rtype: str
[ "Returns", "the", "SQL", "snippet", "to", "drop", "an", "existing", "table", "." ]
python
train
codeinn/vcs
vcs/backends/git/repository.py
https://github.com/codeinn/vcs/blob/e6cd94188e9c36d273411bf3adc0584ac6ab92a0/vcs/backends/git/repository.py#L636-L662
def get_config_value(self, section, name, config_file=None): """ Returns configuration value for a given [``section``] and ``name``. :param section: Section we want to retrieve value from :param name: Name of configuration we want to retrieve :param config_file: A path to file which should be used to retrieve configuration from (might also be a list of file paths) """ if config_file is None: config_file = [] elif isinstance(config_file, basestring): config_file = [config_file] def gen_configs(): for path in config_file + self._config_files: try: yield ConfigFile.from_path(path) except (IOError, OSError, ValueError): continue for config in gen_configs(): try: return config.get(section, name) except KeyError: continue return None
[ "def", "get_config_value", "(", "self", ",", "section", ",", "name", ",", "config_file", "=", "None", ")", ":", "if", "config_file", "is", "None", ":", "config_file", "=", "[", "]", "elif", "isinstance", "(", "config_file", ",", "basestring", ")", ":", "config_file", "=", "[", "config_file", "]", "def", "gen_configs", "(", ")", ":", "for", "path", "in", "config_file", "+", "self", ".", "_config_files", ":", "try", ":", "yield", "ConfigFile", ".", "from_path", "(", "path", ")", "except", "(", "IOError", ",", "OSError", ",", "ValueError", ")", ":", "continue", "for", "config", "in", "gen_configs", "(", ")", ":", "try", ":", "return", "config", ".", "get", "(", "section", ",", "name", ")", "except", "KeyError", ":", "continue", "return", "None" ]
Returns configuration value for a given [``section``] and ``name``. :param section: Section we want to retrieve value from :param name: Name of configuration we want to retrieve :param config_file: A path to file which should be used to retrieve configuration from (might also be a list of file paths)
[ "Returns", "configuration", "value", "for", "a", "given", "[", "section", "]", "and", "name", "." ]
python
train
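A hedged usage sketch; get_repo is the vcs package's repository factory, and the paths below are illustrative.

from vcs import get_repo

repo = get_repo('/path/to/working/copy')
email = repo.get_config_value('user', 'email', config_file='/tmp/extra.cfg')
print(email)   # None when no candidate config file defines the key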
DLR-RM/RAFCON
source/rafcon/gui/models/abstract_state.py
https://github.com/DLR-RM/RAFCON/blob/24942ef1a904531f49ab8830a1dbb604441be498/source/rafcon/gui/models/abstract_state.py#L574-L601
def _parse_for_element_meta_data(self, meta_data): """Load meta data for state elements The meta data of the state meta data file also contains the meta data for state elements (data ports, outcomes, etc). This method parses the loaded meta data for each state element model. The meta data of the elements is removed from the passed dictionary. :param meta_data: Dictionary of loaded meta data """ # print("_parse meta data", meta_data) for data_port_m in self.input_data_ports: self._copy_element_meta_data_from_meta_file_data(meta_data, data_port_m, "input_data_port", data_port_m.data_port.data_port_id) for data_port_m in self.output_data_ports: self._copy_element_meta_data_from_meta_file_data(meta_data, data_port_m, "output_data_port", data_port_m.data_port.data_port_id) for outcome_m in self.outcomes: self._copy_element_meta_data_from_meta_file_data(meta_data, outcome_m, "outcome", outcome_m.outcome.outcome_id) if "income" in meta_data: if "gui" in meta_data and "editor_gaphas" in meta_data["gui"] and \ "income" in meta_data["gui"]["editor_gaphas"]: # chain necessary to prevent key generation del meta_data["gui"]["editor_gaphas"]["income"] elif "gui" in meta_data and "editor_gaphas" in meta_data["gui"] and \ "income" in meta_data["gui"]["editor_gaphas"]: # chain necessary to prevent key generation in meta data meta_data["income"]["gui"]["editor_gaphas"] = meta_data["gui"]["editor_gaphas"]["income"] del meta_data["gui"]["editor_gaphas"]["income"] self._copy_element_meta_data_from_meta_file_data(meta_data, self.income, "income", "")
[ "def", "_parse_for_element_meta_data", "(", "self", ",", "meta_data", ")", ":", "# print(\"_parse meta data\", meta_data)", "for", "data_port_m", "in", "self", ".", "input_data_ports", ":", "self", ".", "_copy_element_meta_data_from_meta_file_data", "(", "meta_data", ",", "data_port_m", ",", "\"input_data_port\"", ",", "data_port_m", ".", "data_port", ".", "data_port_id", ")", "for", "data_port_m", "in", "self", ".", "output_data_ports", ":", "self", ".", "_copy_element_meta_data_from_meta_file_data", "(", "meta_data", ",", "data_port_m", ",", "\"output_data_port\"", ",", "data_port_m", ".", "data_port", ".", "data_port_id", ")", "for", "outcome_m", "in", "self", ".", "outcomes", ":", "self", ".", "_copy_element_meta_data_from_meta_file_data", "(", "meta_data", ",", "outcome_m", ",", "\"outcome\"", ",", "outcome_m", ".", "outcome", ".", "outcome_id", ")", "if", "\"income\"", "in", "meta_data", ":", "if", "\"gui\"", "in", "meta_data", "and", "\"editor_gaphas\"", "in", "meta_data", "[", "\"gui\"", "]", "and", "\"income\"", "in", "meta_data", "[", "\"gui\"", "]", "[", "\"editor_gaphas\"", "]", ":", "# chain necessary to prevent key generation", "del", "meta_data", "[", "\"gui\"", "]", "[", "\"editor_gaphas\"", "]", "[", "\"income\"", "]", "elif", "\"gui\"", "in", "meta_data", "and", "\"editor_gaphas\"", "in", "meta_data", "[", "\"gui\"", "]", "and", "\"income\"", "in", "meta_data", "[", "\"gui\"", "]", "[", "\"editor_gaphas\"", "]", ":", "# chain necessary to prevent key generation in meta data", "meta_data", "[", "\"income\"", "]", "[", "\"gui\"", "]", "[", "\"editor_gaphas\"", "]", "=", "meta_data", "[", "\"gui\"", "]", "[", "\"editor_gaphas\"", "]", "[", "\"income\"", "]", "del", "meta_data", "[", "\"gui\"", "]", "[", "\"editor_gaphas\"", "]", "[", "\"income\"", "]", "self", ".", "_copy_element_meta_data_from_meta_file_data", "(", "meta_data", ",", "self", ".", "income", ",", "\"income\"", ",", "\"\"", ")" ]
Load meta data for state elements The meta data of the state meta data file also contains the meta data for state elements (data ports, outcomes, etc). This method parses the loaded meta data for each state element model. The meta data of the elements is removed from the passed dictionary. :param meta_data: Dictionary of loaded meta data
[ "Load", "meta", "data", "for", "state", "elements" ]
python
train
DataDog/integrations-core
postgres/datadog_checks/postgres/postgres.py
https://github.com/DataDog/integrations-core/blob/ebd41c873cf9f97a8c51bf9459bc6a7536af8acd/postgres/datadog_checks/postgres/postgres.py#L385-L390
def _set_server_known(cls, host, port): """ Store the host/port combination for this server """ with PostgreSql._known_servers_lock: PostgreSql._known_servers.add((host, port))
[ "def", "_set_server_known", "(", "cls", ",", "host", ",", "port", ")", ":", "with", "PostgreSql", ".", "_known_servers_lock", ":", "PostgreSql", ".", "_known_servers", ".", "add", "(", "(", "host", ",", "port", ")", ")" ]
Store the host/port combination for this server
[ "Store", "the", "host", "/", "port", "combination", "for", "this", "server" ]
python
train
CityOfZion/neo-python
neo/Core/TX/StateTransaction.py
https://github.com/CityOfZion/neo-python/blob/fe90f62e123d720d4281c79af0598d9df9e776fb/neo/Core/TX/StateTransaction.py#L46-L58
def DeserializeExclusiveData(self, reader): """ Deserialize full object. Args: reader (neo.IO.BinaryReader): Raises: Exception: If the transaction type is incorrect or if there are no claims. """ self.Type = TransactionType.StateTransaction self.Descriptors = reader.ReadSerializableArray('neo.Core.State.StateDescriptor.StateDescriptor')
[ "def", "DeserializeExclusiveData", "(", "self", ",", "reader", ")", ":", "self", ".", "Type", "=", "TransactionType", ".", "StateTransaction", "self", ".", "Descriptors", "=", "reader", ".", "ReadSerializableArray", "(", "'neo.Core.State.StateDescriptor.StateDescriptor'", ")" ]
Deserialize full object. Args: reader (neo.IO.BinaryReader): Raises: Exception: If the transaction type is incorrect or if there are no claims.
[ "Deserialize", "full", "object", "." ]
python
train
facebook/pyre-check
sapp/sapp/pysa_taint_parser.py
https://github.com/facebook/pyre-check/blob/4a9604d943d28ef20238505a51acfb1f666328d7/sapp/sapp/pysa_taint_parser.py#L202-L204
def _parse_leaves(self, leaves) -> List[Tuple[str, int]]: """Returns a list of pairs (leaf_name, distance)""" return [(self._leaf_name(leaf), 0) for leaf in leaves]
[ "def", "_parse_leaves", "(", "self", ",", "leaves", ")", "->", "List", "[", "Tuple", "[", "str", ",", "int", "]", "]", ":", "return", "[", "(", "self", ".", "_leaf_name", "(", "leaf", ")", ",", "0", ")", "for", "leaf", "in", "leaves", "]" ]
Returns a list of pairs (leaf_name, distance)
[ "Returns", "a", "list", "of", "pairs", "(", "leaf_name", "distance", ")" ]
python
train
lsst-sqre/ltd-conveyor
ltdconveyor/s3/upload.py
https://github.com/lsst-sqre/ltd-conveyor/blob/c492937c4c1e050ccc4a0b9dcc38f9980d57e305/ltdconveyor/s3/upload.py#L356-L407
def list_dirnames_in_directory(self, dirname): """List all names of directories that exist at the root of this bucket directory. Note that *directories* don't exist in S3; rather directories are inferred from path names. Parameters ---------- dirname : `str` Directory name in the bucket relative to ``bucket_root``. Returns ------- dirnames : `list` List of directory names (`str`), relative to ``bucket_root/``, that exist at the root of ``dirname``. """ prefix = self._create_prefix(dirname) dirnames = [] for obj in self._bucket.objects.filter(Prefix=prefix): # get directory name of every object under this path prefix dirname = os.path.dirname(obj.key) # dirname is empty if the object happens to be the directory # redirect object for the prefix directory (directory # redirect objects are named after directories and have metadata # that tells Fastly to redirect the browser to the index.html # contained in the directory). if dirname == '': dirname = obj.key + '/' # Strip out the path prefix from the directory name rel_dirname = os.path.relpath(dirname, start=prefix) # If there's only one part then this directory is at the root # relative to the prefix. We want this. dir_parts = rel_dirname.split('/') if len(dir_parts) == 1: dirnames.append(dir_parts[0]) # Above algorithm finds root directories for all *files* in sub # subdirectories; trim down to the unique set. dirnames = list(set(dirnames)) # Remove posix-like relative directory names that can appear # in the bucket listing. for filtered_dir in ('.', '..'): if filtered_dir in dirnames: dirnames.remove(filtered_dir) return dirnames
[ "def", "list_dirnames_in_directory", "(", "self", ",", "dirname", ")", ":", "prefix", "=", "self", ".", "_create_prefix", "(", "dirname", ")", "dirnames", "=", "[", "]", "for", "obj", "in", "self", ".", "_bucket", ".", "objects", ".", "filter", "(", "Prefix", "=", "prefix", ")", ":", "# get directory name of every object under this path prefix", "dirname", "=", "os", ".", "path", ".", "dirname", "(", "obj", ".", "key", ")", "# dirname is empty if the object happens to be the directory", "# redirect object object for the prefix directory (directory", "# redirect objects are named after directories and have metadata", "# that tells Fastly to redirect the browser to the index.html", "# contained in the directory).", "if", "dirname", "==", "''", ":", "dirname", "=", "obj", ".", "key", "+", "'/'", "# Strip out the path prefix from the directory name", "rel_dirname", "=", "os", ".", "path", ".", "relpath", "(", "dirname", ",", "start", "=", "prefix", ")", "# If there's only one part then this directory is at the root", "# relative to the prefix. We want this.", "dir_parts", "=", "rel_dirname", ".", "split", "(", "'/'", ")", "if", "len", "(", "dir_parts", ")", "==", "1", ":", "dirnames", ".", "append", "(", "dir_parts", "[", "0", "]", ")", "# Above algorithm finds root directories for all *files* in sub", "# subdirectories; trim down to the unique set.", "dirnames", "=", "list", "(", "set", "(", "dirnames", ")", ")", "# Remove posix-like relative directory names that can appear", "# in the bucket listing.", "for", "filtered_dir", "in", "(", "'.'", ",", "'..'", ")", ":", "if", "filtered_dir", "in", "dirnames", ":", "dirnames", ".", "remove", "(", "filtered_dir", ")", "return", "dirnames" ]
List all names of directories that exist at the root of this bucket directory. Note that *directories* don't exist in S3; rather directories are inferred from path names. Parameters ---------- dirname : `str` Directory name in the bucket relative to ``bucket_root``. Returns ------- dirnames : `list` List of directory names (`str`), relative to ``bucket_root/``, that exist at the root of ``dirname``.
[ "List", "all", "names", "of", "directories", "that", "exist", "at", "the", "root", "of", "this", "bucket", "directory", "." ]
python
test
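A self-contained sketch of the trimming step at the heart of the method: given object keys under a prefix, keep only the unique first-level directory names (the S3 bucket listing is replaced by a plain list; keys are POSIX-style).

import os

prefix = 'bucket-root/docs/'
keys = ['bucket-root/docs/v1/index.html',
        'bucket-root/docs/v1/api/core.html',
        'bucket-root/docs/v2/index.html']
dirnames = set()
for key in keys:
    rel = os.path.relpath(os.path.dirname(key), start=prefix)
    parts = rel.split('/')
    if len(parts) == 1 and parts[0] not in ('.', '..'):
        dirnames.add(parts[0])   # only roots directly under the prefix survive
print(sorted(dirnames))          # ['v1', 'v2']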
kako-nawao/django-group-by
django_group_by/mixin.py
https://github.com/kako-nawao/django-group-by/blob/0d901513620acebc736722b040cff83d6483789a/django_group_by/mixin.py#L23-L74
def _expand_group_by_fields(cls, model, fields): """ Expand FK fields into all related object's fields to avoid future lookups. :param fields: fields to "group by" :return: expanded fields """ # Containers for resulting fields and related model fields res = [] related = {} # Add own fields and populate related fields for field_name in fields: if '__' in field_name: # Related model field: append to related model's fields fk_field_name, related_field = field_name.split('__', 1) if fk_field_name not in related: related[fk_field_name] = [related_field] else: related[fk_field_name].append(related_field) else: # Simple field, get the field instance model_field = model._meta.get_field(field_name) if isinstance(model_field, (ForeignKey, ManyToManyField)): # It's a related field, get model related_model = model_field.related_model # Append all its fields with the correct prefix res.extend('{}__{}'.format(field_name, f.column) for f in related_model._meta.fields) else: # It's a common field, just append it res.append(field_name) # Resolve all related fields for fk_field_name, field_names in related.items(): # Get field fk = model._meta.get_field(fk_field_name) # Get all fields for that related model related_fields = cls._expand_group_by_fields(fk.related_model, field_names) # Append them with the correct prefix res.extend('{}__{}'.format(fk_field_name, f) for f in related_fields) # Return all fields return res
[ "def", "_expand_group_by_fields", "(", "cls", ",", "model", ",", "fields", ")", ":", "# Containers for resulting fields and related model fields", "res", "=", "[", "]", "related", "=", "{", "}", "# Add own fields and populate related fields", "for", "field_name", "in", "fields", ":", "if", "'__'", "in", "field_name", ":", "# Related model field: append to related model's fields", "fk_field_name", ",", "related_field", "=", "field_name", ".", "split", "(", "'__'", ",", "1", ")", "if", "fk_field_name", "not", "in", "related", ":", "related", "[", "fk_field_name", "]", "=", "[", "related_field", "]", "else", ":", "related", "[", "fk_field_name", "]", ".", "append", "(", "related_field", ")", "else", ":", "# Simple field, get the field instance", "model_field", "=", "model", ".", "_meta", ".", "get_field", "(", "field_name", ")", "if", "isinstance", "(", "model_field", ",", "(", "ForeignKey", ",", "ManyToManyField", ")", ")", ":", "# It's a related field, get model", "related_model", "=", "model_field", ".", "related_model", "# Append all its fields with the correct prefix", "res", ".", "extend", "(", "'{}__{}'", ".", "format", "(", "field_name", ",", "f", ".", "column", ")", "for", "f", "in", "related_model", ".", "_meta", ".", "fields", ")", "else", ":", "# It's a common field, just append it", "res", ".", "append", "(", "field_name", ")", "# Resolve all related fields", "for", "fk_field_name", ",", "field_names", "in", "related", ".", "items", "(", ")", ":", "# Get field", "fk", "=", "model", ".", "_meta", ".", "get_field", "(", "fk_field_name", ")", "# Get all fields for that related model", "related_fields", "=", "cls", ".", "_expand_group_by_fields", "(", "fk", ".", "related_model", ",", "field_names", ")", "# Append them with the correct prefix", "res", ".", "extend", "(", "'{}__{}'", ".", "format", "(", "fk_field_name", ",", "f", ")", "for", "f", "in", "related_fields", ")", "# Return all fields", "return", "res" ]
Expand FK fields into all related object's fields to avoid future lookups. :param fields: fields to "group by" :return: expanded fields
[ "Expand", "FK", "fields", "into", "all", "related", "object", "s", "fields", "to", "avoid", "future", "lookups", "." ]
python
train
noxdafox/pebble
pebble/pool/process.py
https://github.com/noxdafox/pebble/blob/d8f3d989655715754f0a65d7419cfa584491f614/pebble/pool/process.py#L328-L340
def inspect_workers(self): """Updates the workers status. Returns the workers which have unexpectedly ended. """ workers = tuple(self.workers.values()) expired = tuple(w for w in workers if not w.is_alive()) for worker in expired: self.workers.pop(worker.pid) return ((w.pid, w.exitcode) for w in expired if w.exitcode != 0)
[ "def", "inspect_workers", "(", "self", ")", ":", "workers", "=", "tuple", "(", "self", ".", "workers", ".", "values", "(", ")", ")", "expired", "=", "tuple", "(", "w", "for", "w", "in", "workers", "if", "not", "w", ".", "is_alive", "(", ")", ")", "for", "worker", "in", "expired", ":", "self", ".", "workers", ".", "pop", "(", "worker", ".", "pid", ")", "return", "(", "(", "w", ".", "pid", ",", "w", ".", "exitcode", ")", "for", "w", "in", "expired", "if", "w", ".", "exitcode", "!=", "0", ")" ]
Updates the workers status. Returns the workers which have unexpectedly ended.
[ "Updates", "the", "workers", "status", "." ]
python
train
dagwieers/vmguestlib
vmguestlib.py
https://github.com/dagwieers/vmguestlib/blob/2ba9333a745628cf9e6b4c767427a5bd997a71ad/vmguestlib.py#L385-L391
def GetMemSharedMB(self): '''Retrieves the amount of physical memory associated with this virtual machine that is copy-on-write (COW) shared on the host.''' counter = c_uint() ret = vmGuestLib.VMGuestLib_GetMemSharedMB(self.handle.value, byref(counter)) if ret != VMGUESTLIB_ERROR_SUCCESS: raise VMGuestLibException(ret) return counter.value
[ "def", "GetMemSharedMB", "(", "self", ")", ":", "counter", "=", "c_uint", "(", ")", "ret", "=", "vmGuestLib", ".", "VMGuestLib_GetMemSharedMB", "(", "self", ".", "handle", ".", "value", ",", "byref", "(", "counter", ")", ")", "if", "ret", "!=", "VMGUESTLIB_ERROR_SUCCESS", ":", "raise", "VMGuestLibException", "(", "ret", ")", "return", "counter", ".", "value" ]
Retrieves the amount of physical memory associated with this virtual machine that is copy-on-write (COW) shared on the host.
[ "Retrieves", "the", "amount", "of", "physical", "memory", "associated", "with", "this", "virtual", "machine", "that", "is", "copy", "-", "on", "-", "write", "(", "COW", ")", "shared", "on", "the", "host", "." ]
python
train
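A hedged usage sketch; it only works inside a VMware guest with the guest SDK available, and the UpdateInfo refresh call follows the library's thin-wrapper naming of the C API (an assumption).

from vmguestlib import VMGuestLib

lib = VMGuestLib()
lib.UpdateInfo()   # refresh the session's counters before reading them
print(lib.GetMemSharedMB(), 'MB of COW-shared memory on the host')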
chrislit/abydos
abydos/fingerprint/_phonetic.py
https://github.com/chrislit/abydos/blob/165466b3ff6afd8024a4c8660421b0c4e7773db9/abydos/fingerprint/_phonetic.py#L49-L97
def fingerprint( self, phrase, phonetic_algorithm=double_metaphone, joiner=' ', *args, **kwargs ): """Return the phonetic fingerprint of a phrase. Parameters ---------- phrase : str The string from which to calculate the phonetic fingerprint phonetic_algorithm : function A phonetic algorithm that takes a string and returns a string (presumably a phonetic representation of the original string). By default, this function uses :py:func:`.double_metaphone`. joiner : str The string that will be placed between each word *args Variable length argument list **kwargs Arbitrary keyword arguments Returns ------- str The phonetic fingerprint of the phrase Examples -------- >>> pf = Phonetic() >>> pf.fingerprint('The quick brown fox jumped over the lazy dog.') '0 afr fks jmpt kk ls prn tk' >>> from abydos.phonetic import soundex >>> pf.fingerprint('The quick brown fox jumped over the lazy dog.', ... phonetic_algorithm=soundex) 'b650 d200 f200 j513 l200 o160 q200 t000' """ phonetic = '' for word in phrase.split(): word = phonetic_algorithm(word, *args, **kwargs) if not isinstance(word, text_type) and hasattr(word, '__iter__'): word = word[0] phonetic += word + joiner phonetic = phonetic[: -len(joiner)] return super(self.__class__, self).fingerprint(phonetic)
[ "def", "fingerprint", "(", "self", ",", "phrase", ",", "phonetic_algorithm", "=", "double_metaphone", ",", "joiner", "=", "' '", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "phonetic", "=", "''", "for", "word", "in", "phrase", ".", "split", "(", ")", ":", "word", "=", "phonetic_algorithm", "(", "word", ",", "*", "args", ",", "*", "*", "kwargs", ")", "if", "not", "isinstance", "(", "word", ",", "text_type", ")", "and", "hasattr", "(", "word", ",", "'__iter__'", ")", ":", "word", "=", "word", "[", "0", "]", "phonetic", "+=", "word", "+", "joiner", "phonetic", "=", "phonetic", "[", ":", "-", "len", "(", "joiner", ")", "]", "return", "super", "(", "self", ".", "__class__", ",", "self", ")", ".", "fingerprint", "(", "phonetic", ")" ]
Return the phonetic fingerprint of a phrase. Parameters ---------- phrase : str The string from which to calculate the phonetic fingerprint phonetic_algorithm : function A phonetic algorithm that takes a string and returns a string (presumably a phonetic representation of the original string). By default, this function uses :py:func:`.double_metaphone`. joiner : str The string that will be placed between each word *args Variable length argument list **kwargs Arbitrary keyword arguments Returns ------- str The phonetic fingerprint of the phrase Examples -------- >>> pf = Phonetic() >>> pf.fingerprint('The quick brown fox jumped over the lazy dog.') '0 afr fks jmpt kk ls prn tk' >>> from abydos.phonetic import soundex >>> pf.fingerprint('The quick brown fox jumped over the lazy dog.', ... phonetic_algorithm=soundex) 'b650 d200 f200 j513 l200 o160 q200 t000'
[ "Return", "the", "phonetic", "fingerprint", "of", "a", "phrase", "." ]
python
valid
kieferk/dfply
dfply/reshape.py
https://github.com/kieferk/dfply/blob/6a858f066602735a90f8b6b85106bc39ceadc282/dfply/reshape.py#L46-L58
def rename(df, **kwargs): """Renames columns, where keyword argument values are the current names of columns and keys are the new names. Args: df (:obj:`pandas.DataFrame`): DataFrame passed in via `>>` pipe. Kwargs: **kwargs: key:value pairs where keys are new names for columns and values are current names of columns. """ return df.rename(columns={v: k for k, v in kwargs.items()})
[ "def", "rename", "(", "df", ",", "*", "*", "kwargs", ")", ":", "return", "df", ".", "rename", "(", "columns", "=", "{", "v", ":", "k", "for", "k", ",", "v", "in", "kwargs", ".", "items", "(", ")", "}", ")" ]
Renames columns, where keyword argument values are the current names of columns and keys are the new names. Args: df (:obj:`pandas.DataFrame`): DataFrame passed in via `>>` pipe. Kwargs: **kwargs: key:value pairs where keys are new names for columns and values are current names of columns.
[ "Renames", "columns", "where", "keyword", "argument", "values", "are", "the", "current", "names", "of", "columns", "and", "keys", "are", "the", "new", "names", "." ]
python
train
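A quick pipe-style sketch (column names illustrative): keyword keys are the new names, values the current ones.

import pandas as pd
from dfply import rename

df = pd.DataFrame({'a': [1, 2], 'b': [3, 4]})
out = df >> rename(alpha='a', beta='b')
print(list(out.columns))   # ['alpha', 'beta']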
hollenstein/maspy
maspy/core.py
https://github.com/hollenstein/maspy/blob/f15fcfd24df306d8420540460d902aa3073ec133/maspy/core.py#L935-L953
def jsonHook(encoded): """ Custom JSON decoder that allows construction of a new ``Smi`` instance from a decoded JSON object. :param encoded: a JSON decoded object literal (a dict) :returns: "encoded" or one of these objects: :class:`Smi`, :class:`MzmlScan`, :class:`MzmlProduct`, :class:`MzmlPrecursor` """ if '__Smi__' in encoded: return Smi._fromJSON(encoded['__Smi__']) elif '__MzmlScan__' in encoded: return MzmlScan._fromJSON(encoded['__MzmlScan__']) elif '__MzmlProduct__' in encoded: return MzmlProduct._fromJSON(encoded['__MzmlProduct__']) elif '__MzmlPrecursor__' in encoded: return MzmlPrecursor._fromJSON(encoded['__MzmlPrecursor__']) else: return encoded
[ "def", "jsonHook", "(", "encoded", ")", ":", "if", "'__Smi__'", "in", "encoded", ":", "return", "Smi", ".", "_fromJSON", "(", "encoded", "[", "'__Smi__'", "]", ")", "elif", "'__MzmlScan__'", "in", "encoded", ":", "return", "MzmlScan", ".", "_fromJSON", "(", "encoded", "[", "'__MzmlScan__'", "]", ")", "elif", "'__MzmlProduct__'", "in", "encoded", ":", "return", "MzmlProduct", ".", "_fromJSON", "(", "encoded", "[", "'__MzmlProduct__'", "]", ")", "elif", "'__MzmlPrecursor__'", "in", "encoded", ":", "return", "MzmlPrecursor", ".", "_fromJSON", "(", "encoded", "[", "'__MzmlPrecursor__'", "]", ")", "else", ":", "return", "encoded" ]
Custom JSON decoder that allows construction of a new ``Smi`` instance from a decoded JSON object. :param encoded: a JSON decoded object literal (a dict) :returns: "encoded" or one of these objects: :class:`Smi`, :class:`MzmlScan`, :class:`MzmlProduct`, :class:`MzmlPrecursor`
[ "Custom", "JSON", "decoder", "that", "allows", "construction", "of", "a", "new", "Smi", "instance", "from", "a", "decoded", "JSON", "object", "." ]
python
train
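A self-contained sketch of the decoder pattern itself, with a toy Point class standing in for the maspy classes: json.loads hands every decoded object literal to the hook, and dicts tagged the same way as '__Smi__' above come back as instances.

import json

class Point(object):
    def __init__(self, x, y):
        self.x, self.y = x, y

    @classmethod
    def _fromJSON(cls, encoded):
        return cls(encoded['x'], encoded['y'])

def json_hook(encoded):
    # untagged dicts pass through unchanged, exactly as in the entry
    if '__Point__' in encoded:
        return Point._fromJSON(encoded['__Point__'])
    return encoded

p = json.loads('{"__Point__": {"x": 1, "y": 2}}', object_hook=json_hook)
print(p.x, p.y)   # 1 2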
dropbox/pyannotate
pyannotate_runtime/collect_types.py
https://github.com/dropbox/pyannotate/blob/d128c76b8a86f208e5c78716f2a917003650cebc/pyannotate_runtime/collect_types.py#L965-L978
def init_types_collection(filter_filename=default_filter_filename): # type: (Callable[[Optional[str]], Optional[str]]) -> None """ Set up profiler hooks to enable type collection. Call this one time from the main thread. The optional argument is a filter that maps a filename (from code.co_filename) to either a normalized filename or None. For the default filter see default_filter_filename(). """ global _filter_filename _filter_filename = filter_filename sys.setprofile(_trace_dispatch) threading.setprofile(_trace_dispatch)
[ "def", "init_types_collection", "(", "filter_filename", "=", "default_filter_filename", ")", ":", "# type: (Callable[[Optional[str]], Optional[str]]) -> None", "global", "_filter_filename", "_filter_filename", "=", "filter_filename", "sys", ".", "setprofile", "(", "_trace_dispatch", ")", "threading", ".", "setprofile", "(", "_trace_dispatch", ")" ]
Set up profiler hooks to enable type collection. Call this one time from the main thread. The optional argument is a filter that maps a filename (from code.co_filename) to either a normalized filename or None. For the default filter see default_filter_filename().
[ "Setup", "profiler", "hooks", "to", "enable", "type", "collection", ".", "Call", "this", "one", "time", "from", "the", "main", "thread", "." ]
python
train
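A hedged end-to-end sketch following pyannotate's documented runtime flow: install the hooks once from the main thread, bracket the code under observation with start/stop, then dump the collected types.

from pyannotate_runtime import collect_types

collect_types.init_types_collection()
collect_types.start()
sorted([3, 1, 2])                          # code under observation
collect_types.stop()
collect_types.dump_stats('type_info.json')  # JSON consumed by pyannotate's fixer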
tomplus/kubernetes_asyncio
kubernetes_asyncio/client/api/settings_v1alpha1_api.py
https://github.com/tomplus/kubernetes_asyncio/blob/f9ab15317ec921409714c7afef11aeb0f579985d/kubernetes_asyncio/client/api/settings_v1alpha1_api.py#L499-L528
def list_namespaced_pod_preset(self, namespace, **kwargs): # noqa: E501 """list_namespaced_pod_preset # noqa: E501 list or watch objects of kind PodPreset # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.list_namespaced_pod_preset(namespace, async_req=True) >>> result = thread.get() :param async_req bool :param str namespace: object name and auth scope, such as for teams and projects (required) :param bool include_uninitialized: If true, partially initialized resources are included in the response. :param str pretty: If 'true', then the output is pretty printed. :param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications. :param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything. :param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything. :param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned. 
:param str resource_version: When specified with a watch call, shows changes that occur after that particular version of a resource. Defaults to changes from the beginning of history. When specified for list: - if unset, then the result is returned from remote storage based on quorum-read flag; - if it's 0, then we simply return what we currently have in cache, no guarantee; - if set to non zero, then the result is at least as fresh as given rv. :param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity. :param bool watch: Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion. :return: V1alpha1PodPresetList If the method is called asynchronously, returns the request thread. """ kwargs['_return_http_data_only'] = True if kwargs.get('async_req'): return self.list_namespaced_pod_preset_with_http_info(namespace, **kwargs) # noqa: E501 else: (data) = self.list_namespaced_pod_preset_with_http_info(namespace, **kwargs) # noqa: E501 return data
[ "def", "list_namespaced_pod_preset", "(", "self", ",", "namespace", ",", "*", "*", "kwargs", ")", ":", "# noqa: E501", "kwargs", "[", "'_return_http_data_only'", "]", "=", "True", "if", "kwargs", ".", "get", "(", "'async_req'", ")", ":", "return", "self", ".", "list_namespaced_pod_preset_with_http_info", "(", "namespace", ",", "*", "*", "kwargs", ")", "# noqa: E501", "else", ":", "(", "data", ")", "=", "self", ".", "list_namespaced_pod_preset_with_http_info", "(", "namespace", ",", "*", "*", "kwargs", ")", "# noqa: E501", "return", "data" ]
list_namespaced_pod_preset # noqa: E501 list or watch objects of kind PodPreset # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.list_namespaced_pod_preset(namespace, async_req=True) >>> result = thread.get() :param async_req bool :param str namespace: object name and auth scope, such as for teams and projects (required) :param bool include_uninitialized: If true, partially initialized resources are included in the response. :param str pretty: If 'true', then the output is pretty printed. :param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications. :param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything. :param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything. :param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned. 
:param str resource_version: When specified with a watch call, shows changes that occur after that particular version of a resource. Defaults to changes from the beginning of history. When specified for list: - if unset, then the result is returned from remote storage based on quorum-read flag; - if it's 0, then we simply return what we currently have in cache, no guarantee; - if set to non zero, then the result is at least as fresh as given rv. :param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity. :param bool watch: Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion. :return: V1alpha1PodPresetList If the method is called asynchronously, returns the request thread.
[ "list_namespaced_pod_preset", "#", "noqa", ":", "E501" ]
python
train
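A minimal usage sketch for the record above, following the calling conventions its docstring describes; the namespace, label selector, and page size are illustrative assumptions, and a client configured elsewhere (e.g. via kubernetes_asyncio.config) is assumed.

from kubernetes_asyncio import client

api = client.SettingsV1alpha1Api()

# Default call: returns a V1alpha1PodPresetList directly.
presets = api.list_namespaced_pod_preset(
    'default',                 # namespace (required)
    label_selector='app=web',  # optional: restrict results by labels
    limit=50,                  # optional: page size, paired with `_continue`
)

# Per the docstring, async_req=True returns the request thread instead.
thread = api.list_namespaced_pod_preset('default', async_req=True)
presets = thread.get()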
joopert/nad_receiver
nad_receiver/__init__.py
https://github.com/joopert/nad_receiver/blob/416de0173a330c75cc73f9c90b0c5df32e5e0ba3/nad_receiver/__init__.py#L183-L213
def _send(self, message, read_reply=False): """Send a command string to the amplifier.""" sock = None for tries in range(0, 3): try: sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM) sock.connect((self._host, self.PORT)) break except (ConnectionError, BrokenPipeError): if tries == 2: print("socket connect failed.") return sleep(0.1) sock.send(codecs.decode(message, 'hex_codec')) if read_reply: sleep(0.1) reply = '' tries = 0 max_tries = 20 while len(reply) < len(message) and tries < max_tries: try: reply += codecs.encode(sock.recv(self.BUFFERSIZE), 'hex')\ .decode("utf-8") except (ConnectionError, BrokenPipeError): pass tries += 1 sock.close() if tries >= max_tries: return return reply sock.close()
[ "def", "_send", "(", "self", ",", "message", ",", "read_reply", "=", "False", ")", ":", "sock", "=", "None", "for", "tries", "in", "range", "(", "0", ",", "3", ")", ":", "try", ":", "sock", "=", "socket", ".", "socket", "(", "socket", ".", "AF_INET", ",", "socket", ".", "SOCK_STREAM", ")", "sock", ".", "connect", "(", "(", "self", ".", "_host", ",", "self", ".", "PORT", ")", ")", "break", "except", "(", "ConnectionError", ",", "BrokenPipeError", ")", ":", "if", "tries", "==", "3", ":", "print", "(", "\"socket connect failed.\"", ")", "return", "sleep", "(", "0.1", ")", "sock", ".", "send", "(", "codecs", ".", "decode", "(", "message", ",", "'hex_codec'", ")", ")", "if", "read_reply", ":", "sleep", "(", "0.1", ")", "reply", "=", "''", "tries", "=", "0", "max_tries", "=", "20", "while", "len", "(", "reply", ")", "<", "len", "(", "message", ")", "and", "tries", "<", "max_tries", ":", "try", ":", "reply", "+=", "codecs", ".", "encode", "(", "sock", ".", "recv", "(", "self", ".", "BUFFERSIZE", ")", ",", "'hex'", ")", ".", "decode", "(", "\"utf-8\"", ")", "except", "(", "ConnectionError", ",", "BrokenPipeError", ")", ":", "pass", "tries", "+=", "1", "sock", ".", "close", "(", ")", "if", "tries", ">=", "max_tries", ":", "return", "return", "reply", "sock", ".", "close", "(", ")" ]
Send a command string to the amplifier.
[ "Send", "a", "command", "string", "to", "the", "amplifier", "." ]
python
test
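For context, a sketch of how this private transport method might be exercised; it assumes _send lives on the package's NADReceiverTCP class (it reads self._host, self.PORT, and self.BUFFERSIZE), and the command string is an illustrative hex payload, not a documented NAD message.

from nad_receiver import NADReceiverTCP  # assumed host class for _send

receiver = NADReceiverTCP('192.168.1.40')
# `message` must be a hex string: codecs.decode('0001020924', 'hex_codec')
# yields the raw bytes b'\x00\x01\x02\x09\x24' that go on the wire.
reply = receiver._send('0001020924', read_reply=True)
print(reply)  # hex-encoded reply, or None if all 20 read attempts fail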
flashingpumpkin/django-socialregistration
socialregistration/views.py
https://github.com/flashingpumpkin/django-socialregistration/blob/9da9fb83c9bf79997ff81fe1378ab5ca3074b32b/socialregistration/views.py#L85-L115
def generate_username_and_redirect(self, request, user, profile, client): """ Generate a username and then redirect the user to the correct place. This method is called when ``SOCIALREGISTRATION_GENERATE_USERNAME`` is set. :param request: The current request object :param user: The unsaved user object :param profile: The unsaved profile object :param client: The API client """ func = self.get_username_function() user.username = func(user, profile, client) user.set_unusable_password() user.save() profile.user = user profile.save() user = profile.authenticate() self.send_connect_signal(request, user, profile, client) self.login(request, user) self.send_login_signal(request, user, profile, client) self.delete_session_data(request) return HttpResponseRedirect(self.get_next(request))
[ "def", "generate_username_and_redirect", "(", "self", ",", "request", ",", "user", ",", "profile", ",", "client", ")", ":", "func", "=", "self", ".", "get_username_function", "(", ")", "user", ".", "username", "=", "func", "(", "user", ",", "profile", ",", "client", ")", "user", ".", "set_unusable_password", "(", ")", "user", ".", "save", "(", ")", "profile", ".", "user", "=", "user", "profile", ".", "save", "(", ")", "user", "=", "profile", ".", "authenticate", "(", ")", "self", ".", "send_connect_signal", "(", "request", ",", "user", ",", "profile", ",", "client", ")", "self", ".", "login", "(", "request", ",", "user", ")", "self", ".", "send_login_signal", "(", "request", ",", "user", ",", "profile", ",", "client", ")", "self", ".", "delete_session_data", "(", "request", ")", "return", "HttpResponseRedirect", "(", "self", ".", "get_next", "(", "request", ")", ")" ]
Generate a username and then redirect the user to the correct place. This method is called when ``SOCIALREGISTRATION_GENERATE_USERNAME`` is set. :param request: The current request object :param user: The unsaved user object :param profile: The unsaved profile object :param client: The API client
[ "Generate", "a", "username", "and", "then", "redirect", "the", "user", "to", "the", "correct", "place", ".", "This", "method", "is", "called", "when", "SOCIALREGISTRATION_GENERATE_USERNAME", "is", "set", "." ]
python
train
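A sketch of the configuration this hook depends on; the setting name for the function path and the generator body are assumptions for illustration, not taken from the record above.

# settings.py (sketch)
SOCIALREGISTRATION_GENERATE_USERNAME = True
SOCIALREGISTRATION_GENERATE_USERNAME_FUNCTION = 'myapp.utils.generate_username'

# myapp/utils.py (sketch): must accept (user, profile, client),
# matching the func(user, profile, client) call in the view above.
import uuid

def generate_username(user, profile, client):
    # Derive something unique; a random hex suffix keeps the sketch simple.
    return uuid.uuid4().hex[:30]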
edeposit/edeposit.amqp.ftp
src/edeposit/amqp/ftp/api.py
https://github.com/edeposit/edeposit.amqp.ftp/blob/fcdcbffb6e5d194e1bb4f85f0b8eaa9dbb08aa71/src/edeposit/amqp/ftp/api.py#L177-L197
def change_password(username, new_password): """ Change password for given `username`. Args: username (str): User's name. new_password (str): User's new password. """ assert username in passwd_reader.load_users(),\ "Username '%s' not found!" % username sh.ftpasswd( "--change-password", passwd=True, # passwd file, not group file name=username, stdin=True, # tell ftpasswd to read password from stdin file=settings.LOGIN_FILE, _in=new_password ) reload_configuration()
[ "def", "change_password", "(", "username", ",", "new_password", ")", ":", "assert", "username", "in", "passwd_reader", ".", "load_users", "(", ")", ",", "\"Username '%s' not found!\"", "%", "username", "sh", ".", "ftpasswd", "(", "\"--change-password\"", ",", "passwd", "=", "True", ",", "# passwd file, not group file", "name", "=", "username", ",", "stdin", "=", "True", ",", "# tell ftpasswd to read password from stdin", "file", "=", "settings", ".", "LOGIN_FILE", ",", "_in", "=", "new_password", ")", "reload_configuration", "(", ")" ]
Change password for given `username`. Args: username (str): User's name. new_password (str): User's new password.
[ "Change", "password", "for", "given", "username", "." ]
python
train
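A short usage sketch; the credentials are illustrative, and per the assert in the code the user must already exist in the proftpd login file.

from edeposit.amqp.ftp import api

api.change_password('someuser', 'n3w-s3cret')  # AssertionError if the user is unknown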
pydata/xarray
xarray/convert.py
https://github.com/pydata/xarray/blob/6d93a95d05bdbfc33fff24064f67d29dd891ab58/xarray/convert.py#L125-L135
def _get_iris_args(attrs): """ Converts the xarray attrs into args that can be passed into Iris """ # iris.unit is deprecated in Iris v1.9 import cf_units args = {'attributes': _filter_attrs(attrs, iris_forbidden_keys)} args.update(_pick_attrs(attrs, ('standard_name', 'long_name',))) unit_args = _pick_attrs(attrs, ('calendar',)) if 'units' in attrs: args['units'] = cf_units.Unit(attrs['units'], **unit_args) return args
[ "def", "_get_iris_args", "(", "attrs", ")", ":", "# iris.unit is deprecated in Iris v1.9", "import", "cf_units", "args", "=", "{", "'attributes'", ":", "_filter_attrs", "(", "attrs", ",", "iris_forbidden_keys", ")", "}", "args", ".", "update", "(", "_pick_attrs", "(", "attrs", ",", "(", "'standard_name'", ",", "'long_name'", ",", ")", ")", ")", "unit_args", "=", "_pick_attrs", "(", "attrs", ",", "(", "'calendar'", ",", ")", ")", "if", "'units'", "in", "attrs", ":", "args", "[", "'units'", "]", "=", "cf_units", ".", "Unit", "(", "attrs", "[", "'units'", "]", ",", "*", "*", "unit_args", ")", "return", "args" ]
Converts the xarray attrs into args that can be passed into Iris
[ "Converts", "the", "xarray", "attrs", "into", "args", "that", "can", "be", "passed", "into", "Iris" ]
python
train
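A small sketch of the attribute translation the helper above performs; the attrs dict is illustrative, and _get_iris_args is private to xarray.convert, so the import is shown only for demonstration.

from xarray.convert import _get_iris_args

attrs = {
    'standard_name': 'air_temperature',  # picked into args directly
    'units': 'days since 2000-01-01',
    'calendar': 'noleap',                # folded into the cf_units.Unit below
    'source': 'simulation',              # passed through as a plain attribute
}

args = _get_iris_args(attrs)
# args['units'] == cf_units.Unit('days since 2000-01-01', calendar='noleap'),
# ready to pass to an iris cube/coord constructor.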
hubo1016/namedstruct
misc/openflow/nicira_ext.py
https://github.com/hubo1016/namedstruct/blob/5039026e0df4ce23003d212358918dbe1a6e1d76/misc/openflow/nicira_ext.py#L1130-L3077
def create_extension(namespace, nicira_header, nx_action, nx_stats_request, nx_stats_reply, msg_subtype, action_subtype, stats_subtype): ''' /* This command enables or disables an Open vSwitch extension that allows a * controller to specify the OpenFlow table to which a flow should be added, * instead of having the switch decide which table is most appropriate as * required by OpenFlow 1.0. Because NXM was designed as an extension to * OpenFlow 1.0, the extension applies equally to ofp10_flow_mod and * nx_flow_mod. By default, the extension is disabled. * * When this feature is enabled, Open vSwitch treats struct ofp10_flow_mod's * and struct nx_flow_mod's 16-bit 'command' member as two separate fields. * The upper 8 bits are used as the table ID, the lower 8 bits specify the * command as usual. A table ID of 0xff is treated like a wildcarded table ID. * * The specific treatment of the table ID depends on the type of flow mod: * * - OFPFC_ADD: Given a specific table ID, the flow is always placed in that * table. If an identical flow already exists in that table only, then it * is replaced. If the flow cannot be placed in the specified table, * either because the table is full or because the table cannot support * flows of the given type, the switch replies with an OFPFMFC_TABLE_FULL * error. (A controller can distinguish these cases by comparing the * current and maximum number of entries reported in ofp_table_stats.) * * If the table ID is wildcarded, the switch picks an appropriate table * itself. If an identical flow already exist in the selected flow table, * then it is replaced. The choice of table might depend on the flows * that are already in the switch; for example, if one table fills up then * the switch might fall back to another one. * * - OFPFC_MODIFY, OFPFC_DELETE: Given a specific table ID, only flows * within that table are matched and modified or deleted. If the table ID * is wildcarded, flows within any table may be matched and modified or * deleted. * * - OFPFC_MODIFY_STRICT, OFPFC_DELETE_STRICT: Given a specific table ID, * only a flow within that table may be matched and modified or deleted. * If the table ID is wildcarded and exactly one flow within any table * matches, then it is modified or deleted; if flows in more than one * table match, then none is modified or deleted. */ ''' with _warnings.catch_warnings(): _warnings.filterwarnings('ignore', '^padding', StructDefWarning) nx_flow_mod_table_id = nstruct( (uint8, 'set'), # /* Nonzero to enable, zero to disable. */ (uint8[7],), name = 'nx_flow_mod_table_id', base = nicira_header, criteria = lambda x: getattr(x, msg_subtype) == NXT_FLOW_MOD_TABLE_ID, classifyby = (NXT_FLOW_MOD_TABLE_ID,), init = packvalue(NXT_FLOW_MOD_TABLE_ID, msg_subtype) ) namespace['nx_flow_mod_table_id'] = nx_flow_mod_table_id ''' /* NXT_SET_PACKET_IN_FORMAT request. */ ''' nx_set_packet_in_format = nstruct( (uint32, 'format'), # /* One of NXPIF_*. */ name = 'nx_set_packet_in_format', base = nicira_header, criteria = lambda x: getattr(x, msg_subtype) == NXT_SET_PACKET_IN_FORMAT, classifyby = (NXT_SET_PACKET_IN_FORMAT,), init = packvalue(NXT_SET_PACKET_IN_FORMAT, msg_subtype) ) namespace['nx_set_packet_in_format'] = nx_set_packet_in_format ''' /* NXT_PACKET_IN (analogous to OFPT_PACKET_IN). * * NXT_PACKET_IN is similar to the OpenFlow 1.2 OFPT_PACKET_IN. The * differences are: * * - NXT_PACKET_IN includes the cookie of the rule that triggered the * message. (OpenFlow 1.3 OFPT_PACKET_IN also includes the cookie.) 
* * - The metadata fields use NXM (instead of OXM) field numbers. * * Open vSwitch 1.9.0 and later omits metadata fields that are zero (as allowed * by OpenFlow 1.2). Earlier versions included all implemented metadata * fields. * * Open vSwitch does not include non-metadata in the nx_match, because by * definition that information can be found in the packet itself. The format * and the standards allow this, however, so controllers should be prepared to * tolerate future changes. * * The NXM format is convenient for reporting metadata values, but it is * important not to interpret the format as matching against a flow, because it * does not. Nothing is being matched; arbitrary metadata masks would not be * meaningful. * * Whereas in most cases a controller can expect to only get back NXM fields * that it set up itself (e.g. flow dumps will ordinarily report only NXM * fields from flows that the controller added), NXT_PACKET_IN messages might * contain fields that the controller does not understand, because the switch * might support fields (new registers, new protocols, etc.) that the * controller does not. The controller must prepared to tolerate these. * * The 'cookie' field has no meaning when 'reason' is OFPR_NO_MATCH. In this * case it should be UINT64_MAX. */ ''' if 'ofp_oxm' in namespace: nx_match = namespace['ofp_oxm'] namespace['nx_match'] = nx_match nx_match_mask = namespace['ofp_oxm_mask'] namespace['nx_match_mask'] = nx_match_mask nx_match_nomask = namespace['ofp_oxm_nomask'] namespace['nx_match_nomask'] = nx_match_nomask create_nxm = namespace['create_oxm'] namespace['create_nxm'] = create_nxm nx_match_nomask_ext = nstruct( base = nx_match_nomask, criteria = lambda x: NXM_VENDOR(x.header) <= 1, extend = {'header': nxm_header}, name = 'nx_match_nomask_ext' ) namespace['nx_match_nomask_ext'] = nx_match_nomask_ext nx_match_mask_ext = nstruct( base = nx_match_mask, criteria = lambda x: NXM_VENDOR(x.header) <= 1, extend = {'header': nxm_header}, name = 'nx_match_mask_ext' ) namespace['nx_match_mask_ext'] = nx_match_mask_ext else: nx_match = nstruct( (nxm_header, 'header'), name = 'nx_match', padding = 1, size = lambda x: NXM_LENGTH(x.header) + 4 ) namespace['nx_match'] = nx_match nx_match_nomask = nstruct( (raw, 'value'), base = nx_match, criteria = lambda x: not NXM_HASMASK(x.header), init = packvalue(NXM_OF_IN_PORT, 'header'), name = 'nx_match_nomask' ) namespace['nx_match_nomask'] = nx_match_nomask _nxm_mask_value = nstruct( (raw, 'value'), name = 'nxm_mask_value', size = lambda x: NXM_LENGTH(x.header) // 2, padding = 1 ) nx_match_mask = nstruct( (_nxm_mask_value,), (raw, 'mask'), base = nx_match, criteria = lambda x: NXM_HASMASK(x.header), init = packvalue(NXM_OF_ETH_SRC_W, 'header'), name = 'nx_match_mask', ) namespace['nx_match_mask'] = nx_match_mask def create_nxm(header, value = None, mask = None): if NXM_HASMASK(header): nxm = nx_match_mask.new() size = NXM_LENGTH(header) // 2 else: nxm = nx_match_nomask.new() size = NXM_LENGTH(header) nxm.header = header nxm.value = common.create_binary(value, size) if NXM_HASMASK(header): nxm.mask = common.create_binary(mask, size) nxm._pack() nxm._autosubclass() return nxm namespace['create_nxm'] = create_nxm nx_match_nomask_ext = nx_match_nomask nx_match_mask_ext = nx_match_mask namespace['nx_match_nomask_ext'] = nx_match_nomask_ext namespace['nx_match_mask_ext'] = nx_match_mask_ext from namedstruct.namedstruct import rawtype as _rawtype import socket as _socket if 'ip4_addr_bytes' in namespace: ip4_addr_bytes = 
namespace['ip4_addr_bytes'] else: ip4_addr_bytes = prim('4s', 'ip4_addr_bytes') ip4_addr_bytes.formatter = lambda x: _socket.inet_ntoa(x) namespace['ip4_addr_bytes'] = ip4_addr_bytes nxm_mask_ipv4 = nstruct(name = 'nxm_mask_ipv4', base = nx_match_mask_ext, criteria = lambda x: x.header in (NXM_OF_IP_SRC_W, NXM_OF_IP_DST_W, NXM_OF_ARP_SPA_W, NXM_OF_ARP_TPA_W, NXM_NX_TUN_IPV4_SRC_W, NXM_NX_TUN_IPV4_DST_W), init = packvalue(NXM_OF_IP_SRC_W, 'header'), extend = {'value' : ip4_addr_bytes, 'mask' : ip4_addr_bytes} ) namespace['nxm_mask_ipv4'] = nxm_mask_ipv4 nxm_nomask_ipv4 = nstruct(name = 'nxm_nomask_ipv4', base = nx_match_nomask_ext, criteria = lambda x: x.header in (NXM_OF_IP_SRC, NXM_OF_IP_DST, NXM_OF_ARP_SPA, NXM_OF_ARP_TPA, NXM_NX_TUN_IPV4_SRC, NXM_NX_TUN_IPV4_DST), init = packvalue(NXM_OF_IP_SRC, 'header'), extend = {'value' : ip4_addr_bytes} ) namespace['nxm_nomask_ipv4'] = nxm_nomask_ipv4 if 'mac_addr_bytes' in namespace: mac_addr_bytes = namespace['mac_addr_bytes'] else: mac_addr_bytes = _rawtype() mac_addr_bytes.formatter = lambda x: ':'.join('%02X' % (c,) for c in bytearray(x)) namespace['mac_addr_bytes'] = mac_addr_bytes nxm_mask_eth = nstruct(name = 'nxm_mask_eth', base = nx_match_mask_ext, criteria = lambda x: x.header in (NXM_OF_ETH_SRC_W, NXM_OF_ETH_DST_W), init = packvalue(NXM_OF_ETH_SRC_W, 'header'), extend = {'value' : mac_addr_bytes, 'mask' : mac_addr_bytes}) namespace['nxm_mask_eth'] = nxm_mask_eth nxm_nomask_eth = nstruct(name = 'nxm_nomask_eth', base = nx_match_nomask_ext, criteria = lambda x: x.header in (NXM_OF_ETH_SRC, NXM_OF_ETH_DST, NXM_NX_ND_SLL, NXM_NX_ND_TLL, NXM_NX_ARP_SHA, NXM_NX_ARP_THA), init = packvalue(NXM_OF_ETH_SRC, 'header'), extend = {'value' : mac_addr_bytes}) namespace['nxm_nomask_eth'] = nxm_nomask_eth ofp_port_no = namespace['ofp_port_no'] nx_port_no = enum('nx_port_no', None, uint16, **dict((k, v & 0xffff) for k,v in ofp_port_no.getDict().items()) ) nxm_port_no_raw = _rawtype() nxm_port_no_raw.formatter = lambda x: nx_port_no.formatter(nx_port_no.parse(x)[0]) namespace['nx_port_no'] = nx_port_no namespace['nxm_port_no_raw'] = nxm_port_no_raw nxm_nomask_port = nstruct(name = 'nxm_nomask_port', base = nx_match_nomask_ext, criteria = lambda x: x.header == NXM_OF_IN_PORT, init = packvalue(NXM_OF_IN_PORT, 'header'), extend = {'value': nxm_port_no_raw} ) namespace['nxm_nomask_port'] = nxm_nomask_port if 'ethtype_raw' in namespace: ethtype_raw = namespace['ethtype_raw'] else: ethtype_raw = _rawtype() ethtype_raw.formatter = lambda x: ethertype.formatter(ethertype.parse(x)[0]) namespace['ethtype_raw'] = ethtype_raw nxm_nomask_ethertype = nstruct(name = 'nxm_nomask_ethertype', base = nx_match_nomask_ext, criteria = lambda x: x.header == NXM_OF_ETH_TYPE, init = packvalue(NXM_OF_ETH_TYPE, 'header'), extend = {'value': ethtype_raw}) namespace['nxm_nomask_ethertype'] = nxm_nomask_ethertype if 'arpop_raw' in namespace: arpop_raw = namespace['arpop_raw'] else: arpop_raw = _rawtype() arpop_raw.formatter = lambda x: arp_op_code.formatter(arp_op_code.parse(x)[0]) namespace['arpop_raw'] = arpop_raw nxm_nomask_arpopcode = nstruct(name = 'nxm_nomask_arpopcode', base = nx_match_nomask_ext, criteria = lambda x: x.header == NXM_OF_ARP_OP, init = packvalue(NXM_OF_ARP_OP, 'header'), extend = {'value': arpop_raw}) namespace['nxm_nomask_arpopcode'] = nxm_nomask_arpopcode if 'ip_protocol_raw' in namespace: ip_protocol_raw = namespace['ip_protocol_raw'] else: ip_protocol_raw = _rawtype() ip_protocol_raw.formatter = lambda x: ip_protocol.formatter(ip_protocol.parse(x)[0]) 
namespace['ip_protocol_raw'] = ip_protocol_raw nxm_nomask_ip_protocol = nstruct(name = 'nxm_nomask_ip_protocol', base = nx_match_nomask_ext, criteria = lambda x: x.header == NXM_OF_IP_PROTO, init = packvalue(NXM_OF_IP_PROTO, 'header'), extend = {'value': ip_protocol_raw}) namespace['nxm_nomask_ip_protocol'] = nxm_nomask_ip_protocol if 'ip6_addr_bytes' in namespace: ip6_addr_bytes = namespace['ip6_addr_bytes'] nxm_nomask_ipv6 = nstruct(name = 'nxm_nomask_ipv6', base = nx_match_nomask_ext, criteria = lambda x: x.header in (NXM_NX_IPV6_SRC, NXM_NX_IPV6_DST, NXM_NX_ND_TARGET), init = packvalue(NXM_NX_IPV6_SRC, 'header'), extend = {'value': ip6_addr_bytes}) namespace['nxm_nomask_ipv6'] = nxm_nomask_ipv6 nxm_mask_ipv6 = nstruct(name = 'nxm_mask_ipv6', base = nx_match_mask_ext, criteria = lambda x: x.header in (NXM_NX_IPV6_SRC_W, NXM_NX_IPV6_DST_W), init = packvalue(NXM_NX_IPV6_SRC_W, 'header'), extend = {'value': ip6_addr_bytes, 'mask': ip6_addr_bytes}) namespace['nxm_mask_ipv6'] = nxm_mask_ipv6 nx_ip_frag_raw = _rawtype() nx_ip_frag_raw.formatter = lambda x: nx_ip_frag.formatter(nx_ip_frag.parse(x)[0]) nxm_nomask_ipfrag = nstruct(name = 'nxm_nomask_ipfrag', base = nx_match_nomask_ext, criteria = lambda x: x.header == NXM_NX_IP_FRAG, init = packvalue(NXM_NX_IP_FRAG, 'header'), extend = {'value': nx_ip_frag_raw}) namespace['nxm_nomask_ipfrag'] = nxm_nomask_ipfrag nxm_mask_ipfrag = nstruct(name = 'nxm_mask_ipfrag', base = nx_match_mask_ext, criteria = lambda x: x.header == NXM_NX_IP_FRAG_W, init = packvalue(NXM_NX_IP_FRAG_W, 'header'), extend = {'value': nx_ip_frag_raw, 'mask': nx_ip_frag_raw}) namespace['nxm_mask_ipfrag'] = nxm_mask_ipfrag nx_matches = nstruct( (nx_match[0], 'matches'), name = 'nx_matches', size = sizefromlen(65536, 'match_len'), prepack = packrealsize('match_len'), padding = 8 ) namespace['nx_matches'] = nx_matches nx_packet_in = nstruct( (uint32, 'buffer_id'), # /* ID assigned by datapath. */ (uint16, 'total_len'), # /* Full length of frame. */ (uint8, 'reason'), # /* Reason packet is sent (one of OFPR_*). */ (uint8, 'table_id'), # /* ID of the table that was looked up. */ (uint64, 'cookie'), # /* Cookie of the rule that was looked up. */ (uint16, 'match_len'), # /* Size of nx_match. */ (uint8[6],), # /* Align to 64-bits. */ (nx_matches,), (uint8[2],), (raw, 'data'), name = 'nx_packet_in', base = nicira_header, classifyby = (NXT_PACKET_IN,), criteria = lambda x: getattr(x, msg_subtype) == NXT_PACKET_IN, init = packvalue(NXT_PACKET_IN, msg_subtype) ) namespace['nx_packet_in'] = nx_packet_in ''' /* Configures the "role" of the sending controller. The default role is: * * - Other (NX_ROLE_OTHER), which allows the controller access to all * OpenFlow features. * * The other possible roles are a related pair: * * - Master (NX_ROLE_MASTER) is equivalent to Other, except that there may * be at most one Master controller at a time: when a controller * configures itself as Master, any existing Master is demoted to the * Slave role. * * - Slave (NX_ROLE_SLAVE) allows the controller read-only access to * OpenFlow features. In particular attempts to modify the flow table * will be rejected with an OFPBRC_EPERM error. * * Slave controllers do not receive OFPT_PACKET_IN or OFPT_FLOW_REMOVED * messages, but they do receive OFPT_PORT_STATUS messages. */ ''' nx_role_request = nstruct( (nx_role, 'role'), # /* One of NX_ROLE_*.
*/ name = 'nx_role_request', base = nicira_header, classifyby = (NXT_ROLE_REQUEST, NXT_ROLE_REPLY), criteria = lambda x: getattr(x, msg_subtype) == NXT_ROLE_REQUEST or getattr(x, msg_subtype) == NXT_ROLE_REPLY, init = packvalue(NXT_ROLE_REQUEST, msg_subtype) ) namespace['nx_role_request'] = nx_role_request ''' /* NXT_SET_ASYNC_CONFIG. * * Sent by a controller, this message configures the asynchronous messages that * the controller wants to receive. Element 0 in each array specifies messages * of interest when the controller has an "other" or "master" role; element 1, * when the controller has a "slave" role. * * Each array element is a bitmask in which a 0-bit disables receiving a * particular message and a 1-bit enables receiving it. Each bit controls the * message whose 'reason' corresponds to the bit index. For example, the bit * with value 1<<2 == 4 in port_status_mask[1] determines whether the * controller will receive OFPT_PORT_STATUS messages with reason OFPPR_MODIFY * (value 2) when the controller has a "slave" role. * * As a side effect, for service controllers, this message changes the * miss_send_len from default of zero to OFP_DEFAULT_MISS_SEND_LEN (128). */ ''' ofp_packet_in_reason = namespace['ofp_packet_in_reason'] if 'ofp_packet_in_reason_bitwise' in namespace: ofp_packet_in_reason_bitwise = namespace['ofp_packet_in_reason_bitwise'] else: ofp_packet_in_reason_bitwise = enum('ofp_packet_in_reason_bitwise', None, uint32, **dict((k, 1<<v) for k,v in ofp_packet_in_reason.getDict().items())) namespace['ofp_packet_in_reason_bitwise'] = ofp_packet_in_reason_bitwise ofp_port_reason = namespace['ofp_port_reason'] if 'ofp_port_reason_bitwise' in namespace: ofp_port_reason_bitwise = namespace['ofp_port_reason_bitwise'] else: ofp_port_reason_bitwise = enum('ofp_port_reason_bitwise', None, uint32, **dict((k, 1<<v) for k,v in ofp_port_reason.getDict().items())) namespace['ofp_port_reason_bitwise'] = ofp_port_reason_bitwise ofp_flow_removed_reason = namespace['ofp_flow_removed_reason'] if 'ofp_flow_removed_reason_bitwise' in namespace: ofp_flow_removed_reason_bitwise = namespace['ofp_flow_removed_reason_bitwise'] else: ofp_flow_removed_reason_bitwise = enum('ofp_flow_removed_reason_bitwise', None, uint32, **dict((k, 1<<v) for k,v in ofp_flow_removed_reason.getDict().items())) namespace['ofp_flow_removed_reason_bitwise'] = ofp_flow_removed_reason_bitwise nx_async_config = nstruct( (ofp_packet_in_reason_bitwise[2], 'packet_in_mask'), # /* Bitmasks of OFPR_* values. */ (ofp_port_reason_bitwise[2], 'port_status_mask'), # /* Bitmasks of OFPRR_* values. */ (ofp_flow_removed_reason_bitwise[2], 'flow_removed_mask'), #/* Bitmasks of OFPPR_* values. */ name = 'nx_async_config', base = nicira_header, classifyby = (NXT_SET_ASYNC_CONFIG,), criteria = lambda x: getattr(x, msg_subtype) == NXT_SET_ASYNC_CONFIG, init = packvalue(NXT_SET_ASYNC_CONFIG, msg_subtype) ) namespace['nx_async_config'] = nx_async_config ''' /* Nicira vendor flow actions. */ ''' ''' /* Action structures for NXAST_RESUBMIT and NXAST_RESUBMIT_TABLE. * * These actions search one of the switch's flow tables: * * - For NXAST_RESUBMIT_TABLE only, if the 'table' member is not 255, then * it specifies the table to search. * * - Otherwise (for NXAST_RESUBMIT_TABLE with a 'table' of 255, or for * NXAST_RESUBMIT regardless of 'table'), it searches the current flow * table, that is, the OpenFlow flow table that contains the flow from * which this action was obtained. If this action did not come from a * flow table (e.g. 
it came from an OFPT_PACKET_OUT message), then table 0 * is the current table. * * The flow table lookup uses a flow that may be slightly modified from the * original lookup: * * - For NXAST_RESUBMIT, the 'in_port' member of struct nx_action_resubmit * is used as the flow's in_port. * * - For NXAST_RESUBMIT_TABLE, if the 'in_port' member is not OFPP_IN_PORT, * then its value is used as the flow's in_port. Otherwise, the original * in_port is used. * * - If actions that modify the flow (e.g. OFPAT_SET_VLAN_VID) precede the * resubmit action, then the flow is updated with the new values. * * Following the lookup, the original in_port is restored. * * If the modified flow matched in the flow table, then the corresponding * actions are executed. Afterward, actions following the resubmit in the * original set of actions, if any, are executed; any changes made to the * packet (e.g. changes to VLAN) by secondary actions persist when those * actions are executed, although the original in_port is restored. * * Resubmit actions may be used any number of times within a set of actions. * * Resubmit actions may nest to an implementation-defined depth. Beyond this * implementation-defined depth, further resubmit actions are simply ignored. * * NXAST_RESUBMIT ignores 'table' and 'pad'. NXAST_RESUBMIT_TABLE requires * 'pad' to be all-bits-zero. * * Open vSwitch 1.0.1 and earlier did not support recursion. Open vSwitch * before 1.2.90 did not support NXAST_RESUBMIT_TABLE. */ ''' nx_action_resubmit = nstruct( (nx_port_no, 'in_port'), # /* New in_port for checking flow table. */ (uint8, 'table'), # /* NXAST_RESUBMIT_TABLE: table to use. */ (uint8[3],), base = nx_action, criteria = lambda x: getattr(x, action_subtype) == NXAST_RESUBMIT_TABLE or getattr(x, action_subtype) == NXAST_RESUBMIT, classifyby = (NXAST_RESUBMIT_TABLE, NXAST_RESUBMIT), name = 'nx_action_resubmit', init = packvalue(NXAST_RESUBMIT_TABLE, action_subtype) ) namespace['nx_action_resubmit'] = nx_action_resubmit ''' /* Action structure for NXAST_SET_TUNNEL. * * Sets the encapsulating tunnel ID to a 32-bit value. The most-significant 32 * bits of the tunnel ID are set to 0. */ ''' nx_action_set_tunnel = nstruct( (uint8[2],), (uint32, 'tun_id'), # /* Tunnel ID. */ name = 'nx_action_set_tunnel', base = nx_action, classifyby = (NXAST_SET_TUNNEL,), criteria = lambda x: getattr(x, action_subtype) == NXAST_SET_TUNNEL, init = packvalue(NXAST_SET_TUNNEL, action_subtype) ) namespace['nx_action_set_tunnel'] = nx_action_set_tunnel ''' /* Action structure for NXAST_SET_TUNNEL64. * * Sets the encapsulating tunnel ID to a 64-bit value. */ ''' nx_action_set_tunnel64 = nstruct( (uint8[6],), (uint64, 'tun_id'), # /* Tunnel ID. */ name = 'nx_action_set_tunnel64', base = nx_action, classifyby = (NXAST_SET_TUNNEL64,), criteria = lambda x: getattr(x, action_subtype) == NXAST_SET_TUNNEL64, init = packvalue(NXAST_SET_TUNNEL64, action_subtype) ) namespace['nx_action_set_tunnel64'] = nx_action_set_tunnel64 ''' /* Action structure for NXAST_SET_QUEUE. * * Set the queue that should be used when packets are output. This is similar * to the OpenFlow OFPAT_ENQUEUE action, but does not take the output port as * an argument. This allows the queue to be defined before the port is * known. */ ''' nx_action_set_queue = nstruct( (uint8[2],), (uint32, 'queue_id'), # /* Where to enqueue packets. 
*/ name = 'nx_action_set_queue', base = nx_action, classifyby = (NXAST_SET_QUEUE,), criteria = lambda x: getattr(x, action_subtype) == NXAST_SET_QUEUE, init = packvalue(NXAST_SET_QUEUE, action_subtype) ) namespace['nx_action_set_queue'] = nx_action_set_queue ''' /* Action structure for NXAST_POP_QUEUE. * * Restores the queue to the value it was before any NXAST_SET_QUEUE actions * were used. Only the original queue can be restored this way; no stack is * maintained. */ ''' nx_action_pop_queue = nstruct( (uint8[6],), name = 'nx_action_pop_queue', base = nx_action, classifyby = (NXAST_POP_QUEUE,), criteria = lambda x: getattr(x, action_subtype) == NXAST_POP_QUEUE, init = packvalue(NXAST_POP_QUEUE, action_subtype) ) namespace['nx_action_pop_queue'] = nx_action_pop_queue ''' /* Action structure for NXAST_REG_MOVE. * * Copies src[src_ofs:src_ofs+n_bits] to dst[dst_ofs:dst_ofs+n_bits], where * a[b:c] denotes the bits within 'a' numbered 'b' through 'c' (not including * bit 'c'). Bit numbering starts at 0 for the least-significant bit, 1 for * the next most significant bit, and so on. * * 'src' and 'dst' are nxm_header values with nxm_hasmask=0. (It doesn't make * sense to use nxm_hasmask=1 because the action does not do any kind of * matching; it uses the actual value of a field.) * * The following nxm_header values are potentially acceptable as 'src': * * - NXM_OF_IN_PORT * - NXM_OF_ETH_DST * - NXM_OF_ETH_SRC * - NXM_OF_ETH_TYPE * - NXM_OF_VLAN_TCI * - NXM_OF_IP_TOS * - NXM_OF_IP_PROTO * - NXM_OF_IP_SRC * - NXM_OF_IP_DST * - NXM_OF_TCP_SRC * - NXM_OF_TCP_DST * - NXM_OF_UDP_SRC * - NXM_OF_UDP_DST * - NXM_OF_ICMP_TYPE * - NXM_OF_ICMP_CODE * - NXM_OF_ARP_OP * - NXM_OF_ARP_SPA * - NXM_OF_ARP_TPA * - NXM_NX_TUN_ID * - NXM_NX_ARP_SHA * - NXM_NX_ARP_THA * - NXM_NX_ICMPV6_TYPE * - NXM_NX_ICMPV6_CODE * - NXM_NX_ND_SLL * - NXM_NX_ND_TLL * - NXM_NX_REG(idx) for idx in the switch's accepted range. * - NXM_NX_PKT_MARK * - NXM_NX_TUN_IPV4_SRC * - NXM_NX_TUN_IPV4_DST * * The following nxm_header values are potentially acceptable as 'dst': * * - NXM_OF_ETH_DST * - NXM_OF_ETH_SRC * - NXM_OF_IP_TOS * - NXM_OF_IP_SRC * - NXM_OF_IP_DST * - NXM_OF_TCP_SRC * - NXM_OF_TCP_DST * - NXM_OF_UDP_SRC * - NXM_OF_UDP_DST * - NXM_NX_ARP_SHA * - NXM_NX_ARP_THA * - NXM_OF_ARP_OP * - NXM_OF_ARP_SPA * - NXM_OF_ARP_TPA * Modifying any of the above fields changes the corresponding packet * header. * * - NXM_OF_IN_PORT * * - NXM_NX_REG(idx) for idx in the switch's accepted range. * * - NXM_NX_PKT_MARK * * - NXM_OF_VLAN_TCI. Modifying this field's value has side effects on the * packet's 802.1Q header. Setting a value with CFI=0 removes the 802.1Q * header (if any), ignoring the other bits. Setting a value with CFI=1 * adds or modifies the 802.1Q header appropriately, setting the TCI field * to the field's new value (with the CFI bit masked out). * * - NXM_NX_TUN_ID, NXM_NX_TUN_IPV4_SRC, NXM_NX_TUN_IPV4_DST. Modifying * any of these values modifies the corresponding tunnel header field used * for the packet's next tunnel encapsulation, if allowed by the * configuration of the output tunnel port. * * A given nxm_header value may be used as 'src' or 'dst' only on a flow whose * nx_match satisfies its prerequisites. For example, NXM_OF_IP_TOS may be * used only if the flow's nx_match includes an nxm_entry that specifies * nxm_type=NXM_OF_ETH_TYPE, nxm_hasmask=0, and nxm_value=0x0800. 
* * The switch will reject actions for which src_ofs+n_bits is greater than the * width of 'src' or dst_ofs+n_bits is greater than the width of 'dst' with * error type OFPET_BAD_ACTION, code OFPBAC_BAD_ARGUMENT. * * This action behaves properly when 'src' overlaps with 'dst', that is, it * behaves as if 'src' were copied out to a temporary buffer, then the * temporary buffer copied to 'dst'. */ ''' nx_action_reg_move = nstruct( (uint16, 'n_bits'), # /* Number of bits. */ (uint16, 'src_ofs'), # /* Starting bit offset in source. */ (uint16, 'dst_ofs'), # /* Starting bit offset in destination. */ (nxm_header, 'src'), # /* Source register. */ (nxm_header, 'dst'), # /* Destination register. */ name = 'nx_action_reg_move', base = nx_action, classifyby = (NXAST_REG_MOVE,), criteria = lambda x: getattr(x, action_subtype) == NXAST_REG_MOVE, init = packvalue(NXAST_REG_MOVE, action_subtype), formatter = _createdesc(lambda x:'move:%s[%d..%d]->%s[%d..%d]' % (x['src'], x['src_ofs'], x['src_ofs'] + x['n_bits'] - 1, x['dst'], x['dst_ofs'], x['dst_ofs'] + x['n_bits'] - 1)) ) namespace['nx_action_reg_move'] = nx_action_reg_move ''' /* Action structure for NXAST_REG_LOAD. * * Copies value[0:n_bits] to dst[ofs:ofs+n_bits], where a[b:c] denotes the bits * within 'a' numbered 'b' through 'c' (not including bit 'c'). Bit numbering * starts at 0 for the least-significant bit, 1 for the next most significant * bit, and so on. * * 'dst' is an nxm_header with nxm_hasmask=0. See the documentation for * NXAST_REG_MOVE, above, for the permitted fields and for the side effects of * loading them. * * The 'ofs' and 'n_bits' fields are combined into a single 'ofs_nbits' field * to avoid enlarging the structure by another 8 bytes. To allow 'n_bits' to * take a value between 1 and 64 (inclusive) while taking up only 6 bits, it is * also stored as one less than its true value: * * 15 6 5 0 * +------------------------------+------------------+ * | ofs | n_bits - 1 | * +------------------------------+------------------+ * * The switch will reject actions for which ofs+n_bits is greater than the * width of 'dst', or in which any bits in 'value' with value 2**n_bits or * greater are set to 1, with error type OFPET_BAD_ACTION, code * OFPBAC_BAD_ARGUMENT. */ ''' nx_action_reg_load = nstruct( (uint16, 'ofs_nbits'), # /* (ofs << 6) | (n_bits - 1). */ (nxm_header, 'dst'), # /* Destination register. */ (uint64, 'value'), # /* Immediate value. */ name = 'nx_action_reg_load', base = nx_action, classifyby = (NXAST_REG_LOAD,), criteria = lambda x: getattr(x, action_subtype) == NXAST_REG_LOAD, init = packvalue(NXAST_REG_LOAD, action_subtype), formatter = _createdesc(lambda x: 'load:0x%x->%s[%d..%d]' % (x['value'], x['dst'], x['ofs_nbits'] >> 6, (x['ofs_nbits'] >> 6) + (x['ofs_nbits'] & 0x3f))) ) namespace['nx_action_reg_load'] = nx_action_reg_load ''' /* Action structure for NXAST_STACK_PUSH and NXAST_STACK_POP. * * Pushes (or pops) field[offset: offset + n_bits] to (or from) * top of the stack. */ ''' nx_action_stack = nstruct( (uint16, 'offset'), # /* Bit offset into the field. */ (nxm_header, 'field'), # /* The field used for push or pop. */ (uint16, 'n_bits'), # /* (n_bits + 1) bits of the field. */ (uint8[6],), # /* Reserved, must be zero. 
*/ name = 'nx_action_stack', base = nx_action, classifyby = (NXAST_STACK_PUSH, NXAST_STACK_POP), criteria = lambda x: getattr(x, action_subtype) == NXAST_STACK_PUSH or getattr(x, action_subtype) == NXAST_STACK_POP, init = packvalue(NXAST_STACK_PUSH, action_subtype), formatter = _createdesc(lambda x: '%s:%s[%d..%d]' % ('push' if x[action_subtype] == 'NXAST_STACK_PUSH' else 'pop', x['field'], x['offset'], (x['offset'] + x['n_bits'] - 1))) ) namespace['nx_action_stack'] = nx_action_stack ''' /* Action structure for NXAST_NOTE. * * This action has no effect. It is variable length. The switch does not * attempt to interpret the user-defined 'note' data in any way. A controller * can use this action to attach arbitrary metadata to a flow. * * This action might go away in the future. */ ''' nx_action_note = nstruct( (varchr, 'note'), name = 'nx_action_note', base = nx_action, classifyby = (NXAST_NOTE,), criteria = lambda x: getattr(x, action_subtype) == NXAST_NOTE, init = packvalue(NXAST_NOTE, action_subtype) ) namespace['nx_action_note'] = nx_action_note ''' /* Action structure for NXAST_MULTIPATH. * * This action performs the following steps in sequence: * * 1. Hashes the fields designated by 'fields', one of NX_HASH_FIELDS_*. * Refer to the definition of "enum nx_mp_fields" for details. * * The 'basis' value is used as a universal hash parameter, that is, * different values of 'basis' yield different hash functions. The * particular universal hash function used is implementation-defined. * * The hashed fields' values are drawn from the current state of the * flow, including all modifications that have been made by actions up to * this point. * * 2. Applies the multipath link choice algorithm specified by 'algorithm', * one of NX_MP_ALG_*. Refer to the definition of "enum nx_mp_algorithm" * for details. * * The output of the algorithm is 'link', an unsigned integer less than * or equal to 'max_link'. * * Some algorithms use 'arg' as an additional argument. * * 3. Stores 'link' in dst[ofs:ofs+n_bits]. The format and semantics of * 'dst' and 'ofs_nbits' are similar to those for the NXAST_REG_LOAD * action. * * The switch will reject actions that have an unknown 'fields', or an unknown * 'algorithm', or in which ofs+n_bits is greater than the width of 'dst', or * in which 'max_link' is greater than or equal to 2**n_bits, with error type * OFPET_BAD_ACTION, code OFPBAC_BAD_ARGUMENT. */ ''' nx_action_multipath = nstruct( #/* What fields to hash and how. */ (nx_hash_fields, 'fields'), # /* One of NX_HASH_FIELDS_*. */ (uint16, 'basis'), # /* Universal hash parameter. */ (uint16,), #/* Multipath link choice algorithm to apply to hash value. */ (nx_mp_algorithm, 'algorithm'), # /* One of NX_MP_ALG_*. */ (uint16, 'max_link'), # /* Number of output links, minus 1. */ (uint32, 'arg'), # /* Algorithm-specific argument. */ (uint16,), # /* Where to store the result. */ (uint16, 'ofs_nbits'), # /* (ofs << 6) | (n_bits - 1). */ (nxm_header, 'dst'), # /* Destination. */ name = 'nx_action_multipath', base = nx_action, classifyby = (NXAST_MULTIPATH,), criteria = lambda x: getattr(x, action_subtype) == NXAST_MULTIPATH, init = packvalue(NXAST_MULTIPATH, action_subtype), formatter = _createdesc(lambda x: 'multipath(%s,%d,%s,%d,%d,%s[%d..%d])' % (x['fields'], x['basis'], x['algorithm'],x['max_link'] + 1, x['arg'], x['dst'], x['ofs_nbits'] >> 6, (x['ofs_nbits'] >> 6) + (x['ofs_nbits'] & 0x3f))) ) namespace['nx_action_multipath'] = nx_action_multipath ''' /* Action structure for NXAST_LEARN. 
* * This action adds or modifies a flow in an OpenFlow table, similar to * OFPT_FLOW_MOD with OFPFC_MODIFY_STRICT as 'command'. The new flow has the * specified idle timeout, hard timeout, priority, cookie, and flags. The new * flow's match criteria and actions are built by applying each of the series * of flow_mod_spec elements included as part of the action. * * A flow_mod_spec starts with a 16-bit header. A header that is all-bits-0 is * a no-op used for padding the action as a whole to a multiple of 8 bytes in * length. Otherwise, the flow_mod_spec can be thought of as copying 'n_bits' * bits from a source to a destination. In this case, the header contains * multiple fields: * * 15 14 13 12 11 10 0 * +------+---+------+---------------------------------+ * | 0 |src| dst | n_bits | * +------+---+------+---------------------------------+ * * The meaning and format of a flow_mod_spec depends on 'src' and 'dst'. The * following table summarizes the meaning of each possible combination. * Details follow the table: * * src dst meaning * --- --- ---------------------------------------------------------- * 0 0 Add match criteria based on value in a field. * 1 0 Add match criteria based on an immediate value. * 0 1 Add NXAST_REG_LOAD action to copy field into a different field. * 1 1 Add NXAST_REG_LOAD action to load immediate value into a field. * 0 2 Add OFPAT_OUTPUT action to output to port from specified field. * All other combinations are undefined and not allowed. * * The flow_mod_spec header is followed by a source specification and a * destination specification. The format and meaning of the source * specification depends on 'src': * * - If 'src' is 0, the source bits are taken from a field in the flow to * which this action is attached. (This should be a wildcarded field. If * its value is fully specified then the source bits being copied have * constant values.) * * The source specification is an ovs_be32 'field' and an ovs_be16 'ofs'. * 'field' is an nxm_header with nxm_hasmask=0, and 'ofs' the starting bit * offset within that field. The source bits are field[ofs:ofs+n_bits-1]. * 'field' and 'ofs' are subject to the same restrictions as the source * field in NXAST_REG_MOVE. * * - If 'src' is 1, the source bits are a constant value. The source * specification is (n_bits+15)/16*2 bytes long. Taking those bytes as a * number in network order, the source bits are the 'n_bits' * least-significant bits. The switch will report an error if other bits * in the constant are nonzero. * * The flow_mod_spec destination specification, for 'dst' of 0 or 1, is an * ovs_be32 'field' and an ovs_be16 'ofs'. 'field' is an nxm_header with * nxm_hasmask=0 and 'ofs' is a starting bit offset within that field. The * meaning of the flow_mod_spec depends on 'dst': * * - If 'dst' is 0, the flow_mod_spec specifies match criteria for the new * flow. The new flow matches only if bits field[ofs:ofs+n_bits-1] in a * packet equal the source bits. 'field' may be any nxm_header with * nxm_hasmask=0 that is allowed in NXT_FLOW_MOD. * * Order is significant. Earlier flow_mod_specs must satisfy any * prerequisites for matching fields specified later, by copying constant * values into prerequisite fields. * * The switch will reject flow_mod_specs that do not satisfy NXM masking * restrictions. * * - If 'dst' is 1, the flow_mod_spec specifies an NXAST_REG_LOAD action for * the new flow. The new flow copies the source bits into * field[ofs:ofs+n_bits-1]. Actions are executed in the same order as the * flow_mod_specs. 
* * A single NXAST_REG_LOAD action writes no more than 64 bits, so n_bits * greater than 64 yields multiple NXAST_REG_LOAD actions. * * The flow_mod_spec destination spec for 'dst' of 2 (when 'src' is 0) is * empty. It has the following meaning: * * - The flow_mod_spec specifies an OFPAT_OUTPUT action for the new flow. * The new flow outputs to the OpenFlow port specified by the source field. * Of the special output ports with value OFPP_MAX or larger, OFPP_IN_PORT, * OFPP_FLOOD, OFPP_LOCAL, and OFPP_ALL are supported. Other special ports * may not be used. * * Resource Management * ------------------- * * A switch has a finite amount of flow table space available for learning. * When this space is exhausted, no new learning table entries will be learned * until some existing flow table entries expire. The controller should be * prepared to handle this by flooding (which can be implemented as a * low-priority flow). * * If a learned flow matches a single TCP stream with a relatively long * timeout, one may make the best of resource constraints by setting * 'fin_idle_timeout' or 'fin_hard_timeout' (both measured in seconds), or * both, to shorter timeouts. When either of these is specified as a nonzero * value, OVS adds a NXAST_FIN_TIMEOUT action, with the specified timeouts, to * the learned flow. * * Examples * -------- * * The following examples give a prose description of the flow_mod_specs along * with informal notation for how those would be represented and a hex dump of * the bytes that would be required. * * These examples could work with various nx_action_learn parameters. Typical * values would be idle_timeout=OFP_FLOW_PERMANENT, hard_timeout=60, * priority=OFP_DEFAULT_PRIORITY, flags=0, table_id=10. * * 1. Learn input port based on the source MAC, with lookup into * NXM_NX_REG1[16:31] by resubmit to in_port=99: * * Match on in_port=99: * ovs_be16(src=1, dst=0, n_bits=16), 20 10 * ovs_be16(99), 00 63 * ovs_be32(NXM_OF_IN_PORT), ovs_be16(0) 00 00 00 02 00 00 * * Match Ethernet destination on Ethernet source from packet: * ovs_be16(src=0, dst=0, n_bits=48), 00 30 * ovs_be32(NXM_OF_ETH_SRC), ovs_be16(0) 00 00 04 06 00 00 * ovs_be32(NXM_OF_ETH_DST), ovs_be16(0) 00 00 02 06 00 00 * * Set NXM_NX_REG1[16:31] to the packet's input port: * ovs_be16(src=0, dst=1, n_bits=16), 08 10 * ovs_be32(NXM_OF_IN_PORT), ovs_be16(0) 00 00 00 02 00 00 * ovs_be32(NXM_NX_REG1), ovs_be16(16) 00 01 02 04 00 10 * * Given a packet that arrived on port A with Ethernet source address B, * this would set up the flow "in_port=99, dl_dst=B, * actions=load:A->NXM_NX_REG1[16..31]". * * In syntax accepted by ovs-ofctl, this action is: learn(in_port=99, * NXM_OF_ETH_DST[]=NXM_OF_ETH_SRC[], * load:NXM_OF_IN_PORT[]->NXM_NX_REG1[16..31]) * * 2. 
Output to input port based on the source MAC and VLAN VID, with lookup * into NXM_NX_REG1[16:31]: * * Match on same VLAN ID as packet: * ovs_be16(src=0, dst=0, n_bits=12), 00 0c * ovs_be32(NXM_OF_VLAN_TCI), ovs_be16(0) 00 00 08 02 00 00 * ovs_be32(NXM_OF_VLAN_TCI), ovs_be16(0) 00 00 08 02 00 00 * * Match Ethernet destination on Ethernet source from packet: * ovs_be16(src=0, dst=0, n_bits=48), 00 30 * ovs_be32(NXM_OF_ETH_SRC), ovs_be16(0) 00 00 04 06 00 00 * ovs_be32(NXM_OF_ETH_DST), ovs_be16(0) 00 00 02 06 00 00 * * Output to the packet's input port: * ovs_be16(src=0, dst=2, n_bits=16), 10 10 * ovs_be32(NXM_OF_IN_PORT), ovs_be16(0) 00 00 00 02 00 00 * * Given a packet that arrived on port A with Ethernet source address B in * VLAN C, this would set up the flow "dl_dst=B, vlan_vid=C, * actions=output:A". * * In syntax accepted by ovs-ofctl, this action is: * learn(NXM_OF_VLAN_TCI[0..11], NXM_OF_ETH_DST[]=NXM_OF_ETH_SRC[], * output:NXM_OF_IN_PORT[]) * * 3. Here's a recipe for a very simple-minded MAC learning switch. It uses a * 10-second MAC expiration time to make it easier to see what's going on * * ovs-vsctl del-controller br0 * ovs-ofctl del-flows br0 * ovs-ofctl add-flow br0 "table=0 actions=learn(table=1, \ hard_timeout=10, NXM_OF_VLAN_TCI[0..11], \ NXM_OF_ETH_DST[]=NXM_OF_ETH_SRC[], \ output:NXM_OF_IN_PORT[]), resubmit(,1)" * ovs-ofctl add-flow br0 "table=1 priority=0 actions=flood" * * You can then dump the MAC learning table with: * * ovs-ofctl dump-flows br0 table=1 * * Usage Advice * ------------ * * For best performance, segregate learned flows into a table that is not used * for any other flows except possibly for a lowest-priority "catch-all" flow * (a flow with no match criteria). If different learning actions specify * different match criteria, use different tables for the learned flows. * * The meaning of 'hard_timeout' and 'idle_timeout' can be counterintuitive. * These timeouts apply to the flow that is added, which means that a flow with * an idle timeout will expire when no traffic has been sent *to* the learned * address. This is not usually the intent in MAC learning; instead, we want * the MAC learn entry to expire when no traffic has been sent *from* the * learned address. Use a hard timeout for that. 
*/ ''' def _nx_flow_mod_spec_formatter(x): if NX_FLOWMODSPEC_SRC(x['header']): srcdesc = '0x' + ''.join('%02x' % (c,) for c in bytearray(x['value'])) else: srcdesc = '%s[%d..%d]' % (x['src'], x['src_ofs'], x['src_ofs'] + NX_FLOWMODSPEC_NBITS(x['header']) - 1) dstv = NX_FLOWMODSPEC_DST(x['header']) if dstv != NX_LEARN_DST_OUTPUT: dstdesc = '%s[%d..%d]' % (x['dst'], x['dst_ofs'], x['dst_ofs'] + NX_FLOWMODSPEC_NBITS(x['header']) - 1) if dstv == NX_LEARN_DST_MATCH: x['_desc'] = '%s=%s' % (dstdesc, srcdesc) elif dstv == NX_LEARN_DST_LOAD: x['_desc'] = 'load:%s->%s' % (srcdesc, dstdesc) elif NX_FLOWMODSPEC_SRC(x['header']): x['_desc'] = 'output:%s' % nxm_port_no_raw.formatter(common.create_binary(x['value'], 2)) else: x['_desc'] = 'output:%s' % (srcdesc,) x['header'] = nx_flow_mod_spec_header.formatter(x['header']) return x nx_flow_mod_spec = nstruct( (uint16, 'header'), (_nx_flow_mod_spec_src,), (_nx_flow_mod_spec_dst,), name = 'nx_flow_mod_spec', padding = 1, formatter = _nx_flow_mod_spec_formatter, lastextra = False # if x.header == 0, size is 14, the padding should not be so large so it will not be successfully parsed ) namespace['nx_flow_mod_spec'] = nx_flow_mod_spec def create_nxfms_matchfield(src, dst, src_ofs = 0, dst_ofs = 0, n_bits = None): if n_bits is None: n_bits = min(NXM_LENGTH(dst) * 8 - dst_ofs, NXM_LENGTH(src) * 8 - src_ofs) if n_bits <= 0: raise ValueError('Cannot create flow mod spec with 0 bits') return nx_flow_mod_spec.parse(_create_header(NX_LEARN_SRC_FIELD, NX_LEARN_DST_MATCH, n_bits) + _create_field(src, src_ofs) + _create_field(dst, dst_ofs))[0] namespace['create_nxfms_matchfield'] = create_nxfms_matchfield def create_nxfms_matchvalue(dst, value, dst_ofs, n_bits = None): if n_bits is None: n_bits = NXM_LENGTH(dst) * 8 - dst_ofs if n_bits <= 0: raise ValueError('Cannot create flow mod spec with 0 bits') return nx_flow_mod_spec.parse(_create_header(NX_LEARN_SRC_IMMEDIATE, NX_LEARN_DST_MATCH, n_bits) + common.create_binary(value, (n_bits + 15) // 16 * 2) + _create_field(dst, dst_ofs))[0] namespace['create_nxfms_matchvalue'] = create_nxfms_matchvalue def create_nxfms_loadfield(src, dst, src_ofs = 0, dst_ofs = 0, n_bits = None): if n_bits is None: n_bits = min(NXM_LENGTH(dst) * 8 - dst_ofs, NXM_LENGTH(src) * 8 - src_ofs) if n_bits <= 0: raise ValueError('Cannot create flow mod spec with 0 bits') return nx_flow_mod_spec.parse(_create_header(NX_LEARN_SRC_FIELD, NX_LEARN_DST_LOAD, n_bits) + _create_field(src, src_ofs) + _create_field(dst, dst_ofs))[0] namespace['create_nxfms_loadfield'] = create_nxfms_loadfield def create_nxfms_loadvalue(dst, value, dst_ofs, n_bits = None): if n_bits is None: n_bits = NXM_LENGTH(dst) * 8 - dst_ofs if n_bits <= 0: raise ValueError('Cannot create flow mod spec with 0 bits') return nx_flow_mod_spec.parse(_create_header(NX_LEARN_SRC_IMMEDIATE, NX_LEARN_DST_LOAD, n_bits) + common.create_binary(value, (n_bits + 15) // 16 * 2) + _create_field(dst, dst_ofs))[0] namespace['create_nxfms_loadvalue'] = create_nxfms_loadvalue def create_nxfms_outputfield(src, src_ofs = 0, n_bits = None): if n_bits is None: n_bits = NXM_LENGTH(src) * 8 - src_ofs if n_bits <= 0: raise ValueError('Cannot create flow mod spec with 0 bits') return nx_flow_mod_spec.parse(_create_header(NX_LEARN_SRC_FIELD, NX_LEARN_DST_OUTPUT, n_bits) + _create_field(src, src_ofs))[0] namespace['create_nxfms_outputfield'] = create_nxfms_outputfield def create_nxfms_outputvalue(dst, value): return nx_flow_mod_spec.parse(_create_header(NX_LEARN_SRC_IMMEDIATE, NX_LEARN_DST_OUTPUT, 16) + 
common.create_binary(value, 2))[0] namespace['create_nxfms_outputvalue'] = create_nxfms_outputvalue ofp_flow_mod_flags = namespace['ofp_flow_mod_flags'] nx_action_learn = nstruct( (uint16, 'idle_timeout'), # /* Idle time before discarding (seconds). */ (uint16, 'hard_timeout'), # /* Max time before discarding (seconds). */ (uint16, 'priority'), # /* Priority level of flow entry. */ (uint64, 'cookie'), # /* Cookie for new flow. */ (ofp_flow_mod_flags, 'flags'), # /* Either 0 or OFPFF_SEND_FLOW_REM. */ (uint8, 'table_id'), # /* Table to insert flow entry. */ (uint8,), # /* Must be zero. */ (uint16, 'fin_idle_timeout'),# /* Idle timeout after FIN, if nonzero. */ (uint16, 'fin_hard_timeout'),# /* Hard timeout after FIN, if nonzero. */ (nx_flow_mod_spec[0], 'specs'), base = nx_action, name = 'nx_action_learn', classifyby = (NXAST_LEARN,), criteria = lambda x: getattr(x, action_subtype) == NXAST_LEARN, init = packvalue(NXAST_LEARN, action_subtype), ) namespace['nx_action_learn'] = nx_action_learn ''' /* Action structure for NXAST_FIN_TIMEOUT. * * This action changes the idle timeout or hard timeout, or both, of this * OpenFlow rule when the rule matches a TCP packet with the FIN or RST flag. * When such a packet is observed, the action reduces the rule's idle timeout * to 'fin_idle_timeout' and its hard timeout to 'fin_hard_timeout'. This * action has no effect on an existing timeout that is already shorter than the * one that the action specifies. A 'fin_idle_timeout' or 'fin_hard_timeout' * of zero has no effect on the respective timeout. * * 'fin_idle_timeout' and 'fin_hard_timeout' are measured in seconds. * 'fin_hard_timeout' specifies time since the flow's creation, not since the * receipt of the FIN or RST. * * This is useful for quickly discarding learned TCP flows that otherwise will * take a long time to expire. * * This action is intended for use with an OpenFlow rule that matches only a * single TCP flow. If the rule matches multiple TCP flows (e.g. it wildcards * all TCP traffic, or all TCP traffic to a particular port), then any FIN or * RST in any of those flows will cause the entire OpenFlow rule to expire * early, which is not normally desirable. */ ''' nx_action_fin_timeout = nstruct( (uint16, 'fin_idle_timeout'), # /* New idle timeout, if nonzero. */ (uint16, 'fin_hard_timeout'), # /* New hard timeout, if nonzero. */ (uint16,), base = nx_action, name = 'nx_action_fin_timeout', criteria = lambda x: getattr(x, action_subtype) == NXAST_FIN_TIMEOUT, classifyby = (NXAST_FIN_TIMEOUT,), init = packvalue(NXAST_FIN_TIMEOUT, action_subtype) ) namespace['nx_action_fin_timeout'] = nx_action_fin_timeout ''' /* Action structure for NXAST_BUNDLE and NXAST_BUNDLE_LOAD. * * The bundle actions choose a slave from a supplied list of options. * NXAST_BUNDLE outputs to its selection. NXAST_BUNDLE_LOAD writes its * selection to a register. * * The list of possible slaves follows the nx_action_bundle structure. The size * of each slave is governed by its type as indicated by the 'slave_type' * parameter. The list of slaves should be padded at its end with zeros to make * the total length of the action a multiple of 8. * * Switches infer from the 'slave_type' parameter the size of each slave. All * implementations must support the NXM_OF_IN_PORT 'slave_type' which indicates * that the slaves are OpenFlow port numbers with NXM_LENGTH(NXM_OF_IN_PORT) == * 2 byte width. Switches should reject actions which indicate unknown or * unsupported slave types. 
* * Switches use a strategy dictated by the 'algorithm' parameter to choose a * slave. If the switch does not support the specified 'algorithm' parameter, * it should reject the action. * * Several algorithms take into account liveness when selecting slaves. The * liveness of a slave is implementation defined (with one exception), but will * generally take into account things like its carrier status and the results * of any link monitoring protocols which happen to be running on it. In order * to give controllers a place-holder value, the OFPP_NONE port is always * considered live. * * Some slave selection strategies require the use of a hash function, in which * case the 'fields' and 'basis' parameters should be populated. The 'fields' * parameter (one of NX_HASH_FIELDS_*) designates which parts of the flow to * hash. Refer to the definition of "enum nx_hash_fields" for details. The * 'basis' parameter is used as a universal hash parameter. Different values * of 'basis' yield different hash results. * * The 'zero' parameter at the end of the action structure is reserved for * future use. Switches are required to reject actions which have nonzero * bytes in the 'zero' field. * * NXAST_BUNDLE actions should have 'ofs_nbits' and 'dst' zeroed. Switches * should reject actions which have nonzero bytes in either of these fields. * * NXAST_BUNDLE_LOAD stores the OpenFlow port number of the selected slave in * dst[ofs:ofs+n_bits]. The format and semantics of 'dst' and 'ofs_nbits' are * similar to those for the NXAST_REG_LOAD action. */ ''' nx_action_bundle = nstruct( # /* Slave choice algorithm to apply to hash value. */ (nx_bd_algorithm, 'algorithm'), # /* One of NX_BD_ALG_*. */ # /* What fields to hash and how. */ (nx_hash_fields, 'fields'), # /* One of NX_HASH_FIELDS_*. */ (uint16, 'basis'), # /* Universal hash parameter. */ (nxm_header, 'slave_type'), # /* NXM_OF_IN_PORT. */ (uint16, 'n_slaves'), # /* Number of slaves. */ (uint16, 'ofs_nbits'), # /* (ofs << 6) | (n_bits - 1). */ (nxm_header, 'dst'), # /* Destination. */ (uint8[4],), # /* Reserved. Must be zero. 
*/ name = 'nx_action_bundle', base = nx_action, criteria = lambda x: getattr(x, action_subtype) == NXAST_BUNDLE or getattr(x, action_subtype) == NXAST_BUNDLE_LOAD, classifyby = (NXAST_BUNDLE, NXAST_BUNDLE_LOAD), init = packvalue(NXAST_BUNDLE, action_subtype) ) namespace['nx_action_bundle'] = nx_action_bundle def _nx_slave_ports_prepack(x): x.n_slaves = len(x.bundles) _nx_slave_ports = nstruct( (nx_port_no[0], 'bundles'), name = '_nx_slave_ports', size = lambda x: x.n_slaves * 2, prepack = _nx_slave_ports_prepack, padding = 1 ) nx_action_bundle_port = nstruct( (_nx_slave_ports,), base = nx_action_bundle, name = 'nx_action_bundle_port', criteria = lambda x: x.slave_type == NXM_OF_IN_PORT, init = packvalue(NXM_OF_IN_PORT, 'slave_type'), lastextra = False, formatter = _createdesc(lambda x: 'bundle_load(%s,%d,%s,%s,%s[%d..%d],slaves:%r)' % \ (x['fields'], x['basis'], x['algorithm'], x['slave_type'], x['dst'], x['ofs_nbits'] >> 6, (x['ofs_nbits'] >> 6) + (x['ofs_nbits'] & 0x3f), x['bundles']) \ if x[action_subtype] == 'NXAST_BUNDLE_LOAD' else 'bundle(%s,%d,%s,%s,slaves:%r)' % (x['fields'], x['basis'], x['algorithm'], x['slave_type'], x['bundles'])) ) namespace['nx_action_bundle_port'] = nx_action_bundle_port def _nx_slave_others_prepack(x): x.n_slaves = len(x.bundlesraw) // NXM_LENGTH(x.slave_type) _nx_slave_others = nstruct( (raw, 'bundlesraw'), name = '_nx_slave_others', size = lambda x: x.n_slaves * NXM_LENGTH(x.slave_type), prepack = _nx_slave_others_prepack, padding = 1 ) nx_action_bundle_others = nstruct( (_nx_slave_others,), base = nx_action_bundle, name = 'nx_action_bundle_others', criteria = lambda x: x.slave_type != NXM_OF_IN_PORT, lastextra = False, init = packvalue(NXM_OF_ETH_DST, 'slave_type'), formatter = _createdesc(lambda x: 'bundle_load(%s,%d,%s,%s,%s[%d..%d],slaves:%r)' % \ (x['fields'], x['basis'], x['algorithm'], x['slave_type'], x['dst'], x['ofs_nbits'] >> 6, (x['ofs_nbits'] >> 6) + (x['ofs_nbits'] & 0x3f), x['bundlesraw']) \ if x[action_subtype] == 'NXAST_BUNDLE_LOAD' else 'bundle(%s,%d,%s,%s,slaves:%r)' % (x['fields'], x['basis'], x['algorithm'], x['slave_type'], x['bundlesraw'])) ) namespace['nx_action_bundle_others'] = nx_action_bundle_others ''' /* Action structure for NXAST_DEC_TTL_CNT_IDS. * * If the packet is not IPv4 or IPv6, does nothing. For IPv4 or IPv6, if the * TTL or hop limit is at least 2, decrements it by 1. Otherwise, if TTL or * hop limit is 0 or 1, sends a packet-in to the controllers with each of the * 'n_controllers' controller IDs specified in 'cnt_ids'. * * (This differs from NXAST_DEC_TTL in that for NXAST_DEC_TTL the packet-in is * sent only to controllers with id 0.) */ ''' def _nx_action_cnt_ids_ids_prepack(x): x.n_controllers = len(x.cnt_ids) _nx_action_cnt_ids_ids = nstruct( (uint16[0], 'cnt_ids'), name = '_nx_action_cnt_ids_ids', size = lambda x: 2 * x.n_controllers, prepack = _nx_action_cnt_ids_ids_prepack ) nx_action_cnt_ids = nstruct( (uint16, 'n_controllers'), # /* Number of controllers. */ (uint8[4],), # /* Must be zero. */ (_nx_action_cnt_ids_ids,), base = nx_action, classifyby = (NXAST_DEC_TTL_CNT_IDS,), criteria = lambda x: getattr(x, action_subtype) == NXAST_DEC_TTL_CNT_IDS, init = packvalue(NXAST_DEC_TTL_CNT_IDS, action_subtype), lastextra = False, name = 'nx_action_cnt_ids' ) namespace['nx_action_cnt_ids'] = nx_action_cnt_ids ''' /* Action structure for NXAST_OUTPUT_REG. * * Outputs to the OpenFlow port number written to src[ofs:ofs+nbits]. 
* * The format and semantics of 'src' and 'ofs_nbits' are similar to those for * the NXAST_REG_LOAD action. * * The acceptable nxm_header values for 'src' are the same as the acceptable * nxm_header values for the 'src' field of NXAST_REG_MOVE. * * The 'max_len' field indicates the number of bytes to send when the chosen * port is OFPP_CONTROLLER. Its semantics are equivalent to the 'max_len' * field of OFPAT_OUTPUT. * * The 'zero' field is required to be zeroed for forward compatibility. */ ''' nx_action_output_reg = nstruct( (uint16, 'ofs_nbits'), # /* (ofs << 6) | (n_bits - 1). */ (nxm_header, 'src'), # /* Source. */ (uint16, 'max_len'), # /* Max length to send to controller. */ (uint8[6],), # /* Reserved, must be zero. */ base = nx_action, classifyby = (NXAST_OUTPUT_REG,), criteria = lambda x: getattr(x, action_subtype) == NXAST_OUTPUT_REG, init = packvalue(NXAST_OUTPUT_REG, action_subtype), name = 'nx_action_output_reg', formatter = _createdesc(lambda x: 'output:%s[%d..%d]' % (x['src'], x['ofs_nbits'] >> 6, (x['ofs_nbits'] >> 6) + (x['ofs_nbits'] & 0x3f))) ) namespace['nx_action_output_reg'] = nx_action_output_reg ''' /* NXAST_EXIT * * Discontinues action processing. * * The NXAST_EXIT action causes the switch to immediately halt processing * actions for the flow. Any actions which have already been processed are * executed by the switch. However, any further actions, including those which * may be in different tables, or different levels of the NXAST_RESUBMIT * hierarchy, will be ignored. * * Uses the nx_action_header structure. */ /* ## --------------------- ## */ /* ## Requests and replies. ## */ /* ## --------------------- ## */ ''' ''' /* NXT_SET_FLOW_FORMAT request. */ ''' nx_set_flow_format = nstruct( (nx_flow_format, 'format'), # /* One of NXFF_*. */ name = 'nx_set_flow_format', base = nicira_header, criteria = lambda x: getattr(x, msg_subtype) == NXT_SET_FLOW_FORMAT, classifyby = (NXT_SET_FLOW_FORMAT,), init = packvalue(NXT_SET_FLOW_FORMAT, msg_subtype) ) namespace['nx_set_flow_format'] = nx_set_flow_format ''' /* NXT_FLOW_MOD (analogous to OFPT_FLOW_MOD). * * It is possible to limit flow deletions and modifications to certain * cookies by using the NXM_NX_COOKIE(_W) matches. The "cookie" field * is used only to add or modify flow cookies. */ ''' ofp_flow_mod_command = namespace['ofp_flow_mod_command'] nx_flow_mod = nstruct( (uint64, 'cookie'), # /* Opaque controller-issued identifier. */ (ofp_flow_mod_command, 'command'), # /* OFPFC_* + possibly a table ID (see comment # * on struct nx_flow_mod_table_id). */ (uint16, 'idle_timeout'), # /* Idle time before discarding (seconds). */ (uint16, 'hard_timeout'), # /* Max time before discarding (seconds). */ (uint16, 'priority'), # /* Priority level of flow entry. */ (uint32, 'buffer_id'), # /* Buffered packet to apply to (or -1). # Not meaningful for OFPFC_DELETE*. */ (nx_port_no, 'out_port'), # /* For OFPFC_DELETE* commands, require # matching entries to include this as an # output port. A value of OFPP_NONE # indicates no restriction. */ (ofp_flow_mod_flags, 'flags'), # /* One of OFPFF_*. */ (uint16, 'match_len'), # /* Size of nx_match. */ (uint8[6],), # /* Align to 64-bits. */ (nx_matches,), base = nicira_header, criteria = lambda x: getattr(x, msg_subtype) == NXT_FLOW_MOD, classifyby = (NXT_FLOW_MOD,), init = packvalue(NXT_FLOW_MOD, msg_subtype), name = 'nx_flow_mod' ) namespace['nx_flow_mod'] = nx_flow_mod ''' /* NXT_FLOW_REMOVED (analogous to OFPT_FLOW_REMOVED). * * 'table_id' is present only in Open vSwitch 1.11 and later. 
In earlier * versions of Open vSwitch, this is a padding byte that is always zeroed. * Therefore, a 'table_id' value of 0 indicates that the table ID is not known, * and other values may be interpreted as one more than the flow's former table * ID. */ ''' nx_flow_removed = nstruct( (uint64, 'cookie'), # /* Opaque controller-issued identifier. */ (uint16, 'priority'), # /* Priority level of flow entry. */ (ofp_flow_removed_reason, 'reason'), # /* One of OFPRR_*. */ (uint8, 'table_id'), # /* Flow's former table ID, plus one. */ (uint32, 'duration_sec'), # /* Time flow was alive in seconds. */ (uint32, 'duration_nsec'), # /* Time flow was alive in nanoseconds beyond # duration_sec. */ (uint16, 'idle_timeout'), # /* Idle timeout from original flow mod. */ (uint16, 'match_len'), # /* Size of nx_match. */ (uint64, 'packet_count'), (uint64, 'byte_count'), (nx_matches,), base = nicira_header, criteria = lambda x: getattr(x, msg_subtype) == NXT_FLOW_REMOVED, classifyby = (NXT_FLOW_REMOVED,), init = packvalue(NXT_FLOW_REMOVED, msg_subtype), name = 'nx_flow_removed' ) namespace['nx_flow_removed'] = nx_flow_removed ''' /* Nicira vendor stats request of type NXST_FLOW (analogous to OFPST_FLOW * request). * * It is possible to limit matches to certain cookies by using the * NXM_NX_COOKIE and NXM_NX_COOKIE_W matches. */ ''' nx_flow_stats_request = nstruct( (nx_port_no, 'out_port'), #/* Require matching entries to include this # as an output port. A value of OFPP_NONE # indicates no restriction. */ (uint16, 'match_len'), # /* Length of nx_match. */ (uint8, 'table_id'), # /* ID of table to read (from ofp_table_stats) # or 0xff for all tables. */ (uint8[3],), # /* Align to 64 bits. */ (nx_matches,), base = nx_stats_request, criteria = lambda x: getattr(x, stats_subtype) == NXST_FLOW, classifyby = (NXST_FLOW,), init = packvalue(NXST_FLOW, stats_subtype), name = 'nx_flow_stats_request' ) namespace['nx_flow_stats_request'] = nx_flow_stats_request ''' /* Body for Nicira vendor stats reply of type NXST_FLOW (analogous to * OFPST_FLOW reply). * * The values of 'idle_age' and 'hard_age' are only meaningful when talking to * a switch that implements the NXT_FLOW_AGE extension. Zero means that the * true value is unknown, perhaps because hardware does not track the value. * (Zero is also the value that one should ordinarily expect to see talking to * a switch that does not implement NXT_FLOW_AGE, since those switches zero the * padding bytes that these fields replaced.) A nonzero value X represents X-1 * seconds. A value of 65535 represents 65534 or more seconds. * * 'idle_age' is the number of seconds that the flow has been idle, that is, * the number of seconds since a packet passed through the flow. 'hard_age' is * the number of seconds since the flow was last modified (e.g. OFPFC_MODIFY or * OFPFC_MODIFY_STRICT). (The 'duration_*' fields are the elapsed time since * the flow was added, regardless of subsequent modifications.) * * For a flow with an idle or hard timeout, 'idle_age' or 'hard_age', * respectively, will ordinarily be smaller than the timeout, but flow * expiration times are only approximate and so one must be prepared to * tolerate expirations that occur somewhat early or late. */ ''' ofp_action = namespace['ofp_action'] nx_flow_stats = nstruct( (uint16, 'length'), # /* Length of this entry. */ (uint8, 'table_id'), # /* ID of table flow came from. */ (uint8,), (uint32, 'duration_sec'), # /* Time flow has been alive in seconds. 
*/ (uint32, 'duration_nsec'), # /* Time flow has been alive in nanoseconds # beyond duration_sec. */ (uint16, 'priority'), # /* Priority of the entry. */ (uint16, 'idle_timeout'), # /* Number of seconds idle before expiration. */ (uint16, 'hard_timeout'), # /* Number of seconds before expiration. */ (uint16, 'match_len'), # /* Length of nx_match. */ (uint16, 'idle_age'), # /* Seconds since last packet, plus one. */ (uint16, 'hard_age'), # /* Seconds since last modification, plus one. */ (uint64, 'cookie'), # /* Opaque controller-issued identifier. */ (uint64, 'packet_count'), # /* Number of packets, UINT64_MAX if unknown. */ (uint64, 'byte_count'), # /* Number of bytes, UINT64_MAX if unknown. */ #======================================================================= # /* Followed by: # * - Exactly match_len (possibly 0) bytes containing the nx_match, then # * - Exactly (match_len + 7)/8*8 - match_len (between 0 and 7) bytes of # * all-zero bytes, then # * - Actions to fill out the remainder 'length' bytes (always a multiple # * of 8). # */ #======================================================================= (nx_matches,), (ofp_action[0], 'actions'), name = 'nx_flow_stats', size = sizefromlen(65536, 'length'), prepack = packsize('length') ) namespace['nx_flow_stats'] = nx_flow_stats nx_flow_stats_reply = nstruct( (nx_flow_stats[0], 'stats'), base = nx_stats_reply, classifyby = (NXST_FLOW,), criteria = lambda x: getattr(x, stats_subtype) == NXST_FLOW, init = packvalue(NXST_FLOW, stats_subtype), name = 'nx_flow_stats_reply' ) namespace['nx_flow_stats_reply'] = nx_flow_stats_reply ''' /* Nicira vendor stats request of type NXST_AGGREGATE (analogous to * OFPST_AGGREGATE request). * * The reply format is identical to the reply format for OFPST_AGGREGATE, * except for the header. */ ''' nx_aggregate_stats_request = nstruct( (nx_port_no, 'out_port'), # /* Require matching entries to include this # as an output port. A value of OFPP_NONE # indicates no restriction. */ (uint16, 'match_len'), # /* Length of nx_match. */ (uint8, 'table_id'), # /* ID of table to read (from ofp_table_stats) # or 0xff for all tables. */ (uint8[3],), # /* Align to 64 bits. */ #======================================================================= # /* Followed by: # * - Exactly match_len (possibly 0) bytes containing the nx_match, then # * - Exactly (match_len + 7)/8*8 - match_len (between 0 and 7) bytes of # * all-zero bytes, which must also exactly fill out the length of the # * message. # */ #======================================================================= (nx_matches,), base = nx_stats_request, name = 'nx_aggregate_stats_request', criteria = lambda x: getattr(x, stats_subtype) == NXST_AGGREGATE, classifyby = (NXST_AGGREGATE,), init = packvalue(NXST_AGGREGATE, stats_subtype), lastextra = False ) namespace['nx_aggregate_stats_request'] = nx_aggregate_stats_request nx_aggregate_stats_reply = nstruct( (uint64, 'packet_count'), # /* Number of packets in flows. */ (uint64, 'byte_count'), # /* Number of bytes in flows. */ (uint32, 'flow_count'), # /* Number of flows. */ (uint8[4],), base = nx_stats_reply, name = 'nx_aggregate_stats_reply', criteria = lambda x: getattr(x, stats_subtype) == NXST_AGGREGATE, classifyby = (NXST_AGGREGATE,), init = packvalue(NXST_AGGREGATE, stats_subtype) ) namespace['nx_aggregate_stats_reply'] = nx_aggregate_stats_reply ''' /* NXT_SET_CONTROLLER_ID. * * Each OpenFlow controller connection has a 16-bit identifier that is * initially 0. This message changes the connection's ID to 'id'. 
* * Controller connection IDs need not be unique. * * The NXAST_CONTROLLER action is the only current user of controller * connection IDs. */ ''' nx_controller_id = nstruct( (uint8[6],), # /* Must be zero. */ (uint16, 'controller_id'), # /* New controller connection ID. */ base = nicira_header, name = 'nx_controller_id', criteria = lambda x: getattr(x, msg_subtype) == NXT_SET_CONTROLLER_ID, init = packvalue(NXT_SET_CONTROLLER_ID, msg_subtype), classifyby = (NXT_SET_CONTROLLER_ID,) ) namespace['nx_controller_id'] = nx_controller_id ''' /* Action structure for NXAST_CONTROLLER. * * This generalizes using OFPAT_OUTPUT to send a packet to OFPP_CONTROLLER. In * addition to the 'max_len' that OFPAT_OUTPUT supports, it also allows * specifying: * * - 'reason': The reason code to use in the ofp_packet_in or nx_packet_in. * * - 'controller_id': The ID of the controller connection to which the * ofp_packet_in should be sent. The ofp_packet_in or nx_packet_in is * sent only to controllers that have the specified controller connection * ID. See "struct nx_controller_id" for more information. */ ''' nx_action_controller = nstruct( (uint16, 'max_len'), # /* Maximum length to send to controller. */ (uint16, 'controller_id'), # /* Controller ID to send packet-in. */ (ofp_packet_in_reason, 'reason'), # /* enum ofp_packet_in_reason (OFPR_*). */ (uint8,), base = nx_action, name = 'nx_action_controller', criteria = lambda x: getattr(x, action_subtype) == NXAST_CONTROLLER, classifyby = (NXAST_CONTROLLER,), init = packvalue(NXAST_CONTROLLER, action_subtype) ) namespace['nx_action_controller'] = nx_action_controller ''' /* Flow Table Monitoring * ===================== * * NXST_FLOW_MONITOR allows a controller to keep track of changes to OpenFlow * flow table(s) or subsets of them, with the following workflow: * * 1. The controller sends an NXST_FLOW_MONITOR request to begin monitoring * flows. The 'id' in the request must be unique among all monitors that * the controller has started and not yet canceled on this OpenFlow * connection. * * 2. The switch responds with an NXST_FLOW_MONITOR reply. If the request's * 'flags' included NXFMF_INITIAL, the reply includes all the flows that * matched the request at the time of the request (with event NXFME_ADDED). * If 'flags' did not include NXFMF_INITIAL, the reply is empty. * * The reply uses the xid of the request (as do all replies to OpenFlow * requests). * * 3. Whenever a change to a flow table entry matches some outstanding monitor * request's criteria and flags, the switch sends a notification to the * controller as an additional NXST_FLOW_MONITOR reply with xid 0. * * When multiple outstanding monitors match a single change, only a single * notification is sent. This merged notification includes the information * requested in any of the individual monitors. That is, if any of the * matching monitors requests actions (NXFMF_ACTIONS), the notification * includes actions, and if any of the monitors request full changes for the * controller's own changes (NXFMF_OWN), the controller's own changes will * be included in full. * * 4. The controller may cancel a monitor with NXT_FLOW_MONITOR_CANCEL. No * further notifications will be sent on the basis of the canceled monitor * afterward. * * * Buffer Management * ================= * * OpenFlow messages for flow monitor notifications can overflow the buffer * space available to the switch, either temporarily (e.g. due to network * conditions slowing OpenFlow traffic) or more permanently (e.g. 
the sustained * rate of flow table change exceeds the network bandwidth between switch and * controller). * * When Open vSwitch's notification buffer space reaches a limiting threshold, * OVS reacts as follows: * * 1. OVS sends an NXT_FLOW_MONITOR_PAUSED message to the controller, following * all the already queued notifications. After it receives this message, * the controller knows that its view of the flow table, as represented by * flow monitor notifications, is incomplete. * * 2. As long as the notification buffer is not empty: * * - NXFME_ADDED and NXFME_MODIFIED notifications will not be sent. * * - NXFME_DELETED notifications will still be sent, but only for flows * that existed before OVS sent NXT_FLOW_MONITOR_PAUSED. * * - NXFME_ABBREV notifications will not be sent. They are treated as * the expanded version (and therefore only the NXFME_DELETED * components, if any, are sent). * * 3. When the notification buffer empties, OVS sends NXFME_ADDED notifications * for flows added since the buffer reached its limit and NXFME_MODIFIED * notifications for flows that existed before the limit was reached and * changed after the limit was reached. * * 4. OVS sends an NXT_FLOW_MONITOR_RESUMED message to the controller. After * it receives this message, the controller knows that its view of the flow * table, as represented by flow monitor notifications, is again complete. * * This allows the maximum buffer space requirement for notifications to be * bounded by the limit plus the maximum number of supported flows. * * * "Flow Removed" messages * ======================= * * The flow monitor mechanism is independent of OFPT_FLOW_REMOVED and * NXT_FLOW_REMOVED. Flow monitor updates for deletion are sent if * NXFMF_DELETE is set on a monitor, regardless of whether the * OFPFF_SEND_FLOW_REM flag was set when the flow was added. */ /* NXST_FLOW_MONITOR request. * * The NXST_FLOW_MONITOR request's body consists of an array of zero or more * instances of this structure. The request arranges to monitor the flows * that match the specified criteria, which are interpreted in the same way as * for NXST_FLOW. * * 'id' identifies a particular monitor for the purpose of allowing it to be * canceled later with NXT_FLOW_MONITOR_CANCEL. 'id' must be unique among * existing monitors that have not already been canceled. * * The reply includes the initial flow matches for monitors that have the * NXFMF_INITIAL flag set. No single flow will be included in the reply more * than once, even if more than one requested monitor matches that flow. The * reply will be empty if none of the monitors has NXFMF_INITIAL set or if none * of the monitors initially matches any flows. * * For NXFMF_ADD, an event will be reported if 'out_port' matches against the * actions of the flow being added or, for a flow that is replacing an existing * flow, if 'out_port' matches against the actions of the flow being replaced. * For NXFMF_DELETE, 'out_port' matches against the actions of a flow being * deleted. For NXFMF_MODIFY, an event will be reported if 'out_port' matches * either the old or the new actions. */ ''' ofp_table = namespace['ofp_table'] nx_flow_monitor_request = nstruct( (uint32, 'id'), # /* Controller-assigned ID for this monitor. */ (nx_flow_monitor_flags, 'flags'), # /* NXFMF_*. */ (nx_port_no, 'out_port'), # /* Required output port, if not OFPP_NONE. */ (uint16, 'match_len'), # /* Length of nx_match. */ (ofp_table, 'table_id'), # /* One table's ID or 0xff for all tables. */ (uint8[5],), # /* Align to 64 bits (must be zero). 
*/ (nx_matches,), name = 'nx_flow_monitor_request', base = nx_stats_request, criteria = lambda x: getattr(x, stats_subtype) == NXST_FLOW_MONITOR, init = packvalue(NXST_FLOW_MONITOR, stats_subtype), classifyby = (NXST_FLOW_MONITOR,) ) namespace['nx_flow_monitor_request'] = nx_flow_monitor_request ''' /* NXST_FLOW_MONITOR reply header. * * The body of an NXST_FLOW_MONITOR reply is an array of variable-length * structures, each of which begins with this header. The 'length' member may * be used to traverse the array, and the 'event' member may be used to * determine the particular structure. * * Every instance is a multiple of 8 bytes long. */ ''' nx_flow_update = nstruct( (uint16, 'length'), #/* Length of this entry. */ (nx_flow_update_event, 'event'), # /* One of NXFME_*. */ name = 'nx_flow_update', size = sizefromlen(65536, 'length'), prepack = packsize('length') ) namespace['nx_flow_update'] = nx_flow_update ''' /* NXST_FLOW_MONITOR reply for NXFME_ADDED, NXFME_DELETED, and * NXFME_MODIFIED. */ ''' nx_flow_update_full = nstruct( (ofp_flow_removed_reason, 'reason'), # /* OFPRR_* for NXFME_DELETED, else zero. */ (uint16, 'priority'), # /* Priority of the entry. */ (uint16, 'idle_timeout'), # /* Number of seconds idle before expiration. */ (uint16, 'hard_timeout'), # /* Number of seconds before expiration. */ (uint16, 'match_len'), # /* Length of nx_match. */ (uint8, 'table_id'), # /* ID of flow's table. */ (uint8,), # /* Reserved, currently zeroed. */ (uint64, 'cookie'), # /* Opaque controller-issued identifier. */ #======================================================================= # /* Followed by: # * - Exactly match_len (possibly 0) bytes containing the nx_match, then # * - Exactly (match_len + 7)/8*8 - match_len (between 0 and 7) bytes of # * all-zero bytes, then # * - Actions to fill out the remainder 'length' bytes (always a multiple # * of 8). If NXFMF_ACTIONS was not specified, or 'event' is # * NXFME_DELETED, no actions are included. # */ #======================================================================= (nx_matches,), (ofp_action[0], 'actions'), name = 'nx_flow_update_full', base = nx_flow_update, criteria = lambda x: x.event in (NXFME_ADDED, NXFME_DELETED, NXFME_MODIFIED), init = packvalue(NXFME_ADDED, 'event') ) namespace['nx_flow_update_full'] = nx_flow_update_full ''' /* NXST_FLOW_MONITOR reply for NXFME_ABBREV. * * When the controller does not specify NXFMF_OWN in a monitor request, any * flow tables changes due to the controller's own requests (on the same * OpenFlow channel) will be abbreviated, when possible, to this form, which * simply specifies the 'xid' of the OpenFlow request (e.g. an OFPT_FLOW_MOD or * NXT_FLOW_MOD) that caused the change. * * Some changes cannot be abbreviated and will be sent in full: * * - Changes that only partially succeed. This can happen if, for example, * a flow_mod with type OFPFC_MODIFY affects multiple flows, but only some * of those modifications succeed (e.g. due to hardware limitations). * * This cannot occur with the current implementation of the Open vSwitch * software datapath. It could happen with other datapath implementations. * * - Changes that race with conflicting changes made by other controllers or * other flow_mods (not separated by barriers) by the same controller. * * This cannot occur with the current Open vSwitch implementation * (regardless of datapath) because Open vSwitch internally serializes * potentially conflicting changes. 
* * A flow_mod that does not change the flow table will not trigger any * notification, even an abbreviated one. For example, a "modify" or "delete" * flow_mod that does not match any flows will not trigger a notification. * Whether an "add" or "modify" that specifies all the same parameters that a * flow already has triggers a notification is unspecified and subject to * change in future versions of Open vSwitch. * * OVS will always send the notifications for a given flow table change before * the reply to a OFPT_BARRIER_REQUEST request that follows the flow table * change. Thus, if the controller does not receive an abbreviated (or * unabbreviated) notification for a flow_mod before the next * OFPT_BARRIER_REPLY, it will never receive one. */ ''' nx_flow_update_abbrev = nstruct( (uint32, 'xid'), # /* Controller-specified xid from flow_mod. */ name = 'nx_flow_update_abbrev', base = nx_flow_update, criteria = lambda x: x.event == NXFME_ABBREV, init = packvalue(NXFME_ABBREV, 'event') ) namespace['nx_flow_update_abbrev'] = nx_flow_update_abbrev nx_flow_monitor_reply = nstruct( (nx_flow_update[0], 'stats'), base = nx_stats_reply, classifyby = (NXST_FLOW_MONITOR,), name = 'nx_flow_monitor_reply', criteria = lambda x: getattr(x, stats_subtype) == NXST_FLOW_MONITOR, init = packvalue(NXST_FLOW_MONITOR, stats_subtype) ) namespace['nx_flow_monitor_reply'] = nx_flow_monitor_reply ''' /* NXT_FLOW_MONITOR_CANCEL. * * Used by a controller to cancel an outstanding monitor. */ ''' nx_flow_monitor_cancel = nstruct( (uint32, 'id'), # /* 'id' from nx_flow_monitor_request. */ name = 'nx_flow_monitor_cancel', base = nicira_header, classifyby = (NXT_FLOW_MONITOR_CANCEL,), criteria = lambda x: getattr(x, msg_subtype) == NXT_FLOW_MONITOR_CANCEL, init = packvalue(NXT_FLOW_MONITOR_CANCEL, msg_subtype) ) namespace['nx_flow_monitor_cancel'] = nx_flow_monitor_cancel ''' /* Action structure for NXAST_WRITE_METADATA. * * Modifies the 'mask' bits of the metadata value. */ ''' nx_action_write_metadata = nstruct( (uint8[6],), # /* Must be zero. */ (uint64, 'metadata'), # /* Metadata register. */ (uint64, 'mask'), # /* Metadata mask. */ base = nx_action, classifyby = (NXAST_WRITE_METADATA,), criteria = lambda x: getattr(x, action_subtype) == NXAST_WRITE_METADATA, init = packvalue(NXAST_WRITE_METADATA, action_subtype), name = 'nx_action_write_metadata' ) namespace['nx_action_write_metadata'] = nx_action_write_metadata ''' /* Action structure for NXAST_PUSH_MPLS. */ ''' nx_action_push_mpls = nstruct( (ethertype, 'ethertype'), # /* Ethertype */ (uint8[4],), base = nx_action, classifyby = (NXAST_PUSH_MPLS,), criteria = lambda x: getattr(x, action_subtype) == NXAST_PUSH_MPLS, init = packvalue(NXAST_PUSH_MPLS, action_subtype), name = 'nx_action_push_mpls' ) namespace['nx_action_push_mpls'] = nx_action_push_mpls ''' /* Action structure for NXAST_POP_MPLS. */ ''' nx_action_pop_mpls = nstruct( (ethertype, 'ethertype'), # /* Ethertype */ (uint8[4],), base = nx_action, classifyby = (NXAST_POP_MPLS,), criteria = lambda x: getattr(x, action_subtype) == NXAST_POP_MPLS, init = packvalue(NXAST_POP_MPLS, action_subtype), name = 'nx_action_pop_mpls' ) namespace['nx_action_pop_mpls'] = nx_action_pop_mpls ''' /* Action structure for NXAST_SET_MPLS_LABEL. */ ''' nx_action_mpls_label = nstruct( (uint8[2],), # /* Must be zero. 
*/ (uint32, 'label'), # /* LABEL */ base = nx_action, classifyby = (NXAST_SET_MPLS_LABEL,), criteria = lambda x: getattr(x, action_subtype) == NXAST_SET_MPLS_LABEL, init = packvalue(NXAST_SET_MPLS_LABEL, action_subtype), name = 'nx_action_mpls_label' ) namespace['nx_action_mpls_label'] = nx_action_mpls_label ''' /* Action structure for NXAST_SET_MPLS_TC. */ ''' nx_action_mpls_tc = nstruct( (uint8, 'tc'), # /* TC */ (uint8[5],), base = nx_action, classifyby = (NXAST_SET_MPLS_TC,), criteria = lambda x: getattr(x, action_subtype) == NXAST_SET_MPLS_TC, init = packvalue(NXAST_SET_MPLS_TC, action_subtype), name = 'nx_action_mpls_tc' ) namespace['nx_action_mpls_tc'] = nx_action_mpls_tc ''' /* Action structure for NXAST_SET_MPLS_TTL. */ ''' nx_action_mpls_ttl = nstruct( (uint8, 'ttl'), # /* TTL */ (uint8[5],), base = nx_action, classifyby = (NXAST_SET_MPLS_TTL,), criteria = lambda x: getattr(x, action_subtype) == NXAST_SET_MPLS_TTL, init = packvalue(NXAST_SET_MPLS_TTL, action_subtype), name = 'nx_action_mpls_ttl' ) namespace['nx_action_mpls_ttl'] = nx_action_mpls_ttl ''' /* Action structure for NXAST_SAMPLE. * * Samples matching packets with the given probability and sends them * each to the set of collectors identified with the given ID. The * probability is expressed as a number of packets to be sampled out * of USHRT_MAX packets, and must be >0. * * When sending packet samples to IPFIX collectors, the IPFIX flow * record sent for each sampled packet is associated with the given * observation domain ID and observation point ID. Each IPFIX flow * record contain the sampled packet's headers when executing this * rule. If a sampled packet's headers are modified by previous * actions in the flow, those modified headers are sent. */ ''' nx_action_sample = nstruct( (uint16, 'probability'), # /* Fraction of packets to sample. */ (uint32, 'collector_set_id'), # /* ID of collector set in OVSDB. */ (uint32, 'obs_domain_id'), # /* ID of sampling observation domain. */ (uint32, 'obs_point_id'), # /* ID of sampling observation point. */ base = nx_action, classifyby = (NXAST_SAMPLE,), criteria = lambda x: getattr(x, action_subtype) == NXAST_SAMPLE, init = packvalue(NXAST_SAMPLE, action_subtype), name = 'nx_action_sample' ) namespace['nx_action_sample'] = nx_action_sample
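# --- Illustrative usage sketch (added for clarity; not part of the original
# vlcp source). The create_nxfms_* helpers above assemble the variable-length
# flow_mod_spec list that nx_action_learn consumes in its 'specs' field. The
# sketch below shows how a classic MAC-learning action might be put together.
# Assumptions: this module's NXM_* constants (NXM_OF_VLAN_TCI, NXM_OF_ETH_SRC,
# NXM_OF_ETH_DST, NXM_OF_IN_PORT) are in scope, `ns` is the namespace dict
# previously populated by create_extension(), and namedstruct types can be
# called with keyword arguments to build an instance; the function name and
# the chosen timeouts/priority are hypothetical.
def _example_build_mac_learn_action(ns):
    specs = [
        # Learned flow must match the same VLAN ID as the triggering packet
        # (the low 12 bits of NXM_OF_VLAN_TCI).
        ns['create_nxfms_matchfield'](NXM_OF_VLAN_TCI, NXM_OF_VLAN_TCI,
                                      n_bits = 12),
        # Learned flow matches eth_dst == eth_src of the triggering packet.
        ns['create_nxfms_matchfield'](NXM_OF_ETH_SRC, NXM_OF_ETH_DST),
        # Learned flow outputs to the port the triggering packet arrived on.
        ns['create_nxfms_outputfield'](NXM_OF_IN_PORT)
    ]
    # nx_action_learn carries ordinary flow_mod-style parameters plus the
    # flow_mod_spec list assembled above.
    return ns['nx_action_learn'](idle_timeout = 300,
                                 hard_timeout = 0,
                                 priority = 100,
                                 cookie = 0,
                                 flags = 0,
                                 table_id = 1,
                                 fin_idle_timeout = 0,
                                 fin_hard_timeout = 0,
                                 specs = specs)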
[ "def", "create_extension", "(", "namespace", ",", "nicira_header", ",", "nx_action", ",", "nx_stats_request", ",", "nx_stats_reply", ",", "msg_subtype", ",", "action_subtype", ",", "stats_subtype", ")", ":", "with", "_warnings", ".", "catch_warnings", "(", ")", ":", "_warnings", ".", "filterwarnings", "(", "'ignore'", ",", "'^padding'", ",", "StructDefWarning", ")", "nx_flow_mod_table_id", "=", "nstruct", "(", "(", "uint8", ",", "'set'", ")", ",", "# /* Nonzero to enable, zero to disable. */", "(", "uint8", "[", "7", "]", ",", ")", ",", "name", "=", "'nx_flow_mod_table_id'", ",", "base", "=", "nicira_header", ",", "criteria", "=", "lambda", "x", ":", "getattr", "(", "x", ",", "msg_subtype", ")", "==", "NXT_FLOW_MOD_TABLE_ID", ",", "classifyby", "=", "(", "NXT_FLOW_MOD_TABLE_ID", ",", ")", ",", "init", "=", "packvalue", "(", "NXT_FLOW_MOD_TABLE_ID", ",", "msg_subtype", ")", ")", "namespace", "[", "'nx_flow_mod_table_id'", "]", "=", "nx_flow_mod_table_id", "'''\n /* NXT_SET_PACKET_IN_FORMAT request. */\n '''", "nx_set_packet_in_format", "=", "nstruct", "(", "(", "uint32", ",", "'format'", ")", ",", "# /* One of NXPIF_*. */", "name", "=", "'nx_set_packet_in_format'", ",", "base", "=", "nicira_header", ",", "criteria", "=", "lambda", "x", ":", "getattr", "(", "x", ",", "msg_subtype", ")", "==", "NXT_SET_PACKET_IN_FORMAT", ",", "classifyby", "=", "(", "NXT_SET_PACKET_IN_FORMAT", ",", ")", ",", "init", "=", "packvalue", "(", "NXT_SET_PACKET_IN_FORMAT", ",", "msg_subtype", ")", ")", "namespace", "[", "'nx_set_packet_in_format'", "]", "=", "nx_set_packet_in_format", "'''\n /* NXT_PACKET_IN (analogous to OFPT_PACKET_IN).\n *\n * NXT_PACKET_IN is similar to the OpenFlow 1.2 OFPT_PACKET_IN. The\n * differences are:\n *\n * - NXT_PACKET_IN includes the cookie of the rule that triggered the\n * message. (OpenFlow 1.3 OFPT_PACKET_IN also includes the cookie.)\n *\n * - The metadata fields use NXM (instead of OXM) field numbers.\n *\n * Open vSwitch 1.9.0 and later omits metadata fields that are zero (as allowed\n * by OpenFlow 1.2). Earlier versions included all implemented metadata\n * fields.\n *\n * Open vSwitch does not include non-metadata in the nx_match, because by\n * definition that information can be found in the packet itself. The format\n * and the standards allow this, however, so controllers should be prepared to\n * tolerate future changes.\n *\n * The NXM format is convenient for reporting metadata values, but it is\n * important not to interpret the format as matching against a flow, because it\n * does not. Nothing is being matched; arbitrary metadata masks would not be\n * meaningful.\n *\n * Whereas in most cases a controller can expect to only get back NXM fields\n * that it set up itself (e.g. flow dumps will ordinarily report only NXM\n * fields from flows that the controller added), NXT_PACKET_IN messages might\n * contain fields that the controller does not understand, because the switch\n * might support fields (new registers, new protocols, etc.) that the\n * controller does not. The controller must prepared to tolerate these.\n *\n * The 'cookie' field has no meaning when 'reason' is OFPR_NO_MATCH. In this\n * case it should be UINT64_MAX. 
*/\n '''", "if", "'ofp_oxm'", "in", "namespace", ":", "nx_match", "=", "namespace", "[", "'ofp_oxm'", "]", "namespace", "[", "'nx_match'", "]", "=", "nx_match", "nx_match_mask", "=", "namespace", "[", "'ofp_oxm_mask'", "]", "namespace", "[", "'nx_match_mask'", "]", "=", "nx_match_mask", "nx_match_nomask", "=", "namespace", "[", "'ofp_oxm_nomask'", "]", "namespace", "[", "'nx_match_nomask'", "]", "=", "nx_match_nomask", "create_nxm", "=", "namespace", "[", "'create_oxm'", "]", "namespace", "[", "'create_nxm'", "]", "=", "create_nxm", "nx_match_nomask_ext", "=", "nstruct", "(", "base", "=", "nx_match_nomask", ",", "criteria", "=", "lambda", "x", ":", "NXM_VENDOR", "(", "x", ".", "header", ")", "<=", "1", ",", "extend", "=", "{", "'header'", ":", "nxm_header", "}", ",", "name", "=", "'nx_match_nomask_ext'", ")", "namespace", "[", "'nx_match_nomask_ext'", "]", "=", "nx_match_nomask_ext", "nx_match_mask_ext", "=", "nstruct", "(", "base", "=", "nx_match_mask", ",", "criteria", "=", "lambda", "x", ":", "NXM_VENDOR", "(", "x", ".", "header", ")", "<=", "1", ",", "extend", "=", "{", "'header'", ":", "nxm_header", "}", ",", "name", "=", "'nx_match_mask_ext'", ")", "namespace", "[", "'nx_match_mask_ext'", "]", "=", "nx_match_mask_ext", "else", ":", "nx_match", "=", "nstruct", "(", "(", "nxm_header", ",", "'header'", ")", ",", "name", "=", "'nx_match'", ",", "padding", "=", "1", ",", "size", "=", "lambda", "x", ":", "NXM_LENGTH", "(", "x", ".", "header", ")", "+", "4", ")", "namespace", "[", "'nx_match'", "]", "=", "nx_match", "nx_match_nomask", "=", "nstruct", "(", "(", "raw", ",", "'value'", ")", ",", "base", "=", "nx_match", ",", "criteria", "=", "lambda", "x", ":", "not", "NXM_HASMASK", "(", "x", ".", "header", ")", ",", "init", "=", "packvalue", "(", "NXM_OF_IN_PORT", ",", "'header'", ")", ",", "name", "=", "'nx_match_nomask'", ")", "namespace", "[", "'nx_match_nomask'", "]", "=", "nx_match_nomask", "_nxm_mask_value", "=", "nstruct", "(", "(", "raw", ",", "'value'", ")", ",", "name", "=", "'nxm_mask_value'", ",", "size", "=", "lambda", "x", ":", "NXM_LENGTH", "(", "x", ".", "header", ")", "//", "2", ",", "padding", "=", "1", ")", "nx_match_mask", "=", "nstruct", "(", "(", "_nxm_mask_value", ",", ")", ",", "(", "raw", ",", "'mask'", ")", ",", "base", "=", "nx_match", ",", "criteria", "=", "lambda", "x", ":", "NXM_HASMASK", "(", "x", ".", "header", ")", ",", "init", "=", "packvalue", "(", "NXM_OF_ETH_SRC_W", ",", "'header'", ")", ",", "name", "=", "'nx_match_mask'", ",", ")", "namespace", "[", "'nx_match_mask'", "]", "=", "nx_match_mask", "def", "create_nxm", "(", "header", ",", "value", "=", "None", ",", "mask", "=", "None", ")", ":", "if", "NXM_HASMASK", "(", "header", ")", ":", "nxm", "=", "nx_match_mask", ".", "new", "(", ")", "size", "=", "NXM_LENGTH", "(", "header", ")", "//", "2", "else", ":", "nxm", "=", "nx_match_nomask", ".", "new", "(", ")", "size", "=", "NXM_LENGTH", "(", "header", ")", "nxm", ".", "header", "=", "header", "nxm", ".", "value", "=", "common", ".", "create_binary", "(", "value", ",", "size", ")", "if", "NXM_HASMASK", "(", "header", ")", ":", "nxm", ".", "mask", "=", "common", ".", "create_binary", "(", "mask", ",", "size", ")", "nxm", ".", "_pack", "(", ")", "nxm", ".", "_autosubclass", "(", ")", "return", "nxm", "namespace", "[", "'create_nxm'", "]", "=", "create_nxm", "nx_match_nomask_ext", "=", "nx_match_nomask", "nx_match_mask_ext", "=", "nx_match_mask", "namespace", "[", "'nx_match_nomask_ext'", "]", "=", "nx_match_nomask_ext", "namespace", "[", 
"'nx_match_mask_ext'", "]", "=", "nx_match_mask_ext", "from", "namedstruct", ".", "namedstruct", "import", "rawtype", "as", "_rawtype", "import", "socket", "as", "_socket", "if", "'ip4_addr_bytes'", "in", "namespace", ":", "ip4_addr_bytes", "=", "namespace", "[", "'ip4_addr_bytes'", "]", "else", ":", "ip4_addr_bytes", "=", "prim", "(", "'4s'", ",", "'ip4_addr_bytes'", ")", "ip4_addr_bytes", ".", "formatter", "=", "lambda", "x", ":", "_socket", ".", "inet_ntoa", "(", "x", ")", "namespace", "[", "'ip4_addr_bytes'", "]", "=", "ip4_addr_bytes", "nxm_mask_ipv4", "=", "nstruct", "(", "name", "=", "'nxm_mask_ipv4'", ",", "base", "=", "nx_match_mask_ext", ",", "criteria", "=", "lambda", "x", ":", "x", ".", "header", "in", "(", "NXM_OF_IP_SRC_W", ",", "NXM_OF_IP_DST_W", ",", "NXM_OF_ARP_SPA_W", ",", "NXM_OF_ARP_TPA_W", ",", "NXM_NX_TUN_IPV4_SRC_W", ",", "NXM_NX_TUN_IPV4_DST_W", ")", ",", "init", "=", "packvalue", "(", "NXM_OF_IP_SRC_W", ",", "'header'", ")", ",", "extend", "=", "{", "'value'", ":", "ip4_addr_bytes", ",", "'mask'", ":", "ip4_addr_bytes", "}", ")", "namespace", "[", "'nxm_mask_ipv4'", "]", "=", "nxm_mask_ipv4", "nxm_nomask_ipv4", "=", "nstruct", "(", "name", "=", "'nxm_nomask_ipv4'", ",", "base", "=", "nx_match_nomask_ext", ",", "criteria", "=", "lambda", "x", ":", "x", ".", "header", "in", "(", "NXM_OF_IP_SRC", ",", "NXM_OF_IP_DST", ",", "NXM_OF_ARP_SPA", ",", "NXM_OF_ARP_TPA", ",", "NXM_NX_TUN_IPV4_SRC", ",", "NXM_NX_TUN_IPV4_DST", ")", ",", "init", "=", "packvalue", "(", "NXM_OF_IP_SRC", ",", "'header'", ")", ",", "extend", "=", "{", "'value'", ":", "ip4_addr_bytes", "}", ")", "namespace", "[", "'nxm_nomask_ipv4'", "]", "=", "nxm_nomask_ipv4", "if", "'mac_addr_bytes'", "in", "namespace", ":", "mac_addr_bytes", "=", "namespace", "[", "'mac_addr_bytes'", "]", "else", ":", "mac_addr_bytes", "=", "_rawtype", "(", ")", "mac_addr_bytes", ".", "formatter", "=", "lambda", "x", ":", "':'", ".", "join", "(", "'%02X'", "%", "(", "c", ",", ")", "for", "c", "in", "bytearray", "(", "x", ")", ")", "namespace", "[", "'mac_addr_bytes'", "]", "=", "mac_addr_bytes", "nxm_mask_eth", "=", "nstruct", "(", "name", "=", "'nxm_mask_eth'", ",", "base", "=", "nx_match_mask_ext", ",", "criteria", "=", "lambda", "x", ":", "x", ".", "header", "in", "(", "NXM_OF_ETH_SRC_W", ",", "NXM_OF_ETH_DST_W", ")", ",", "init", "=", "packvalue", "(", "NXM_OF_ETH_SRC_W", ",", "'header'", ")", ",", "extend", "=", "{", "'value'", ":", "mac_addr_bytes", ",", "'mask'", ":", "mac_addr_bytes", "}", ")", "namespace", "[", "'nxm_mask_eth'", "]", "=", "nxm_mask_eth", "nxm_nomask_eth", "=", "nstruct", "(", "name", "=", "'nxm_nomask_eth'", ",", "base", "=", "nx_match_nomask_ext", ",", "criteria", "=", "lambda", "x", ":", "x", ".", "header", "in", "(", "NXM_OF_ETH_SRC", ",", "NXM_OF_ETH_DST", ",", "NXM_NX_ND_SLL", ",", "NXM_NX_ND_TLL", ",", "NXM_NX_ARP_SHA", ",", "NXM_NX_ARP_THA", ")", ",", "init", "=", "packvalue", "(", "NXM_OF_ETH_SRC", ",", "'header'", ")", ",", "extend", "=", "{", "'value'", ":", "mac_addr_bytes", "}", ")", "namespace", "[", "'nxm_nomask_eth'", "]", "=", "nxm_nomask_eth", "ofp_port_no", "=", "namespace", "[", "'ofp_port_no'", "]", "nx_port_no", "=", "enum", "(", "'nx_port_no'", ",", "None", ",", "uint16", ",", "*", "*", "dict", "(", "(", "k", ",", "v", "&", "0xffff", ")", "for", "k", ",", "v", "in", "ofp_port_no", ".", "getDict", "(", ")", ".", "items", "(", ")", ")", ")", "nxm_port_no_raw", "=", "_rawtype", "(", ")", "nxm_port_no_raw", ".", "formatter", "=", "lambda", "x", ":", "nx_port_no", ".", "formatter", "(", 
"nx_port_no", ".", "parse", "(", "x", ")", "[", "0", "]", ")", "namespace", "[", "'nx_port_no'", "]", "=", "nx_port_no", "namespace", "[", "'nxm_port_no_raw'", "]", "=", "nxm_port_no_raw", "nxm_nomask_port", "=", "nstruct", "(", "name", "=", "'nxm_nomask_port'", ",", "base", "=", "nx_match_nomask_ext", ",", "criteria", "=", "lambda", "x", ":", "x", ".", "header", "==", "NXM_OF_IN_PORT", ",", "init", "=", "packvalue", "(", "NXM_OF_IN_PORT", ",", "'header'", ")", ",", "extend", "=", "{", "'value'", ":", "nxm_port_no_raw", "}", ")", "namespace", "[", "'nxm_nomask_port'", "]", "=", "nxm_nomask_port", "if", "'ethtype_raw'", "in", "namespace", ":", "ethtype_raw", "=", "namespace", "[", "'ethtype_raw'", "]", "else", ":", "ethtype_raw", "=", "_rawtype", "(", ")", "ethtype_raw", ".", "formatter", "=", "lambda", "x", ":", "ethertype", ".", "formatter", "(", "ethertype", ".", "parse", "(", "x", ")", "[", "0", "]", ")", "namespace", "[", "'ethtype_raw'", "]", "=", "ethtype_raw", "nxm_nomask_ethertype", "=", "nstruct", "(", "name", "=", "'nxm_nomask_ethertype'", ",", "base", "=", "nx_match_nomask_ext", ",", "criteria", "=", "lambda", "x", ":", "x", ".", "header", "==", "NXM_OF_ETH_TYPE", ",", "init", "=", "packvalue", "(", "NXM_OF_ETH_TYPE", ",", "'header'", ")", ",", "extend", "=", "{", "'value'", ":", "ethtype_raw", "}", ")", "namespace", "[", "'nxm_nomask_ethertype'", "]", "=", "nxm_nomask_ethertype", "if", "'arpop_raw'", "in", "namespace", ":", "arpop_raw", "=", "namespace", "[", "'arpop_raw'", "]", "else", ":", "arpop_raw", "=", "_rawtype", "(", ")", "arpop_raw", ".", "formatter", "=", "lambda", "x", ":", "arp_op_code", ".", "formatter", "(", "arp_op_code", ".", "parse", "(", "x", ")", "[", "0", "]", ")", "namespace", "[", "'arpop_raw'", "]", "=", "arpop_raw", "nxm_nomask_arpopcode", "=", "nstruct", "(", "name", "=", "'nxm_nomask_arpopcode'", ",", "base", "=", "nx_match_nomask_ext", ",", "criteria", "=", "lambda", "x", ":", "x", ".", "header", "==", "NXM_OF_ARP_OP", ",", "init", "=", "packvalue", "(", "NXM_OF_ARP_OP", ",", "'header'", ")", ",", "extend", "=", "{", "'value'", ":", "arpop_raw", "}", ")", "namespace", "[", "'nxm_nomask_arpopcode'", "]", "=", "nxm_nomask_arpopcode", "if", "'ip_protocol_raw'", "in", "namespace", ":", "ip_protocol_raw", "=", "namespace", "[", "'ip_protocol_raw'", "]", "else", ":", "ip_protocol_raw", "=", "_rawtype", "(", ")", "ip_protocol_raw", ".", "formatter", "=", "lambda", "x", ":", "ip_protocol", ".", "formatter", "(", "ip_protocol", ".", "parse", "(", "x", ")", "[", "0", "]", ")", "namespace", "[", "'ip_protocol_raw'", "]", "=", "ip_protocol_raw", "nxm_nomask_ip_protocol", "=", "nstruct", "(", "name", "=", "'nxm_nomask_ip_protocol'", ",", "base", "=", "nx_match_nomask_ext", ",", "criteria", "=", "lambda", "x", ":", "x", ".", "header", "==", "NXM_OF_IP_PROTO", ",", "init", "=", "packvalue", "(", "NXM_OF_IP_PROTO", ",", "'header'", ")", ",", "extend", "=", "{", "'value'", ":", "ip_protocol_raw", "}", ")", "namespace", "[", "'nxm_nomask_ip_protocol'", "]", "=", "nxm_nomask_ip_protocol", "if", "'ip6_addr_bytes'", "in", "namespace", ":", "nxm_nomask_ipv6", "=", "nstruct", "(", "name", "=", "'nxm_nomask_ipv6'", ",", "base", "=", "nx_match_nomask_ext", ",", "criteria", "=", "lambda", "x", ":", "x", ".", "header", "in", "(", "NXM_NX_IPV6_SRC", ",", "NXM_NX_IPV6_DST", ",", "NXM_NX_ND_TARGET", ")", ",", "init", "=", "packvalue", "(", "NXM_NX_IPV6_SRC", ",", "'header'", ")", ",", "extend", "=", "{", "'value'", ":", "ip6_addr_bytes", "}", ")", "namespace", "[", 
"'nxm_nomask_ipv6'", "]", "=", "nxm_nomask_ipv6", "nxm_mask_ipv6", "=", "nstruct", "(", "name", "=", "'nxm_mask_ipv6'", ",", "base", "=", "nx_match_mask_ext", ",", "criteria", "=", "lambda", "x", ":", "x", ".", "header", "in", "(", "NXM_NX_IPV6_SRC_W", ",", "NXM_NX_IPV6_DST_W", ")", ",", "init", "=", "packvalue", "(", "NXM_NX_IPV6_SRC_W", ",", "'header'", ")", ",", "extend", "=", "{", "'value'", ":", "ip6_addr_bytes", ",", "'mask'", ":", "ip6_addr_bytes", "}", ")", "namespace", "[", "'nxm_mask_ipv6'", "]", "=", "nxm_mask_ipv6", "nx_ip_frag_raw", "=", "_rawtype", "(", ")", "nx_ip_frag_raw", ".", "formatter", "=", "lambda", "x", ":", "nx_ip_frag", ".", "formatter", "(", "nx_ip_frag", ".", "parse", "(", "x", ")", "[", "0", "]", ")", "nxm_nomask_ipfrag", "=", "nstruct", "(", "name", "=", "'nxm_nomask_ipfrag'", ",", "base", "=", "nx_match_nomask_ext", ",", "criteria", "=", "lambda", "x", ":", "x", ".", "header", "==", "NXM_NX_IP_FRAG", ",", "init", "=", "packvalue", "(", "NXM_NX_IP_FRAG", ",", "'header'", ")", ",", "extend", "=", "{", "'value'", ":", "nx_ip_frag_raw", "}", ")", "namespace", "[", "'nxm_nomask_ipfrag'", "]", "=", "nxm_nomask_ipfrag", "nxm_mask_ipfrag", "=", "nstruct", "(", "name", "=", "'nxm_mask_ipfrag'", ",", "base", "=", "nx_match_mask_ext", ",", "criteria", "=", "lambda", "x", ":", "x", ".", "header", "==", "NXM_NX_IP_FRAG_W", ",", "init", "=", "packvalue", "(", "NXM_NX_IP_FRAG_W", ",", "'header'", ")", ",", "extend", "=", "{", "'value'", ":", "nx_ip_frag_raw", ",", "'mask'", ":", "nx_ip_frag_raw", "}", ")", "namespace", "[", "'nxm_mask_ipfrag'", "]", "=", "nxm_mask_ipfrag", "nx_matches", "=", "nstruct", "(", "(", "nx_match", "[", "0", "]", ",", "'matches'", ")", ",", "name", "=", "'nx_matches'", ",", "size", "=", "sizefromlen", "(", "65536", ",", "'match_len'", ")", ",", "prepack", "=", "packrealsize", "(", "'match_len'", ")", ",", "padding", "=", "8", ")", "namespace", "[", "'nx_matches'", "]", "=", "nx_matches", "nx_packet_in", "=", "nstruct", "(", "(", "uint32", ",", "'buffer_id'", ")", ",", "# /* ID assigned by datapath. */", "(", "uint16", ",", "'total_len'", ")", ",", "# /* Full length of frame. */", "(", "uint8", ",", "'reason'", ")", ",", "# /* Reason packet is sent (one of OFPR_*). */", "(", "uint8", ",", "'table_id'", ")", ",", "# /* ID of the table that was looked up. */", "(", "uint64", ",", "'cookie'", ")", ",", "# /* Cookie of the rule that was looked up. */", "(", "uint16", ",", "'match_len'", ")", ",", "# /* Size of nx_match. */", "(", "uint8", "[", "6", "]", ",", ")", ",", "# /* Align to 64-bits. */", "(", "nx_matches", ",", ")", ",", "(", "uint8", "[", "2", "]", ",", ")", ",", "(", "raw", ",", "'data'", ")", ",", "name", "=", "'nx_packet_in'", ",", "base", "=", "nicira_header", ",", "classifyby", "=", "(", "NXT_PACKET_IN", ",", ")", ",", "criteria", "=", "lambda", "x", ":", "getattr", "(", "x", ",", "msg_subtype", ")", "==", "NXT_PACKET_IN", ",", "init", "=", "packvalue", "(", "NXT_PACKET_IN", ",", "msg_subtype", ")", ")", "namespace", "[", "'nx_packet_in'", "]", "=", "nx_packet_in", "'''\n /* Configures the \"role\" of the sending controller. 
The default role is:\n *\n * - Other (NX_ROLE_OTHER), which allows the controller access to all\n * OpenFlow features.\n *\n * The other possible roles are a related pair:\n *\n * - Master (NX_ROLE_MASTER) is equivalent to Other, except that there may\n * be at most one Master controller at a time: when a controller\n * configures itself as Master, any existing Master is demoted to the\n * Slave role.\n *\n * - Slave (NX_ROLE_SLAVE) allows the controller read-only access to\n * OpenFlow features. In particular attempts to modify the flow table\n * will be rejected with an OFPBRC_EPERM error.\n *\n * Slave controllers do not receive OFPT_PACKET_IN or OFPT_FLOW_REMOVED\n * messages, but they do receive OFPT_PORT_STATUS messages.\n */\n '''", "nx_role_request", "=", "nstruct", "(", "(", "nx_role", ",", "'role'", ")", ",", "# /* One of NX_ROLE_*. */", "name", "=", "'nx_role_request'", ",", "base", "=", "nicira_header", ",", "classifyby", "=", "(", "NXT_ROLE_REQUEST", ",", "NXT_ROLE_REPLY", ")", ",", "criteria", "=", "lambda", "x", ":", "getattr", "(", "x", ",", "msg_subtype", ")", "==", "NXT_ROLE_REQUEST", "or", "getattr", "(", "x", ",", "msg_subtype", ")", "==", "NXT_ROLE_REPLY", ",", "init", "=", "packvalue", "(", "NXT_ROLE_REQUEST", ",", "msg_subtype", ")", ")", "namespace", "[", "'nx_role_request'", "]", "=", "nx_role_request", "'''\n /* NXT_SET_ASYNC_CONFIG.\n *\n * Sent by a controller, this message configures the asynchronous messages that\n * the controller wants to receive. Element 0 in each array specifies messages\n * of interest when the controller has an \"other\" or \"master\" role; element 1,\n * when the controller has a \"slave\" role.\n *\n * Each array element is a bitmask in which a 0-bit disables receiving a\n * particular message and a 1-bit enables receiving it. Each bit controls the\n * message whose 'reason' corresponds to the bit index. 
For example, the bit\n * with value 1<<2 == 4 in port_status_mask[1] determines whether the\n * controller will receive OFPT_PORT_STATUS messages with reason OFPPR_MODIFY\n * (value 2) when the controller has a \"slave\" role.\n *\n * As a side effect, for service controllers, this message changes the\n * miss_send_len from default of zero to OFP_DEFAULT_MISS_SEND_LEN (128).\n */\n '''", "ofp_packet_in_reason", "=", "namespace", "[", "'ofp_packet_in_reason'", "]", "if", "'ofp_packet_in_reason_bitwise'", "in", "namespace", ":", "ofp_packet_in_reason_bitwise", "=", "namespace", "[", "'ofp_packet_in_reason_bitwise'", "]", "else", ":", "ofp_packet_in_reason_bitwise", "=", "enum", "(", "'ofp_packet_in_reason_bitwise'", ",", "None", ",", "uint32", ",", "*", "*", "dict", "(", "(", "k", ",", "1", "<<", "v", ")", "for", "k", ",", "v", "in", "ofp_packet_in_reason", ".", "getDict", "(", ")", ".", "items", "(", ")", ")", ")", "namespace", "[", "'ofp_packet_in_reason_bitwise'", "]", "=", "ofp_packet_in_reason_bitwise", "ofp_port_reason", "=", "namespace", "[", "'ofp_port_reason'", "]", "if", "'ofp_port_reason_bitwise'", "in", "namespace", ":", "ofp_port_reason_bitwise", "=", "namespace", "[", "'ofp_port_reason_bitwise'", "]", "else", ":", "ofp_port_reason_bitwise", "=", "enum", "(", "'ofp_port_reason_bitwise'", ",", "None", ",", "uint32", ",", "*", "*", "dict", "(", "(", "k", ",", "1", "<<", "v", ")", "for", "k", ",", "v", "in", "ofp_port_reason", ".", "getDict", "(", ")", ".", "items", "(", ")", ")", ")", "namespace", "[", "'ofp_port_reason_bitwise'", "]", "=", "ofp_port_reason_bitwise", "ofp_flow_removed_reason", "=", "namespace", "[", "'ofp_flow_removed_reason'", "]", "if", "'ofp_flow_removed_reason_bitwise'", "in", "namespace", ":", "ofp_flow_removed_reason_bitwise", "=", "namespace", "[", "'ofp_flow_removed_reason_bitwise'", "]", "else", ":", "ofp_flow_removed_reason_bitwise", "=", "enum", "(", "'ofp_flow_removed_reason_bitwise'", ",", "None", ",", "uint32", ",", "*", "*", "dict", "(", "(", "k", ",", "1", "<<", "v", ")", "for", "k", ",", "v", "in", "ofp_flow_removed_reason", ".", "getDict", "(", ")", ".", "items", "(", ")", ")", ")", "namespace", "[", "'ofp_flow_removed_reason_bitwise'", "]", "=", "ofp_flow_removed_reason_bitwise", "nx_async_config", "=", "nstruct", "(", "(", "ofp_packet_in_reason_bitwise", "[", "2", "]", ",", "'packet_in_mask'", ")", ",", "# /* Bitmasks of OFPR_* values. */", "(", "ofp_port_reason_bitwise", "[", "2", "]", ",", "'port_status_mask'", ")", ",", "# /* Bitmasks of OFPRR_* values. */", "(", "ofp_flow_removed_reason_bitwise", "[", "2", "]", ",", "'flow_removed_mask'", ")", ",", "#/* Bitmasks of OFPPR_* values. */", "name", "=", "'nx_async_config'", ",", "base", "=", "nicira_header", ",", "classifyby", "=", "(", "NXT_SET_ASYNC_CONFIG", ",", ")", ",", "criteria", "=", "lambda", "x", ":", "getattr", "(", "x", ",", "msg_subtype", ")", "==", "NXT_SET_ASYNC_CONFIG", ",", "init", "=", "packvalue", "(", "NXT_SET_ASYNC_CONFIG", ",", "msg_subtype", ")", ")", "namespace", "[", "'nx_async_config'", "]", "=", "nx_async_config", "'''\n /* Nicira vendor flow actions. 
*/\n '''", "'''\n /* Action structures for NXAST_RESUBMIT and NXAST_RESUBMIT_TABLE.\n *\n * These actions search one of the switch's flow tables:\n *\n * - For NXAST_RESUBMIT_TABLE only, if the 'table' member is not 255, then\n * it specifies the table to search.\n *\n * - Otherwise (for NXAST_RESUBMIT_TABLE with a 'table' of 255, or for\n * NXAST_RESUBMIT regardless of 'table'), it searches the current flow\n * table, that is, the OpenFlow flow table that contains the flow from\n * which this action was obtained. If this action did not come from a\n * flow table (e.g. it came from an OFPT_PACKET_OUT message), then table 0\n * is the current table.\n *\n * The flow table lookup uses a flow that may be slightly modified from the\n * original lookup:\n *\n * - For NXAST_RESUBMIT, the 'in_port' member of struct nx_action_resubmit\n * is used as the flow's in_port.\n *\n * - For NXAST_RESUBMIT_TABLE, if the 'in_port' member is not OFPP_IN_PORT,\n * then its value is used as the flow's in_port. Otherwise, the original\n * in_port is used.\n *\n * - If actions that modify the flow (e.g. OFPAT_SET_VLAN_VID) precede the\n * resubmit action, then the flow is updated with the new values.\n *\n * Following the lookup, the original in_port is restored.\n *\n * If the modified flow matched in the flow table, then the corresponding\n * actions are executed. Afterward, actions following the resubmit in the\n * original set of actions, if any, are executed; any changes made to the\n * packet (e.g. changes to VLAN) by secondary actions persist when those\n * actions are executed, although the original in_port is restored.\n *\n * Resubmit actions may be used any number of times within a set of actions.\n *\n * Resubmit actions may nest to an implementation-defined depth. Beyond this\n * implementation-defined depth, further resubmit actions are simply ignored.\n *\n * NXAST_RESUBMIT ignores 'table' and 'pad'. NXAST_RESUBMIT_TABLE requires\n * 'pad' to be all-bits-zero.\n *\n * Open vSwitch 1.0.1 and earlier did not support recursion. Open vSwitch\n * before 1.2.90 did not support NXAST_RESUBMIT_TABLE.\n */\n '''", "nx_action_resubmit", "=", "nstruct", "(", "(", "nx_port_no", ",", "'in_port'", ")", ",", "# /* New in_port for checking flow table. */", "(", "uint8", ",", "'table'", ")", ",", "# /* NXAST_RESUBMIT_TABLE: table to use. */", "(", "uint8", "[", "3", "]", ",", ")", ",", "base", "=", "nx_action", ",", "criteria", "=", "lambda", "x", ":", "getattr", "(", "x", ",", "action_subtype", ")", "==", "NXAST_RESUBMIT_TABLE", "or", "getattr", "(", "x", ",", "action_subtype", ")", "==", "NXAST_RESUBMIT", ",", "classifyby", "=", "(", "NXAST_RESUBMIT_TABLE", ",", "NXAST_RESUBMIT", ")", ",", "name", "=", "'nx_action_resubmit'", ",", "init", "=", "packvalue", "(", "NXAST_RESUBMIT_TABLE", ",", "action_subtype", ")", ")", "namespace", "[", "'nx_action_resubmit'", "]", "=", "nx_action_resubmit", "'''\n /* Action structure for NXAST_SET_TUNNEL.\n *\n * Sets the encapsulating tunnel ID to a 32-bit value. The most-significant 32\n * bits of the tunnel ID are set to 0. */\n '''", "nx_action_set_tunnel", "=", "nstruct", "(", "(", "uint8", "[", "2", "]", ",", ")", ",", "(", "uint32", ",", "'tun_id'", ")", ",", "# /* Tunnel ID. 
*/", "name", "=", "'nx_action_set_tunnel'", ",", "base", "=", "nx_action", ",", "classifyby", "=", "(", "NXAST_SET_TUNNEL", ",", ")", ",", "criteria", "=", "lambda", "x", ":", "getattr", "(", "x", ",", "action_subtype", ")", "==", "NXAST_SET_TUNNEL", ",", "init", "=", "packvalue", "(", "NXAST_SET_TUNNEL", ",", "action_subtype", ")", ")", "namespace", "[", "'nx_action_set_tunnel'", "]", "=", "nx_action_set_tunnel", "'''\n /* Action structure for NXAST_SET_TUNNEL64.\n *\n * Sets the encapsulating tunnel ID to a 64-bit value. */\n '''", "nx_action_set_tunnel64", "=", "nstruct", "(", "(", "uint8", "[", "6", "]", ",", ")", ",", "(", "uint64", ",", "'tun_id'", ")", ",", "# /* Tunnel ID. */", "name", "=", "'nx_action_set_tunnel64'", ",", "base", "=", "nx_action", ",", "classifyby", "=", "(", "NXAST_SET_TUNNEL64", ",", ")", ",", "criteria", "=", "lambda", "x", ":", "getattr", "(", "x", ",", "action_subtype", ")", "==", "NXAST_SET_TUNNEL64", ",", "init", "=", "packvalue", "(", "NXAST_SET_TUNNEL64", ",", "action_subtype", ")", ")", "namespace", "[", "'nx_action_set_tunnel64'", "]", "=", "nx_action_set_tunnel64", "'''\n /* Action structure for NXAST_SET_QUEUE.\n *\n * Set the queue that should be used when packets are output. This is similar\n * to the OpenFlow OFPAT_ENQUEUE action, but does not take the output port as\n * an argument. This allows the queue to be defined before the port is\n * known. */\n '''", "nx_action_set_queue", "=", "nstruct", "(", "(", "uint8", "[", "2", "]", ",", ")", ",", "(", "uint32", ",", "'queue_id'", ")", ",", "# /* Where to enqueue packets. */", "name", "=", "'nx_action_set_queue'", ",", "base", "=", "nx_action", ",", "classifyby", "=", "(", "NXAST_SET_QUEUE", ",", ")", ",", "criteria", "=", "lambda", "x", ":", "getattr", "(", "x", ",", "action_subtype", ")", "==", "NXAST_SET_QUEUE", ",", "init", "=", "packvalue", "(", "NXAST_SET_QUEUE", ",", "action_subtype", ")", ")", "namespace", "[", "'nx_action_set_queue'", "]", "=", "nx_action_set_queue", "'''\n /* Action structure for NXAST_POP_QUEUE.\n *\n * Restores the queue to the value it was before any NXAST_SET_QUEUE actions\n * were used. Only the original queue can be restored this way; no stack is\n * maintained. */\n '''", "nx_action_pop_queue", "=", "nstruct", "(", "(", "uint8", "[", "6", "]", ",", ")", ",", "name", "=", "'nx_action_pop_queue'", ",", "base", "=", "nx_action", ",", "classifyby", "=", "(", "NXAST_POP_QUEUE", ",", ")", ",", "criteria", "=", "lambda", "x", ":", "getattr", "(", "x", ",", "action_subtype", ")", "==", "NXAST_POP_QUEUE", ",", "init", "=", "packvalue", "(", "NXAST_POP_QUEUE", ",", "action_subtype", ")", ")", "namespace", "[", "'nx_action_pop_queue'", "]", "=", "nx_action_pop_queue", "'''\n /* Action structure for NXAST_REG_MOVE.\n *\n * Copies src[src_ofs:src_ofs+n_bits] to dst[dst_ofs:dst_ofs+n_bits], where\n * a[b:c] denotes the bits within 'a' numbered 'b' through 'c' (not including\n * bit 'c'). Bit numbering starts at 0 for the least-significant bit, 1 for\n * the next most significant bit, and so on.\n *\n * 'src' and 'dst' are nxm_header values with nxm_hasmask=0. 
(It doesn't make\n * sense to use nxm_hasmask=1 because the action does not do any kind of\n * matching; it uses the actual value of a field.)\n *\n * The following nxm_header values are potentially acceptable as 'src':\n *\n * - NXM_OF_IN_PORT\n * - NXM_OF_ETH_DST\n * - NXM_OF_ETH_SRC\n * - NXM_OF_ETH_TYPE\n * - NXM_OF_VLAN_TCI\n * - NXM_OF_IP_TOS\n * - NXM_OF_IP_PROTO\n * - NXM_OF_IP_SRC\n * - NXM_OF_IP_DST\n * - NXM_OF_TCP_SRC\n * - NXM_OF_TCP_DST\n * - NXM_OF_UDP_SRC\n * - NXM_OF_UDP_DST\n * - NXM_OF_ICMP_TYPE\n * - NXM_OF_ICMP_CODE\n * - NXM_OF_ARP_OP\n * - NXM_OF_ARP_SPA\n * - NXM_OF_ARP_TPA\n * - NXM_NX_TUN_ID\n * - NXM_NX_ARP_SHA\n * - NXM_NX_ARP_THA\n * - NXM_NX_ICMPV6_TYPE\n * - NXM_NX_ICMPV6_CODE\n * - NXM_NX_ND_SLL\n * - NXM_NX_ND_TLL\n * - NXM_NX_REG(idx) for idx in the switch's accepted range.\n * - NXM_NX_PKT_MARK\n * - NXM_NX_TUN_IPV4_SRC\n * - NXM_NX_TUN_IPV4_DST\n *\n * The following nxm_header values are potentially acceptable as 'dst':\n *\n * - NXM_OF_ETH_DST\n * - NXM_OF_ETH_SRC\n * - NXM_OF_IP_TOS\n * - NXM_OF_IP_SRC\n * - NXM_OF_IP_DST\n * - NXM_OF_TCP_SRC\n * - NXM_OF_TCP_DST\n * - NXM_OF_UDP_SRC\n * - NXM_OF_UDP_DST\n * - NXM_NX_ARP_SHA\n * - NXM_NX_ARP_THA\n * - NXM_OF_ARP_OP\n * - NXM_OF_ARP_SPA\n * - NXM_OF_ARP_TPA\n * Modifying any of the above fields changes the corresponding packet\n * header.\n *\n * - NXM_OF_IN_PORT\n *\n * - NXM_NX_REG(idx) for idx in the switch's accepted range.\n *\n * - NXM_NX_PKT_MARK\n *\n * - NXM_OF_VLAN_TCI. Modifying this field's value has side effects on the\n * packet's 802.1Q header. Setting a value with CFI=0 removes the 802.1Q\n * header (if any), ignoring the other bits. Setting a value with CFI=1\n * adds or modifies the 802.1Q header appropriately, setting the TCI field\n * to the field's new value (with the CFI bit masked out).\n *\n * - NXM_NX_TUN_ID, NXM_NX_TUN_IPV4_SRC, NXM_NX_TUN_IPV4_DST. Modifying\n * any of these values modifies the corresponding tunnel header field used\n * for the packet's next tunnel encapsulation, if allowed by the\n * configuration of the output tunnel port.\n *\n * A given nxm_header value may be used as 'src' or 'dst' only on a flow whose\n * nx_match satisfies its prerequisites. For example, NXM_OF_IP_TOS may be\n * used only if the flow's nx_match includes an nxm_entry that specifies\n * nxm_type=NXM_OF_ETH_TYPE, nxm_hasmask=0, and nxm_value=0x0800.\n *\n * The switch will reject actions for which src_ofs+n_bits is greater than the\n * width of 'src' or dst_ofs+n_bits is greater than the width of 'dst' with\n * error type OFPET_BAD_ACTION, code OFPBAC_BAD_ARGUMENT.\n *\n * This action behaves properly when 'src' overlaps with 'dst', that is, it\n * behaves as if 'src' were copied out to a temporary buffer, then the\n * temporary buffer copied to 'dst'.\n */\n '''", "nx_action_reg_move", "=", "nstruct", "(", "(", "uint16", ",", "'n_bits'", ")", ",", "# /* Number of bits. */", "(", "uint16", ",", "'src_ofs'", ")", ",", "# /* Starting bit offset in source. */", "(", "uint16", ",", "'dst_ofs'", ")", ",", "# /* Starting bit offset in destination. */", "(", "nxm_header", ",", "'src'", ")", ",", "# /* Source register. */", "(", "nxm_header", ",", "'dst'", ")", ",", "# /* Destination register. 
*/", "name", "=", "'nx_action_reg_move'", ",", "base", "=", "nx_action", ",", "classifyby", "=", "(", "NXAST_REG_MOVE", ",", ")", ",", "criteria", "=", "lambda", "x", ":", "getattr", "(", "x", ",", "action_subtype", ")", "==", "NXAST_REG_MOVE", ",", "init", "=", "packvalue", "(", "NXAST_REG_MOVE", ",", "action_subtype", ")", ",", "formatter", "=", "_createdesc", "(", "lambda", "x", ":", "'move:%s[%d..%d]->%s[%d..%d]'", "%", "(", "x", "[", "'src'", "]", ",", "x", "[", "'src_ofs'", "]", ",", "x", "[", "'src_ofs'", "]", "+", "x", "[", "'n_bits'", "]", "-", "1", ",", "x", "[", "'dst'", "]", ",", "x", "[", "'dst_ofs'", "]", ",", "x", "[", "'dst_ofs'", "]", "+", "x", "[", "'n_bits'", "]", "-", "1", ")", ")", ")", "namespace", "[", "'nx_action_reg_move'", "]", "=", "nx_action_reg_move", "'''\n /* Action structure for NXAST_REG_LOAD.\n *\n * Copies value[0:n_bits] to dst[ofs:ofs+n_bits], where a[b:c] denotes the bits\n * within 'a' numbered 'b' through 'c' (not including bit 'c'). Bit numbering\n * starts at 0 for the least-significant bit, 1 for the next most significant\n * bit, and so on.\n *\n * 'dst' is an nxm_header with nxm_hasmask=0. See the documentation for\n * NXAST_REG_MOVE, above, for the permitted fields and for the side effects of\n * loading them.\n *\n * The 'ofs' and 'n_bits' fields are combined into a single 'ofs_nbits' field\n * to avoid enlarging the structure by another 8 bytes. To allow 'n_bits' to\n * take a value between 1 and 64 (inclusive) while taking up only 6 bits, it is\n * also stored as one less than its true value:\n *\n * 15 6 5 0\n * +------------------------------+------------------+\n * | ofs | n_bits - 1 |\n * +------------------------------+------------------+\n *\n * The switch will reject actions for which ofs+n_bits is greater than the\n * width of 'dst', or in which any bits in 'value' with value 2**n_bits or\n * greater are set to 1, with error type OFPET_BAD_ACTION, code\n * OFPBAC_BAD_ARGUMENT.\n */\n '''", "nx_action_reg_load", "=", "nstruct", "(", "(", "uint16", ",", "'ofs_nbits'", ")", ",", "# /* (ofs << 6) | (n_bits - 1). */", "(", "nxm_header", ",", "'dst'", ")", ",", "# /* Destination register. */", "(", "uint64", ",", "'value'", ")", ",", "# /* Immediate value. */", "name", "=", "'nx_action_reg_load'", ",", "base", "=", "nx_action", ",", "classifyby", "=", "(", "NXAST_REG_LOAD", ",", ")", ",", "criteria", "=", "lambda", "x", ":", "getattr", "(", "x", ",", "action_subtype", ")", "==", "NXAST_REG_LOAD", ",", "init", "=", "packvalue", "(", "NXAST_REG_LOAD", ",", "action_subtype", ")", ",", "formatter", "=", "_createdesc", "(", "lambda", "x", ":", "'load:0x%x->%s[%d..%d]'", "%", "(", "x", "[", "'value'", "]", ",", "x", "[", "'dst'", "]", ",", "x", "[", "'ofs_nbits'", "]", ">>", "6", ",", "(", "x", "[", "'ofs_nbits'", "]", ">>", "6", ")", "+", "(", "x", "[", "'ofs_nbits'", "]", "&", "0x3f", ")", ")", ")", ")", "namespace", "[", "'nx_action_reg_load'", "]", "=", "nx_action_reg_load", "'''\n /* Action structure for NXAST_STACK_PUSH and NXAST_STACK_POP.\n *\n * Pushes (or pops) field[offset: offset + n_bits] to (or from)\n * top of the stack.\n */\n '''", "nx_action_stack", "=", "nstruct", "(", "(", "uint16", ",", "'offset'", ")", ",", "# /* Bit offset into the field. */", "(", "nxm_header", ",", "'field'", ")", ",", "# /* The field used for push or pop. */", "(", "uint16", ",", "'n_bits'", ")", ",", "# /* (n_bits + 1) bits of the field. */", "(", "uint8", "[", "6", "]", ",", ")", ",", "# /* Reserved, must be zero. 
*/", "name", "=", "'nx_action_stack'", ",", "base", "=", "nx_action", ",", "classifyby", "=", "(", "NXAST_STACK_PUSH", ",", "NXAST_STACK_POP", ")", ",", "criteria", "=", "lambda", "x", ":", "getattr", "(", "x", ",", "action_subtype", ")", "==", "NXAST_STACK_PUSH", "or", "getattr", "(", "x", ",", "action_subtype", ")", "==", "NXAST_STACK_POP", ",", "init", "=", "packvalue", "(", "NXAST_STACK_PUSH", ",", "action_subtype", ")", ",", "formatter", "=", "_createdesc", "(", "lambda", "x", ":", "'%s:%s[%d..%d]'", "%", "(", "'push'", "if", "x", "[", "action_subtype", "]", "==", "'NXAST_STACK_PUSH'", "else", "'pop'", ",", "x", "[", "'field'", "]", ",", "x", "[", "'offset'", "]", ",", "(", "x", "[", "'offset'", "]", "+", "x", "[", "'n_bits'", "]", "-", "1", ")", ")", ")", ")", "namespace", "[", "'nx_action_stack'", "]", "=", "nx_action_stack", "'''\n /* Action structure for NXAST_NOTE.\n *\n * This action has no effect. It is variable length. The switch does not\n * attempt to interpret the user-defined 'note' data in any way. A controller\n * can use this action to attach arbitrary metadata to a flow.\n *\n * This action might go away in the future.\n */\n '''", "nx_action_note", "=", "nstruct", "(", "(", "varchr", ",", "'note'", ")", ",", "name", "=", "'nx_action_note'", ",", "base", "=", "nx_action", ",", "classifyby", "=", "(", "NXAST_NOTE", ",", ")", ",", "criteria", "=", "lambda", "x", ":", "getattr", "(", "x", ",", "action_subtype", ")", "==", "NXAST_NOTE", ",", "init", "=", "packvalue", "(", "NXAST_NOTE", ",", "action_subtype", ")", ")", "namespace", "[", "'nx_action_note'", "]", "=", "nx_action_note", "'''\n /* Action structure for NXAST_MULTIPATH.\n *\n * This action performs the following steps in sequence:\n *\n * 1. Hashes the fields designated by 'fields', one of NX_HASH_FIELDS_*.\n * Refer to the definition of \"enum nx_mp_fields\" for details.\n *\n * The 'basis' value is used as a universal hash parameter, that is,\n * different values of 'basis' yield different hash functions. The\n * particular universal hash function used is implementation-defined.\n *\n * The hashed fields' values are drawn from the current state of the\n * flow, including all modifications that have been made by actions up to\n * this point.\n *\n * 2. Applies the multipath link choice algorithm specified by 'algorithm',\n * one of NX_MP_ALG_*. Refer to the definition of \"enum nx_mp_algorithm\"\n * for details.\n *\n * The output of the algorithm is 'link', an unsigned integer less than\n * or equal to 'max_link'.\n *\n * Some algorithms use 'arg' as an additional argument.\n *\n * 3. Stores 'link' in dst[ofs:ofs+n_bits]. The format and semantics of\n * 'dst' and 'ofs_nbits' are similar to those for the NXAST_REG_LOAD\n * action.\n *\n * The switch will reject actions that have an unknown 'fields', or an unknown\n * 'algorithm', or in which ofs+n_bits is greater than the width of 'dst', or\n * in which 'max_link' is greater than or equal to 2**n_bits, with error type\n * OFPET_BAD_ACTION, code OFPBAC_BAD_ARGUMENT.\n */\n '''", "nx_action_multipath", "=", "nstruct", "(", "#/* What fields to hash and how. */", "(", "nx_hash_fields", ",", "'fields'", ")", ",", "# /* One of NX_HASH_FIELDS_*. */", "(", "uint16", ",", "'basis'", ")", ",", "# /* Universal hash parameter. */", "(", "uint16", ",", ")", ",", "#/* Multipath link choice algorithm to apply to hash value. */", "(", "nx_mp_algorithm", ",", "'algorithm'", ")", ",", "# /* One of NX_MP_ALG_*. 
*/", "(", "uint16", ",", "'max_link'", ")", ",", "# /* Number of output links, minus 1. */", "(", "uint32", ",", "'arg'", ")", ",", "# /* Algorithm-specific argument. */", "(", "uint16", ",", ")", ",", "# /* Where to store the result. */", "(", "uint16", ",", "'ofs_nbits'", ")", ",", "# /* (ofs << 6) | (n_bits - 1). */", "(", "nxm_header", ",", "'dst'", ")", ",", "# /* Destination. */", "name", "=", "'nx_action_multipath'", ",", "base", "=", "nx_action", ",", "classifyby", "=", "(", "NXAST_MULTIPATH", ",", ")", ",", "criteria", "=", "lambda", "x", ":", "getattr", "(", "x", ",", "action_subtype", ")", "==", "NXAST_MULTIPATH", ",", "init", "=", "packvalue", "(", "NXAST_MULTIPATH", ",", "action_subtype", ")", ",", "formatter", "=", "_createdesc", "(", "lambda", "x", ":", "'multipath(%s,%d,%s,%d,%d,%s[%d..%d])'", "%", "(", "x", "[", "'fields'", "]", ",", "x", "[", "'basis'", "]", ",", "x", "[", "'algorithm'", "]", ",", "x", "[", "'max_link'", "]", "+", "1", ",", "x", "[", "'arg'", "]", ",", "x", "[", "'dst'", "]", ",", "x", "[", "'ofs_nbits'", "]", ">>", "6", ",", "(", "x", "[", "'ofs_nbits'", "]", ">>", "6", ")", "+", "(", "x", "[", "'ofs_nbits'", "]", "&", "0x3f", ")", ")", ")", ")", "namespace", "[", "'nx_action_multipath'", "]", "=", "nx_action_multipath", "'''\n /* Action structure for NXAST_LEARN.\n *\n * This action adds or modifies a flow in an OpenFlow table, similar to\n * OFPT_FLOW_MOD with OFPFC_MODIFY_STRICT as 'command'. The new flow has the\n * specified idle timeout, hard timeout, priority, cookie, and flags. The new\n * flow's match criteria and actions are built by applying each of the series\n * of flow_mod_spec elements included as part of the action.\n *\n * A flow_mod_spec starts with a 16-bit header. A header that is all-bits-0 is\n * a no-op used for padding the action as a whole to a multiple of 8 bytes in\n * length. Otherwise, the flow_mod_spec can be thought of as copying 'n_bits'\n * bits from a source to a destination. In this case, the header contains\n * multiple fields:\n *\n * 15 14 13 12 11 10 0\n * +------+---+------+---------------------------------+\n * | 0 |src| dst | n_bits |\n * +------+---+------+---------------------------------+\n *\n * The meaning and format of a flow_mod_spec depends on 'src' and 'dst'. The\n * following table summarizes the meaning of each possible combination.\n * Details follow the table:\n *\n * src dst meaning\n * --- --- ----------------------------------------------------------\n * 0 0 Add match criteria based on value in a field.\n * 1 0 Add match criteria based on an immediate value.\n * 0 1 Add NXAST_REG_LOAD action to copy field into a different field.\n * 1 1 Add NXAST_REG_LOAD action to load immediate value into a field.\n * 0 2 Add OFPAT_OUTPUT action to output to port from specified field.\n * All other combinations are undefined and not allowed.\n *\n * The flow_mod_spec header is followed by a source specification and a\n * destination specification. The format and meaning of the source\n * specification depends on 'src':\n *\n * - If 'src' is 0, the source bits are taken from a field in the flow to\n * which this action is attached. (This should be a wildcarded field. If\n * its value is fully specified then the source bits being copied have\n * constant values.)\n *\n * The source specification is an ovs_be32 'field' and an ovs_be16 'ofs'.\n * 'field' is an nxm_header with nxm_hasmask=0, and 'ofs' the starting bit\n * offset within that field. 
The source bits are field[ofs:ofs+n_bits-1].\n * 'field' and 'ofs' are subject to the same restrictions as the source\n * field in NXAST_REG_MOVE.\n *\n * - If 'src' is 1, the source bits are a constant value. The source\n * specification is (n_bits+15)/16*2 bytes long. Taking those bytes as a\n * number in network order, the source bits are the 'n_bits'\n * least-significant bits. The switch will report an error if other bits\n * in the constant are nonzero.\n *\n * The flow_mod_spec destination specification, for 'dst' of 0 or 1, is an\n * ovs_be32 'field' and an ovs_be16 'ofs'. 'field' is an nxm_header with\n * nxm_hasmask=0 and 'ofs' is a starting bit offset within that field. The\n * meaning of the flow_mod_spec depends on 'dst':\n *\n * - If 'dst' is 0, the flow_mod_spec specifies match criteria for the new\n * flow. The new flow matches only if bits field[ofs:ofs+n_bits-1] in a\n * packet equal the source bits. 'field' may be any nxm_header with\n * nxm_hasmask=0 that is allowed in NXT_FLOW_MOD.\n *\n * Order is significant. Earlier flow_mod_specs must satisfy any\n * prerequisites for matching fields specified later, by copying constant\n * values into prerequisite fields.\n *\n * The switch will reject flow_mod_specs that do not satisfy NXM masking\n * restrictions.\n *\n * - If 'dst' is 1, the flow_mod_spec specifies an NXAST_REG_LOAD action for\n * the new flow. The new flow copies the source bits into\n * field[ofs:ofs+n_bits-1]. Actions are executed in the same order as the\n * flow_mod_specs.\n *\n * A single NXAST_REG_LOAD action writes no more than 64 bits, so n_bits\n * greater than 64 yields multiple NXAST_REG_LOAD actions.\n *\n * The flow_mod_spec destination spec for 'dst' of 2 (when 'src' is 0) is\n * empty. It has the following meaning:\n *\n * - The flow_mod_spec specifies an OFPAT_OUTPUT action for the new flow.\n * The new flow outputs to the OpenFlow port specified by the source field.\n * Of the special output ports with value OFPP_MAX or larger, OFPP_IN_PORT,\n * OFPP_FLOOD, OFPP_LOCAL, and OFPP_ALL are supported. Other special ports\n * may not be used.\n *\n * Resource Management\n * -------------------\n *\n * A switch has a finite amount of flow table space available for learning.\n * When this space is exhausted, no new learning table entries will be learned\n * until some existing flow table entries expire. The controller should be\n * prepared to handle this by flooding (which can be implemented as a\n * low-priority flow).\n *\n * If a learned flow matches a single TCP stream with a relatively long\n * timeout, one may make the best of resource constraints by setting\n * 'fin_idle_timeout' or 'fin_hard_timeout' (both measured in seconds), or\n * both, to shorter timeouts. When either of these is specified as a nonzero\n * value, OVS adds a NXAST_FIN_TIMEOUT action, with the specified timeouts, to\n * the learned flow.\n *\n * Examples\n * --------\n *\n * The following examples give a prose description of the flow_mod_specs along\n * with informal notation for how those would be represented and a hex dump of\n * the bytes that would be required.\n *\n * These examples could work with various nx_action_learn parameters. Typical\n * values would be idle_timeout=OFP_FLOW_PERMANENT, hard_timeout=60,\n * priority=OFP_DEFAULT_PRIORITY, flags=0, table_id=10.\n *\n * 1. 
Learn input port based on the source MAC, with lookup into\n * NXM_NX_REG1[16:31] by resubmit to in_port=99:\n *\n * Match on in_port=99:\n * ovs_be16(src=1, dst=0, n_bits=16), 20 10\n * ovs_be16(99), 00 63\n * ovs_be32(NXM_OF_IN_PORT), ovs_be16(0) 00 00 00 02 00 00\n *\n * Match Ethernet destination on Ethernet source from packet:\n * ovs_be16(src=0, dst=0, n_bits=48), 00 30\n * ovs_be32(NXM_OF_ETH_SRC), ovs_be16(0) 00 00 04 06 00 00\n * ovs_be32(NXM_OF_ETH_DST), ovs_be16(0) 00 00 02 06 00 00\n *\n * Set NXM_NX_REG1[16:31] to the packet's input port:\n * ovs_be16(src=0, dst=1, n_bits=16), 08 10\n * ovs_be32(NXM_OF_IN_PORT), ovs_be16(0) 00 00 00 02 00 00\n * ovs_be32(NXM_NX_REG1), ovs_be16(16) 00 01 02 04 00 10\n *\n * Given a packet that arrived on port A with Ethernet source address B,\n * this would set up the flow \"in_port=99, dl_dst=B,\n * actions=load:A->NXM_NX_REG1[16..31]\".\n *\n * In syntax accepted by ovs-ofctl, this action is: learn(in_port=99,\n * NXM_OF_ETH_DST[]=NXM_OF_ETH_SRC[],\n * load:NXM_OF_IN_PORT[]->NXM_NX_REG1[16..31])\n *\n * 2. Output to input port based on the source MAC and VLAN VID, with lookup\n * into NXM_NX_REG1[16:31]:\n *\n * Match on same VLAN ID as packet:\n * ovs_be16(src=0, dst=0, n_bits=12), 00 0c\n * ovs_be32(NXM_OF_VLAN_TCI), ovs_be16(0) 00 00 08 02 00 00\n * ovs_be32(NXM_OF_VLAN_TCI), ovs_be16(0) 00 00 08 02 00 00\n *\n * Match Ethernet destination on Ethernet source from packet:\n * ovs_be16(src=0, dst=0, n_bits=48), 00 30\n * ovs_be32(NXM_OF_ETH_SRC), ovs_be16(0) 00 00 04 06 00 00\n * ovs_be32(NXM_OF_ETH_DST), ovs_be16(0) 00 00 02 06 00 00\n *\n * Output to the packet's input port:\n * ovs_be16(src=0, dst=2, n_bits=16), 10 10\n * ovs_be32(NXM_OF_IN_PORT), ovs_be16(0) 00 00 00 02 00 00\n *\n * Given a packet that arrived on port A with Ethernet source address B in\n * VLAN C, this would set up the flow \"dl_dst=B, vlan_vid=C,\n * actions=output:A\".\n *\n * In syntax accepted by ovs-ofctl, this action is:\n * learn(NXM_OF_VLAN_TCI[0..11], NXM_OF_ETH_DST[]=NXM_OF_ETH_SRC[],\n * output:NXM_OF_IN_PORT[])\n *\n * 3. Here's a recipe for a very simple-minded MAC learning switch. It uses a\n * 10-second MAC expiration time to make it easier to see what's going on\n *\n * ovs-vsctl del-controller br0\n * ovs-ofctl del-flows br0\n * ovs-ofctl add-flow br0 \"table=0 actions=learn(table=1, \\\n hard_timeout=10, NXM_OF_VLAN_TCI[0..11], \\\n NXM_OF_ETH_DST[]=NXM_OF_ETH_SRC[], \\\n output:NXM_OF_IN_PORT[]), resubmit(,1)\"\n * ovs-ofctl add-flow br0 \"table=1 priority=0 actions=flood\"\n *\n * You can then dump the MAC learning table with:\n *\n * ovs-ofctl dump-flows br0 table=1\n *\n * Usage Advice\n * ------------\n *\n * For best performance, segregate learned flows into a table that is not used\n * for any other flows except possibly for a lowest-priority \"catch-all\" flow\n * (a flow with no match criteria). If different learning actions specify\n * different match criteria, use different tables for the learned flows.\n *\n * The meaning of 'hard_timeout' and 'idle_timeout' can be counterintuitive.\n * These timeouts apply to the flow that is added, which means that a flow with\n * an idle timeout will expire when no traffic has been sent *to* the learned\n * address. This is not usually the intent in MAC learning; instead, we want\n * the MAC learn entry to expire when no traffic has been sent *from* the\n * learned address. 
Use a hard timeout for that.\n */\n '''", "def", "_nx_flow_mod_spec_formatter", "(", "x", ")", ":", "if", "NX_FLOWMODSPEC_SRC", "(", "x", "[", "'header'", "]", ")", ":", "srcdesc", "=", "'0x'", "+", "''", ".", "join", "(", "'%02x'", "%", "(", "c", ",", ")", "for", "c", "in", "bytearray", "(", "x", "[", "'value'", "]", ")", ")", "else", ":", "srcdesc", "=", "'%s[%d..%d]'", "%", "(", "x", "[", "'src'", "]", ",", "x", "[", "'src_ofs'", "]", ",", "x", "[", "'src_ofs'", "]", "+", "NX_FLOWMODSPEC_NBITS", "(", "x", "[", "'header'", "]", ")", "-", "1", ")", "dstv", "=", "NX_FLOWMODSPEC_DST", "(", "x", "[", "'header'", "]", ")", "if", "dstv", "!=", "NX_LEARN_DST_OUTPUT", ":", "dstdesc", "=", "'%s[%d..%d]'", "%", "(", "x", "[", "'dst'", "]", ",", "x", "[", "'dst_ofs'", "]", ",", "x", "[", "'dst_ofs'", "]", "+", "NX_FLOWMODSPEC_NBITS", "(", "x", "[", "'header'", "]", ")", "-", "1", ")", "if", "dstv", "==", "NX_LEARN_DST_MATCH", ":", "x", "[", "'_desc'", "]", "=", "'%s=%s'", "%", "(", "dstdesc", ",", "srcdesc", ")", "elif", "dstv", "==", "NX_LEARN_DST_LOAD", ":", "x", "[", "'_desc'", "]", "=", "'load:%s->%s'", "%", "(", "srcdesc", ",", "dstdesc", ")", "elif", "NX_FLOWMODSPEC_SRC", "(", "x", "[", "'header'", "]", ")", ":", "x", "[", "'_desc'", "]", "=", "'output:%s'", "%", "nxm_port_no_raw", ".", "formatter", "(", "common", ".", "create_binary", "(", "x", "[", "'value'", "]", ",", "2", ")", ")", "else", ":", "x", "[", "'_desc'", "]", "=", "'output:%s'", "%", "(", "srcdesc", ",", ")", "x", "[", "'header'", "]", "=", "nx_flow_mod_spec_header", ".", "formatter", "(", "x", "[", "'header'", "]", ")", "return", "x", "nx_flow_mod_spec", "=", "nstruct", "(", "(", "uint16", ",", "'header'", ")", ",", "(", "_nx_flow_mod_spec_src", ",", ")", ",", "(", "_nx_flow_mod_spec_dst", ",", ")", ",", "name", "=", "'nx_flow_mod_spec'", ",", "padding", "=", "1", ",", "formatter", "=", "_nx_flow_mod_spec_formatter", ",", "lastextra", "=", "False", "# if x.header == 0, size is 14, the padding should not be so large so it will not be successfully parsed", ")", "namespace", "[", "'nx_flow_mod_spec'", "]", "=", "nx_flow_mod_spec", "def", "create_nxfms_matchfield", "(", "src", ",", "dst", ",", "src_ofs", "=", "0", ",", "dst_ofs", "=", "0", ",", "n_bits", "=", "None", ")", ":", "if", "n_bits", "is", "None", ":", "n_bits", "=", "min", "(", "NXM_LENGTH", "(", "dst", ")", "*", "8", "-", "dst_ofs", ",", "NXM_LENGTH", "(", "src", ")", "*", "8", "-", "src_ofs", ")", "if", "n_bits", "<=", "0", ":", "raise", "ValueError", "(", "'Cannot create flow mod spec with 0 bits'", ")", "return", "nx_flow_mod_spec", ".", "parse", "(", "_create_header", "(", "NX_LEARN_SRC_FIELD", ",", "NX_LEARN_DST_MATCH", ",", "n_bits", ")", "+", "_create_field", "(", "src", ",", "src_ofs", ")", "+", "_create_field", "(", "dst", ",", "dst_ofs", ")", ")", "[", "0", "]", "namespace", "[", "'create_nxfms_matchfield'", "]", "=", "create_nxfms_matchfield", "def", "create_nxfms_matchvalue", "(", "dst", ",", "value", ",", "dst_ofs", ",", "n_bits", "=", "None", ")", ":", "if", "n_bits", "is", "None", ":", "n_bits", "=", "NXM_LENGTH", "(", "dst", ")", "*", "8", "-", "dst_ofs", "if", "n_bits", "<=", "0", ":", "raise", "ValueError", "(", "'Cannot create flow mod spec with 0 bits'", ")", "return", "nx_flow_mod_spec", ".", "parse", "(", "_create_header", "(", "NX_LEARN_SRC_IMMEDIATE", ",", "NX_LEARN_DST_MATCH", ",", "n_bits", ")", "+", "common", ".", "create_binary", "(", "value", ",", "(", "n_bits", "+", "15", ")", "//", "16", "*", "2", ")", "+", "_create_field", "(", "dst", 
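For illustration, example 2 above (learn(NXM_OF_VLAN_TCI[0..11], NXM_OF_ETH_DST[]=NXM_OF_ETH_SRC[], output:NXM_OF_IN_PORT[])) could be expressed with the create_nxfms_* helpers defined here. A sketch, assuming the NXM_* header constants are available from this module's namespace (only some of them appear in this excerpt):

# Build the three flow_mod_specs of example 2 above.
specs = [
    # Match on the same VLAN ID as the packet (low 12 bits of VLAN_TCI)
    create_nxfms_matchfield(NXM_OF_VLAN_TCI, NXM_OF_VLAN_TCI, 0, 0, 12),
    # Match Ethernet destination against the packet's Ethernet source
    create_nxfms_matchfield(NXM_OF_ETH_SRC, NXM_OF_ETH_DST),
    # Output to the packet's input port
    create_nxfms_outputfield(NXM_OF_IN_PORT),
]
# These would populate the 'specs' member of nx_action_learn, defined below.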
",", "dst_ofs", ")", ")", "[", "0", "]", "namespace", "[", "'create_nxfms_matchvalue'", "]", "=", "create_nxfms_matchvalue", "def", "create_nxfms_loadfield", "(", "src", ",", "dst", ",", "src_ofs", "=", "0", ",", "dst_ofs", "=", "0", ",", "n_bits", "=", "None", ")", ":", "if", "n_bits", "is", "None", ":", "n_bits", "=", "min", "(", "NXM_LENGTH", "(", "dst", ")", "*", "8", "-", "dst_ofs", ",", "NXM_LENGTH", "(", "src", ")", "*", "8", "-", "src_ofs", ")", "if", "n_bits", "<=", "0", ":", "raise", "ValueError", "(", "'Cannot create flow mod spec with 0 bits'", ")", "return", "nx_flow_mod_spec", ".", "parse", "(", "_create_header", "(", "NX_LEARN_SRC_FIELD", ",", "NX_LEARN_DST_LOAD", ",", "n_bits", ")", "+", "_create_field", "(", "src", ",", "src_ofs", ")", "+", "_create_field", "(", "dst", ",", "dst_ofs", ")", ")", "[", "0", "]", "namespace", "[", "'create_nxfms_loadfield'", "]", "=", "create_nxfms_loadfield", "def", "create_nxfms_loadvalue", "(", "dst", ",", "value", ",", "dst_ofs", ",", "n_bits", "=", "None", ")", ":", "if", "n_bits", "is", "None", ":", "n_bits", "=", "NXM_LENGTH", "(", "dst", ")", "*", "8", "-", "dst_ofs", "if", "n_bits", "<=", "0", ":", "raise", "ValueError", "(", "'Cannot create flow mod spec with 0 bits'", ")", "return", "nx_flow_mod_spec", ".", "parse", "(", "_create_header", "(", "NX_LEARN_SRC_IMMEDIATE", ",", "NX_LEARN_DST_LOAD", ",", "n_bits", ")", "+", "common", ".", "create_binary", "(", "value", ",", "(", "n_bits", "+", "15", ")", "//", "16", "*", "2", ")", "+", "_create_field", "(", "dst", ",", "dst_ofs", ")", ")", "[", "0", "]", "namespace", "[", "'create_nxfms_loadvalue'", "]", "=", "create_nxfms_loadvalue", "def", "create_nxfms_outputfield", "(", "src", ",", "src_ofs", "=", "0", ",", "n_bits", "=", "None", ")", ":", "if", "n_bits", "is", "None", ":", "n_bits", "=", "NXM_LENGTH", "(", "src", ")", "*", "8", "-", "src_ofs", "if", "n_bits", "<=", "0", ":", "raise", "ValueError", "(", "'Cannot create flow mod spec with 0 bits'", ")", "return", "nx_flow_mod_spec", ".", "parse", "(", "_create_header", "(", "NX_LEARN_SRC_FIELD", ",", "NX_LEARN_DST_OUTPUT", ",", "n_bits", ")", "+", "_create_field", "(", "src", ",", "src_ofs", ")", ")", "[", "0", "]", "namespace", "[", "'create_nxfms_outputfield'", "]", "=", "create_nxfms_outputfield", "def", "create_nxfms_outputvalue", "(", "dst", ",", "value", ")", ":", "return", "nx_flow_mod_spec", ".", "parse", "(", "_create_header", "(", "NX_LEARN_SRC_IMMEDIATE", ",", "NX_LEARN_DST_OUTPUT", ",", "16", ")", "+", "common", ".", "create_binary", "(", "value", ",", "2", ")", ")", "[", "0", "]", "namespace", "[", "'create_nxfms_outputvalue'", "]", "=", "create_nxfms_outputvalue", "ofp_flow_mod_flags", "=", "namespace", "[", "'ofp_flow_mod_flags'", "]", "nx_action_learn", "=", "nstruct", "(", "(", "uint16", ",", "'idle_timeout'", ")", ",", "# /* Idle time before discarding (seconds). */", "(", "uint16", ",", "'hard_timeout'", ")", ",", "# /* Max time before discarding (seconds). */", "(", "uint16", ",", "'priority'", ")", ",", "# /* Priority level of flow entry. */", "(", "uint64", ",", "'cookie'", ")", ",", "# /* Cookie for new flow. */", "(", "ofp_flow_mod_flags", ",", "'flags'", ")", ",", "# /* Either 0 or OFPFF_SEND_FLOW_REM. */", "(", "uint8", ",", "'table_id'", ")", ",", "# /* Table to insert flow entry. */", "(", "uint8", ",", ")", ",", "# /* Must be zero. */", "(", "uint16", ",", "'fin_idle_timeout'", ")", ",", "# /* Idle timeout after FIN, if nonzero. 
*/", "(", "uint16", ",", "'fin_hard_timeout'", ")", ",", "# /* Hard timeout after FIN, if nonzero. */", "(", "nx_flow_mod_spec", "[", "0", "]", ",", "'specs'", ")", ",", "base", "=", "nx_action", ",", "name", "=", "'nx_action_learn'", ",", "classifyby", "=", "(", "NXAST_LEARN", ",", ")", ",", "criteria", "=", "lambda", "x", ":", "getattr", "(", "x", ",", "action_subtype", ")", "==", "NXAST_LEARN", ",", "init", "=", "packvalue", "(", "NXAST_LEARN", ",", "action_subtype", ")", ",", ")", "namespace", "[", "'nx_action_learn'", "]", "=", "nx_action_learn", "'''\n /* Action structure for NXAST_FIN_TIMEOUT.\n *\n * This action changes the idle timeout or hard timeout, or both, of this\n * OpenFlow rule when the rule matches a TCP packet with the FIN or RST flag.\n * When such a packet is observed, the action reduces the rule's idle timeout\n * to 'fin_idle_timeout' and its hard timeout to 'fin_hard_timeout'. This\n * action has no effect on an existing timeout that is already shorter than the\n * one that the action specifies. A 'fin_idle_timeout' or 'fin_hard_timeout'\n * of zero has no effect on the respective timeout.\n *\n * 'fin_idle_timeout' and 'fin_hard_timeout' are measured in seconds.\n * 'fin_hard_timeout' specifies time since the flow's creation, not since the\n * receipt of the FIN or RST.\n *\n * This is useful for quickly discarding learned TCP flows that otherwise will\n * take a long time to expire.\n *\n * This action is intended for use with an OpenFlow rule that matches only a\n * single TCP flow. If the rule matches multiple TCP flows (e.g. it wildcards\n * all TCP traffic, or all TCP traffic to a particular port), then any FIN or\n * RST in any of those flows will cause the entire OpenFlow rule to expire\n * early, which is not normally desirable.\n */\n '''", "nx_action_fin_timeout", "=", "nstruct", "(", "(", "uint16", ",", "'fin_idle_timeout'", ")", ",", "# /* New idle timeout, if nonzero. */", "(", "uint16", ",", "'fin_hard_timeout'", ")", ",", "# /* New hard timeout, if nonzero. */", "(", "uint16", ",", ")", ",", "base", "=", "nx_action", ",", "name", "=", "'nx_action_fin_timeout'", ",", "criteria", "=", "lambda", "x", ":", "getattr", "(", "x", ",", "action_subtype", ")", "==", "NXAST_FIN_TIMEOUT", ",", "classifyby", "=", "(", "NXAST_FIN_TIMEOUT", ",", ")", ",", "init", "=", "packvalue", "(", "NXAST_FIN_TIMEOUT", ",", "action_subtype", ")", ")", "namespace", "[", "'nx_action_fin_timeout'", "]", "=", "nx_action_fin_timeout", "'''\n /* Action structure for NXAST_BUNDLE and NXAST_BUNDLE_LOAD.\n *\n * The bundle actions choose a slave from a supplied list of options.\n * NXAST_BUNDLE outputs to its selection. NXAST_BUNDLE_LOAD writes its\n * selection to a register.\n *\n * The list of possible slaves follows the nx_action_bundle structure. The size\n * of each slave is governed by its type as indicated by the 'slave_type'\n * parameter. The list of slaves should be padded at its end with zeros to make\n * the total length of the action a multiple of 8.\n *\n * Switches infer from the 'slave_type' parameter the size of each slave. All\n * implementations must support the NXM_OF_IN_PORT 'slave_type' which indicates\n * that the slaves are OpenFlow port numbers with NXM_LENGTH(NXM_OF_IN_PORT) ==\n * 2 byte width. Switches should reject actions which indicate unknown or\n * unsupported slave types.\n *\n * Switches use a strategy dictated by the 'algorithm' parameter to choose a\n * slave. 
If the switch does not support the specified 'algorithm' parameter,\n * it should reject the action.\n *\n * Several algorithms take into account liveness when selecting slaves. The\n * liveness of a slave is implementation defined (with one exception), but will\n * generally take into account things like its carrier status and the results\n * of any link monitoring protocols which happen to be running on it. In order\n * to give controllers a place-holder value, the OFPP_NONE port is always\n * considered live.\n *\n * Some slave selection strategies require the use of a hash function, in which\n * case the 'fields' and 'basis' parameters should be populated. The 'fields'\n * parameter (one of NX_HASH_FIELDS_*) designates which parts of the flow to\n * hash. Refer to the definition of \"enum nx_hash_fields\" for details. The\n * 'basis' parameter is used as a universal hash parameter. Different values\n * of 'basis' yield different hash results.\n *\n * The 'zero' parameter at the end of the action structure is reserved for\n * future use. Switches are required to reject actions which have nonzero\n * bytes in the 'zero' field.\n *\n * NXAST_BUNDLE actions should have 'ofs_nbits' and 'dst' zeroed. Switches\n * should reject actions which have nonzero bytes in either of these fields.\n *\n * NXAST_BUNDLE_LOAD stores the OpenFlow port number of the selected slave in\n * dst[ofs:ofs+n_bits]. The format and semantics of 'dst' and 'ofs_nbits' are\n * similar to those for the NXAST_REG_LOAD action. */\n '''", "nx_action_bundle", "=", "nstruct", "(", "# /* Slave choice algorithm to apply to hash value. */", "(", "nx_bd_algorithm", ",", "'algorithm'", ")", ",", "# /* One of NX_BD_ALG_*. */", "# /* What fields to hash and how. */", "(", "nx_hash_fields", ",", "'fields'", ")", ",", "# /* One of NX_HASH_FIELDS_*. */", "(", "uint16", ",", "'basis'", ")", ",", "# /* Universal hash parameter. */", "(", "nxm_header", ",", "'slave_type'", ")", ",", "# /* NXM_OF_IN_PORT. */", "(", "uint16", ",", "'n_slaves'", ")", ",", "# /* Number of slaves. */", "(", "uint16", ",", "'ofs_nbits'", ")", ",", "# /* (ofs << 6) | (n_bits - 1). */", "(", "nxm_header", ",", "'dst'", ")", ",", "# /* Destination. */", "(", "uint8", "[", "4", "]", ",", ")", ",", "# /* Reserved. Must be zero. 
*/", "name", "=", "'nx_action_bundle'", ",", "base", "=", "nx_action", ",", "criteria", "=", "lambda", "x", ":", "getattr", "(", "x", ",", "action_subtype", ")", "==", "NXAST_BUNDLE", "or", "getattr", "(", "x", ",", "action_subtype", ")", "==", "NXAST_BUNDLE_LOAD", ",", "classifyby", "=", "(", "NXAST_BUNDLE", ",", "NXAST_BUNDLE_LOAD", ")", ",", "init", "=", "packvalue", "(", "NXAST_BUNDLE", ",", "action_subtype", ")", ")", "namespace", "[", "'nx_action_bundle'", "]", "=", "nx_action_bundle", "def", "_nx_slave_ports_prepack", "(", "x", ")", ":", "x", ".", "n_slaves", "=", "len", "(", "x", ".", "bundles", ")", "_nx_slave_ports", "=", "nstruct", "(", "(", "nx_port_no", "[", "0", "]", ",", "'bundles'", ")", ",", "name", "=", "'_nx_slave_ports'", ",", "size", "=", "lambda", "x", ":", "x", ".", "n_slaves", "*", "2", ",", "prepack", "=", "_nx_slave_ports_prepack", ",", "padding", "=", "1", ")", "nx_action_bundle_port", "=", "nstruct", "(", "(", "_nx_slave_ports", ",", ")", ",", "base", "=", "nx_action_bundle", ",", "name", "=", "'nx_action_bundle_port'", ",", "criteria", "=", "lambda", "x", ":", "x", ".", "slave_type", "==", "NXM_OF_IN_PORT", ",", "init", "=", "packvalue", "(", "NXM_OF_IN_PORT", ",", "'slave_type'", ")", ",", "lastextra", "=", "False", ",", "formatter", "=", "_createdesc", "(", "lambda", "x", ":", "'bundle_load(%s,%d,%s,%s,%s[%d..%d],slaves:%r)'", "%", "(", "x", "[", "'fields'", "]", ",", "x", "[", "'basis'", "]", ",", "x", "[", "'algorithm'", "]", ",", "x", "[", "'slave_type'", "]", ",", "x", "[", "'dst'", "]", ",", "x", "[", "'ofs_nbits'", "]", ">>", "6", ",", "(", "x", "[", "'ofs_nbits'", "]", ">>", "6", ")", "+", "(", "x", "[", "'ofs_nbits'", "]", "&", "0x3f", ")", ",", "x", "[", "'bundles'", "]", ")", "if", "x", "[", "action_subtype", "]", "==", "'NXAST_BUNDLE_LOAD'", "else", "'bundle(%s,%d,%s,%s,slaves:%r)'", "%", "(", "x", "[", "'fields'", "]", ",", "x", "[", "'basis'", "]", ",", "x", "[", "'algorithm'", "]", ",", "x", "[", "'slave_type'", "]", ",", "x", "[", "'bundles'", "]", ")", ")", ")", "namespace", "[", "'nx_action_bundle_port'", "]", "=", "nx_action_bundle_port", "def", "_nx_slave_others_prepack", "(", "x", ")", ":", "x", ".", "n_slaves", "=", "len", "(", "x", ".", "bundlesraw", ")", "//", "NXM_LENGTH", "(", "x", ".", "slave_type", ")", "_nx_slave_others", "=", "nstruct", "(", "(", "raw", ",", "'bundlesraw'", ")", ",", "name", "=", "'_nx_slave_others'", ",", "size", "=", "lambda", "x", ":", "x", ".", "n_slaves", "*", "NXM_LENGTH", "(", "x", ".", "slave_type", ")", ",", "prepack", "=", "_nx_slave_others_prepack", ",", "padding", "=", "1", ")", "nx_action_bundle_others", "=", "nstruct", "(", "(", "_nx_slave_others", ",", ")", ",", "base", "=", "nx_action_bundle", ",", "name", "=", "'nx_action_bundle_others'", ",", "criteria", "=", "lambda", "x", ":", "x", ".", "slave_type", "!=", "NXM_OF_IN_PORT", ",", "lastextra", "=", "False", ",", "init", "=", "packvalue", "(", "NXM_OF_ETH_DST", ",", "'slave_type'", ")", ",", "formatter", "=", "_createdesc", "(", "lambda", "x", ":", "'bundle_load(%s,%d,%s,%s,%s[%d..%d],slaves:%r)'", "%", "(", "x", "[", "'fields'", "]", ",", "x", "[", "'basis'", "]", ",", "x", "[", "'algorithm'", "]", ",", "x", "[", "'slave_type'", "]", ",", "x", "[", "'dst'", "]", ",", "x", "[", "'ofs_nbits'", "]", ">>", "6", ",", "(", "x", "[", "'ofs_nbits'", "]", ">>", "6", ")", "+", "(", "x", "[", "'ofs_nbits'", "]", "&", "0x3f", ")", ",", "x", "[", "'bundleraw'", "]", ")", "if", "x", "[", "action_subtype", "]", "==", "'NXAST_BUNDLE_LOAD'", "else", 
"'bundle(%s,%d,%s,%s,slaves:%r)'", "%", "(", "x", "[", "'fields'", "]", ",", "x", "[", "'basis'", "]", ",", "x", "[", "'algorithm'", "]", ",", "x", "[", "'slave_type'", "]", ",", "x", "[", "'bundleraw'", "]", ")", ")", ")", "namespace", "[", "'nx_action_bundle_others'", "]", "=", "nx_action_bundle_others", "'''\n /* Action structure for NXAST_DEC_TTL_CNT_IDS.\n *\n * If the packet is not IPv4 or IPv6, does nothing. For IPv4 or IPv6, if the\n * TTL or hop limit is at least 2, decrements it by 1. Otherwise, if TTL or\n * hop limit is 0 or 1, sends a packet-in to the controllers with each of the\n * 'n_controllers' controller IDs specified in 'cnt_ids'.\n *\n * (This differs from NXAST_DEC_TTL in that for NXAST_DEC_TTL the packet-in is\n * sent only to controllers with id 0.)\n */\n '''", "def", "_nx_action_cnt_ids_ids_prepack", "(", "x", ")", ":", "x", ".", "n_controllers", "=", "len", "(", "x", ".", "cnt_ids", ")", "_nx_action_cnt_ids_ids", "=", "nstruct", "(", "(", "uint16", "[", "0", "]", ",", "'cnt_ids'", ")", ",", "name", "=", "'_nx_action_cnt_ids_ids'", ",", "size", "=", "lambda", "x", ":", "2", "*", "x", ".", "n_controllers", ",", "prepack", "=", "_nx_action_cnt_ids_ids_prepack", ")", "nx_action_cnt_ids", "=", "nstruct", "(", "(", "uint16", ",", "'n_controllers'", ")", ",", "# /* Number of controllers. */", "(", "uint8", "[", "4", "]", ",", ")", ",", "# /* Must be zero. */", "(", "_nx_action_cnt_ids_ids", ",", ")", ",", "base", "=", "nx_action", ",", "classifyby", "=", "(", "NXAST_DEC_TTL_CNT_IDS", ",", ")", ",", "criteria", "=", "lambda", "x", ":", "getattr", "(", "x", ",", "action_subtype", ")", "==", "NXAST_DEC_TTL_CNT_IDS", ",", "init", "=", "packvalue", "(", "NXAST_DEC_TTL_CNT_IDS", ",", "action_subtype", ")", ",", "lastextra", "=", "False", ",", "name", "=", "'nx_action_cnt_ids'", ")", "namespace", "[", "'nx_action_cnt_ids'", "]", "=", "nx_action_cnt_ids", "'''\n /* Action structure for NXAST_OUTPUT_REG.\n *\n * Outputs to the OpenFlow port number written to src[ofs:ofs+nbits].\n *\n * The format and semantics of 'src' and 'ofs_nbits' are similar to those for\n * the NXAST_REG_LOAD action.\n *\n * The acceptable nxm_header values for 'src' are the same as the acceptable\n * nxm_header values for the 'src' field of NXAST_REG_MOVE.\n *\n * The 'max_len' field indicates the number of bytes to send when the chosen\n * port is OFPP_CONTROLLER. Its semantics are equivalent to the 'max_len'\n * field of OFPAT_OUTPUT.\n *\n * The 'zero' field is required to be zeroed for forward compatibility. */\n '''", "nx_action_output_reg", "=", "nstruct", "(", "(", "uint16", ",", "'ofs_nbits'", ")", ",", "# /* (ofs << 6) | (n_bits - 1). */", "(", "nxm_header", ",", "'src'", ")", ",", "# /* Source. */", "(", "uint16", ",", "'max_len'", ")", ",", "# /* Max length to send to controller. */", "(", "uint8", "[", "6", "]", ",", ")", ",", "# /* Reserved, must be zero. 
*/", "base", "=", "nx_action", ",", "classifyby", "=", "(", "NXAST_OUTPUT_REG", ",", ")", ",", "criteria", "=", "lambda", "x", ":", "getattr", "(", "x", ",", "action_subtype", ")", "==", "NXAST_OUTPUT_REG", ",", "init", "=", "packvalue", "(", "NXAST_OUTPUT_REG", ",", "action_subtype", ")", ",", "name", "=", "'nx_action_output_reg'", ",", "formatter", "=", "_createdesc", "(", "lambda", "x", ":", "'output:%s[%d..%d]'", "%", "(", "x", "[", "'src'", "]", ",", "x", "[", "'ofs_nbits'", "]", ">>", "6", ",", "(", "x", "[", "'ofs_nbits'", "]", ">>", "6", ")", "+", "(", "x", "[", "'ofs_nbits'", "]", "&", "0x3f", ")", ")", ")", ")", "namespace", "[", "'nx_action_output_reg'", "]", "=", "nx_action_output_reg", "'''\n /* NXAST_EXIT\n *\n * Discontinues action processing.\n *\n * The NXAST_EXIT action causes the switch to immediately halt processing\n * actions for the flow. Any actions which have already been processed are\n * executed by the switch. However, any further actions, including those which\n * may be in different tables, or different levels of the NXAST_RESUBMIT\n * hierarchy, will be ignored.\n *\n * Uses the nx_action_header structure. */\n \n /* ## --------------------- ## */\n /* ## Requests and replies. ## */\n /* ## --------------------- ## */\n '''", "'''\n /* NXT_SET_FLOW_FORMAT request. */\n '''", "nx_set_flow_format", "=", "nstruct", "(", "(", "nx_flow_format", ",", "'format'", ")", ",", "# /* One of NXFF_*. */", "name", "=", "'nx_set_flow_format'", ",", "base", "=", "nicira_header", ",", "criteria", "=", "lambda", "x", ":", "getattr", "(", "x", ",", "msg_subtype", ")", "==", "NXT_SET_FLOW_FORMAT", ",", "classifyby", "=", "(", "NXT_SET_FLOW_FORMAT", ",", ")", ",", "init", "=", "packvalue", "(", "NXT_SET_FLOW_FORMAT", ",", "msg_subtype", ")", ")", "namespace", "[", "'nx_set_flow_format'", "]", "=", "nx_set_flow_format", "'''\n /* NXT_FLOW_MOD (analogous to OFPT_FLOW_MOD).\n *\n * It is possible to limit flow deletions and modifications to certain\n * cookies by using the NXM_NX_COOKIE(_W) matches. The \"cookie\" field\n * is used only to add or modify flow cookies.\n */\n '''", "ofp_flow_mod_command", "=", "namespace", "[", "'ofp_flow_mod_command'", "]", "nx_flow_mod", "=", "nstruct", "(", "(", "uint64", ",", "'cookie'", ")", ",", "# /* Opaque controller-issued identifier. */", "(", "ofp_flow_mod_command", ",", "'command'", ")", ",", "# /* OFPFC_* + possibly a table ID (see comment", "# * on struct nx_flow_mod_table_id). */", "(", "uint16", ",", "'idle_timeout'", ")", ",", "# /* Idle time before discarding (seconds). */", "(", "uint16", ",", "'hard_timeout'", ")", ",", "# /* Max time before discarding (seconds). */", "(", "uint16", ",", "'priority'", ")", ",", "# /* Priority level of flow entry. */", "(", "uint32", ",", "'buffer_id'", ")", ",", "# /* Buffered packet to apply to (or -1).", "# Not meaningful for OFPFC_DELETE*. */", "(", "nx_port_no", ",", "'out_port'", ")", ",", "# /* For OFPFC_DELETE* commands, require", "# matching entries to include this as an", "# output port. A value of OFPP_NONE", "# indicates no restriction. */", "(", "ofp_flow_mod_flags", ",", "'flags'", ")", ",", "# /* One of OFPFF_*. */", "(", "uint16", ",", "'match_len'", ")", ",", "# /* Size of nx_match. */", "(", "uint8", "[", "6", "]", ",", ")", ",", "# /* Align to 64-bits. 
*/", "(", "nx_matches", ",", ")", ",", "base", "=", "nicira_header", ",", "criteria", "=", "lambda", "x", ":", "getattr", "(", "x", ",", "msg_subtype", ")", "==", "NXT_FLOW_MOD", ",", "classifyby", "=", "(", "NXT_FLOW_MOD", ",", ")", ",", "init", "=", "packvalue", "(", "NXT_FLOW_MOD", ",", "msg_subtype", ")", ",", "name", "=", "'nx_flow_mod'", ")", "namespace", "[", "'nx_flow_mod'", "]", "=", "nx_flow_mod", "'''\n /* NXT_FLOW_REMOVED (analogous to OFPT_FLOW_REMOVED).\n *\n * 'table_id' is present only in Open vSwitch 1.11 and later. In earlier\n * versions of Open vSwitch, this is a padding byte that is always zeroed.\n * Therefore, a 'table_id' value of 0 indicates that the table ID is not known,\n * and other values may be interpreted as one more than the flow's former table\n * ID. */\n '''", "nx_flow_removed", "=", "nstruct", "(", "(", "uint64", ",", "'cookie'", ")", ",", "# /* Opaque controller-issued identifier. */", "(", "uint16", ",", "'priority'", ")", ",", "# /* Priority level of flow entry. */", "(", "ofp_flow_removed_reason", ",", "'reason'", ")", ",", "# /* One of OFPRR_*. */", "(", "uint8", ",", "'table_id'", ")", ",", "# /* Flow's former table ID, plus one. */", "(", "uint32", ",", "'duration_sec'", ")", ",", "# /* Time flow was alive in seconds. */", "(", "uint32", ",", "'duration_nsec'", ")", ",", "# /* Time flow was alive in nanoseconds beyond", "# duration_sec. */", "(", "uint16", ",", "'idle_timeout'", ")", ",", "# /* Idle timeout from original flow mod. */", "(", "uint16", ",", "'match_len'", ")", ",", "# /* Size of nx_match. */", "(", "uint64", ",", "'packet_count'", ")", ",", "(", "uint64", ",", "'byte_count'", ")", ",", "(", "nx_matches", ",", ")", ",", "base", "=", "nicira_header", ",", "criteria", "=", "lambda", "x", ":", "getattr", "(", "x", ",", "msg_subtype", ")", "==", "NXT_FLOW_REMOVED", ",", "classifyby", "=", "(", "NXT_FLOW_REMOVED", ",", ")", ",", "init", "=", "packvalue", "(", "NXT_FLOW_REMOVED", ",", "msg_subtype", ")", ",", "name", "=", "'nx_flow_removed'", ")", "namespace", "[", "'nx_flow_removed'", "]", "=", "nx_flow_removed", "'''\n /* Nicira vendor stats request of type NXST_FLOW (analogous to OFPST_FLOW\n * request).\n *\n * It is possible to limit matches to certain cookies by using the\n * NXM_NX_COOKIE and NXM_NX_COOKIE_W matches.\n */\n '''", "nx_flow_stats_request", "=", "nstruct", "(", "(", "nx_port_no", ",", "'out_port'", ")", ",", "#/* Require matching entries to include this", "# as an output port. A value of OFPP_NONE", "# indicates no restriction. */", "(", "uint16", ",", "'match_len'", ")", ",", "# /* Length of nx_match. */", "(", "uint8", ",", "'table_id'", ")", ",", "# /* ID of table to read (from ofp_table_stats)", "# or 0xff for all tables. */", "(", "uint8", "[", "3", "]", ",", ")", ",", "# /* Align to 64 bits. */", "(", "nx_matches", ",", ")", ",", "base", "=", "nx_stats_request", ",", "criteria", "=", "lambda", "x", ":", "getattr", "(", "x", ",", "stats_subtype", ")", "==", "NXST_FLOW", ",", "classifyby", "=", "(", "NXST_FLOW", ",", ")", ",", "init", "=", "packvalue", "(", "NXST_FLOW", ",", "stats_subtype", ")", ",", "name", "=", "'nx_flow_stats_request'", ")", "namespace", "[", "'nx_flow_stats_request'", "]", "=", "nx_flow_stats_request", "'''\n /* Body for Nicira vendor stats reply of type NXST_FLOW (analogous to\n * OFPST_FLOW reply).\n *\n * The values of 'idle_age' and 'hard_age' are only meaningful when talking to\n * a switch that implements the NXT_FLOW_AGE extension. 
Zero means that the\n * true value is unknown, perhaps because hardware does not track the value.\n * (Zero is also the value that one should ordinarily expect to see talking to\n * a switch that does not implement NXT_FLOW_AGE, since those switches zero the\n * padding bytes that these fields replaced.) A nonzero value X represents X-1\n * seconds. A value of 65535 represents 65534 or more seconds.\n *\n * 'idle_age' is the number of seconds that the flow has been idle, that is,\n * the number of seconds since a packet passed through the flow. 'hard_age' is\n * the number of seconds since the flow was last modified (e.g. OFPFC_MODIFY or\n * OFPFC_MODIFY_STRICT). (The 'duration_*' fields are the elapsed time since\n * the flow was added, regardless of subsequent modifications.)\n *\n * For a flow with an idle or hard timeout, 'idle_age' or 'hard_age',\n * respectively, will ordinarily be smaller than the timeout, but flow\n * expiration times are only approximate and so one must be prepared to\n * tolerate expirations that occur somewhat early or late.\n */\n '''", "ofp_action", "=", "namespace", "[", "'ofp_action'", "]", "nx_flow_stats", "=", "nstruct", "(", "(", "uint16", ",", "'length'", ")", ",", "# /* Length of this entry. */", "(", "uint8", ",", "'table_id'", ")", ",", "# /* ID of table flow came from. */", "(", "uint8", ",", ")", ",", "(", "uint32", ",", "'duration_sec'", ")", ",", "# /* Time flow has been alive in seconds. */", "(", "uint32", ",", "'duration_nsec'", ")", ",", "# /* Time flow has been alive in nanoseconds", "# beyond duration_sec. */", "(", "uint16", ",", "'priority'", ")", ",", "# /* Priority of the entry. */", "(", "uint16", ",", "'idle_timeout'", ")", ",", "# /* Number of seconds idle before expiration. */", "(", "uint16", ",", "'hard_timeout'", ")", ",", "# /* Number of seconds before expiration. */", "(", "uint16", ",", "'match_len'", ")", ",", "# /* Length of nx_match. */", "(", "uint16", ",", "'idle_age'", ")", ",", "# /* Seconds since last packet, plus one. */", "(", "uint16", ",", "'hard_age'", ")", ",", "# /* Seconds since last modification, plus one. */", "(", "uint64", ",", "'cookie'", ")", ",", "# /* Opaque controller-issued identifier. */", "(", "uint64", ",", "'packet_count'", ")", ",", "# /* Number of packets, UINT64_MAX if unknown. */", "(", "uint64", ",", "'byte_count'", ")", ",", "# /* Number of bytes, UINT64_MAX if unknown. 
*/", "#=======================================================================", "# /* Followed by:", "# * - Exactly match_len (possibly 0) bytes containing the nx_match, then", "# * - Exactly (match_len + 7)/8*8 - match_len (between 0 and 7) bytes of", "# * all-zero bytes, then", "# * - Actions to fill out the remainder 'length' bytes (always a multiple", "# * of 8).", "# */", "#=======================================================================", "(", "nx_matches", ",", ")", ",", "(", "ofp_action", "[", "0", "]", ",", "'actions'", ")", ",", "name", "=", "'nx_flow_stats'", ",", "size", "=", "sizefromlen", "(", "65536", ",", "'length'", ")", ",", "prepack", "=", "packsize", "(", "'length'", ")", ")", "namespace", "[", "'nx_flow_stats'", "]", "=", "nx_flow_stats", "nx_flow_stats_reply", "=", "nstruct", "(", "(", "nx_flow_stats", "[", "0", "]", ",", "'stats'", ")", ",", "base", "=", "nx_stats_reply", ",", "classifyby", "=", "(", "NXST_FLOW", ",", ")", ",", "criteria", "=", "lambda", "x", ":", "getattr", "(", "x", ",", "stats_subtype", ")", "==", "NXST_FLOW", ",", "init", "=", "packvalue", "(", "NXST_FLOW", ",", "stats_subtype", ")", ",", "name", "=", "'nx_flow_stats_reply'", ")", "namespace", "[", "'nx_flow_stats_reply'", "]", "=", "nx_flow_stats_reply", "'''\n /* Nicira vendor stats request of type NXST_AGGREGATE (analogous to\n * OFPST_AGGREGATE request).\n *\n * The reply format is identical to the reply format for OFPST_AGGREGATE,\n * except for the header. */\n '''", "nx_aggregate_stats_request", "=", "nstruct", "(", "(", "nx_port_no", ",", "'out_port'", ")", ",", "# /* Require matching entries to include this", "# as an output port. A value of OFPP_NONE", "# indicates no restriction. */", "(", "uint16", ",", "'match_len'", ")", ",", "# /* Length of nx_match. */", "(", "uint8", ",", "'table_id'", ")", ",", "# /* ID of table to read (from ofp_table_stats)", "# or 0xff for all tables. */", "(", "uint8", "[", "3", "]", ",", ")", ",", "# /* Align to 64 bits. */", "#=======================================================================", "# /* Followed by:", "# * - Exactly match_len (possibly 0) bytes containing the nx_match, then", "# * - Exactly (match_len + 7)/8*8 - match_len (between 0 and 7) bytes of", "# * all-zero bytes, which must also exactly fill out the length of the", "# * message.", "# */", "#=======================================================================", "(", "nx_matches", ",", ")", ",", "base", "=", "nx_stats_request", ",", "name", "=", "'nx_aggregate_stats_request'", ",", "criteria", "=", "lambda", "x", ":", "getattr", "(", "x", ",", "stats_subtype", ")", "==", "NXST_AGGREGATE", ",", "classifyby", "=", "(", "NXST_AGGREGATE", ",", ")", ",", "init", "=", "packvalue", "(", "NXST_AGGREGATE", ",", "stats_subtype", ")", ",", "lastextra", "=", "False", ")", "namespace", "[", "'nx_aggregate_stats_request'", "]", "=", "nx_aggregate_stats_request", "nx_aggregate_stats_reply", "=", "nstruct", "(", "(", "uint64", ",", "'packet_count'", ")", ",", "# /* Number of packets in flows. */", "(", "uint64", ",", "'byte_count'", ")", ",", "# /* Number of bytes in flows. */", "(", "uint32", ",", "'flow_count'", ")", ",", "# /* Number of flows. 
*/", "(", "uint8", "[", "4", "]", ",", ")", ",", "base", "=", "nx_stats_reply", ",", "name", "=", "'nx_aggregate_stats_reply'", ",", "criteria", "=", "lambda", "x", ":", "getattr", "(", "x", ",", "stats_subtype", ")", "==", "NXST_AGGREGATE", ",", "classifyby", "=", "(", "NXST_AGGREGATE", ",", ")", ",", "init", "=", "packvalue", "(", "NXST_AGGREGATE", ",", "stats_subtype", ")", ")", "namespace", "[", "'nx_aggregate_stats_reply'", "]", "=", "nx_aggregate_stats_reply", "'''\n /* NXT_SET_CONTROLLER_ID.\n *\n * Each OpenFlow controller connection has a 16-bit identifier that is\n * initially 0. This message changes the connection's ID to 'id'.\n *\n * Controller connection IDs need not be unique.\n *\n * The NXAST_CONTROLLER action is the only current user of controller\n * connection IDs. */\n '''", "nx_controller_id", "=", "nstruct", "(", "(", "uint8", "[", "6", "]", ",", ")", ",", "# /* Must be zero. */", "(", "uint16", ",", "'controller_id'", ")", ",", "# /* New controller connection ID. */", "base", "=", "nicira_header", ",", "name", "=", "'nx_controller_id'", ",", "criteria", "=", "lambda", "x", ":", "getattr", "(", "x", ",", "msg_subtype", ")", "==", "NXT_SET_CONTROLLER_ID", ",", "init", "=", "packvalue", "(", "NXT_SET_CONTROLLER_ID", ",", "msg_subtype", ")", ",", "classifyby", "=", "(", "NXT_SET_CONTROLLER_ID", ",", ")", ")", "namespace", "[", "'nx_controller_id'", "]", "=", "nx_controller_id", "'''\n /* Action structure for NXAST_CONTROLLER.\n *\n * This generalizes using OFPAT_OUTPUT to send a packet to OFPP_CONTROLLER. In\n * addition to the 'max_len' that OFPAT_OUTPUT supports, it also allows\n * specifying:\n *\n * - 'reason': The reason code to use in the ofp_packet_in or nx_packet_in.\n *\n * - 'controller_id': The ID of the controller connection to which the\n * ofp_packet_in should be sent. The ofp_packet_in or nx_packet_in is\n * sent only to controllers that have the specified controller connection\n * ID. See \"struct nx_controller_id\" for more information. */\n '''", "nx_action_controller", "=", "nstruct", "(", "(", "uint16", ",", "'max_len'", ")", ",", "# /* Maximum length to send to controller. */", "(", "uint16", ",", "'controller_id'", ")", ",", "# /* Controller ID to send packet-in. */", "(", "ofp_packet_in_reason", ",", "'reason'", ")", ",", "# /* enum ofp_packet_in_reason (OFPR_*). */", "(", "uint8", ",", ")", ",", "base", "=", "nx_action", ",", "name", "=", "'nx_action_controller'", ",", "criteria", "=", "lambda", "x", ":", "getattr", "(", "x", ",", "action_subtype", ")", "==", "NXAST_CONTROLLER", ",", "classifyby", "=", "(", "NXAST_CONTROLLER", ",", ")", ",", "init", "=", "packvalue", "(", "NXAST_CONTROLLER", ",", "action_subtype", ")", ")", "namespace", "[", "'nx_action_controller'", "]", "=", "nx_action_controller", "'''\n /* Flow Table Monitoring\n * =====================\n *\n * NXST_FLOW_MONITOR allows a controller to keep track of changes to OpenFlow\n * flow table(s) or subsets of them, with the following workflow:\n *\n * 1. The controller sends an NXST_FLOW_MONITOR request to begin monitoring\n * flows. The 'id' in the request must be unique among all monitors that\n * the controller has started and not yet canceled on this OpenFlow\n * connection.\n *\n * 2. The switch responds with an NXST_FLOW_MONITOR reply. 
If the request's\n * 'flags' included NXFMF_INITIAL, the reply includes all the flows that\n * matched the request at the time of the request (with event NXFME_ADDED).\n * If 'flags' did not include NXFMF_INITIAL, the reply is empty.\n *\n * The reply uses the xid of the request (as do all replies to OpenFlow\n * requests).\n *\n * 3. Whenever a change to a flow table entry matches some outstanding monitor\n * request's criteria and flags, the switch sends a notification to the\n * controller as an additional NXST_FLOW_MONITOR reply with xid 0.\n *\n * When multiple outstanding monitors match a single change, only a single\n * notification is sent. This merged notification includes the information\n * requested in any of the individual monitors. That is, if any of the\n * matching monitors requests actions (NXFMF_ACTIONS), the notification\n * includes actions, and if any of the monitors request full changes for the\n * controller's own changes (NXFMF_OWN), the controller's own changes will\n * be included in full.\n *\n * 4. The controller may cancel a monitor with NXT_FLOW_MONITOR_CANCEL. No\n * further notifications will be sent on the basis of the canceled monitor\n * afterward.\n *\n *\n * Buffer Management\n * =================\n *\n * OpenFlow messages for flow monitor notifications can overflow the buffer\n * space available to the switch, either temporarily (e.g. due to network\n * conditions slowing OpenFlow traffic) or more permanently (e.g. the sustained\n * rate of flow table change exceeds the network bandwidth between switch and\n * controller).\n *\n * When Open vSwitch's notification buffer space reaches a limiting threshold,\n * OVS reacts as follows:\n *\n * 1. OVS sends an NXT_FLOW_MONITOR_PAUSED message to the controller, following\n * all the already queued notifications. After it receives this message,\n * the controller knows that its view of the flow table, as represented by\n * flow monitor notifications, is incomplete.\n *\n * 2. As long as the notification buffer is not empty:\n *\n * - NXMFE_ADD and NXFME_MODIFIED notifications will not be sent.\n *\n * - NXFME_DELETED notifications will still be sent, but only for flows\n * that existed before OVS sent NXT_FLOW_MONITOR_PAUSED.\n *\n * - NXFME_ABBREV notifications will not be sent. They are treated as\n * the expanded version (and therefore only the NXFME_DELETED\n * components, if any, are sent).\n *\n * 3. When the notification buffer empties, OVS sends NXFME_ADD notifications\n * for flows added since the buffer reached its limit and NXFME_MODIFIED\n * notifications for flows that existed before the limit was reached and\n * changed after the limit was reached.\n *\n * 4. OVS sends an NXT_FLOW_MONITOR_RESUMED message to the controller. After\n * it receives this message, the controller knows that its view of the flow\n * table, as represented by flow monitor notifications, is again complete.\n *\n * This allows the maximum buffer space requirement for notifications to be\n * bounded by the limit plus the maximum number of supported flows.\n *\n *\n * \"Flow Removed\" messages\n * =======================\n *\n * The flow monitor mechanism is independent of OFPT_FLOW_REMOVED and\n * NXT_FLOW_REMOVED. Flow monitor updates for deletion are sent if\n * NXFMF_DELETE is set on a monitor, regardless of whether the\n * OFPFF_SEND_FLOW_REM flag was set when the flow was added. 
*/\n \n /* NXST_FLOW_MONITOR request.\n *\n * The NXST_FLOW_MONITOR request's body consists of an array of zero or more\n * instances of this structure. The request arranges to monitor the flows\n * that match the specified criteria, which are interpreted in the same way as\n * for NXST_FLOW.\n *\n * 'id' identifies a particular monitor for the purpose of allowing it to be\n * canceled later with NXT_FLOW_MONITOR_CANCEL. 'id' must be unique among\n * existing monitors that have not already been canceled.\n *\n * The reply includes the initial flow matches for monitors that have the\n * NXFMF_INITIAL flag set. No single flow will be included in the reply more\n * than once, even if more than one requested monitor matches that flow. The\n * reply will be empty if none of the monitors has NXFMF_INITIAL set or if none\n * of the monitors initially matches any flows.\n *\n * For NXFMF_ADD, an event will be reported if 'out_port' matches against the\n * actions of the flow being added or, for a flow that is replacing an existing\n * flow, if 'out_port' matches against the actions of the flow being replaced.\n * For NXFMF_DELETE, 'out_port' matches against the actions of a flow being\n * deleted. For NXFMF_MODIFY, an event will be reported if 'out_port' matches\n * either the old or the new actions. */\n '''", "ofp_table", "=", "namespace", "[", "'ofp_table'", "]", "nx_flow_monitor_request", "=", "nstruct", "(", "(", "uint32", ",", "'id'", ")", ",", "# /* Controller-assigned ID for this monitor. */", "(", "nx_flow_monitor_flags", ",", "'flags'", ")", ",", "# /* NXFMF_*. */", "(", "nx_port_no", ",", "'out_port'", ")", ",", "# /* Required output port, if not OFPP_NONE. */", "(", "uint16", ",", "'match_len'", ")", ",", "# /* Length of nx_match. */", "(", "ofp_table", ",", "'table_id'", ")", ",", "# /* One table's ID or 0xff for all tables. */", "(", "uint8", "[", "5", "]", ",", ")", ",", "# /* Align to 64 bits (must be zero). */", "(", "nx_matches", ",", ")", ",", "name", "=", "'nx_flow_monitor_request'", ",", "base", "=", "nx_stats_request", ",", "criteria", "=", "lambda", "x", ":", "getattr", "(", "x", ",", "stats_subtype", ")", "==", "NXST_FLOW_MONITOR", ",", "init", "=", "packvalue", "(", "NXST_FLOW_MONITOR", ",", "stats_subtype", ")", ",", "classifyby", "=", "(", "NXST_FLOW_MONITOR", ",", ")", ")", "namespace", "[", "'nx_flow_monitor_request'", "]", "=", "nx_flow_monitor_request", "'''\n /* NXST_FLOW_MONITOR reply header.\n *\n * The body of an NXST_FLOW_MONITOR reply is an array of variable-length\n * structures, each of which begins with this header. The 'length' member may\n * be used to traverse the array, and the 'event' member may be used to\n * determine the particular structure.\n *\n * Every instance is a multiple of 8 bytes long. */\n '''", "nx_flow_update", "=", "nstruct", "(", "(", "uint16", ",", "'length'", ")", ",", "#/* Length of this entry. */", "(", "nx_flow_update_event", ",", "'event'", ")", ",", "# /* One of NXFME_*. */", "name", "=", "'nx_flow_update'", ",", "size", "=", "sizefromlen", "(", "65536", ",", "'length'", ")", ",", "prepack", "=", "packsize", "(", "'length'", ")", ")", "namespace", "[", "'nx_flow_update'", "]", "=", "nx_flow_update", "'''\n /* NXST_FLOW_MONITOR reply for NXFME_ADDED, NXFME_DELETED, and\n * NXFME_MODIFIED. */\n '''", "nx_flow_update_full", "=", "nstruct", "(", "(", "ofp_flow_removed_reason", ",", "'reason'", ")", ",", "# /* OFPRR_* for NXFME_DELETED, else zero. */", "(", "uint16", ",", "'priority'", ")", ",", "# /* Priority of the entry. 
*/", "(", "uint16", ",", "'idle_timeout'", ")", ",", "# /* Number of seconds idle before expiration. */", "(", "uint16", ",", "'hard_timeout'", ")", ",", "# /* Number of seconds before expiration. */", "(", "uint16", ",", "'match_len'", ")", ",", "# /* Length of nx_match. */", "(", "uint8", ",", "'table_id'", ")", ",", "# /* ID of flow's table. */", "(", "uint8", ",", ")", ",", "# /* Reserved, currently zeroed. */", "(", "uint64", ",", "'cookie'", ")", ",", "# /* Opaque controller-issued identifier. */", "#=======================================================================", "# /* Followed by:", "# * - Exactly match_len (possibly 0) bytes containing the nx_match, then", "# * - Exactly (match_len + 7)/8*8 - match_len (between 0 and 7) bytes of", "# * all-zero bytes, then", "# * - Actions to fill out the remainder 'length' bytes (always a multiple", "# * of 8). If NXFMF_ACTIONS was not specified, or 'event' is", "# * NXFME_DELETED, no actions are included.", "# */", "#=======================================================================", "(", "nx_matches", ",", ")", ",", "(", "ofp_action", "[", "0", "]", ",", "'actions'", ")", ",", "name", "=", "'nx_flow_update_full'", ",", "base", "=", "nx_flow_update", ",", "criteria", "=", "lambda", "x", ":", "x", ".", "event", "in", "(", "NXFME_ADDED", ",", "NXFME_DELETED", ",", "NXFME_MODIFIED", ")", ",", "init", "=", "packvalue", "(", "NXFME_ADDED", ",", "'event'", ")", ")", "namespace", "[", "'nx_flow_update_full'", "]", "=", "nx_flow_update_full", "'''\n /* NXST_FLOW_MONITOR reply for NXFME_ABBREV.\n *\n * When the controller does not specify NXFMF_OWN in a monitor request, any\n * flow tables changes due to the controller's own requests (on the same\n * OpenFlow channel) will be abbreviated, when possible, to this form, which\n * simply specifies the 'xid' of the OpenFlow request (e.g. an OFPT_FLOW_MOD or\n * NXT_FLOW_MOD) that caused the change.\n *\n * Some changes cannot be abbreviated and will be sent in full:\n *\n * - Changes that only partially succeed. This can happen if, for example,\n * a flow_mod with type OFPFC_MODIFY affects multiple flows, but only some\n * of those modifications succeed (e.g. due to hardware limitations).\n *\n * This cannot occur with the current implementation of the Open vSwitch\n * software datapath. It could happen with other datapath implementations.\n *\n * - Changes that race with conflicting changes made by other controllers or\n * other flow_mods (not separated by barriers) by the same controller.\n *\n * This cannot occur with the current Open vSwitch implementation\n * (regardless of datapath) because Open vSwitch internally serializes\n * potentially conflicting changes.\n *\n * A flow_mod that does not change the flow table will not trigger any\n * notification, even an abbreviated one. For example, a \"modify\" or \"delete\"\n * flow_mod that does not match any flows will not trigger a notification.\n * Whether an \"add\" or \"modify\" that specifies all the same parameters that a\n * flow already has triggers a notification is unspecified and subject to\n * change in future versions of Open vSwitch.\n *\n * OVS will always send the notifications for a given flow table change before\n * the reply to a OFPT_BARRIER_REQUEST request that follows the flow table\n * change. Thus, if the controller does not receive an abbreviated (or\n * unabbreviated) notification for a flow_mod before the next\n * OFPT_BARRIER_REPLY, it will never receive one. 
*/\n '''", "nx_flow_update_abbrev", "=", "nstruct", "(", "(", "uint32", ",", "'xid'", ")", ",", "# /* Controller-specified xid from flow_mod. */", "name", "=", "'nx_flow_update_abbrev'", ",", "base", "=", "nx_flow_update", ",", "criteria", "=", "lambda", "x", ":", "x", ".", "event", "==", "NXFME_ABBREV", ",", "init", "=", "packvalue", "(", "NXFME_ABBREV", ",", "'event'", ")", ")", "namespace", "[", "'nx_flow_update_abbrev'", "]", "=", "nx_flow_update_abbrev", "nx_flow_monitor_reply", "=", "nstruct", "(", "(", "nx_flow_update", "[", "0", "]", ",", "'stats'", ")", ",", "base", "=", "nx_stats_reply", ",", "classifyby", "=", "(", "NXST_FLOW_MONITOR", ",", ")", ",", "name", "=", "'nx_flow_monitor_reply'", ",", "criteria", "=", "lambda", "x", ":", "getattr", "(", "x", ",", "stats_subtype", ")", "==", "NXST_FLOW_MONITOR", ",", "init", "=", "packvalue", "(", "NXST_FLOW_MONITOR", ",", "stats_subtype", ")", ")", "namespace", "[", "'nx_flow_monitor_reply'", "]", "=", "nx_flow_monitor_reply", "'''\n /* NXT_FLOW_MONITOR_CANCEL.\n *\n * Used by a controller to cancel an outstanding monitor. */\n '''", "nx_flow_monitor_cancel", "=", "nstruct", "(", "(", "uint32", ",", "'id'", ")", ",", "# /* 'id' from nx_flow_monitor_request. */", "name", "=", "'nx_flow_monitor_cancel'", ",", "base", "=", "nicira_header", ",", "classifyby", "=", "(", "NXT_FLOW_MONITOR_CANCEL", ",", ")", ",", "criteria", "=", "lambda", "x", ":", "getattr", "(", "x", ",", "msg_subtype", ")", "==", "NXT_FLOW_MONITOR_CANCEL", ",", "init", "=", "packvalue", "(", "NXT_FLOW_MONITOR_CANCEL", ",", "msg_subtype", ")", ")", "namespace", "[", "'nx_flow_monitor_cancel'", "]", "=", "nx_flow_monitor_cancel", "'''\n /* Action structure for NXAST_WRITE_METADATA.\n *\n * Modifies the 'mask' bits of the metadata value. */\n '''", "nx_action_write_metadata", "=", "nstruct", "(", "(", "uint8", "[", "6", "]", ",", ")", ",", "# /* Must be zero. */", "(", "uint64", ",", "'metadata'", ")", ",", "# /* Metadata register. */", "(", "uint64", ",", "'mask'", ")", ",", "# /* Metadata mask. */", "base", "=", "nx_action", ",", "classifyby", "=", "(", "NXAST_WRITE_METADATA", ",", ")", ",", "criteria", "=", "lambda", "x", ":", "getattr", "(", "x", ",", "action_subtype", ")", "==", "NXAST_WRITE_METADATA", ",", "init", "=", "packvalue", "(", "NXAST_WRITE_METADATA", ",", "action_subtype", ")", ",", "name", "=", "'nx_action_write_metadata'", ")", "namespace", "[", "'nx_action_write_metadata'", "]", "=", "nx_action_write_metadata", "'''\n /* Action structure for NXAST_PUSH_MPLS. */\n '''", "nx_action_push_mpls", "=", "nstruct", "(", "(", "ethertype", ",", "'ethertype'", ")", ",", "# /* Ethertype */", "(", "uint8", "[", "4", "]", ",", ")", ",", "base", "=", "nx_action", ",", "classifyby", "=", "(", "NXAST_PUSH_MPLS", ",", ")", ",", "criteria", "=", "lambda", "x", ":", "getattr", "(", "x", ",", "action_subtype", ")", "==", "NXAST_PUSH_MPLS", ",", "init", "=", "packvalue", "(", "NXAST_PUSH_MPLS", ",", "action_subtype", ")", ",", "name", "=", "'nx_action_push_mpls'", ")", "namespace", "[", "'nx_action_push_mpls'", "]", "=", "nx_action_push_mpls", "'''\n /* Action structure for NXAST_POP_MPLS. 
*/\n '''", "nx_action_pop_mpls", "=", "nstruct", "(", "(", "ethertype", ",", "'ethertype'", ")", ",", "# /* Ethertype */", "(", "uint8", "[", "4", "]", ",", ")", ",", "base", "=", "nx_action", ",", "classifyby", "=", "(", "NXAST_POP_MPLS", ",", ")", ",", "criteria", "=", "lambda", "x", ":", "getattr", "(", "x", ",", "action_subtype", ")", "==", "NXAST_POP_MPLS", ",", "init", "=", "packvalue", "(", "NXAST_POP_MPLS", ",", "action_subtype", ")", ",", "name", "=", "'nx_action_pop_mpls'", ")", "namespace", "[", "'nx_action_pop_mpls'", "]", "=", "nx_action_pop_mpls", "'''\n /* Action structure for NXAST_SET_MPLS_LABEL. */\n '''", "nx_action_mpls_label", "=", "nstruct", "(", "(", "uint8", "[", "2", "]", ",", ")", ",", "# /* Must be zero. */", "(", "uint32", ",", "'label'", ")", ",", "# /* LABEL */", "base", "=", "nx_action", ",", "classifyby", "=", "(", "NXAST_SET_MPLS_LABEL", ",", ")", ",", "criteria", "=", "lambda", "x", ":", "getattr", "(", "x", ",", "action_subtype", ")", "==", "NXAST_SET_MPLS_LABEL", ",", "init", "=", "packvalue", "(", "NXAST_SET_MPLS_LABEL", ",", "action_subtype", ")", ",", "name", "=", "'nx_action_mpls_label'", ")", "namespace", "[", "'nx_action_mpls_label'", "]", "=", "nx_action_mpls_label", "'''\n /* Action structure for NXAST_SET_MPLS_TC. */\n '''", "nx_action_mpls_tc", "=", "nstruct", "(", "(", "uint8", ",", "'tc'", ")", ",", "# /* TC */", "(", "uint8", "[", "5", "]", ",", ")", ",", "base", "=", "nx_action", ",", "classifyby", "=", "(", "NXAST_SET_MPLS_TC", ",", ")", ",", "criteria", "=", "lambda", "x", ":", "getattr", "(", "x", ",", "action_subtype", ")", "==", "NXAST_SET_MPLS_TC", ",", "init", "=", "packvalue", "(", "NXAST_SET_MPLS_TC", ",", "action_subtype", ")", ",", "name", "=", "'nx_action_mpls_tc'", ")", "namespace", "[", "'nx_action_mpls_tc'", "]", "=", "nx_action_mpls_tc", "'''\n /* Action structure for NXAST_SET_MPLS_TTL. */\n '''", "nx_action_mpls_ttl", "=", "nstruct", "(", "(", "uint8", ",", "'ttl'", ")", ",", "# /* TTL */", "(", "uint8", "[", "5", "]", ",", ")", ",", "base", "=", "nx_action", ",", "classifyby", "=", "(", "NXAST_SET_MPLS_TTL", ",", ")", ",", "criteria", "=", "lambda", "x", ":", "getattr", "(", "x", ",", "action_subtype", ")", "==", "NXAST_SET_MPLS_TTL", ",", "init", "=", "packvalue", "(", "NXAST_SET_MPLS_TTL", ",", "action_subtype", ")", ",", "name", "=", "'nx_action_mpls_ttl'", ")", "namespace", "[", "'nx_action_mpls_ttl'", "]", "=", "nx_action_mpls_ttl", "'''\n /* Action structure for NXAST_SAMPLE.\n *\n * Samples matching packets with the given probability and sends them\n * each to the set of collectors identified with the given ID. The\n * probability is expressed as a number of packets to be sampled out\n * of USHRT_MAX packets, and must be >0.\n *\n * When sending packet samples to IPFIX collectors, the IPFIX flow\n * record sent for each sampled packet is associated with the given\n * observation domain ID and observation point ID. Each IPFIX flow\n * record contain the sampled packet's headers when executing this\n * rule. If a sampled packet's headers are modified by previous\n * actions in the flow, those modified headers are sent. */\n '''", "nx_action_sample", "=", "nstruct", "(", "(", "uint16", ",", "'probability'", ")", ",", "# /* Fraction of packets to sample. */", "(", "uint32", ",", "'collector_set_id'", ")", ",", "# /* ID of collector set in OVSDB. */", "(", "uint32", ",", "'obs_domain_id'", ")", ",", "# /* ID of sampling observation domain. 
*/", "(", "uint32", ",", "'obs_point_id'", ")", ",", "# /* ID of sampling observation point. */", "base", "=", "nx_action", ",", "classifyby", "=", "(", "NXAST_SAMPLE", ",", ")", ",", "criteria", "=", "lambda", "x", ":", "getattr", "(", "x", ",", "action_subtype", ")", "==", "NXAST_SAMPLE", ",", "init", "=", "packvalue", "(", "NXAST_SAMPLE", ",", "action_subtype", ")", ",", "name", "=", "'nx_action_sample'", ")", "namespace", "[", "'nx_action_sample'", "]", "=", "nx_action_sample" ]
/* This command enables or disables an Open vSwitch extension that allows a
 * controller to specify the OpenFlow table to which a flow should be added,
 * instead of having the switch decide which table is most appropriate as
 * required by OpenFlow 1.0. Because NXM was designed as an extension to
 * OpenFlow 1.0, the extension applies equally to ofp10_flow_mod and
 * nx_flow_mod. By default, the extension is disabled.
 *
 * When this feature is enabled, Open vSwitch treats struct ofp10_flow_mod's
 * and struct nx_flow_mod's 16-bit 'command' member as two separate fields.
 * The upper 8 bits are used as the table ID, the lower 8 bits specify the
 * command as usual. A table ID of 0xff is treated like a wildcarded table ID.
 *
 * The specific treatment of the table ID depends on the type of flow mod:
 *
 * - OFPFC_ADD: Given a specific table ID, the flow is always placed in that
 *   table. If an identical flow already exists in that table only, then it
 *   is replaced. If the flow cannot be placed in the specified table,
 *   either because the table is full or because the table cannot support
 *   flows of the given type, the switch replies with an OFPFMFC_TABLE_FULL
 *   error. (A controller can distinguish these cases by comparing the
 *   current and maximum number of entries reported in ofp_table_stats.)
 *
 *   If the table ID is wildcarded, the switch picks an appropriate table
 *   itself. If an identical flow already exists in the selected flow table,
 *   then it is replaced. The choice of table might depend on the flows
 *   that are already in the switch; for example, if one table fills up then
 *   the switch might fall back to another one.
 *
 * - OFPFC_MODIFY, OFPFC_DELETE: Given a specific table ID, only flows
 *   within that table are matched and modified or deleted. If the table ID
 *   is wildcarded, flows within any table may be matched and modified or
 *   deleted.
 *
 * - OFPFC_MODIFY_STRICT, OFPFC_DELETE_STRICT: Given a specific table ID,
 *   only a flow within that table may be matched and modified or deleted.
 *   If the table ID is wildcarded and exactly one flow within any table
 *   matches, then it is modified or deleted; if flows in more than one
 *   table match, then none is modified or deleted. */
[ "/", "*", "This", "command", "enables", "or", "disables", "an", "Open", "vSwitch", "extension", "that", "allows", "a", "*", "controller", "to", "specify", "the", "OpenFlow", "table", "to", "which", "a", "flow", "should", "be", "added", "*", "instead", "of", "having", "the", "switch", "decide", "which", "table", "is", "most", "appropriate", "as", "*", "required", "by", "OpenFlow", "1", ".", "0", ".", "Because", "NXM", "was", "designed", "as", "an", "extension", "to", "*", "OpenFlow", "1", ".", "0", "the", "extension", "applies", "equally", "to", "ofp10_flow_mod", "and", "*", "nx_flow_mod", ".", "By", "default", "the", "extension", "is", "disabled", ".", "*", "*", "When", "this", "feature", "is", "enabled", "Open", "vSwitch", "treats", "struct", "ofp10_flow_mod", "s", "*", "and", "struct", "nx_flow_mod", "s", "16", "-", "bit", "command", "member", "as", "two", "separate", "fields", ".", "*", "The", "upper", "8", "bits", "are", "used", "as", "the", "table", "ID", "the", "lower", "8", "bits", "specify", "the", "*", "command", "as", "usual", ".", "A", "table", "ID", "of", "0xff", "is", "treated", "like", "a", "wildcarded", "table", "ID", ".", "*", "*", "The", "specific", "treatment", "of", "the", "table", "ID", "depends", "on", "the", "type", "of", "flow", "mod", ":", "*", "*", "-", "OFPFC_ADD", ":", "Given", "a", "specific", "table", "ID", "the", "flow", "is", "always", "placed", "in", "that", "*", "table", ".", "If", "an", "identical", "flow", "already", "exists", "in", "that", "table", "only", "then", "it", "*", "is", "replaced", ".", "If", "the", "flow", "cannot", "be", "placed", "in", "the", "specified", "table", "*", "either", "because", "the", "table", "is", "full", "or", "because", "the", "table", "cannot", "support", "*", "flows", "of", "the", "given", "type", "the", "switch", "replies", "with", "an", "OFPFMFC_TABLE_FULL", "*", "error", ".", "(", "A", "controller", "can", "distinguish", "these", "cases", "by", "comparing", "the", "*", "current", "and", "maximum", "number", "of", "entries", "reported", "in", "ofp_table_stats", ".", ")", "*", "*", "If", "the", "table", "ID", "is", "wildcarded", "the", "switch", "picks", "an", "appropriate", "table", "*", "itself", ".", "If", "an", "identical", "flow", "already", "exist", "in", "the", "selected", "flow", "table", "*", "then", "it", "is", "replaced", ".", "The", "choice", "of", "table", "might", "depend", "on", "the", "flows", "*", "that", "are", "already", "in", "the", "switch", ";", "for", "example", "if", "one", "table", "fills", "up", "then", "*", "the", "switch", "might", "fall", "back", "to", "another", "one", ".", "*", "*", "-", "OFPFC_MODIFY", "OFPFC_DELETE", ":", "Given", "a", "specific", "table", "ID", "only", "flows", "*", "within", "that", "table", "are", "matched", "and", "modified", "or", "deleted", ".", "If", "the", "table", "ID", "*", "is", "wildcarded", "flows", "within", "any", "table", "may", "be", "matched", "and", "modified", "or", "*", "deleted", ".", "*", "*", "-", "OFPFC_MODIFY_STRICT", "OFPFC_DELETE_STRICT", ":", "Given", "a", "specific", "table", "ID", "*", "only", "a", "flow", "within", "that", "table", "may", "be", "matched", "and", "modified", "or", "deleted", ".", "*", "If", "the", "table", "ID", "is", "wildcarded", "and", "exactly", "one", "flow", "within", "any", "table", "*", "matches", "then", "it", "is", "modified", "or", "deleted", ";", "if", "flows", "in", "more", "than", "one", "*", "table", "match", "then", "none", "is", "modified", "or", "deleted", ".", "*", "/" ]
python
train
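A minimal sketch of the command/table-ID packing described above; the helper names are hypothetical, and OFPFC_ADD = 0 is the standard OpenFlow 1.0 value:

def pack_flow_mod_command(command, table_id=0xff):
    """Pack an OFPFC_* command (low 8 bits) and a table ID (high 8 bits)
    into the 16-bit 'command' field as the extension interprets it;
    a table_id of 0xff acts as the wildcard."""
    return ((table_id & 0xff) << 8) | (command & 0xff)

def unpack_flow_mod_command(packed):
    """Split the 16-bit field back into (table_id, command)."""
    return (packed >> 8) & 0xff, packed & 0xff

OFPFC_ADD = 0  # standard OpenFlow 1.0 command value
assert unpack_flow_mod_command(pack_flow_mod_command(OFPFC_ADD, table_id=2)) == (2, OFPFC_ADD)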
zvolsky/wfpdf
wfpdf.py
https://github.com/zvolsky/wfpdf/blob/d3625a6420ae1fb6722d81cddf0636af496c42bb/wfpdf.py#L129-L135
def down(self, h, cr=True):
        """Moves the current vertical position h mm down.

        If cr is True, first returns to the left margin.
        """
        if cr:
            self.oPdf.ln(h=0)
        self.oPdf.set_y(self.oPdf.get_y() + h)
[ "def", "down", "(", "self", ",", "h", ",", "cr", "=", "True", ")", ":", "if", "cr", ":", "self", ".", "oPdf", ".", "ln", "(", "h", "=", "0", ")", "self", ".", "oPdf", ".", "set_y", "(", "self", ".", "oPdf", ".", "get_y", "(", ")", "+", "h", ")" ]
Moves the current vertical position h mm down.
        If cr is True, first returns to the left margin.
[ "moves", "current", "vertical", "position", "h", "mm", "down", "cr", "True", "will", "navigate", "to", "the", "left", "margin" ]
python
train
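For context, the same vertical-move idiom written against a bare FPDF object (wfpdf wraps one as self.oPdf); a sketch assuming the fpdf package, with illustrative text and sizes:

from fpdf import FPDF

pdf = FPDF(unit='mm')
pdf.add_page()
pdf.set_font('Helvetica', size=12)
pdf.cell(40, 10, 'first line')
pdf.ln(h=0)                    # carriage return: x goes back to the left margin
pdf.set_y(pdf.get_y() + 15)    # then move 15 mm down, as down(15) would
pdf.cell(40, 10, 'second line')
pdf.output('demo.pdf')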
quantmind/agile-toolkit
agiletoolkit/api/components.py
https://github.com/quantmind/agile-toolkit/blob/96028e36a842c57b171907c20583a60d1045fec1/agiletoolkit/api/components.py#L54-L62
def update(self, id, data): """Update a component """ id = self.as_id(id) response = self.http.patch( '%s/%s' % (self, id), json=data, auth=self.auth ) response.raise_for_status() return response.json()
[ "def", "update", "(", "self", ",", "id", ",", "data", ")", ":", "id", "=", "self", ".", "as_id", "(", "id", ")", "response", "=", "self", ".", "http", ".", "patch", "(", "'%s/%s'", "%", "(", "self", ",", "id", ")", ",", "json", "=", "data", ",", "auth", "=", "self", ".", "auth", ")", "response", ".", "raise_for_status", "(", ")", "return", "response", ".", "json", "(", ")" ]
Update a component
[ "Update", "a", "component" ]
python
train
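The same REST idiom in isolation, using the requests library directly (the component class above builds '%s/%s' from its own string form; base_url and the sample call here are placeholders):

import requests

def update_component(base_url, id, data, auth=None):
    response = requests.patch('%s/%s' % (base_url, id), json=data, auth=auth)
    response.raise_for_status()    # turn 4xx/5xx responses into exceptions
    return response.json()

# e.g. update_component('https://api.example.com/components', 42, {'name': 'web'})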
erikrose/parsimonious
parsimonious/expressions.py
https://github.com/erikrose/parsimonious/blob/12263be5ceca89344905c2c3eb9ac5a603e976e1/parsimonious/expressions.py#L278-L285
def _uncached_match(self, text, pos, cache, error):
        """Return a ``RegexNode`` for the match, or ``None`` if no match."""
        m = self.re.match(text, pos)
        if m is not None:
            span = m.span()
            node = RegexNode(self, text, pos, pos + span[1] - span[0])
            node.match = m  # TODO: A terrible idea for cache size?
            return node
[ "def", "_uncached_match", "(", "self", ",", "text", ",", "pos", ",", "cache", ",", "error", ")", ":", "m", "=", "self", ".", "re", ".", "match", "(", "text", ",", "pos", ")", "if", "m", "is", "not", "None", ":", "span", "=", "m", ".", "span", "(", ")", "node", "=", "RegexNode", "(", "self", ",", "text", ",", "pos", ",", "pos", "+", "span", "[", "1", "]", "-", "span", "[", "0", "]", ")", "node", ".", "match", "=", "m", "# TODO: A terrible idea for cache size?", "return", "node" ]
Return a ``RegexNode`` for the match, or ``None`` if no match.
[ "Return", "length", "of", "match", "None", "if", "no", "match", "." ]
python
train
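The span arithmetic above, exercised on its own: re.match anchored at pos consumes span[1] - span[0] characters, which is the node length the expression records:

import re

pattern = re.compile(r'\d+')
pos = 3
m = pattern.match('abc123def', pos)
span = m.span()                  # (3, 6)
assert span[1] - span[0] == 3    # three characters matched starting at pos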
mozilla-services/python-dockerflow
src/dockerflow/flask/app.py
https://github.com/mozilla-services/python-dockerflow/blob/43703c5e8934ba6901b0a1520d6da4ed6457208c/src/dockerflow/flask/app.py#L232-L261
def summary_extra(self): """ Build the extra data for the summary logger. """ out = { 'errno': 0, 'agent': request.headers.get('User-Agent', ''), 'lang': request.headers.get('Accept-Language', ''), 'method': request.method, 'path': request.path, } # set the uid value to the current user ID user_id = self.user_id() if user_id is None: user_id = '' out['uid'] = user_id # the rid value to the current request ID request_id = g.get('_request_id', None) if request_id is not None: out['rid'] = request_id # and the t value to the time it took to render start_timestamp = g.get('_start_timestamp', None) if start_timestamp is not None: # Duration of request, in milliseconds. out['t'] = int(1000 * (time.time() - start_timestamp)) return out
[ "def", "summary_extra", "(", "self", ")", ":", "out", "=", "{", "'errno'", ":", "0", ",", "'agent'", ":", "request", ".", "headers", ".", "get", "(", "'User-Agent'", ",", "''", ")", ",", "'lang'", ":", "request", ".", "headers", ".", "get", "(", "'Accept-Language'", ",", "''", ")", ",", "'method'", ":", "request", ".", "method", ",", "'path'", ":", "request", ".", "path", ",", "}", "# set the uid value to the current user ID", "user_id", "=", "self", ".", "user_id", "(", ")", "if", "user_id", "is", "None", ":", "user_id", "=", "''", "out", "[", "'uid'", "]", "=", "user_id", "# the rid value to the current request ID", "request_id", "=", "g", ".", "get", "(", "'_request_id'", ",", "None", ")", "if", "request_id", "is", "not", "None", ":", "out", "[", "'rid'", "]", "=", "request_id", "# and the t value to the time it took to render", "start_timestamp", "=", "g", ".", "get", "(", "'_start_timestamp'", ",", "None", ")", "if", "start_timestamp", "is", "not", "None", ":", "# Duration of request, in milliseconds.", "out", "[", "'t'", "]", "=", "int", "(", "1000", "*", "(", "time", ".", "time", "(", ")", "-", "start_timestamp", ")", ")", "return", "out" ]
Build the extra data for the summary logger.
[ "Build", "the", "extra", "data", "for", "the", "summary", "logger", "." ]
python
train
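The duration rule above in isolation: 't' is wall-clock request time in whole milliseconds, measured from a start timestamp stashed earlier (on flask.g in the real code):

import time

start_timestamp = time.time()
time.sleep(0.05)                 # stand-in for handling the request
t = int(1000 * (time.time() - start_timestamp))
assert t >= 50                   # duration in milliseconds, truncated to int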
raiden-network/raiden
raiden/network/proxies/payment_channel.py
https://github.com/raiden-network/raiden/blob/407ba15c72074e9de88771d6b9661ff4dc36bef5/raiden/network/proxies/payment_channel.py#L161-L168
def can_transfer(self, block_identifier: BlockSpecification) -> bool:
        """ Returns True if the channel is open and the node has a deposit in it. """
        return self.token_network.can_transfer(
            participant1=self.participant1,
            participant2=self.participant2,
            block_identifier=block_identifier,
            channel_identifier=self.channel_identifier,
        )
[ "def", "can_transfer", "(", "self", ",", "block_identifier", ":", "BlockSpecification", ")", "->", "bool", ":", "return", "self", ".", "token_network", ".", "can_transfer", "(", "participant1", "=", "self", ".", "participant1", ",", "participant2", "=", "self", ".", "participant2", ",", "block_identifier", "=", "block_identifier", ",", "channel_identifier", "=", "self", ".", "channel_identifier", ",", ")" ]
Returns True if the channel is open and the node has a deposit in it.
[ "Returns", "True", "if", "the", "channel", "is", "opened", "and", "the", "node", "has", "deposit", "in", "it", "." ]
python
train
OLC-Bioinformatics/sipprverse
pointsippr/pointsippr.py
https://github.com/OLC-Bioinformatics/sipprverse/blob/d4f10cdf8e1a39dac0953db61c21c97efc6006de/pointsippr/pointsippr.py#L345-L403
def write_table_report(summary_dict, seqid, genus): """ Parse the PointFinder table output, and write a summary report :param summary_dict: nested dictionary containing data such as header strings, and paths to reports :param seqid: name of the strain, :param genus: MASH-calculated genus of current isolate """ # Set the header string if the summary report doesn't already exist if not os.path.isfile(summary_dict[genus]['table']['summary']): header_string = summary_dict[genus]['table']['header'] else: header_string = str() summary_string = '{seq},'.format(seq=seqid) try: # Read in the predictions with open(summary_dict[genus]['table']['output'], 'r') as outputs: for header_value in summary_dict[genus]['table']['header'].split(',')[:-1]: for line in outputs: if line.startswith('{hv}\n'.format(hv=header_value)): # Iterate through the lines following the match for subline in outputs: if subline != '\n': if subline.startswith('Mutation'): for detailline in outputs: if detailline != '\n': summary_string += '{},'.format(detailline.split('\t')[0]) else: break else: summary_string += '{},'.format( subline.replace(',', ';').replace('\t', ',').rstrip()) break else: break break # Reset the file iterator to the first line in preparation for the next header outputs.seek(0) # Ensure that there were results to report if summary_string: if not summary_string.endswith('\n'): summary_string += '\n' # Write the summaries to the summary file with open(summary_dict[genus]['table']['summary'], 'a+') as summary: # Write the header if necessary if header_string: summary.write(header_string) summary.write(summary_string) except FileNotFoundError: # Write the summaries to the summary file with open(summary_dict[genus]['table']['summary'], 'a+') as summary: # Extract the length of the header from the dictionary. Subtract two (don't need the strain, or the # empty column created by a trailing comma header_len = len(summary_dict[genus]['table']['header'].split(',')) - 2 # Populate the summary strain with the appropriate number of comma-separated 'Gene not found' entries summary_string += '{empty}\n'.format(empty='Gene not found,' * header_len) # Write the header if necessary if header_string: summary.write(header_string) summary.write(summary_string)
[ "def", "write_table_report", "(", "summary_dict", ",", "seqid", ",", "genus", ")", ":", "# Set the header string if the summary report doesn't already exist", "if", "not", "os", ".", "path", ".", "isfile", "(", "summary_dict", "[", "genus", "]", "[", "'table'", "]", "[", "'summary'", "]", ")", ":", "header_string", "=", "summary_dict", "[", "genus", "]", "[", "'table'", "]", "[", "'header'", "]", "else", ":", "header_string", "=", "str", "(", ")", "summary_string", "=", "'{seq},'", ".", "format", "(", "seq", "=", "seqid", ")", "try", ":", "# Read in the predictions", "with", "open", "(", "summary_dict", "[", "genus", "]", "[", "'table'", "]", "[", "'output'", "]", ",", "'r'", ")", "as", "outputs", ":", "for", "header_value", "in", "summary_dict", "[", "genus", "]", "[", "'table'", "]", "[", "'header'", "]", ".", "split", "(", "','", ")", "[", ":", "-", "1", "]", ":", "for", "line", "in", "outputs", ":", "if", "line", ".", "startswith", "(", "'{hv}\\n'", ".", "format", "(", "hv", "=", "header_value", ")", ")", ":", "# Iterate through the lines following the match", "for", "subline", "in", "outputs", ":", "if", "subline", "!=", "'\\n'", ":", "if", "subline", ".", "startswith", "(", "'Mutation'", ")", ":", "for", "detailline", "in", "outputs", ":", "if", "detailline", "!=", "'\\n'", ":", "summary_string", "+=", "'{},'", ".", "format", "(", "detailline", ".", "split", "(", "'\\t'", ")", "[", "0", "]", ")", "else", ":", "break", "else", ":", "summary_string", "+=", "'{},'", ".", "format", "(", "subline", ".", "replace", "(", "','", ",", "';'", ")", ".", "replace", "(", "'\\t'", ",", "','", ")", ".", "rstrip", "(", ")", ")", "break", "else", ":", "break", "break", "# Reset the file iterator to the first line in preparation for the next header", "outputs", ".", "seek", "(", "0", ")", "# Ensure that there were results to report", "if", "summary_string", ":", "if", "not", "summary_string", ".", "endswith", "(", "'\\n'", ")", ":", "summary_string", "+=", "'\\n'", "# Write the summaries to the summary file", "with", "open", "(", "summary_dict", "[", "genus", "]", "[", "'table'", "]", "[", "'summary'", "]", ",", "'a+'", ")", "as", "summary", ":", "# Write the header if necessary", "if", "header_string", ":", "summary", ".", "write", "(", "header_string", ")", "summary", ".", "write", "(", "summary_string", ")", "except", "FileNotFoundError", ":", "# Write the summaries to the summary file", "with", "open", "(", "summary_dict", "[", "genus", "]", "[", "'table'", "]", "[", "'summary'", "]", ",", "'a+'", ")", "as", "summary", ":", "# Extract the length of the header from the dictionary. Subtract two (don't need the strain, or the", "# empty column created by a trailing comma", "header_len", "=", "len", "(", "summary_dict", "[", "genus", "]", "[", "'table'", "]", "[", "'header'", "]", ".", "split", "(", "','", ")", ")", "-", "2", "# Populate the summary strain with the appropriate number of comma-separated 'Gene not found' entries", "summary_string", "+=", "'{empty}\\n'", ".", "format", "(", "empty", "=", "'Gene not found,'", "*", "header_len", ")", "# Write the header if necessary", "if", "header_string", ":", "summary", ".", "write", "(", "header_string", ")", "summary", ".", "write", "(", "summary_string", ")" ]
Parse the PointFinder table output, and write a summary report :param summary_dict: nested dictionary containing data such as header strings, and paths to reports :param seqid: name of the strain, :param genus: MASH-calculated genus of current isolate
[ "Parse", "the", "PointFinder", "table", "output", "and", "write", "a", "summary", "report", ":", "param", "summary_dict", ":", "nested", "dictionary", "containing", "data", "such", "as", "header", "strings", "and", "paths", "to", "reports", ":", "param", "seqid", ":", "name", "of", "the", "strain", ":", "param", "genus", ":", "MASH", "-", "calculated", "genus", "of", "current", "isolate" ]
python
train
lehins/python-wepay
wepay/calls/batch.py
https://github.com/lehins/python-wepay/blob/414d25a1a8d0ecb22a3ddd1f16c60b805bb52a1f/wepay/calls/batch.py#L8-L22
def __create(self, client_id, client_secret, calls, **kwargs): """Call documentation: `/batch/create <https://www.wepay.com/developer/reference/batch#create>`_, plus extra keyword parameter: :keyword str access_token: will be used instead of instance's ``access_token`` """ params = { 'client_id': client_id, 'client_secret': client_secret, 'calls': calls } return self.make_call(self.__create, params, kwargs)
[ "def", "__create", "(", "self", ",", "client_id", ",", "client_secret", ",", "calls", ",", "*", "*", "kwargs", ")", ":", "params", "=", "{", "'client_id'", ":", "client_id", ",", "'client_secret'", ":", "client_secret", ",", "'calls'", ":", "calls", "}", "return", "self", ".", "make_call", "(", "self", ".", "__create", ",", "params", ",", "kwargs", ")" ]
Call documentation: `/batch/create <https://www.wepay.com/developer/reference/batch#create>`_, plus extra keyword parameter: :keyword str access_token: will be used instead of instance's ``access_token``
[ "Call", "documentation", ":", "/", "batch", "/", "create", "<https", ":", "//", "www", ".", "wepay", ".", "com", "/", "developer", "/", "reference", "/", "batch#create", ">", "_", "plus", "extra", "keyword", "parameter", ":", ":", "keyword", "str", "access_token", ":", "will", "be", "used", "instead", "of", "instance", "s", "access_token" ]
python
train
ThreatConnect-Inc/tcex
tcex/tcex_bin_run.py
https://github.com/ThreatConnect-Inc/tcex/blob/dd4d7a1ef723af1561687120191886b9a2fd4b47/tcex/tcex_bin_run.py#L492-L519
def deep_diff(self, db_data, user_data):
        """Validate data in user data.

        Args:
            db_data (dict|str|list): The data stored in Redis.
            user_data (dict|str|list): The user provided data.

        Returns:
            bool: True if the data passed validation.
        """
        # NOTE: tcex does not include the deepdiff library as a dependency since it is only
        # required for local testing.
        try:
            from deepdiff import DeepDiff
        except ImportError:
            print('Could not import DeepDiff module (try "pip install deepdiff").')
            sys.exit(1)

        try:
            ddiff = DeepDiff(db_data, user_data, ignore_order=True)
        except KeyError:
            return False
        except NameError:
            return False
        if ddiff:
            self.tcex.log.info(u'[validate] Diff : {}'.format(ddiff))
            return False
        return True
[ "def", "deep_diff", "(", "self", ",", "db_data", ",", "user_data", ")", ":", "# NOTE: tcex does include the deepdiff library as a dependencies since it is only", "# required for local testing.", "try", ":", "from", "deepdiff", "import", "DeepDiff", "except", "ImportError", ":", "print", "(", "'Could not import DeepDiff module (try \"pip install deepdiff\").'", ")", "sys", ".", "exit", "(", "1", ")", "try", ":", "ddiff", "=", "DeepDiff", "(", "db_data", ",", "user_data", ",", "ignore_order", "=", "True", ")", "except", "KeyError", ":", "return", "False", "except", "NameError", ":", "return", "False", "if", "ddiff", ":", "self", ".", "tcex", ".", "log", ".", "info", "(", "u'[validate] Diff : {}'", ".", "format", "(", "ddiff", ")", ")", "return", "False", "return", "True" ]
Validate data in user data.

        Args:
            db_data (dict|str|list): The data stored in Redis.
            user_data (dict|str|list): The user provided data.

        Returns:
            bool: True if the data passed validation.
[ "Validate", "data", "in", "user", "data", "." ]
python
train
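How the DeepDiff comparison above behaves on its own (assumes pip install deepdiff): an empty diff is falsy, and ignore_order=True makes list ordering irrelevant:

from deepdiff import DeepDiff

db_data = {'name': 'tcex', 'ints': [1, 2, 3]}
user_data = {'name': 'tcex', 'ints': [3, 2, 1]}

assert not DeepDiff(db_data, user_data, ignore_order=True)   # order ignored, so a match

ddiff = DeepDiff(db_data, {'name': 'tcex'}, ignore_order=True)
assert 'dictionary_item_removed' in ddiff                    # 'ints' disappeared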
facelessuser/backrefs
backrefs/_bre_parse.py
https://github.com/facelessuser/backrefs/blob/3b3d60f5d57b02044f880aa29c9c5add0e31a34f/backrefs/_bre_parse.py#L533-L554
def posix_props(self, prop, in_group=False): """ Insert POSIX properties. Posix style properties are not as forgiving as Unicode properties. Case does matter, and whitespace and '-' and '_' will not be tolerated. """ try: if self.is_bytes or not self.unicode: pattern = _uniprops.get_posix_property( prop, (_uniprops.POSIX_BYTES if self.is_bytes else _uniprops.POSIX) ) else: pattern = _uniprops.get_posix_property(prop, _uniprops.POSIX_UNICODE) except Exception: raise ValueError('Invalid POSIX property!') if not in_group and not pattern: # pragma: no cover pattern = '^%s' % ('\x00-\xff' if self.is_bytes else _uniprops.UNICODE_RANGE) return [pattern]
[ "def", "posix_props", "(", "self", ",", "prop", ",", "in_group", "=", "False", ")", ":", "try", ":", "if", "self", ".", "is_bytes", "or", "not", "self", ".", "unicode", ":", "pattern", "=", "_uniprops", ".", "get_posix_property", "(", "prop", ",", "(", "_uniprops", ".", "POSIX_BYTES", "if", "self", ".", "is_bytes", "else", "_uniprops", ".", "POSIX", ")", ")", "else", ":", "pattern", "=", "_uniprops", ".", "get_posix_property", "(", "prop", ",", "_uniprops", ".", "POSIX_UNICODE", ")", "except", "Exception", ":", "raise", "ValueError", "(", "'Invalid POSIX property!'", ")", "if", "not", "in_group", "and", "not", "pattern", ":", "# pragma: no cover", "pattern", "=", "'^%s'", "%", "(", "'\\x00-\\xff'", "if", "self", ".", "is_bytes", "else", "_uniprops", ".", "UNICODE_RANGE", ")", "return", "[", "pattern", "]" ]
Insert POSIX properties. Posix style properties are not as forgiving as Unicode properties. Case does matter, and whitespace and '-' and '_' will not be tolerated.
[ "Insert", "POSIX", "properties", "." ]
python
train
bfrog/whizzer
whizzer/client.py
https://github.com/bfrog/whizzer/blob/a1e43084b3ac8c1f3fb4ada081777cdbf791fd77/whizzer/client.py#L162-L178
def _connect(self, sock, addr, timeout):
        """Start watching the socket for it to be writable."""
        if self.connection:
            raise SocketClientConnectedError()

        if self.connector:
            raise SocketClientConnectingError()

        self.connect_deferred = Deferred(self.loop)
        self.sock = sock
        self.addr = addr
        self.connector = Connector(self.loop, sock, addr, timeout)
        self.connector.deferred.add_callback(self._connected)
        self.connector.deferred.add_errback(self._connect_failed)
        self.connector.start()
        return self.connect_deferred
[ "def", "_connect", "(", "self", ",", "sock", ",", "addr", ",", "timeout", ")", ":", "if", "self", ".", "connection", ":", "raise", "SocketClientConnectedError", "(", ")", "if", "self", ".", "connector", ":", "raise", "SocketClientConnectingError", "(", ")", "self", ".", "connect_deferred", "=", "Deferred", "(", "self", ".", "loop", ")", "self", ".", "sock", "=", "sock", "self", ".", "addr", "=", "addr", "self", ".", "connector", "=", "Connector", "(", "self", ".", "loop", ",", "sock", ",", "addr", ",", "timeout", ")", "self", ".", "connector", ".", "deferred", ".", "add_callback", "(", "self", ".", "_connected", ")", "self", ".", "connector", ".", "deferred", ".", "add_errback", "(", "self", ".", "_connect_failed", ")", "self", ".", "connector", ".", "start", "(", ")", "return", "self", ".", "connect_deferred" ]
Start watching the socket for it to be writable.
[ "Start", "watching", "the", "socket", "for", "it", "to", "be", "writtable", "." ]
python
train
jsommers/switchyard
switchyard/llnetreal.py
https://github.com/jsommers/switchyard/blob/fdcb3869c937dcedbd6ea7a7822ebd412bf1e2b0/switchyard/llnetreal.py#L149-L161
def _make_pcaps(self): ''' Internal method. Create libpcap devices for every network interface we care about and set them in non-blocking mode. ''' self._pcaps = {} for devname,intf in self._devinfo.items(): if intf.iftype == InterfaceType.Loopback: senddev = _RawSocket(devname, protocol=IPProtocol.UDP) self._localsend[devname] = senddev pdev = PcapLiveDevice(devname) self._pcaps[devname] = pdev
[ "def", "_make_pcaps", "(", "self", ")", ":", "self", ".", "_pcaps", "=", "{", "}", "for", "devname", ",", "intf", "in", "self", ".", "_devinfo", ".", "items", "(", ")", ":", "if", "intf", ".", "iftype", "==", "InterfaceType", ".", "Loopback", ":", "senddev", "=", "_RawSocket", "(", "devname", ",", "protocol", "=", "IPProtocol", ".", "UDP", ")", "self", ".", "_localsend", "[", "devname", "]", "=", "senddev", "pdev", "=", "PcapLiveDevice", "(", "devname", ")", "self", ".", "_pcaps", "[", "devname", "]", "=", "pdev" ]
Internal method. Create libpcap devices for every network interface we care about and set them in non-blocking mode.
[ "Internal", "method", ".", "Create", "libpcap", "devices", "for", "every", "network", "interface", "we", "care", "about", "and", "set", "them", "in", "non", "-", "blocking", "mode", "." ]
python
train
necaris/python3-openid
openid/extensions/draft/pape5.py
https://github.com/necaris/python3-openid/blob/4911bbc196dfd6f9eda7155df9903d668720ecbf/openid/extensions/draft/pape5.py#L73-L80
def _generateAlias(self): """Return an unused auth level alias""" for i in range(1000): alias = 'cust%d' % (i, ) if alias not in self.auth_level_aliases: return alias raise RuntimeError('Could not find an unused alias (tried 1000!)')
[ "def", "_generateAlias", "(", "self", ")", ":", "for", "i", "in", "range", "(", "1000", ")", ":", "alias", "=", "'cust%d'", "%", "(", "i", ",", ")", "if", "alias", "not", "in", "self", ".", "auth_level_aliases", ":", "return", "alias", "raise", "RuntimeError", "(", "'Could not find an unused alias (tried 1000!)'", ")" ]
Return an unused auth level alias
[ "Return", "an", "unused", "auth", "level", "alias" ]
python
train
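The alias scheme above as a hypothetical free function (same logic, minus self), showing that the first unused 'cust<N>' wins:

def generate_alias(auth_level_aliases):
    for i in range(1000):
        alias = 'cust%d' % (i,)
        if alias not in auth_level_aliases:
            return alias
    raise RuntimeError('Could not find an unused alias (tried 1000!)')

assert generate_alias(set()) == 'cust0'
assert generate_alias({'cust0', 'cust1'}) == 'cust2'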
istresearch/scrapy-cluster
redis-monitor/plugins/stats_monitor.py
https://github.com/istresearch/scrapy-cluster/blob/13aaed2349af5d792d6bcbfcadc5563158aeb599/redis-monitor/plugins/stats_monitor.py#L162-L207
def get_spider_stats(self): ''' Gather spider based stats ''' self.logger.debug("Gathering spider stats") the_dict = {} spider_set = set() total_spider_count = 0 keys = self.redis_conn.keys('stats:crawler:*:*:*') for key in keys: # we only care about the spider elements = key.split(":") spider = elements[3] if spider not in the_dict: the_dict[spider] = {} the_dict[spider]['count'] = 0 if len(elements) == 6: # got a time based stat response = elements[4] end = elements[5] if response not in the_dict[spider]: the_dict[spider][response] = {} the_dict[spider][response][end] = self._get_key_value(key, end == 'lifetime') elif len(elements) == 5: # got a spider identifier the_dict[spider]['count'] += 1 total_spider_count += 1 spider_set.add(spider) else: self.logger.warn("Unknown crawler stat key", {"key":key}) # simple counts the_dict['unique_spider_count'] = len(spider_set) the_dict['total_spider_count'] = total_spider_count ret_dict = {} ret_dict['spiders'] = the_dict return ret_dict
[ "def", "get_spider_stats", "(", "self", ")", ":", "self", ".", "logger", ".", "debug", "(", "\"Gathering spider stats\"", ")", "the_dict", "=", "{", "}", "spider_set", "=", "set", "(", ")", "total_spider_count", "=", "0", "keys", "=", "self", ".", "redis_conn", ".", "keys", "(", "'stats:crawler:*:*:*'", ")", "for", "key", "in", "keys", ":", "# we only care about the spider", "elements", "=", "key", ".", "split", "(", "\":\"", ")", "spider", "=", "elements", "[", "3", "]", "if", "spider", "not", "in", "the_dict", ":", "the_dict", "[", "spider", "]", "=", "{", "}", "the_dict", "[", "spider", "]", "[", "'count'", "]", "=", "0", "if", "len", "(", "elements", ")", "==", "6", ":", "# got a time based stat", "response", "=", "elements", "[", "4", "]", "end", "=", "elements", "[", "5", "]", "if", "response", "not", "in", "the_dict", "[", "spider", "]", ":", "the_dict", "[", "spider", "]", "[", "response", "]", "=", "{", "}", "the_dict", "[", "spider", "]", "[", "response", "]", "[", "end", "]", "=", "self", ".", "_get_key_value", "(", "key", ",", "end", "==", "'lifetime'", ")", "elif", "len", "(", "elements", ")", "==", "5", ":", "# got a spider identifier", "the_dict", "[", "spider", "]", "[", "'count'", "]", "+=", "1", "total_spider_count", "+=", "1", "spider_set", ".", "add", "(", "spider", ")", "else", ":", "self", ".", "logger", ".", "warn", "(", "\"Unknown crawler stat key\"", ",", "{", "\"key\"", ":", "key", "}", ")", "# simple counts", "the_dict", "[", "'unique_spider_count'", "]", "=", "len", "(", "spider_set", ")", "the_dict", "[", "'total_spider_count'", "]", "=", "total_spider_count", "ret_dict", "=", "{", "}", "ret_dict", "[", "'spiders'", "]", "=", "the_dict", "return", "ret_dict" ]
Gather spider based stats
[ "Gather", "spider", "based", "stats" ]
python
train
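The key layout the loop above relies on, shown concretely: keys follow stats:crawler:<host>:<spider>:<response>:<end>, so elements[3] is always the spider, and six elements signal a time-window stat (the sample values are illustrative):

key = 'stats:crawler:host1:link:200:3600'
elements = key.split(':')
assert elements[3] == 'link'                       # the spider name
assert len(elements) == 6                          # time-based stat
assert (elements[4], elements[5]) == ('200', '3600')

spider_key = 'stats:crawler:host1:link:abc123'     # five elements: a spider identifier
assert len(spider_key.split(':')) == 5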
SHDShim/pytheos
pytheos/eqn_vinet.py
https://github.com/SHDShim/pytheos/blob/be079624405e92fbec60c5ead253eb5917e55237/pytheos/eqn_vinet.py#L53-L71
def vinet_v_single(p, v0, k0, k0p, min_strain=0.01):
    """
    Find volume at a given pressure using brenth in scipy.optimize.
    This is for a single p value, not vectorized.

    :param p: pressure in GPa
    :param v0: unit-cell volume in A^3 at 1 bar
    :param k0: bulk modulus at reference conditions
    :param k0p: pressure derivative of bulk modulus at reference conditions
    :param min_strain: minimum v/v0 value bounding the volume search
    :return: unit cell volume at high pressure in A^3
    """
    if p <= 1.e-5:
        return v0

    def f_diff(v, v0, k0, k0p, p):
        return vinet_p(v, v0, k0, k0p) - p
    v = brenth(f_diff, v0, v0 * min_strain, args=(v0, k0, k0p, p))
    return v
[ "def", "vinet_v_single", "(", "p", ",", "v0", ",", "k0", ",", "k0p", ",", "min_strain", "=", "0.01", ")", ":", "if", "p", "<=", "1.e-5", ":", "return", "v0", "def", "f_diff", "(", "v", ",", "v0", ",", "k0", ",", "k0p", ",", "p", ")", ":", "return", "vinet_p", "(", "v", ",", "v0", ",", "k0", ",", "k0p", ")", "-", "p", "v", "=", "brenth", "(", "f_diff", ",", "v0", ",", "v0", "*", "min_strain", ",", "args", "=", "(", "v0", ",", "k0", ",", "k0p", ",", "p", ")", ")", "return", "v" ]
Find volume at a given pressure using brenth in scipy.optimize.
    This is for a single p value, not vectorized.

    :param p: pressure in GPa
    :param v0: unit-cell volume in A^3 at 1 bar
    :param k0: bulk modulus at reference conditions
    :param k0p: pressure derivative of bulk modulus at reference conditions
    :param min_strain: minimum v/v0 value bounding the volume search
    :return: unit cell volume at high pressure in A^3
[ "find", "volume", "at", "given", "pressure", "using", "brenth", "in", "scipy", ".", "optimize", "this", "is", "for", "single", "p", "value", "not", "vectorized" ]
python
train
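A self-contained version of the inversion above: vinet_p is written out from the standard Vinet equation of state (the module defines it elsewhere), and brenth brackets the root between v0 and v0 * min_strain; the v0/k0/k0p values are merely illustrative:

import math
from scipy.optimize import brenth

def vinet_p(v, v0, k0, k0p):
    """Vinet EOS: P = 3*k0*(1-x)/x**2 * exp(1.5*(k0p-1)*(1-x)), x = (v/v0)**(1/3)."""
    x = (v / v0) ** (1. / 3.)
    return 3. * k0 * (1. - x) / x ** 2 * math.exp(1.5 * (k0p - 1.) * (1. - x))

v0, k0, k0p = 162.3, 260.0, 4.0    # illustrative values: A^3, GPa, dimensionless
p = 50.0                           # GPa
v = brenth(lambda vol: vinet_p(vol, v0, k0, k0p) - p, v0, v0 * 0.01)
assert abs(vinet_p(v, v0, k0, k0p) - p) < 1e-6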
cebel/pyuniprot
src/pyuniprot/webserver/web.py
https://github.com/cebel/pyuniprot/blob/9462a6042c7c9295415a5eb589b77b27cb7c142b/src/pyuniprot/webserver/web.py#L739-L784
def query_db_reference(): """ Returns list of cross references by query parameters --- tags: - Query functions parameters: - name: type_ in: query type: string required: false description: Reference type default: EMBL - name: identifier in: query type: string required: false description: reference identifier default: Y00264 - name: entry_name in: query type: string required: false description: UniProt entry name default: A4_HUMAN - name: limit in: query type: integer required: false description: limit of results numbers default: 10 """ args = get_args( request_args=request.args, allowed_str_args=['type_', 'identifier', 'entry_name'], allowed_int_args=['limit'] ) return jsonify(query.db_reference(**args))
[ "def", "query_db_reference", "(", ")", ":", "args", "=", "get_args", "(", "request_args", "=", "request", ".", "args", ",", "allowed_str_args", "=", "[", "'type_'", ",", "'identifier'", ",", "'entry_name'", "]", ",", "allowed_int_args", "=", "[", "'limit'", "]", ")", "return", "jsonify", "(", "query", ".", "db_reference", "(", "*", "*", "args", ")", ")" ]
Returns list of cross references by query parameters --- tags: - Query functions parameters: - name: type_ in: query type: string required: false description: Reference type default: EMBL - name: identifier in: query type: string required: false description: reference identifier default: Y00264 - name: entry_name in: query type: string required: false description: UniProt entry name default: A4_HUMAN - name: limit in: query type: integer required: false description: limit of results numbers default: 10
[ "Returns", "list", "of", "cross", "references", "by", "query", "parameters", "---" ]
python
train
cebel/pyuniprot
src/pyuniprot/manager/database.py
https://github.com/cebel/pyuniprot/blob/9462a6042c7c9295415a5eb589b77b27cb7c142b/src/pyuniprot/manager/database.py#L651-L694
def get_pmids(self, entry): """ get `models.Pmid` objects from XML node entry :param entry: XML node entry :return: list of :class:`pyuniprot.manager.models.Pmid` objects """ pmids = [] for citation in entry.iterfind("./reference/citation"): for pubmed_ref in citation.iterfind('dbReference[@type="PubMed"]'): pmid_number = pubmed_ref.get('id') if pmid_number in self.pmids: pmid_sqlalchemy_obj = self.session.query(models.Pmid)\ .filter(models.Pmid.pmid == pmid_number).one() pmids.append(pmid_sqlalchemy_obj) else: pmid_dict = citation.attrib if not re.search('^\d+$', pmid_dict['volume']): pmid_dict['volume'] = -1 del pmid_dict['type'] # not needed because already filtered for PubMed pmid_dict.update(pmid=pmid_number) title_tag = citation.find('./title') if title_tag is not None: pmid_dict.update(title=title_tag.text) pmid_sqlalchemy_obj = models.Pmid(**pmid_dict) self.session.add(pmid_sqlalchemy_obj) self.session.flush() pmids.append(pmid_sqlalchemy_obj) self.pmids |= set([pmid_number, ]) # extend the cache of identifiers return pmids
[ "def", "get_pmids", "(", "self", ",", "entry", ")", ":", "pmids", "=", "[", "]", "for", "citation", "in", "entry", ".", "iterfind", "(", "\"./reference/citation\"", ")", ":", "for", "pubmed_ref", "in", "citation", ".", "iterfind", "(", "'dbReference[@type=\"PubMed\"]'", ")", ":", "pmid_number", "=", "pubmed_ref", ".", "get", "(", "'id'", ")", "if", "pmid_number", "in", "self", ".", "pmids", ":", "pmid_sqlalchemy_obj", "=", "self", ".", "session", ".", "query", "(", "models", ".", "Pmid", ")", ".", "filter", "(", "models", ".", "Pmid", ".", "pmid", "==", "pmid_number", ")", ".", "one", "(", ")", "pmids", ".", "append", "(", "pmid_sqlalchemy_obj", ")", "else", ":", "pmid_dict", "=", "citation", ".", "attrib", "if", "not", "re", ".", "search", "(", "'^\\d+$'", ",", "pmid_dict", "[", "'volume'", "]", ")", ":", "pmid_dict", "[", "'volume'", "]", "=", "-", "1", "del", "pmid_dict", "[", "'type'", "]", "# not needed because already filtered for PubMed", "pmid_dict", ".", "update", "(", "pmid", "=", "pmid_number", ")", "title_tag", "=", "citation", ".", "find", "(", "'./title'", ")", "if", "title_tag", "is", "not", "None", ":", "pmid_dict", ".", "update", "(", "title", "=", "title_tag", ".", "text", ")", "pmid_sqlalchemy_obj", "=", "models", ".", "Pmid", "(", "*", "*", "pmid_dict", ")", "self", ".", "session", ".", "add", "(", "pmid_sqlalchemy_obj", ")", "self", ".", "session", ".", "flush", "(", ")", "pmids", ".", "append", "(", "pmid_sqlalchemy_obj", ")", "self", ".", "pmids", "|=", "set", "(", "[", "pmid_number", ",", "]", ")", "# extend the cache of identifiers", "return", "pmids" ]
get `models.Pmid` objects from XML node entry :param entry: XML node entry :return: list of :class:`pyuniprot.manager.models.Pmid` objects
[ "get", "models", ".", "Pmid", "objects", "from", "XML", "node", "entry" ]
python
train
scour-project/scour
scour/scour.py
https://github.com/scour-project/scour/blob/049264eba6b1a54ae5ba1d6a5077d8e7b80e8835/scour/scour.py#L1885-L1894
def taint(taintedSet, taintedAttribute): u"""Adds an attribute to a set of attributes. Related attributes are also included.""" taintedSet.add(taintedAttribute) if taintedAttribute == 'marker': taintedSet |= set(['marker-start', 'marker-mid', 'marker-end']) if taintedAttribute in ['marker-start', 'marker-mid', 'marker-end']: taintedSet.add('marker') return taintedSet
[ "def", "taint", "(", "taintedSet", ",", "taintedAttribute", ")", ":", "taintedSet", ".", "add", "(", "taintedAttribute", ")", "if", "taintedAttribute", "==", "'marker'", ":", "taintedSet", "|=", "set", "(", "[", "'marker-start'", ",", "'marker-mid'", ",", "'marker-end'", "]", ")", "if", "taintedAttribute", "in", "[", "'marker-start'", ",", "'marker-mid'", ",", "'marker-end'", "]", ":", "taintedSet", ".", "add", "(", "'marker'", ")", "return", "taintedSet" ]
u"""Adds an attribute to a set of attributes. Related attributes are also included.
[ "u", "Adds", "an", "attribute", "to", "a", "set", "of", "attributes", "." ]
python
train
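The symmetric expansion above in action: tainting any positional marker property also taints 'marker', tainting 'marker' taints all three positional variants, and unrelated attributes pass through untouched:

assert taint(set(), 'marker-mid') == {'marker-mid', 'marker'}
assert taint(set(), 'marker') == {'marker', 'marker-start', 'marker-mid', 'marker-end'}
assert taint(set(), 'fill') == {'fill'}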
saltstack/salt
salt/states/azurearm_dns.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/states/azurearm_dns.py#L619-L683
def record_set_absent(name, zone_name, resource_group, connection_auth=None): ''' .. versionadded:: Fluorine Ensure a record set does not exist in the DNS zone. :param name: Name of the record set. :param zone_name: Name of the DNS zone. :param resource_group: The resource group assigned to the DNS zone. :param connection_auth: A dict with subscription and authentication parameters to be used in connecting to the Azure Resource Manager API. ''' ret = { 'name': name, 'result': False, 'comment': '', 'changes': {} } if not isinstance(connection_auth, dict): ret['comment'] = 'Connection information must be specified via connection_auth dictionary!' return ret rec_set = __salt__['azurearm_dns.record_set_get']( name, zone_name, resource_group, azurearm_log_level='info', **connection_auth ) if 'error' in rec_set: ret['result'] = True ret['comment'] = 'Record set {0} was not found in zone {1}.'.format(name, zone_name) return ret elif __opts__['test']: ret['comment'] = 'Record set {0} would be deleted.'.format(name) ret['result'] = None ret['changes'] = { 'old': rec_set, 'new': {}, } return ret deleted = __salt__['azurearm_dns.record_set_delete'](name, zone_name, resource_group, **connection_auth) if deleted: ret['result'] = True ret['comment'] = 'Record set {0} has been deleted.'.format(name) ret['changes'] = { 'old': rec_set, 'new': {} } return ret ret['comment'] = 'Failed to delete record set {0}!'.format(name) return ret
[ "def", "record_set_absent", "(", "name", ",", "zone_name", ",", "resource_group", ",", "connection_auth", "=", "None", ")", ":", "ret", "=", "{", "'name'", ":", "name", ",", "'result'", ":", "False", ",", "'comment'", ":", "''", ",", "'changes'", ":", "{", "}", "}", "if", "not", "isinstance", "(", "connection_auth", ",", "dict", ")", ":", "ret", "[", "'comment'", "]", "=", "'Connection information must be specified via connection_auth dictionary!'", "return", "ret", "rec_set", "=", "__salt__", "[", "'azurearm_dns.record_set_get'", "]", "(", "name", ",", "zone_name", ",", "resource_group", ",", "azurearm_log_level", "=", "'info'", ",", "*", "*", "connection_auth", ")", "if", "'error'", "in", "rec_set", ":", "ret", "[", "'result'", "]", "=", "True", "ret", "[", "'comment'", "]", "=", "'Record set {0} was not found in zone {1}.'", ".", "format", "(", "name", ",", "zone_name", ")", "return", "ret", "elif", "__opts__", "[", "'test'", "]", ":", "ret", "[", "'comment'", "]", "=", "'Record set {0} would be deleted.'", ".", "format", "(", "name", ")", "ret", "[", "'result'", "]", "=", "None", "ret", "[", "'changes'", "]", "=", "{", "'old'", ":", "rec_set", ",", "'new'", ":", "{", "}", ",", "}", "return", "ret", "deleted", "=", "__salt__", "[", "'azurearm_dns.record_set_delete'", "]", "(", "name", ",", "zone_name", ",", "resource_group", ",", "*", "*", "connection_auth", ")", "if", "deleted", ":", "ret", "[", "'result'", "]", "=", "True", "ret", "[", "'comment'", "]", "=", "'Record set {0} has been deleted.'", ".", "format", "(", "name", ")", "ret", "[", "'changes'", "]", "=", "{", "'old'", ":", "rec_set", ",", "'new'", ":", "{", "}", "}", "return", "ret", "ret", "[", "'comment'", "]", "=", "'Failed to delete record set {0}!'", ".", "format", "(", "name", ")", "return", "ret" ]
.. versionadded:: Fluorine Ensure a record set does not exist in the DNS zone. :param name: Name of the record set. :param zone_name: Name of the DNS zone. :param resource_group: The resource group assigned to the DNS zone. :param connection_auth: A dict with subscription and authentication parameters to be used in connecting to the Azure Resource Manager API.
[ "..", "versionadded", "::", "Fluorine" ]
python
train
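The state follows the usual three-outcome pattern: already absent, dry run under test mode, or a real delete. A standalone sketch of that decision flow, with plain callables standing in for the __salt__ execution-module calls (names hypothetical):

def record_set_absent_sketch(name, lookup, delete, test=False):
    rec = lookup(name)
    if rec is None:                 # nothing to do: already absent
        return {'result': True, 'changes': {}}
    if test:                        # dry run: report the intended change
        return {'result': None, 'changes': {'old': rec, 'new': {}}}
    if delete(name):                # real deletion
        return {'result': True, 'changes': {'old': rec, 'new': {}}}
    return {'result': False, 'changes': {}}

print(record_set_absent_sketch('www', lambda n: {'name': n}, lambda n: True))
# {'result': True, 'changes': {'old': {'name': 'www'}, 'new': {}}}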
dedupeio/dedupe
dedupe/canonical.py
https://github.com/dedupeio/dedupe/blob/9f7c9f84473a4bcacf0f2b11152d8ed3eb35d48b/dedupe/canonical.py#L48-L71
def getCanonicalRep(record_cluster): """ Given a list of records within a duplicate cluster, constructs a canonical representation of the cluster by finding canonical values for each field """ canonical_rep = {} keys = record_cluster[0].keys() for key in keys: key_values = [] for record in record_cluster: # assume non-empty values always better than empty value # for canonical record if record[key]: key_values.append(record[key]) if key_values: canonical_rep[key] = getCentroid(key_values, comparator) else: canonical_rep[key] = '' return canonical_rep
[ "def", "getCanonicalRep", "(", "record_cluster", ")", ":", "canonical_rep", "=", "{", "}", "keys", "=", "record_cluster", "[", "0", "]", ".", "keys", "(", ")", "for", "key", "in", "keys", ":", "key_values", "=", "[", "]", "for", "record", "in", "record_cluster", ":", "# assume non-empty values always better than empty value", "# for canonical record", "if", "record", "[", "key", "]", ":", "key_values", ".", "append", "(", "record", "[", "key", "]", ")", "if", "key_values", ":", "canonical_rep", "[", "key", "]", "=", "getCentroid", "(", "key_values", ",", "comparator", ")", "else", ":", "canonical_rep", "[", "key", "]", "=", "''", "return", "canonical_rep" ]
Given a list of records within a duplicate cluster, constructs a canonical representation of the cluster by finding canonical values for each field
[ "Given", "a", "list", "of", "records", "within", "a", "duplicate", "cluster", "constructs", "a", "canonical", "representation", "of", "the", "cluster", "by", "finding", "canonical", "values", "for", "each", "field" ]
python
train
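getCanonicalRep delegates the per-field choice to getCentroid with a module-level comparator. A runnable sketch of the same shape, substituting the most common non-empty value for the centroid (a simplification, not dedupe's actual comparator):

from collections import Counter

def canonical_rep_sketch(record_cluster):
    rep = {}
    for key in record_cluster[0]:
        # Non-empty values are always preferred over empty ones.
        values = [r[key] for r in record_cluster if r[key]]
        rep[key] = Counter(values).most_common(1)[0][0] if values else ''
    return rep

cluster = [{'name': 'ACME Co', 'city': ''},
           {'name': 'ACME Co.', 'city': 'Berlin'},
           {'name': 'ACME Co', 'city': 'Berlin'}]
print(canonical_rep_sketch(cluster))
# {'name': 'ACME Co', 'city': 'Berlin'}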
tensorflow/cleverhans
cleverhans/attacks/bapp.py
https://github.com/tensorflow/cleverhans/blob/97488e215760547b81afc53f5e5de8ba7da5bd98/cleverhans/attacks/bapp.py#L353-L355
def clip_image(image, clip_min, clip_max): """ Clip an image, or an image batch, with upper and lower threshold. """ return np.minimum(np.maximum(clip_min, image), clip_max)
[ "def", "clip_image", "(", "image", ",", "clip_min", ",", "clip_max", ")", ":", "return", "np", ".", "minimum", "(", "np", ".", "maximum", "(", "clip_min", ",", "image", ")", ",", "clip_max", ")" ]
Clip an image, or an image batch, with upper and lower thresholds.
[ "Clip", "an", "image", "or", "an", "image", "batch", "with", "upper", "and", "lower", "thresholds", "." ]
python
train
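Assuming clip_image and numpy are in scope as above, a quick usage check on a small batch:

import numpy as np

image = np.array([[-0.2, 0.5], [1.3, 0.9]])
print(clip_image(image, 0.0, 1.0))
# [[0.  0.5]
#  [1.  0.9]]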
ethereum/py-evm
eth/vm/logic/arithmetic.py
https://github.com/ethereum/py-evm/blob/58346848f076116381d3274bbcea96b9e2cfcbdf/eth/vm/logic/arithmetic.py#L214-L226
def sar(computation: BaseComputation) -> None: """ Arithmetic bitwise right shift """ shift_length, value = computation.stack_pop(num_items=2, type_hint=constants.UINT256) value = unsigned_to_signed(value) if shift_length >= 256: result = 0 if value >= 0 else constants.UINT_255_NEGATIVE_ONE else: result = (value >> shift_length) & constants.UINT_256_MAX computation.stack_push(result)
[ "def", "sar", "(", "computation", ":", "BaseComputation", ")", "->", "None", ":", "shift_length", ",", "value", "=", "computation", ".", "stack_pop", "(", "num_items", "=", "2", ",", "type_hint", "=", "constants", ".", "UINT256", ")", "value", "=", "unsigned_to_signed", "(", "value", ")", "if", "shift_length", ">=", "256", ":", "result", "=", "0", "if", "value", ">=", "0", "else", "constants", ".", "UINT_255_NEGATIVE_ONE", "else", ":", "result", "=", "(", "value", ">>", "shift_length", ")", "&", "constants", ".", "UINT_256_MAX", "computation", ".", "stack_push", "(", "result", ")" ]
Arithmetic bitwise right shift
[ "Arithmetic", "bitwise", "right", "shift" ]
python
train
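Stripped of the EVM stack plumbing, the core of sar is an arithmetic shift on 256-bit two's-complement words. A pure-Python sketch:

UINT_256_MAX = 2**256 - 1

def sar_sketch(shift_length, signed_value):
    if shift_length >= 256:
        # The result is all sign bits: 0 for non-negative, -1 otherwise.
        return 0 if signed_value >= 0 else UINT_256_MAX
    return (signed_value >> shift_length) & UINT_256_MAX

print(sar_sketch(4, 16))        # 1
print(hex(sar_sketch(4, -16)))  # 0xfff...f, i.e. -1 in two's complement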
pkgw/pwkit
pwkit/io.py
https://github.com/pkgw/pwkit/blob/d40957a1c3d2ea34e7ceac2267ee9635135f2793/pwkit/io.py#L189-L200
def ensure_symlink (src, dst): """Ensure the existence of a symbolic link pointing to src named dst. Returns a boolean indicating whether the symlink already existed. """ try: os.symlink (src, dst) except OSError as e: if e.errno == 17: # EEXIST return True raise return False
[ "def", "ensure_symlink", "(", "src", ",", "dst", ")", ":", "try", ":", "os", ".", "symlink", "(", "src", ",", "dst", ")", "except", "OSError", "as", "e", ":", "if", "e", ".", "errno", "==", "17", ":", "# EEXIST", "return", "True", "raise", "return", "False" ]
Ensure the existence of a symbolic link pointing to src named dst. Returns a boolean indicating whether the symlink already existed.
[ "Ensure", "the", "existence", "of", "a", "symbolic", "link", "pointing", "to", "src", "named", "dst", ".", "Returns", "a", "boolean", "indicating", "whether", "the", "symlink", "already", "existed", "." ]
python
train
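Assuming ensure_symlink is importable as above, a usage check on a POSIX system (the link target need not exist for the symlink to be created):

import os, tempfile

link = os.path.join(tempfile.mkdtemp(), 'latest')
print(ensure_symlink('/etc/hostname', link))  # False: newly created
print(ensure_symlink('/etc/hostname', link))  # True: already existed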
oceanprotocol/squid-py
squid_py/agreements/storage.py
https://github.com/oceanprotocol/squid-py/blob/43a5b7431627e4c9ab7382ed9eb8153e96ed4483/squid_py/agreements/storage.py#L63-L85
def get_service_agreements(storage_path, status='pending'): """ Get service agreements pending to be executed. :param storage_path: storage path for the internal db, str :param status: :return: """ conn = sqlite3.connect(storage_path) try: cursor = conn.cursor() return [ row for row in cursor.execute( ''' SELECT id, did, service_definition_id, price, files, start_time, status FROM service_agreements WHERE status=?; ''', (status,)) ] finally: conn.close()
[ "def", "get_service_agreements", "(", "storage_path", ",", "status", "=", "'pending'", ")", ":", "conn", "=", "sqlite3", ".", "connect", "(", "storage_path", ")", "try", ":", "cursor", "=", "conn", ".", "cursor", "(", ")", "return", "[", "row", "for", "row", "in", "cursor", ".", "execute", "(", "'''\n SELECT id, did, service_definition_id, price, files, start_time, status\n FROM service_agreements \n WHERE status=?;\n '''", ",", "(", "status", ",", ")", ")", "]", "finally", ":", "conn", ".", "close", "(", ")" ]
Get service agreements pending to be executed. :param storage_path: storage path for the internal db, str :param status: agreement status to filter on, defaults to 'pending', str :return: list of matching service agreement rows
[ "Get", "service", "agreements", "pending", "to", "be", "executed", "." ]
python
train
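An illustrative round trip, assuming the function above is importable; the CREATE TABLE here is guessed from the SELECT column list, not taken from squid-py:

import os, sqlite3, tempfile

path = os.path.join(tempfile.mkdtemp(), 'agreements.db')
conn = sqlite3.connect(path)
conn.execute('CREATE TABLE service_agreements '
             '(id, did, service_definition_id, price, files, start_time, status)')
conn.execute("INSERT INTO service_agreements VALUES "
             "('a1', 'did:op:1', '0', 10, 'urls', 0, 'pending')")
conn.commit()
conn.close()
print(get_service_agreements(path))
# [('a1', 'did:op:1', '0', 10, 'urls', 0, 'pending')]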
RPi-Distro/python-gpiozero
gpiozero/pins/data.py
https://github.com/RPi-Distro/python-gpiozero/blob/7b67374fd0c8c4fde5586d9bad9531f076db9c0c/gpiozero/pins/data.py#L1121-L1139
def physical_pin(self, function): """ Return the physical pin supporting the specified *function*. If no pins support the desired *function*, this function raises :exc:`PinNoPins`. If multiple pins support the desired *function*, :exc:`PinMultiplePins` will be raised (use :func:`physical_pins` if you expect multiple pins in the result, such as for electrical ground). :param str function: The pin function you wish to search for. Usually this is something like "GPIO9" for Broadcom GPIO pin 9. """ result = self.physical_pins(function) if len(result) > 1: raise PinMultiplePins('multiple pins can be used for %s' % function) elif result: return result.pop() else: raise PinNoPins('no pins can be used for %s' % function)
[ "def", "physical_pin", "(", "self", ",", "function", ")", ":", "result", "=", "self", ".", "physical_pins", "(", "function", ")", "if", "len", "(", "result", ")", ">", "1", ":", "raise", "PinMultiplePins", "(", "'multiple pins can be used for %s'", "%", "function", ")", "elif", "result", ":", "return", "result", ".", "pop", "(", ")", "else", ":", "raise", "PinNoPins", "(", "'no pins can be used for %s'", "%", "function", ")" ]
Return the physical pin supporting the specified *function*. If no pins support the desired *function*, this function raises :exc:`PinNoPins`. If multiple pins support the desired *function*, :exc:`PinMultiplePins` will be raised (use :func:`physical_pins` if you expect multiple pins in the result, such as for electrical ground). :param str function: The pin function you wish to search for. Usually this is something like "GPIO9" for Broadcom GPIO pin 9.
[ "Return", "the", "physical", "pin", "supporting", "the", "specified", "*", "function", "*", ".", "If", "no", "pins", "support", "the", "desired", "*", "function", "*", "this", "function", "raises", ":", "exc", ":", "PinNoPins", ".", "If", "multiple", "pins", "support", "the", "desired", "*", "function", "*", ":", "exc", ":", "PinMultiplePins", "will", "be", "raised", "(", "use", ":", "func", ":", "physical_pins", "if", "you", "expect", "multiple", "pins", "in", "the", "result", "such", "as", "for", "electrical", "ground", ")", "." ]
python
train
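The method is a one/many/none dispatch over physical_pins. A standalone sketch with a plain dict and ValueError standing in for the pin data and the PinNoPins/PinMultiplePins exceptions:

def physical_pin_sketch(pins_by_function, function):
    result = set(pins_by_function.get(function, ()))
    if len(result) > 1:
        raise ValueError('multiple pins can be used for %s' % function)
    if result:
        return result.pop()
    raise ValueError('no pins can be used for %s' % function)

pins = {'GPIO9': {21}, 'GND': {6, 9, 14}}
print(physical_pin_sketch(pins, 'GPIO9'))  # 21
try:
    physical_pin_sketch(pins, 'GND')       # ground has many pins
except ValueError as exc:
    print(exc)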
kamikaze/webdav
src/webdav/client.py
https://github.com/kamikaze/webdav/blob/6facff7224023d3e28c8e1592f3c58401c91a0e6/src/webdav/client.py#L852-L863
def parse_get_list_response(content): """Parses of response content XML from WebDAV server and extract file and directory names. :param content: the XML content of HTTP response from WebDAV server for getting list of files by remote path. :return: list of extracted file or directory names. """ try: tree = etree.fromstring(content) hrees = [Urn.separate + unquote(urlsplit(hree.text).path) for hree in tree.findall('.//{DAV:}href')] return [Urn(hree) for hree in hrees] except etree.XMLSyntaxError: return list()
[ "def", "parse_get_list_response", "(", "content", ")", ":", "try", ":", "tree", "=", "etree", ".", "fromstring", "(", "content", ")", "hrees", "=", "[", "Urn", ".", "separate", "+", "unquote", "(", "urlsplit", "(", "hree", ".", "text", ")", ".", "path", ")", "for", "hree", "in", "tree", ".", "findall", "(", "'.//{DAV:}href'", ")", "]", "return", "[", "Urn", "(", "hree", ")", "for", "hree", "in", "hrees", "]", "except", "etree", ".", "XMLSyntaxError", ":", "return", "list", "(", ")" ]
Parses the XML response content from the WebDAV server and extracts file and directory names. :param content: the XML content of the HTTP response from the WebDAV server for getting the list of files at the remote path. :return: list of extracted file or directory names.
[ "Parses", "the", "XML", "response", "content", "from", "the", "WebDAV", "server", "and", "extracts", "file", "and", "directory", "names", "." ]
python
train
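The same href extraction can be demonstrated with only the standard library (Urn objects replaced by plain paths; the multistatus body below is a minimal hand-written sample):

from urllib.parse import unquote, urlsplit
from xml.etree import ElementTree as etree

content = (b'<D:multistatus xmlns:D="DAV:">'
           b'<D:response><D:href>/dav/docs/report%201.txt</D:href></D:response>'
           b'</D:multistatus>')
tree = etree.fromstring(content)
paths = [unquote(urlsplit(h.text).path) for h in tree.findall('.//{DAV:}href')]
print(paths)  # ['/dav/docs/report 1.txt']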
OCHA-DAP/hdx-python-utilities
src/hdx/utilities/dictandlist.py
https://github.com/OCHA-DAP/hdx-python-utilities/blob/9c89e0aa5afac2c002b02a2d8f0e5b91eeb3d2a3/src/hdx/utilities/dictandlist.py#L16-L63
def merge_two_dictionaries(a, b, merge_lists=False): # type: (DictUpperBound, DictUpperBound, bool) -> DictUpperBound """Merges b into a and returns merged result NOTE: tuples and arbitrary objects are not handled as it is totally ambiguous what should happen Args: a (DictUpperBound): dictionary to merge into b (DictUpperBound): dictionary to merge from merge_lists (bool): Whether to merge lists (True) or replace lists (False). Default is False. Returns: DictUpperBound: Merged dictionary """ key = None # ## debug output # sys.stderr.write('DEBUG: %s to %s\n' %(b,a)) try: if a is None or isinstance(a, (six.string_types, six.text_type, six.integer_types, float)): # border case for first run or if a is a primitive a = b elif isinstance(a, list): # lists can be appended or replaced if isinstance(b, list): if merge_lists: # merge lists a.extend(b) else: # replace list a = b else: # append to list a.append(b) elif isinstance(a, (dict, UserDict)): # dicts must be merged if isinstance(b, (dict, UserDict)): for key in b: if key in a: a[key] = merge_two_dictionaries(a[key], b[key], merge_lists=merge_lists) else: a[key] = b[key] else: raise ValueError('Cannot merge non-dict "%s" into dict "%s"' % (b, a)) else: raise ValueError('NOT IMPLEMENTED "%s" into "%s"' % (b, a)) except TypeError as e: raise ValueError('TypeError "%s" in key "%s" when merging "%s" into "%s"' % (e, key, b, a)) return a
[ "def", "merge_two_dictionaries", "(", "a", ",", "b", ",", "merge_lists", "=", "False", ")", ":", "# type: (DictUpperBound, DictUpperBound, bool) -> DictUpperBound", "key", "=", "None", "# ## debug output", "# sys.stderr.write('DEBUG: %s to %s\\n' %(b,a))", "try", ":", "if", "a", "is", "None", "or", "isinstance", "(", "a", ",", "(", "six", ".", "string_types", ",", "six", ".", "text_type", ",", "six", ".", "integer_types", ",", "float", ")", ")", ":", "# border case for first run or if a is a primitive", "a", "=", "b", "elif", "isinstance", "(", "a", ",", "list", ")", ":", "# lists can be appended or replaced", "if", "isinstance", "(", "b", ",", "list", ")", ":", "if", "merge_lists", ":", "# merge lists", "a", ".", "extend", "(", "b", ")", "else", ":", "# replace list", "a", "=", "b", "else", ":", "# append to list", "a", ".", "append", "(", "b", ")", "elif", "isinstance", "(", "a", ",", "(", "dict", ",", "UserDict", ")", ")", ":", "# dicts must be merged", "if", "isinstance", "(", "b", ",", "(", "dict", ",", "UserDict", ")", ")", ":", "for", "key", "in", "b", ":", "if", "key", "in", "a", ":", "a", "[", "key", "]", "=", "merge_two_dictionaries", "(", "a", "[", "key", "]", ",", "b", "[", "key", "]", ",", "merge_lists", "=", "merge_lists", ")", "else", ":", "a", "[", "key", "]", "=", "b", "[", "key", "]", "else", ":", "raise", "ValueError", "(", "'Cannot merge non-dict \"%s\" into dict \"%s\"'", "%", "(", "b", ",", "a", ")", ")", "else", ":", "raise", "ValueError", "(", "'NOT IMPLEMENTED \"%s\" into \"%s\"'", "%", "(", "b", ",", "a", ")", ")", "except", "TypeError", "as", "e", ":", "raise", "ValueError", "(", "'TypeError \"%s\" in key \"%s\" when merging \"%s\" into \"%s\"'", "%", "(", "e", ",", "key", ",", "b", ",", "a", ")", ")", "return", "a" ]
Merges b into a and returns merged result NOTE: tuples and arbitrary objects are not handled as it is totally ambiguous what should happen Args: a (DictUpperBound): dictionary to merge into b (DictUpperBound): dictionary to merge from merge_lists (bool): Whether to merge lists (True) or replace lists (False). Default is False. Returns: DictUpperBound: Merged dictionary
[ "Merges", "b", "into", "a", "and", "returns", "merged", "result" ]
python
train
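A usage check, assuming the function is importable as above: nested dicts merge key by key, and lists are replaced unless merge_lists=True (note that `a` is mutated in place):

merged = merge_two_dictionaries({'log': {'level': 'info'}, 'tags': ['x']},
                                {'log': {'file': 'out.log'}, 'tags': ['y']})
print(merged)
# {'log': {'level': 'info', 'file': 'out.log'}, 'tags': ['y']}
print(merge_two_dictionaries({'tags': ['x']}, {'tags': ['y']}, merge_lists=True))
# {'tags': ['x', 'y']}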
ArduPilot/MAVProxy
MAVProxy/modules/mavproxy_wp.py
https://github.com/ArduPilot/MAVProxy/blob/f50bdeff33064876f7dc8dc4683d278ff47f75d5/MAVProxy/modules/mavproxy_wp.py#L190-L203
def idle_task(self): '''handle missing waypoints''' if self.wp_period.trigger(): # cope with packet loss fetching mission if self.master is not None and self.master.time_since('MISSION_ITEM') >= 2 and self.wploader.count() < getattr(self.wploader,'expected_count',0): wps = self.missing_wps_to_request(); print("re-requesting WPs %s" % str(wps)) self.send_wp_requests(wps) if self.module('console') is not None and not self.menu_added_console: self.menu_added_console = True self.module('console').add_menu(self.menu) if self.module('map') is not None and not self.menu_added_map: self.menu_added_map = True self.module('map').add_menu(self.menu)
[ "def", "idle_task", "(", "self", ")", ":", "if", "self", ".", "wp_period", ".", "trigger", "(", ")", ":", "# cope with packet loss fetching mission", "if", "self", ".", "master", "is", "not", "None", "and", "self", ".", "master", ".", "time_since", "(", "'MISSION_ITEM'", ")", ">=", "2", "and", "self", ".", "wploader", ".", "count", "(", ")", "<", "getattr", "(", "self", ".", "wploader", ",", "'expected_count'", ",", "0", ")", ":", "wps", "=", "self", ".", "missing_wps_to_request", "(", ")", "print", "(", "\"re-requesting WPs %s\"", "%", "str", "(", "wps", ")", ")", "self", ".", "send_wp_requests", "(", "wps", ")", "if", "self", ".", "module", "(", "'console'", ")", "is", "not", "None", "and", "not", "self", ".", "menu_added_console", ":", "self", ".", "menu_added_console", "=", "True", "self", ".", "module", "(", "'console'", ")", ".", "add_menu", "(", "self", ".", "menu", ")", "if", "self", ".", "module", "(", "'map'", ")", "is", "not", "None", "and", "not", "self", ".", "menu_added_map", ":", "self", ".", "menu_added_map", "=", "True", "self", ".", "module", "(", "'map'", ")", ".", "add_menu", "(", "self", ".", "menu", ")" ]
handle missing waypoints
[ "handle", "missing", "waypoints" ]
python
train
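The waypoint part of idle_task is a periodic retry: when the timer fires and fewer items arrived than expected, re-request the gaps. A standalone sketch of that shape (names hypothetical):

def idle_retry(received, expected_count, timer_fired):
    if timer_fired and len(received) < expected_count:
        # stand-in for missing_wps_to_request() + send_wp_requests()
        return [i for i in range(expected_count) if i not in received]
    return []

print(idle_retry({0, 2}, 4, timer_fired=True))  # [1, 3]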
click-contrib/click-configfile
click_configfile.py
https://github.com/click-contrib/click-configfile/blob/a616204cb9944125fd5051556f27a7ccef611e22/click_configfile.py#L308-L320
def select_config_sections(configfile_sections, desired_section_patterns): """Select a subset of the sections in a configuration file by using a list of section names of list of section name patters (supporting :mod:`fnmatch` wildcards). :param configfile_sections: List of config section names (as strings). :param desired_section_patterns: :return: List of selected section names or empty list (as generator). """ for section_name in configfile_sections: for desired_section_pattern in desired_section_patterns: if fnmatch(section_name, desired_section_pattern): yield section_name
[ "def", "select_config_sections", "(", "configfile_sections", ",", "desired_section_patterns", ")", ":", "for", "section_name", "in", "configfile_sections", ":", "for", "desired_section_pattern", "in", "desired_section_patterns", ":", "if", "fnmatch", "(", "section_name", ",", "desired_section_pattern", ")", ":", "yield", "section_name" ]
Select a subset of the sections in a configuration file by using a list of section names or a list of section name patterns (supporting :mod:`fnmatch` wildcards). :param configfile_sections: List of config section names (as strings). :param desired_section_patterns: List of section names or :mod:`fnmatch` patterns (as strings). :return: List of selected section names or empty list (as generator).
[ "Select", "a", "subset", "of", "the", "sections", "in", "a", "configuration", "file", "by", "using", "a", "list", "of", "section", "names", "or", "a", "list", "of", "section", "name", "patterns", "(", "supporting", ":", "mod", ":", "fnmatch", "wildcards", ")", "." ]
python
train
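Assuming the module (and its fnmatch import) is loaded, a usage check; note the generator yields a section once per pattern it matches:

sections = ['behave', 'behave.userdata', 'logging', 'pytest']
print(list(select_config_sections(sections, ['behave', 'behave.*'])))
# ['behave', 'behave.userdata']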
wonambi-python/wonambi
wonambi/widgets/channels.py
https://github.com/wonambi-python/wonambi/blob/1d8e3d7e53df8017c199f703bcab582914676e76/wonambi/widgets/channels.py#L442-L448
def read_group_info(self): """Get information about groups directly from the widget.""" self.groups = [] for i in range(self.tabs.count()): one_group = self.tabs.widget(i).get_info() # one_group['name'] = self.tabs.tabText(i) self.groups.append(one_group)
[ "def", "read_group_info", "(", "self", ")", ":", "self", ".", "groups", "=", "[", "]", "for", "i", "in", "range", "(", "self", ".", "tabs", ".", "count", "(", ")", ")", ":", "one_group", "=", "self", ".", "tabs", ".", "widget", "(", "i", ")", ".", "get_info", "(", ")", "# one_group['name'] = self.tabs.tabText(i)", "self", ".", "groups", ".", "append", "(", "one_group", ")" ]
Get information about groups directly from the widget.
[ "Get", "information", "about", "groups", "directly", "from", "the", "widget", "." ]
python
train
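Outside Qt, the loop reduces to collecting get_info() from each tab. A minimal stand-in to show the shape:

class FakeTab:
    def __init__(self, info):
        self._info = info
    def get_info(self):
        return self._info

tabs = [FakeTab({'name': 'eeg'}), FakeTab({'name': 'eog'})]
groups = [tab.get_info() for tab in tabs]  # what read_group_info accumulates
print(groups)  # [{'name': 'eeg'}, {'name': 'eog'}]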
tradenity/python-sdk
tradenity/resources/cash_on_delivery_payment.py
https://github.com/tradenity/python-sdk/blob/d13fbe23f4d6ff22554c6d8d2deaf209371adaf1/tradenity/resources/cash_on_delivery_payment.py#L700-L721
def replace_cash_on_delivery_payment_by_id(cls, cash_on_delivery_payment_id, cash_on_delivery_payment, **kwargs): """Replace CashOnDeliveryPayment Replace all attributes of CashOnDeliveryPayment This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async=True >>> thread = api.replace_cash_on_delivery_payment_by_id(cash_on_delivery_payment_id, cash_on_delivery_payment, async=True) >>> result = thread.get() :param async bool :param str cash_on_delivery_payment_id: ID of cashOnDeliveryPayment to replace (required) :param CashOnDeliveryPayment cash_on_delivery_payment: Attributes of cashOnDeliveryPayment to replace (required) :return: CashOnDeliveryPayment If the method is called asynchronously, returns the request thread. """ kwargs['_return_http_data_only'] = True if kwargs.get('async'): return cls._replace_cash_on_delivery_payment_by_id_with_http_info(cash_on_delivery_payment_id, cash_on_delivery_payment, **kwargs) else: (data) = cls._replace_cash_on_delivery_payment_by_id_with_http_info(cash_on_delivery_payment_id, cash_on_delivery_payment, **kwargs) return data
[ "def", "replace_cash_on_delivery_payment_by_id", "(", "cls", ",", "cash_on_delivery_payment_id", ",", "cash_on_delivery_payment", ",", "*", "*", "kwargs", ")", ":", "kwargs", "[", "'_return_http_data_only'", "]", "=", "True", "if", "kwargs", ".", "get", "(", "'async'", ")", ":", "return", "cls", ".", "_replace_cash_on_delivery_payment_by_id_with_http_info", "(", "cash_on_delivery_payment_id", ",", "cash_on_delivery_payment", ",", "*", "*", "kwargs", ")", "else", ":", "(", "data", ")", "=", "cls", ".", "_replace_cash_on_delivery_payment_by_id_with_http_info", "(", "cash_on_delivery_payment_id", ",", "cash_on_delivery_payment", ",", "*", "*", "kwargs", ")", "return", "data" ]
Replace CashOnDeliveryPayment Replace all attributes of CashOnDeliveryPayment This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async=True >>> thread = api.replace_cash_on_delivery_payment_by_id(cash_on_delivery_payment_id, cash_on_delivery_payment, async=True) >>> result = thread.get() :param async bool :param str cash_on_delivery_payment_id: ID of cashOnDeliveryPayment to replace (required) :param CashOnDeliveryPayment cash_on_delivery_payment: Attributes of cashOnDeliveryPayment to replace (required) :return: CashOnDeliveryPayment If the method is called asynchronously, returns the request thread.
[ "Replace", "CashOnDeliveryPayment" ]
python
train
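The sync/async split can be sketched with a thread pool; note that because `async` later became a reserved word, modern callers must pass the flag via a kwargs dict (the work function below is a stand-in for the HTTP call):

from concurrent.futures import ThreadPoolExecutor

_pool = ThreadPoolExecutor(max_workers=2)

def replace_sketch(payment_id, payload, **kwargs):
    def work():
        return dict(payload, id=payment_id)  # stand-in for the HTTP call
    if kwargs.get('async'):
        return _pool.submit(work)            # caller waits with .result()
    return work()

print(replace_sketch('p1', {'amount': 5}))
print(replace_sketch('p1', {'amount': 5}, **{'async': True}).result())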
mfitzp/padua
padua/visualize.py
https://github.com/mfitzp/padua/blob/8b14bf4d2f895da6aea5d7885d409315bd303ec6/padua/visualize.py#L1438-L1491
def comparedist(df, *args, **kwargs): """ Compare the distributions of two DataFrames giving visualisations of: - individual and combined distributions - distribution of non-common values - distribution of non-common values vs. each side Plot distribution as area (fill_between) + mean, median vertical bars. :param df1: `pandas.DataFrame` :param arg0: the base dataframe *selector* to perform calculation against. :param *: a number of `pandas.DataFrames` selectors to compare to arg0 :param bins: `int` number of bins for histogram :param xlabel: label for X axis :param ylabel: label for Y axis :param base_fmt: Text format to use for base selector legend. Python .format() syntax. :param arg_fmt: Text format to use for format, with selectors legend. Python .format() syntax. :return: Figure """ bins = kwargs.get('bins', 50) xlabel = kwargs.get('xlabel', 'Value') ylabel = kwargs.get('ylabel', 'Count') base_fmt = kwargs.get('base_fmt') arg_fmt = kwargs.get('arg_fmt') # The base for comparisons is the first passed selector. base_selector, selectors = args[0], args[1:] df1 = df[base_selector] fig, axes = plt.subplots(len(selectors), 1, figsize=(10, len(selectors) * 5)) if not isinstance(axes, np.ndarray): axes = [axes] # mpl returns a single object when only one. for n, (ax1, selector) in enumerate(zip(axes, selectors)): dfn = df[selector] xr = np.nanmin( [np.nanmin(df1), np.nanmin(dfn)] ), np.nanmax( [np.nanmax(df1), np.nanmax(dfn)] ) ax1.set_title('Distributions of %s and %s' % (base_selector, selector)) _areadist(ax1, dfn.values, xr, c='r', bins=bins, label=format_label(base_selector, base_fmt)) _areadist(ax1, df1.values, xr, c='k', bins=bins, alpha=0.3, label=format_label(selector, arg_fmt)) ax1.set_xlabel(xlabel) ax1.set_ylabel(ylabel) _, ymax = ax1.get_ylim() ax1.set_ylim(0, ymax) ax1.legend(loc='upper right') fig.tight_layout() return fig
[ "def", "comparedist", "(", "df", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "bins", "=", "kwargs", ".", "get", "(", "'bins'", ",", "50", ")", "xlabel", "=", "kwargs", ".", "get", "(", "'xlabel'", ",", "'Value'", ")", "ylabel", "=", "kwargs", ".", "get", "(", "'ylabel'", ",", "'Count'", ")", "base_fmt", "=", "kwargs", ".", "get", "(", "'base_fmt'", ")", "arg_fmt", "=", "kwargs", ".", "get", "(", "'arg_fmt'", ")", "# The base for comparisons is the first passed selector.", "base_selector", ",", "selectors", "=", "args", "[", "0", "]", ",", "args", "[", "1", ":", "]", "df1", "=", "df", "[", "base_selector", "]", "fig", ",", "axes", "=", "plt", ".", "subplots", "(", "len", "(", "selectors", ")", ",", "1", ",", "figsize", "=", "(", "10", ",", "len", "(", "selectors", ")", "*", "5", ")", ")", "if", "not", "isinstance", "(", "axes", ",", "np", ".", "ndarray", ")", ":", "axes", "=", "[", "axes", "]", "# mpl returns a single object when only one.", "for", "n", ",", "(", "ax1", ",", "selector", ")", "in", "enumerate", "(", "zip", "(", "axes", ",", "selectors", ")", ")", ":", "dfn", "=", "df", "[", "selector", "]", "xr", "=", "np", ".", "nanmin", "(", "[", "np", ".", "nanmin", "(", "df1", ")", ",", "np", ".", "nanmin", "(", "dfn", ")", "]", ")", ",", "np", ".", "nanmax", "(", "[", "np", ".", "nanmax", "(", "df1", ")", ",", "np", ".", "nanmax", "(", "dfn", ")", "]", ")", "ax1", ".", "set_title", "(", "'Distributions of %s and %s'", "%", "(", "base_selector", ",", "selector", ")", ")", "_areadist", "(", "ax1", ",", "dfn", ".", "values", ",", "xr", ",", "c", "=", "'r'", ",", "bins", "=", "bins", ",", "label", "=", "format_label", "(", "base_selector", ",", "base_fmt", ")", ")", "_areadist", "(", "ax1", ",", "df1", ".", "values", ",", "xr", ",", "c", "=", "'k'", ",", "bins", "=", "bins", ",", "alpha", "=", "0.3", ",", "label", "=", "format_label", "(", "selector", ",", "arg_fmt", ")", ")", "ax1", ".", "set_xlabel", "(", "xlabel", ")", "ax1", ".", "set_ylabel", "(", "ylabel", ")", "_", ",", "ymax", "=", "ax1", ".", "get_ylim", "(", ")", "ax1", ".", "set_ylim", "(", "0", ",", "ymax", ")", "ax1", ".", "legend", "(", "loc", "=", "'upper right'", ")", "fig", ".", "tight_layout", "(", ")", "return", "fig" ]
Compare the distributions of two DataFrames giving visualisations of: - individual and combined distributions - distribution of non-common values - distribution of non-common values vs. each side Plot distribution as area (fill_between) + mean, median vertical bars. :param df: `pandas.DataFrame` :param arg0: the base dataframe *selector* to perform calculation against. :param *: a number of `pandas.DataFrame` selectors to compare to arg0 :param bins: `int` number of bins for histogram :param xlabel: label for X axis :param ylabel: label for Y axis :param base_fmt: Text format to use for base selector legend. Python .format() syntax. :param arg_fmt: Text format to use for the compared selectors' legend. Python .format() syntax. :return: Figure
[ "Compare", "the", "distributions", "of", "two", "DataFrames", "giving", "visualisations", "of", ":", "-", "individual", "and", "combined", "distributions", "-", "distribution", "of", "non", "-", "common", "values", "-", "distribution", "of", "non", "-", "common", "values", "vs", ".", "each", "side" ]
python
train
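A hypothetical usage, assuming comparedist is importable from padua.visualize and that plain column names work as selectors (column names and parameters here are illustrative):

import numpy as np
import pandas as pd

df = pd.DataFrame({'control': np.random.normal(0.0, 1.0, 500),
                   'treat_a': np.random.normal(0.5, 1.0, 500),
                   'treat_b': np.random.normal(1.0, 2.0, 500)})
fig = comparedist(df, 'control', 'treat_a', 'treat_b',
                  bins=40, xlabel='Intensity', ylabel='Count')
fig.savefig('distributions.png')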
ladybug-tools/ladybug
ladybug/_datacollectionbase.py
https://github.com/ladybug-tools/ladybug/blob/c08b7308077a48d5612f644943f92d5b5dade583/ladybug/_datacollectionbase.py#L137-L140
def convert_to_si(self): """Convert the Data Collection to SI units.""" self._values, self._header._unit = self._header.data_type.to_si( self._values, self._header.unit)
[ "def", "convert_to_si", "(", "self", ")", ":", "self", ".", "_values", ",", "self", ".", "_header", ".", "_unit", "=", "self", ".", "_header", ".", "data_type", ".", "to_si", "(", "self", ".", "_values", ",", "self", ".", "_header", ".", "unit", ")" ]
Convert the Data Collection to SI units.
[ "Convert", "the", "Data", "Collection", "to", "SI", "units", "." ]
python
train
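The division of labour, with the data type converting values and unit together, can be sketched standalone (the Fahrenheit type here is illustrative, not ladybug's):

class FahrenheitType:
    def to_si(self, values, unit):
        # Convert the values and report the new SI unit in one step.
        return [(v - 32.0) * 5.0 / 9.0 for v in values], 'C'

values, unit = [32.0, 212.0], 'F'
values, unit = FahrenheitType().to_si(values, unit)
print(values, unit)  # [0.0, 100.0] C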
brutasse/graphite-api
graphite_api/functions.py
https://github.com/brutasse/graphite-api/blob/0886b7adcf985a1e8bcb084f6dd1dc166a3f3dff/graphite_api/functions.py#L3857-L3963
def summarize(requestContext, seriesList, intervalString, func='sum', alignToFrom=False): """ Summarize the data into interval buckets of a certain size. By default, the contents of each interval bucket are summed together. This is useful for counters where each increment represents a discrete event and retrieving a "per X" value requires summing all the events in that interval. Specifying 'avg' instead will return the mean for each bucket, which can be more useful when the value is a gauge that represents a certain value in time. 'max', 'min' or 'last' can also be specified. By default, buckets are calculated by rounding to the nearest interval. This works well for intervals smaller than a day. For example, 22:32 will end up in the bucket 22:00-23:00 when the interval=1hour. Passing alignToFrom=true will instead create buckets starting at the from time. In this case, the bucket for 22:32 depends on the from time. If from=6:30 then the 1hour bucket for 22:32 is 22:30-23:30. Example:: # total errors per hour &target=summarize(counter.errors, "1hour") # new users per week &target=summarize(nonNegativeDerivative(gauge.num_users), "1week") # average queue size per hour &target=summarize(queue.size, "1hour", "avg") # maximum queue size during each hour &target=summarize(queue.size, "1hour", "max") # 2010 Q1-4 &target=summarize(metric, "13week", "avg", true)&from=midnight+20100101 """ results = [] delta = parseTimeOffset(intervalString) interval = to_seconds(delta) for series in seriesList: buckets = {} timestamps = range(int(series.start), int(series.end) + 1, int(series.step)) datapoints = zip_longest(timestamps, series) for timestamp, value in datapoints: if timestamp is None: continue if alignToFrom: bucketInterval = int((timestamp - series.start) / interval) else: bucketInterval = timestamp - (timestamp % interval) if bucketInterval not in buckets: buckets[bucketInterval] = [] if value is not None: buckets[bucketInterval].append(value) if alignToFrom: newStart = series.start newEnd = series.end else: newStart = series.start - (series.start % interval) newEnd = series.end - (series.end % interval) + interval newValues = [] for timestamp in range(newStart, newEnd, interval): if alignToFrom: newEnd = timestamp bucketInterval = int((timestamp - series.start) / interval) else: bucketInterval = timestamp - (timestamp % interval) bucket = buckets.get(bucketInterval, []) if bucket: if func == 'avg': newValues.append(float(sum(bucket)) / float(len(bucket))) elif func == 'last': newValues.append(bucket[len(bucket)-1]) elif func == 'max': newValues.append(max(bucket)) elif func == 'min': newValues.append(min(bucket)) else: newValues.append(sum(bucket)) else: newValues.append(None) if alignToFrom: newEnd += interval newName = "summarize(%s, \"%s\", \"%s\"%s)" % ( series.name, intervalString, func, alignToFrom and ", true" or "") newSeries = TimeSeries(newName, newStart, newEnd, interval, newValues) newSeries.pathExpression = newName results.append(newSeries) return results
[ "def", "summarize", "(", "requestContext", ",", "seriesList", ",", "intervalString", ",", "func", "=", "'sum'", ",", "alignToFrom", "=", "False", ")", ":", "results", "=", "[", "]", "delta", "=", "parseTimeOffset", "(", "intervalString", ")", "interval", "=", "to_seconds", "(", "delta", ")", "for", "series", "in", "seriesList", ":", "buckets", "=", "{", "}", "timestamps", "=", "range", "(", "int", "(", "series", ".", "start", ")", ",", "int", "(", "series", ".", "end", ")", "+", "1", ",", "int", "(", "series", ".", "step", ")", ")", "datapoints", "=", "zip_longest", "(", "timestamps", ",", "series", ")", "for", "timestamp", ",", "value", "in", "datapoints", ":", "if", "timestamp", "is", "None", ":", "continue", "if", "alignToFrom", ":", "bucketInterval", "=", "int", "(", "(", "timestamp", "-", "series", ".", "start", ")", "/", "interval", ")", "else", ":", "bucketInterval", "=", "timestamp", "-", "(", "timestamp", "%", "interval", ")", "if", "bucketInterval", "not", "in", "buckets", ":", "buckets", "[", "bucketInterval", "]", "=", "[", "]", "if", "value", "is", "not", "None", ":", "buckets", "[", "bucketInterval", "]", ".", "append", "(", "value", ")", "if", "alignToFrom", ":", "newStart", "=", "series", ".", "start", "newEnd", "=", "series", ".", "end", "else", ":", "newStart", "=", "series", ".", "start", "-", "(", "series", ".", "start", "%", "interval", ")", "newEnd", "=", "series", ".", "end", "-", "(", "series", ".", "end", "%", "interval", ")", "+", "interval", "newValues", "=", "[", "]", "for", "timestamp", "in", "range", "(", "newStart", ",", "newEnd", ",", "interval", ")", ":", "if", "alignToFrom", ":", "newEnd", "=", "timestamp", "bucketInterval", "=", "int", "(", "(", "timestamp", "-", "series", ".", "start", ")", "/", "interval", ")", "else", ":", "bucketInterval", "=", "timestamp", "-", "(", "timestamp", "%", "interval", ")", "bucket", "=", "buckets", ".", "get", "(", "bucketInterval", ",", "[", "]", ")", "if", "bucket", ":", "if", "func", "==", "'avg'", ":", "newValues", ".", "append", "(", "float", "(", "sum", "(", "bucket", ")", ")", "/", "float", "(", "len", "(", "bucket", ")", ")", ")", "elif", "func", "==", "'last'", ":", "newValues", ".", "append", "(", "bucket", "[", "len", "(", "bucket", ")", "-", "1", "]", ")", "elif", "func", "==", "'max'", ":", "newValues", ".", "append", "(", "max", "(", "bucket", ")", ")", "elif", "func", "==", "'min'", ":", "newValues", ".", "append", "(", "min", "(", "bucket", ")", ")", "else", ":", "newValues", ".", "append", "(", "sum", "(", "bucket", ")", ")", "else", ":", "newValues", ".", "append", "(", "None", ")", "if", "alignToFrom", ":", "newEnd", "+=", "interval", "newName", "=", "\"summarize(%s, \\\"%s\\\", \\\"%s\\\"%s)\"", "%", "(", "series", ".", "name", ",", "intervalString", ",", "func", ",", "alignToFrom", "and", "\", true\"", "or", "\"\"", ")", "newSeries", "=", "TimeSeries", "(", "newName", ",", "newStart", ",", "newEnd", ",", "interval", ",", "newValues", ")", "newSeries", ".", "pathExpression", "=", "newName", "results", ".", "append", "(", "newSeries", ")", "return", "results" ]
Summarize the data into interval buckets of a certain size. By default, the contents of each interval bucket are summed together. This is useful for counters where each increment represents a discrete event and retrieving a "per X" value requires summing all the events in that interval. Specifying 'avg' instead will return the mean for each bucket, which can be more useful when the value is a gauge that represents a certain value in time. 'max', 'min' or 'last' can also be specified. By default, buckets are calculated by rounding to the nearest interval. This works well for intervals smaller than a day. For example, 22:32 will end up in the bucket 22:00-23:00 when the interval=1hour. Passing alignToFrom=true will instead create buckets starting at the from time. In this case, the bucket for 22:32 depends on the from time. If from=6:30 then the 1hour bucket for 22:32 is 22:30-23:30. Example:: # total errors per hour &target=summarize(counter.errors, "1hour") # new users per week &target=summarize(nonNegativeDerivative(gauge.num_users), "1week") # average queue size per hour &target=summarize(queue.size, "1hour", "avg") # maximum queue size during each hour &target=summarize(queue.size, "1hour", "max") # 2010 Q1-4 &target=summarize(metric, "13week", "avg", true)&from=midnight+20100101
[ "Summarize", "the", "data", "into", "interval", "buckets", "of", "a", "certain", "size", "." ]
python
train
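The default (non-aligned) bucketing reduces to flooring timestamps to the interval and folding each bucket with the chosen function. A standalone sketch:

def summarize_sketch(points, interval, func=sum):
    buckets = {}
    for ts, value in points:
        buckets.setdefault(ts - ts % interval, []).append(value)
    return {b: func(vals) for b, vals in sorted(buckets.items())}

points = [(0, 1), (30, 2), (60, 3), (90, 4), (150, 5)]
print(summarize_sketch(points, 60))       # {0: 3, 60: 7, 120: 5}
print(summarize_sketch(points, 60, max))  # {0: 2, 60: 4, 120: 5}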
wheeler-microfluidics/dmf-control-board-firmware
dmf_control_board_firmware/calibrate/impedance_benchmarks.py
https://github.com/wheeler-microfluidics/dmf-control-board-firmware/blob/1cd8cc9a148d530f9a11f634f2dbfe73f08aa27c/dmf_control_board_firmware/calibrate/impedance_benchmarks.py#L207-L250
def plot_stat_summary(df, fig=None): ''' Plot stats grouped by test capacitor load _and_ frequency. In other words, we calculate the mean of all samples in the data frame for each test capacitance and frequency pairing, plotting the following stats: - Root mean squared error - Coefficient of variation - Bias ## [Coefficient of variation][1] ## > In probability theory and statistics, the coefficient of > variation (CV) is a normalized measure of dispersion of a > probability distribution or frequency distribution. It is defined > as the ratio of the standard deviation to the mean. [1]: http://en.wikipedia.org/wiki/Coefficient_of_variation ''' if fig is None: fig = plt.figure(figsize=(8, 8)) # Define a subplot layout, 3 rows, 2 columns grid = GridSpec(3, 2) stats = calculate_stats(df, groupby=['test_capacitor', 'frequency']).dropna() for i, stat in enumerate(['RMSE %', 'cv %', 'bias %']): axis = fig.add_subplot(grid[i, 0]) axis.set_title(stat) # Plot a colormap to show how the statistical value changes # according to frequency/capacitance pairs. plot_colormap(stats, stat, axis=axis, fig=fig) axis = fig.add_subplot(grid[i, 1]) axis.set_title(stat) # Plot a histogram to show the distribution of statistical # values across all frequency/capacitance pairs. try: axis.hist(stats[stat].values, bins=50) except AttributeError: print stats[stat].describe() fig.tight_layout()
[ "def", "plot_stat_summary", "(", "df", ",", "fig", "=", "None", ")", ":", "if", "fig", "is", "None", ":", "fig", "=", "plt", ".", "figure", "(", "figsize", "=", "(", "8", ",", "8", ")", ")", "# Define a subplot layout, 3 rows, 2 columns", "grid", "=", "GridSpec", "(", "3", ",", "2", ")", "stats", "=", "calculate_stats", "(", "df", ",", "groupby", "=", "[", "'test_capacitor'", ",", "'frequency'", "]", ")", ".", "dropna", "(", ")", "for", "i", ",", "stat", "in", "enumerate", "(", "[", "'RMSE %'", ",", "'cv %'", ",", "'bias %'", "]", ")", ":", "axis", "=", "fig", ".", "add_subplot", "(", "grid", "[", "i", ",", "0", "]", ")", "axis", ".", "set_title", "(", "stat", ")", "# Plot a colormap to show how the statistical value changes", "# according to frequency/capacitance pairs.", "plot_colormap", "(", "stats", ",", "stat", ",", "axis", "=", "axis", ",", "fig", "=", "fig", ")", "axis", "=", "fig", ".", "add_subplot", "(", "grid", "[", "i", ",", "1", "]", ")", "axis", ".", "set_title", "(", "stat", ")", "# Plot a histogram to show the distribution of statistical", "# values across all frequency/capacitance pairs.", "try", ":", "axis", ".", "hist", "(", "stats", "[", "stat", "]", ".", "values", ",", "bins", "=", "50", ")", "except", "AttributeError", ":", "print", "stats", "[", "stat", "]", ".", "describe", "(", ")", "fig", ".", "tight_layout", "(", ")" ]
Plot stats grouped by test capacitor load _and_ frequency. In other words, we calculate the mean of all samples in the data frame for each test capacitance and frequency pairing, plotting the following stats: - Root mean squared error - Coefficient of variation - Bias ## [Coefficient of variation][1] ## > In probability theory and statistics, the coefficient of > variation (CV) is a normalized measure of dispersion of a > probability distribution or frequency distribution. It is defined > as the ratio of the standard deviation to the mean. [1]: http://en.wikipedia.org/wiki/Coefficient_of_variation
[ "Plot", "stats", "grouped", "by", "test", "capacitor", "load", "_and_", "frequency", "." ]
python
train
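A hypothetical usage, assuming the module is importable and the CSV (a made-up filename) holds one row per sample with test_capacitor and frequency columns; since the function draws into the figure rather than returning it, the figure is created by the caller:

import matplotlib.pyplot as plt
import pandas as pd

df = pd.read_csv('impedance_samples.csv')
fig = plt.figure(figsize=(8, 8))
plot_stat_summary(df, fig=fig)
fig.savefig('stat_summary.png')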
Kozea/cairocffi
cairocffi/fonts.py
https://github.com/Kozea/cairocffi/blob/450853add7e32eea20985b6aa5f54d9cb3cd04fe/cairocffi/fonts.py#L404-L409
def copy(self): """Return a new :class:`FontOptions` with the same values.""" cls = type(self) other = object.__new__(cls) cls._init_pointer(other, cairo.cairo_font_options_copy(self._pointer)) return other
[ "def", "copy", "(", "self", ")", ":", "cls", "=", "type", "(", "self", ")", "other", "=", "object", ".", "__new__", "(", "cls", ")", "cls", ".", "_init_pointer", "(", "other", ",", "cairo", ".", "cairo_font_options_copy", "(", "self", ".", "_pointer", ")", ")", "return", "other" ]
Return a new :class:`FontOptions` with the same values.
[ "Return", "a", "new", ":", "class", ":", "FontOptions", "with", "the", "same", "values", "." ]
python
train
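A usage check, assuming cairocffi's usual getters, setters and constants: the copy is independent, so later changes to the original do not leak into it:

import cairocffi

options = cairocffi.FontOptions()
options.set_hint_style(cairocffi.HINT_STYLE_FULL)
clone = options.copy()
options.set_hint_style(cairocffi.HINT_STYLE_NONE)
print(clone.get_hint_style() == cairocffi.HINT_STYLE_FULL)  # True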
mgedmin/check-manifest
check_manifest.py
https://github.com/mgedmin/check-manifest/blob/7f787e8272f56c5750670bfb3223509e0df72708/check_manifest.py#L138-L160
def run(command, encoding=None, decode=True, cwd=None): """Run a command [cmd, arg1, arg2, ...]. Returns the output (stdout + stderr). Raises CommandFailed in cases of error. """ if not encoding: encoding = locale.getpreferredencoding() try: with open(os.devnull, 'rb') as devnull: pipe = subprocess.Popen(command, stdin=devnull, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, cwd=cwd) except OSError as e: raise Failure("could not run %s: %s" % (command, e)) output = pipe.communicate()[0] if decode: output = output.decode(encoding) status = pipe.wait() if status != 0: raise CommandFailed(command, status, output) return output
[ "def", "run", "(", "command", ",", "encoding", "=", "None", ",", "decode", "=", "True", ",", "cwd", "=", "None", ")", ":", "if", "not", "encoding", ":", "encoding", "=", "locale", ".", "getpreferredencoding", "(", ")", "try", ":", "with", "open", "(", "os", ".", "devnull", ",", "'rb'", ")", "as", "devnull", ":", "pipe", "=", "subprocess", ".", "Popen", "(", "command", ",", "stdin", "=", "devnull", ",", "stdout", "=", "subprocess", ".", "PIPE", ",", "stderr", "=", "subprocess", ".", "STDOUT", ",", "cwd", "=", "cwd", ")", "except", "OSError", "as", "e", ":", "raise", "Failure", "(", "\"could not run %s: %s\"", "%", "(", "command", ",", "e", ")", ")", "output", "=", "pipe", ".", "communicate", "(", ")", "[", "0", "]", "if", "decode", ":", "output", "=", "output", ".", "decode", "(", "encoding", ")", "status", "=", "pipe", ".", "wait", "(", ")", "if", "status", "!=", "0", ":", "raise", "CommandFailed", "(", "command", ",", "status", ",", "output", ")", "return", "output" ]
Run a command [cmd, arg1, arg2, ...]. Returns the output (stdout + stderr). Raises CommandFailed in cases of error.
[ "Run", "a", "command", "[", "cmd", "arg1", "arg2", "...", "]", "." ]
python
train
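Assuming run and CommandFailed are importable as above, a usage check against the current interpreter:

import sys

print(run([sys.executable, '-c', 'print("hello")']), end='')  # hello
try:
    run([sys.executable, '-c', 'import sys; sys.exit(2)'])
except CommandFailed as exc:
    print('failed:', exc)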
coleifer/walrus
walrus/containers.py
https://github.com/coleifer/walrus/blob/82bf15a6613487b5b5fefeb488f186d7e0106547/walrus/containers.py#L1188-L1196
def trim(self, count, approximate=True): """ Trim the stream to the given "count" of messages, discarding the oldest messages first. :param count: maximum size of stream :param approximate: allow size to be approximate """ return self.database.xtrim(self.key, count, approximate)
[ "def", "trim", "(", "self", ",", "count", ",", "approximate", "=", "True", ")", ":", "return", "self", ".", "database", ".", "xtrim", "(", "self", ".", "key", ",", "count", ",", "approximate", ")" ]
Trim the stream to the given "count" of messages, discarding the oldest messages first. :param count: maximum size of stream :param approximate: allow size to be approximate
[ "Trim", "the", "stream", "to", "the", "given", "count", "of", "messages", "discarding", "the", "oldest", "messages", "first", "." ]
python
train
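A hypothetical usage against a local Redis server, assuming walrus's usual Database/Stream API:

from walrus import Database

db = Database()                    # localhost:6379 by default
stream = db.Stream('events')
for i in range(5):
    stream.add({'n': str(i)})
stream.trim(1000)                  # approximate trim: XTRIM MAXLEN ~ 1000
print(len(stream))                 # still 5: well under the cap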
475Cumulus/TBone
tbone/resources/mongo.py
https://github.com/475Cumulus/TBone/blob/5a6672d8bbac449a0ab9e99560609f671fe84d4d/tbone/resources/mongo.py#L169-L181
async def update(self, **kwargs): ''' Corresponds to PUT request with a resource identifier, updating a single document in the database ''' try: self.data[self.pk] = self.pk_type(kwargs['pk']) updated_obj = await self._meta.object_class().update(self.db, data=self.data) if updated_obj is None: raise NotFound('Object matching the given {} was not found'.format(self.pk)) return await updated_obj.serialize() except Exception as ex: logger.exception(ex) raise BadRequest(ex)
[ "async", "def", "update", "(", "self", ",", "*", "*", "kwargs", ")", ":", "try", ":", "self", ".", "data", "[", "self", ".", "pk", "]", "=", "self", ".", "pk_type", "(", "kwargs", "[", "'pk'", "]", ")", "updated_obj", "=", "await", "self", ".", "_meta", ".", "object_class", "(", ")", ".", "update", "(", "self", ".", "db", ",", "data", "=", "self", ".", "data", ")", "if", "updated_obj", "is", "None", ":", "raise", "NotFound", "(", "'Object matching the given {} was not found'", ".", "format", "(", "self", ".", "pk", ")", ")", "return", "await", "updated_obj", ".", "serialize", "(", ")", "except", "Exception", "as", "ex", ":", "logger", ".", "exception", "(", "ex", ")", "raise", "BadRequest", "(", "ex", ")" ]
Corresponds to PUT request with a resource identifier, updating a single document in the database
[ "Corresponds", "to", "PUT", "request", "with", "a", "resource", "identifier", "updating", "a", "single", "document", "in", "the", "database" ]
python
train
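The update-or-404 shape can be sketched without Mongo, with an in-memory dict standing in for the collection (names hypothetical):

import asyncio

store = {'42': {'_id': '42', 'title': 'old'}}

async def update_sketch(pk, data):
    if pk not in store:
        raise LookupError('Object matching the given pk was not found')
    store[pk].update(data)
    return store[pk]

print(asyncio.run(update_sketch('42', {'title': 'new'})))
# {'_id': '42', 'title': 'new'}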
ConsenSys/mythril-classic
mythril/laser/smt/bool.py
https://github.com/ConsenSys/mythril-classic/blob/27af71c34b2ce94f4fae5613ec457f93df1a8f56/mythril/laser/smt/bool.py#L84-L90
def And(*args: Union[Bool, bool]) -> Bool: """Create an And expression.""" union = [] args_list = [arg if isinstance(arg, Bool) else Bool(arg) for arg in args] for arg in args_list: union.append(arg.annotations) return Bool(z3.And([a.raw for a in args_list]), union)
[ "def", "And", "(", "*", "args", ":", "Union", "[", "Bool", ",", "bool", "]", ")", "->", "Bool", ":", "union", "=", "[", "]", "args_list", "=", "[", "arg", "if", "isinstance", "(", "arg", ",", "Bool", ")", "else", "Bool", "(", "arg", ")", "for", "arg", "in", "args", "]", "for", "arg", "in", "args_list", ":", "union", ".", "append", "(", "arg", ".", "annotations", ")", "return", "Bool", "(", "z3", ".", "And", "(", "[", "a", ".", "raw", "for", "a", "in", "args_list", "]", ")", ",", "union", ")" ]
Create an And expression.
[ "Create", "an", "And", "expression", "." ]
python
train
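The wrapping pattern, lifting plain bools and collecting annotations before AND-ing the raw z3 terms, can be sketched directly against z3 (the wrapper class here is a stand-in, not mythril's Bool):

import z3

class B:
    def __init__(self, raw, annotations=()):
        self.raw = raw if isinstance(raw, z3.BoolRef) else z3.BoolVal(raw)
        self.annotations = list(annotations)

def and_sketch(*args):
    args = [a if isinstance(a, B) else B(a) for a in args]
    union = [a.annotations for a in args]       # one entry per operand
    return B(z3.And([a.raw for a in args]), union)

x = z3.Bool('x')
print(and_sketch(B(x, ['from-tx']), True).raw)  # And(x, True)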
PythonCharmers/python-future
src/future/backports/datetime.py
https://github.com/PythonCharmers/python-future/blob/c423752879acc05eebc29b0bb9909327bd5c7308/src/future/backports/datetime.py#L1208-L1215
def utcoffset(self): """Return the timezone offset in minutes east of UTC (negative west of UTC).""" if self._tzinfo is None: return None offset = self._tzinfo.utcoffset(None) _check_utc_offset("utcoffset", offset) return offset
[ "def", "utcoffset", "(", "self", ")", ":", "if", "self", ".", "_tzinfo", "is", "None", ":", "return", "None", "offset", "=", "self", ".", "_tzinfo", ".", "utcoffset", "(", "None", ")", "_check_utc_offset", "(", "\"utcoffset\"", ",", "offset", ")", "return", "offset" ]
Return the timezone offset in minutes east of UTC (negative west of UTC).
[ "Return", "the", "timezone", "offset", "in", "minutes", "east", "of", "UTC", "(", "negative", "west", "of", "UTC", ")", "." ]
python
train
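The backport mirrors the stdlib, so the behaviour can be checked with datetime itself: a naive time has no offset, an aware one reports its tzinfo's offset as a timedelta:

from datetime import time, timedelta, timezone

print(time(12, 0).utcoffset())  # None
print(time(12, 0, tzinfo=timezone(timedelta(hours=-5))).utcoffset())
# -1 day, 19:00:00  (i.e. UTC-5)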
saltstack/salt
salt/client/api.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/client/api.py#L236-L293
def create_token(self, creds): ''' Create token with creds. Token authorizes salt access if successful authentication with the credentials in creds. creds format is as follows: { 'username': 'namestring', 'password': 'passwordstring', 'eauth': 'eauthtypestring', } examples of valid eauth type strings: 'pam' or 'ldap' Returns dictionary of token information with the following format: { 'token': 'tokenstring', 'start': starttimeinfractionalseconds, 'expire': expiretimeinfractionalseconds, 'name': 'usernamestring', 'user': 'usernamestring', 'username': 'usernamestring', 'eauth': 'eauthtypestring', 'perms: permslistofstrings, } The perms list provides those parts of salt for which the user is authorised to execute. example perms list: [ "grains.*", "status.*", "sys.*", "test.*" ] ''' try: tokenage = self.resolver.mk_token(creds) except Exception as ex: raise EauthAuthenticationError( "Authentication failed with {0}.".format(repr(ex))) if 'token' not in tokenage: raise EauthAuthenticationError("Authentication failed with provided credentials.") # Grab eauth config for the current backend for the current user tokenage_eauth = self.opts['external_auth'][tokenage['eauth']] if tokenage['name'] in tokenage_eauth: tokenage['perms'] = tokenage_eauth[tokenage['name']] else: tokenage['perms'] = tokenage_eauth['*'] tokenage['user'] = tokenage['name'] tokenage['username'] = tokenage['name'] return tokenage
[ "def", "create_token", "(", "self", ",", "creds", ")", ":", "try", ":", "tokenage", "=", "self", ".", "resolver", ".", "mk_token", "(", "creds", ")", "except", "Exception", "as", "ex", ":", "raise", "EauthAuthenticationError", "(", "\"Authentication failed with {0}.\"", ".", "format", "(", "repr", "(", "ex", ")", ")", ")", "if", "'token'", "not", "in", "tokenage", ":", "raise", "EauthAuthenticationError", "(", "\"Authentication failed with provided credentials.\"", ")", "# Grab eauth config for the current backend for the current user", "tokenage_eauth", "=", "self", ".", "opts", "[", "'external_auth'", "]", "[", "tokenage", "[", "'eauth'", "]", "]", "if", "tokenage", "[", "'name'", "]", "in", "tokenage_eauth", ":", "tokenage", "[", "'perms'", "]", "=", "tokenage_eauth", "[", "tokenage", "[", "'name'", "]", "]", "else", ":", "tokenage", "[", "'perms'", "]", "=", "tokenage_eauth", "[", "'*'", "]", "tokenage", "[", "'user'", "]", "=", "tokenage", "[", "'name'", "]", "tokenage", "[", "'username'", "]", "=", "tokenage", "[", "'name'", "]", "return", "tokenage" ]
Create token with creds. Token authorizes salt access if successful authentication with the credentials in creds. creds format is as follows: { 'username': 'namestring', 'password': 'passwordstring', 'eauth': 'eauthtypestring', } examples of valid eauth type strings: 'pam' or 'ldap' Returns dictionary of token information with the following format: { 'token': 'tokenstring', 'start': starttimeinfractionalseconds, 'expire': expiretimeinfractionalseconds, 'name': 'usernamestring', 'user': 'usernamestring', 'username': 'usernamestring', 'eauth': 'eauthtypestring', 'perms': permslistofstrings, } The perms list enumerates those parts of salt which the user is authorised to execute. example perms list: [ "grains.*", "status.*", "sys.*", "test.*" ]
[ "Create", "token", "with", "creds", ".", "Token", "authorizes", "salt", "access", "if", "successful", "authentication", "with", "the", "credentials", "in", "creds", ".", "creds", "format", "is", "as", "follows", ":" ]
python
train
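The perms resolution at the end, where the user's own entry in the eauth config wins over the '*' catch-all, can be sketched standalone (config values are made up):

external_auth = {'pam': {'alice': ['test.*'], '*': ['status.*']}}

def resolve_perms(eauth, name):
    backend = external_auth[eauth]
    return backend[name] if name in backend else backend['*']

print(resolve_perms('pam', 'alice'))  # ['test.*']
print(resolve_perms('pam', 'bob'))    # ['status.*']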
sbg/sevenbridges-python
sevenbridges/models/volume.py
https://github.com/sbg/sevenbridges-python/blob/f62640d1018d959f0b686f2dbe5e183085336607/sevenbridges/models/volume.py#L167-L212
def create_oss_volume(cls, name, bucket, endpoint, access_key_id, secret_access_key, access_mode, description=None, prefix=None, properties=None, api=None): """ Create oss volume. :param name: Volume name. :param bucket: Referenced bucket. :param access_key_id: Access key identifier. :param secret_access_key: Secret access key. :param access_mode: Access Mode. :param endpoint: Volume Endpoint. :param description: Volume description. :param prefix: Volume prefix. :param properties: Volume properties. :param api: Api instance. :return: Volume object. """ service = { 'type': VolumeType.OSS, 'bucket': bucket, 'endpoint': endpoint, 'credentials': { 'access_key_id': access_key_id, 'secret_access_key': secret_access_key } } if prefix: service['prefix'] = prefix if properties: service['properties'] = properties data = { 'name': name, 'service': service, 'access_mode': access_mode } if description: data['description'] = description api = api or cls._API extra = { 'resource': cls.__name__, 'query': data } logger.info('Creating oss volume', extra=extra) response = api.post(url=cls._URL['query'], data=data).json() return Volume(api=api, **response)
[ "def", "create_oss_volume", "(", "cls", ",", "name", ",", "bucket", ",", "endpoint", ",", "access_key_id", ",", "secret_access_key", ",", "access_mode", ",", "description", "=", "None", ",", "prefix", "=", "None", ",", "properties", "=", "None", ",", "api", "=", "None", ")", ":", "service", "=", "{", "'type'", ":", "VolumeType", ".", "OSS", ",", "'bucket'", ":", "bucket", ",", "'endpoint'", ":", "endpoint", ",", "'credentials'", ":", "{", "'access_key_id'", ":", "access_key_id", ",", "'secret_access_key'", ":", "secret_access_key", "}", "}", "if", "prefix", ":", "service", "[", "'prefix'", "]", "=", "prefix", "if", "properties", ":", "service", "[", "'properties'", "]", "=", "properties", "data", "=", "{", "'name'", ":", "name", ",", "'service'", ":", "service", ",", "'access_mode'", ":", "access_mode", "}", "if", "description", ":", "data", "[", "'description'", "]", "=", "description", "api", "=", "api", "or", "cls", ".", "_API", "extra", "=", "{", "'resource'", ":", "cls", ".", "__name__", ",", "'query'", ":", "data", "}", "logger", ".", "info", "(", "'Creating oss volume'", ",", "extra", "=", "extra", ")", "response", "=", "api", ".", "post", "(", "url", "=", "cls", ".", "_URL", "[", "'query'", "]", ",", "data", "=", "data", ")", ".", "json", "(", ")", "return", "Volume", "(", "api", "=", "api", ",", "*", "*", "response", ")" ]
Create oss volume. :param name: Volume name. :param bucket: Referenced bucket. :param access_key_id: Access key identifier. :param secret_access_key: Secret access key. :param access_mode: Access Mode. :param endpoint: Volume Endpoint. :param description: Volume description. :param prefix: Volume prefix. :param properties: Volume properties. :param api: Api instance. :return: Volume object.
[ "Create", "oss", "volume", ".", ":", "param", "name", ":", "Volume", "name", ".", ":", "param", "bucket", ":", "Referenced", "bucket", ".", ":", "param", "access_key_id", ":", "Access", "key", "identifier", ".", ":", "param", "secret_access_key", ":", "Secret", "access", "key", ".", ":", "param", "access_mode", ":", "Access", "Mode", ".", ":", "param", "endpoint", ":", "Volume", "Endpoint", ".", ":", "param", "description", ":", "Volume", "description", ".", ":", "param", "prefix", ":", "Volume", "prefix", ".", ":", "param", "properties", ":", "Volume", "properties", ".", ":", "param", "api", ":", "Api", "instance", ".", ":", "return", ":", "Volume", "object", "." ]
python
train
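A hypothetical call, assuming `api` is an authenticated sevenbridges Api session; every credential and name below is a placeholder:

volume = Volume.create_oss_volume(
    name='my-oss-volume', bucket='my-bucket',
    endpoint='oss-cn-hangzhou.aliyuncs.com',
    access_key_id='AKID-PLACEHOLDER', secret_access_key='SECRET-PLACEHOLDER',
    access_mode='RO', prefix='datasets/', description='read-only OSS mount',
    api=api)
print(volume.id)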
spyder-ide/spyder
spyder/plugins/variableexplorer/widgets/dataframeeditor.py
https://github.com/spyder-ide/spyder/blob/f76836ce1b924bcc4efd3f74f2960d26a4e528e0/spyder/plugins/variableexplorer/widgets/dataframeeditor.py#L269-L295
def get_bgcolor(self, index): """Background color depending on value.""" column = index.column() if not self.bgcolor_enabled: return value = self.get_value(index.row(), column) if self.max_min_col[column] is None or isna(value): color = QColor(BACKGROUND_NONNUMBER_COLOR) if is_text_string(value): color.setAlphaF(BACKGROUND_STRING_ALPHA) else: color.setAlphaF(BACKGROUND_MISC_ALPHA) else: if isinstance(value, COMPLEX_NUMBER_TYPES): color_func = abs else: color_func = float vmax, vmin = self.return_max(self.max_min_col, column) hue = (BACKGROUND_NUMBER_MINHUE + BACKGROUND_NUMBER_HUERANGE * (vmax - color_func(value)) / (vmax - vmin)) hue = float(abs(hue)) if hue > 1: hue = 1 color = QColor.fromHsvF(hue, BACKGROUND_NUMBER_SATURATION, BACKGROUND_NUMBER_VALUE, BACKGROUND_NUMBER_ALPHA) return color
[ "def", "get_bgcolor", "(", "self", ",", "index", ")", ":", "column", "=", "index", ".", "column", "(", ")", "if", "not", "self", ".", "bgcolor_enabled", ":", "return", "value", "=", "self", ".", "get_value", "(", "index", ".", "row", "(", ")", ",", "column", ")", "if", "self", ".", "max_min_col", "[", "column", "]", "is", "None", "or", "isna", "(", "value", ")", ":", "color", "=", "QColor", "(", "BACKGROUND_NONNUMBER_COLOR", ")", "if", "is_text_string", "(", "value", ")", ":", "color", ".", "setAlphaF", "(", "BACKGROUND_STRING_ALPHA", ")", "else", ":", "color", ".", "setAlphaF", "(", "BACKGROUND_MISC_ALPHA", ")", "else", ":", "if", "isinstance", "(", "value", ",", "COMPLEX_NUMBER_TYPES", ")", ":", "color_func", "=", "abs", "else", ":", "color_func", "=", "float", "vmax", ",", "vmin", "=", "self", ".", "return_max", "(", "self", ".", "max_min_col", ",", "column", ")", "hue", "=", "(", "BACKGROUND_NUMBER_MINHUE", "+", "BACKGROUND_NUMBER_HUERANGE", "*", "(", "vmax", "-", "color_func", "(", "value", ")", ")", "/", "(", "vmax", "-", "vmin", ")", ")", "hue", "=", "float", "(", "abs", "(", "hue", ")", ")", "if", "hue", ">", "1", ":", "hue", "=", "1", "color", "=", "QColor", ".", "fromHsvF", "(", "hue", ",", "BACKGROUND_NUMBER_SATURATION", ",", "BACKGROUND_NUMBER_VALUE", ",", "BACKGROUND_NUMBER_ALPHA", ")", "return", "color" ]
Background color depending on value.
[ "Background", "color", "depending", "on", "value", "." ]
python
train
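The numeric branch of get_bgcolor is a linear map from cell value to hue; here is a standalone sketch of that mapping, with made-up stand-ins for Spyder's BACKGROUND_NUMBER_* constants:

MINHUE, HUERANGE = 0.66, 0.33  # illustrative stand-ins, not Spyder's real constants

def value_to_hue(value, vmin, vmax):
    # vmax maps to MINHUE, vmin maps to MINHUE + HUERANGE, clamped at 1
    hue = MINHUE + HUERANGE * (vmax - float(value)) / (vmax - vmin)
    return min(abs(hue), 1.0)

print(value_to_hue(5, 0, 10))  # midpoint -> 0.825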
tjcsl/cslbot
cslbot/commands/s.py
https://github.com/tjcsl/cslbot/blob/aebe07be47141f61d7c180706bddfb707f19b2b5/cslbot/commands/s.py#L80-L124
def cmd(send, msg, args): """Corrects a previous message. Syntax: {command}/<msg>/<replacement>/<ig|nick> """ if not msg: send("Invalid Syntax.") return char = msg[0] msg = [x.replace(r'\/', '/') for x in re.split(r'(?<!\\)\%s' % char, msg[1:], maxsplit=2)] # fix for people who forget a trailing slash if len(msg) == 2 and args['config']['feature'].getboolean('lazyregex'): msg.append('') # not a valid sed statement. if not msg or len(msg) < 3: send("Invalid Syntax.") return if args['type'] == 'privmsg': send("Don't worry, %s is not a grammar Nazi." % args['botnick']) return string = msg[0] replacement = msg[1] modifiers = get_modifiers(msg[2], args['nick'], args['config']['core']['nickregex']) if modifiers is None: send("Invalid modifiers.") return try: regex = re.compile(string, re.IGNORECASE) if modifiers['ignorecase'] else re.compile(string) log = get_log(args['db'], args['target'], modifiers['nick']) workers = args['handler'].workers result = workers.run_pool(do_replace, [log, args['config']['core'], char, regex, replacement]) try: msg = result.get(5) except multiprocessing.TimeoutError: workers.restart_pool() send("Sed regex timed out.") return if msg: send(msg) else: send("No match found.") except sre_constants.error as ex: raise CommandFailedException(ex)
[ "def", "cmd", "(", "send", ",", "msg", ",", "args", ")", ":", "if", "not", "msg", ":", "send", "(", "\"Invalid Syntax.\"", ")", "return", "char", "=", "msg", "[", "0", "]", "msg", "=", "[", "x", ".", "replace", "(", "r'\\/'", ",", "'/'", ")", "for", "x", "in", "re", ".", "split", "(", "r'(?<!\\\\)\\%s'", "%", "char", ",", "msg", "[", "1", ":", "]", ",", "maxsplit", "=", "2", ")", "]", "# fix for people who forget a trailing slash", "if", "len", "(", "msg", ")", "==", "2", "and", "args", "[", "'config'", "]", "[", "'feature'", "]", ".", "getboolean", "(", "'lazyregex'", ")", ":", "msg", ".", "append", "(", "''", ")", "# not a valid sed statement.", "if", "not", "msg", "or", "len", "(", "msg", ")", "<", "3", ":", "send", "(", "\"Invalid Syntax.\"", ")", "return", "if", "args", "[", "'type'", "]", "==", "'privmsg'", ":", "send", "(", "\"Don't worry, %s is not a grammar Nazi.\"", "%", "args", "[", "'botnick'", "]", ")", "return", "string", "=", "msg", "[", "0", "]", "replacement", "=", "msg", "[", "1", "]", "modifiers", "=", "get_modifiers", "(", "msg", "[", "2", "]", ",", "args", "[", "'nick'", "]", ",", "args", "[", "'config'", "]", "[", "'core'", "]", "[", "'nickregex'", "]", ")", "if", "modifiers", "is", "None", ":", "send", "(", "\"Invalid modifiers.\"", ")", "return", "try", ":", "regex", "=", "re", ".", "compile", "(", "string", ",", "re", ".", "IGNORECASE", ")", "if", "modifiers", "[", "'ignorecase'", "]", "else", "re", ".", "compile", "(", "string", ")", "log", "=", "get_log", "(", "args", "[", "'db'", "]", ",", "args", "[", "'target'", "]", ",", "modifiers", "[", "'nick'", "]", ")", "workers", "=", "args", "[", "'handler'", "]", ".", "workers", "result", "=", "workers", ".", "run_pool", "(", "do_replace", ",", "[", "log", ",", "args", "[", "'config'", "]", "[", "'core'", "]", ",", "char", ",", "regex", ",", "replacement", "]", ")", "try", ":", "msg", "=", "result", ".", "get", "(", "5", ")", "except", "multiprocessing", ".", "TimeoutError", ":", "workers", ".", "restart_pool", "(", ")", "send", "(", "\"Sed regex timed out.\"", ")", "return", "if", "msg", ":", "send", "(", "msg", ")", "else", ":", "send", "(", "\"No match found.\"", ")", "except", "sre_constants", ".", "error", "as", "ex", ":", "raise", "CommandFailedException", "(", "ex", ")" ]
Corrects a previous message. Syntax: {command}/<msg>/<replacement>/<ig|nick>
[ "Corrects", "a", "previous", "message", "." ]
python
train
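The trickiest part of the command above is the delimiter handling; a small, self-contained demonstration of the same split (first character as delimiter, backslash-escaped delimiters preserved, at most three fields):

import re

msg = '/colou\\/r/color/i'  # hypothetical input: s/colou\/r/color/i without the leading s
char = msg[0]
parts = [x.replace(r'\/', '/')
         for x in re.split(r'(?<!\\)\%s' % char, msg[1:], maxsplit=2)]
print(parts)  # ['colou/r', 'color', 'i']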
wbond/oscrypto
oscrypto/_win/asymmetric.py
https://github.com/wbond/oscrypto/blob/af778bf1c88bf6c4a7342f5353b130686a5bbe1c/oscrypto/_win/asymmetric.py#L3054-L3123
def _bcrypt_encrypt(certificate_or_public_key, data, rsa_oaep_padding=False): """ Encrypts a value using an RSA public key via CNG :param certificate_or_public_key: A Certificate or PublicKey instance to encrypt with :param data: A byte string of the data to encrypt :param rsa_oaep_padding: If OAEP padding should be used instead of PKCS#1 v1.5 :raises: ValueError - when any of the parameters contain an invalid value TypeError - when any of the parameters are of the wrong type OSError - when an error is returned by the OS crypto library :return: A byte string of the ciphertext """ flags = BcryptConst.BCRYPT_PAD_PKCS1 if rsa_oaep_padding is True: flags = BcryptConst.BCRYPT_PAD_OAEP padding_info_struct_pointer = struct(bcrypt, 'BCRYPT_OAEP_PADDING_INFO') padding_info_struct = unwrap(padding_info_struct_pointer) # This has to be assigned to a variable to prevent cffi from gc'ing it hash_buffer = buffer_from_unicode(BcryptConst.BCRYPT_SHA1_ALGORITHM) padding_info_struct.pszAlgId = cast(bcrypt, 'wchar_t *', hash_buffer) padding_info_struct.pbLabel = null() padding_info_struct.cbLabel = 0 padding_info = cast(bcrypt, 'void *', padding_info_struct_pointer) else: padding_info = null() out_len = new(bcrypt, 'ULONG *') res = bcrypt.BCryptEncrypt( certificate_or_public_key.key_handle, data, len(data), padding_info, null(), 0, null(), 0, out_len, flags ) handle_error(res) buffer_len = deref(out_len) buffer = buffer_from_bytes(buffer_len) res = bcrypt.BCryptEncrypt( certificate_or_public_key.key_handle, data, len(data), padding_info, null(), 0, buffer, buffer_len, out_len, flags ) handle_error(res) return bytes_from_buffer(buffer, deref(out_len))
[ "def", "_bcrypt_encrypt", "(", "certificate_or_public_key", ",", "data", ",", "rsa_oaep_padding", "=", "False", ")", ":", "flags", "=", "BcryptConst", ".", "BCRYPT_PAD_PKCS1", "if", "rsa_oaep_padding", "is", "True", ":", "flags", "=", "BcryptConst", ".", "BCRYPT_PAD_OAEP", "padding_info_struct_pointer", "=", "struct", "(", "bcrypt", ",", "'BCRYPT_OAEP_PADDING_INFO'", ")", "padding_info_struct", "=", "unwrap", "(", "padding_info_struct_pointer", ")", "# This has to be assigned to a variable to prevent cffi from gc'ing it", "hash_buffer", "=", "buffer_from_unicode", "(", "BcryptConst", ".", "BCRYPT_SHA1_ALGORITHM", ")", "padding_info_struct", ".", "pszAlgId", "=", "cast", "(", "bcrypt", ",", "'wchar_t *'", ",", "hash_buffer", ")", "padding_info_struct", ".", "pbLabel", "=", "null", "(", ")", "padding_info_struct", ".", "cbLabel", "=", "0", "padding_info", "=", "cast", "(", "bcrypt", ",", "'void *'", ",", "padding_info_struct_pointer", ")", "else", ":", "padding_info", "=", "null", "(", ")", "out_len", "=", "new", "(", "bcrypt", ",", "'ULONG *'", ")", "res", "=", "bcrypt", ".", "BCryptEncrypt", "(", "certificate_or_public_key", ".", "key_handle", ",", "data", ",", "len", "(", "data", ")", ",", "padding_info", ",", "null", "(", ")", ",", "0", ",", "null", "(", ")", ",", "0", ",", "out_len", ",", "flags", ")", "handle_error", "(", "res", ")", "buffer_len", "=", "deref", "(", "out_len", ")", "buffer", "=", "buffer_from_bytes", "(", "buffer_len", ")", "res", "=", "bcrypt", ".", "BCryptEncrypt", "(", "certificate_or_public_key", ".", "key_handle", ",", "data", ",", "len", "(", "data", ")", ",", "padding_info", ",", "null", "(", ")", ",", "0", ",", "buffer", ",", "buffer_len", ",", "out_len", ",", "flags", ")", "handle_error", "(", "res", ")", "return", "bytes_from_buffer", "(", "buffer", ",", "deref", "(", "out_len", ")", ")" ]
Encrypts a value using an RSA public key via CNG :param certificate_or_public_key: A Certificate or PublicKey instance to encrypt with :param data: A byte string of the data to encrypt :param rsa_oaep_padding: If OAEP padding should be used instead of PKCS#1 v1.5 :raises: ValueError - when any of the parameters contain an invalid value TypeError - when any of the parameters are of the wrong type OSError - when an error is returned by the OS crypto library :return: A byte string of the ciphertext
[ "Encrypts", "a", "value", "using", "an", "RSA", "public", "key", "via", "CNG" ]
python
valid
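_bcrypt_encrypt is an internal helper; note the usual CNG two-call pattern it follows (one BCryptEncrypt call with a null output buffer to learn the required size, a second to fill it). Callers would normally go through oscrypto's public wrappers instead; a hedged sketch, with a placeholder key path:

from oscrypto import asymmetric

public_key = asymmetric.load_public_key('/path/to/public_key.pem')  # placeholder path
ciphertext = asymmetric.rsa_pkcs1v15_encrypt(public_key, b'secret data')
# or, with OAEP padding (the rsa_oaep_padding=True branch above):
ciphertext = asymmetric.rsa_oaep_encrypt(public_key, b'secret data')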
IS-ENES-Data/esgf-pid
esgfpid/rabbit/nodemanager.py
https://github.com/IS-ENES-Data/esgf-pid/blob/2f4909bb3ff79c0b6ed2932e0dd8b3bb6aec5e41/esgfpid/rabbit/nodemanager.py#L256-L316
def __complete_info_dict(self, node_info_dict, is_open): # Make pika credentials creds = pika.PlainCredentials( node_info_dict['username'], node_info_dict['password'] ) node_info_dict['credentials'] = creds if 'priority' in node_info_dict and node_info_dict['priority'] is not None: node_info_dict['priority'] = str(node_info_dict['priority']) else: node_info_dict['priority'] = DEFAULT_PRIO # Mandatories: host = node_info_dict['host'] credentials = node_info_dict['credentials'] # Optional ones # If not specified, fill in defaults. vhost = "" if 'vhost' in node_info_dict and node_info_dict['vhost'] is not None: vhost = node_info_dict['vhost'] port = 15672 if 'port' in node_info_dict and node_info_dict['port'] is not None: port = node_info_dict['port'] ssl_enabled = False if 'ssl_enabled' in node_info_dict and node_info_dict['ssl_enabled'] is not None: ssl_enabled = node_info_dict['ssl_enabled'] # Get some defaults: socket_timeout = esgfpid.defaults.RABBIT_PIKA_SOCKET_TIMEOUT connection_attempts = esgfpid.defaults.RABBIT_PIKA_CONNECTION_ATTEMPTS retry_delay = esgfpid.defaults.RABBIT_PIKA_CONNECTION_RETRY_DELAY_SECONDS # Make pika connection params # https://pika.readthedocs.org/en/0.9.6/connecting.html params = pika.ConnectionParameters( host=host, ssl=ssl_enabled, port=port, virtual_host=vhost, credentials=credentials, socket_timeout=socket_timeout, connection_attempts=connection_attempts, retry_delay=retry_delay ) node_info_dict['params'] = params # Add some stuff node_info_dict['is_open'] = is_open ''' https://pika.readthedocs.org/en/0.9.6/connecting.html class pika.connection.ConnectionParameters( host=None, port=None, virtual_host=None, credentials=None, channel_max=None, frame_max=None, heartbeat_interval=None, ssl=None, ssl_options=None, connection_attempts=None, retry_delay=None, socket_timeout=None, locale=None, backpressure_detection=None) ''' return node_info_dict
[ "def", "__complete_info_dict", "(", "self", ",", "node_info_dict", ",", "is_open", ")", ":", "# Make pika credentials", "creds", "=", "pika", ".", "PlainCredentials", "(", "node_info_dict", "[", "'username'", "]", ",", "node_info_dict", "[", "'password'", "]", ")", "node_info_dict", "[", "'credentials'", "]", "=", "creds", "if", "'priority'", "in", "node_info_dict", "and", "node_info_dict", "[", "'priority'", "]", "is", "not", "None", ":", "node_info_dict", "[", "'priority'", "]", "=", "str", "(", "node_info_dict", "[", "'priority'", "]", ")", "else", ":", "node_info_dict", "[", "'priority'", "]", "=", "DEFAULT_PRIO", "# Mandatories:", "host", "=", "node_info_dict", "[", "'host'", "]", "credentials", "=", "node_info_dict", "[", "'credentials'", "]", "# Optional ones", "# If not specified, fill in defaults.", "vhost", "=", "\"\"", "if", "'vhost'", "in", "node_info_dict", "and", "node_info_dict", "[", "'vhost'", "]", "is", "not", "None", ":", "vhost", "=", "node_info_dict", "[", "'vhost'", "]", "port", "=", "15672", "if", "'port'", "in", "node_info_dict", "and", "node_info_dict", "[", "'port'", "]", "is", "not", "None", ":", "port", "=", "node_info_dict", "[", "'port'", "]", "ssl_enabled", "=", "False", "if", "'ssl_enabled'", "in", "node_info_dict", "and", "node_info_dict", "[", "'ssl_enabled'", "]", "is", "not", "None", ":", "ssl_enabled", "=", "node_info_dict", "[", "'ssl_enabled'", "]", "# Get some defaults:", "socket_timeout", "=", "esgfpid", ".", "defaults", ".", "RABBIT_PIKA_SOCKET_TIMEOUT", "connection_attempts", "=", "esgfpid", ".", "defaults", ".", "RABBIT_PIKA_CONNECTION_ATTEMPTS", "retry_delay", "=", "esgfpid", ".", "defaults", ".", "RABBIT_PIKA_CONNECTION_RETRY_DELAY_SECONDS", "# Make pika connection params", "# https://pika.readthedocs.org/en/0.9.6/connecting.html", "params", "=", "pika", ".", "ConnectionParameters", "(", "host", "=", "host", ",", "ssl", "=", "ssl_enabled", ",", "port", "=", "port", ",", "virtual_host", "=", "vhost", ",", "credentials", "=", "credentials", ",", "socket_timeout", "=", "socket_timeout", ",", "connection_attempts", "=", "connection_attempts", ",", "retry_delay", "=", "retry_delay", ")", "node_info_dict", "[", "'params'", "]", "=", "params", "# Add some stuff", "node_info_dict", "[", "'is_open'", "]", "=", "is_open", "return", "node_info_dict" ]
https://pika.readthedocs.org/en/0.9.6/connecting.html class pika.connection.ConnectionParameters( host=None, port=None, virtual_host=None, credentials=None, channel_max=None, frame_max=None, heartbeat_interval=None, ssl=None, ssl_options=None, connection_attempts=None, retry_delay=None, socket_timeout=None, locale=None, backpressure_detection=None)
[ "https", ":", "//", "pika", ".", "readthedocs", ".", "org", "/", "en", "/", "0", ".", "9", ".", "6", "/", "connecting", ".", "html", "class", "pika", ".", "connection", ".", "ConnectionParameters", "(", "host", "=", "None", "port", "=", "None", "virtual_host", "=", "None", "credentials", "=", "None", "channel_max", "=", "None", "frame_max", "=", "None", "heartbeat_interval", "=", "None", "ssl", "=", "None", "ssl_options", "=", "None", "connection_attempts", "=", "None", "retry_delay", "=", "None", "socket_timeout", "=", "None", "locale", "=", "None", "backpressure_detection", "=", "None", ")" ]
python
train
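A standalone sketch of the pika connection setup that __complete_info_dict performs, with placeholder host and credentials. Note that 5672 is the default AMQP port; the 15672 fallback in the record above is actually RabbitMQ's HTTP management port:

import pika

creds = pika.PlainCredentials('guest', 'guest')  # placeholder credentials
params = pika.ConnectionParameters(
    host='rabbit.example.org',   # placeholder host
    port=5672,                   # default AMQP port
    virtual_host='/',            # '/' is the RabbitMQ default vhost
    credentials=creds,
    socket_timeout=10,           # illustrative stand-ins for the esgfpid defaults
    connection_attempts=3,
    retry_delay=5,
)
# connection = pika.BlockingConnection(params)  # would open the connection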
PmagPy/PmagPy
pmagpy/pmag.py
https://github.com/PmagPy/PmagPy/blob/c7984f8809bf40fe112e53dcc311a33293b62d0b/pmagpy/pmag.py#L9160-L9218
def sortshaw(s, datablock):
    """
    sorts data block in to ARM1,ARM2
    NRM,TRM,ARM1,ARM2=[],[],[],[]
    stick first zero field stuff into first_Z
    """
    # initialize the accumulators promised by the docstring; the original
    # body used them without defining them first, raising a NameError
    NRM, TRM, ARM1, ARM2 = [], [], [], []
    field = 0  # default in case no in-field TRM ("LT-T-I") step is present
    for rec in datablock:
        methcodes = rec["magic_method_codes"].split(":")
        step = float(rec["treatment_ac_field"])
        str = float(rec["measurement_magn_moment"])  # NB: shadows the builtin str
        if "LT-NO" in methcodes:
            NRM.append([0, str])
        if "LT-T-I" in methcodes:
            TRM.append([0, str])
            field = float(rec["treatment_dc_field"])
        if "LT-AF-I" in methcodes:
            ARM1.append([0, str])
        if "LT-AF-I-2" in methcodes:
            ARM2.append([0, str])
        if "LT-AF-Z" in methcodes:
            if "LP-ARM-AFD" in methcodes:
                ARM1.append([step, str])
            elif "LP-TRM-AFD" in methcodes:
                TRM.append([step, str])
            elif "LP-ARM2-AFD" in methcodes:
                ARM2.append([step, str])
            else:
                NRM.append([step, str])
    cont = 1
    while cont == 1:
        if len(NRM) != len(TRM):
            print("Uneven NRM/TRM steps: ")
            NRM, TRM, cont = cleanup(TRM, NRM)
        else:
            cont = 0
    cont = 1
    while cont == 1:
        if len(ARM1) != len(ARM2):
            print("Uneven ARM1/ARM2 steps: ")
            ARM1, ARM2, cont = cleanup(ARM2, ARM1)
        else:
            cont = 0
    #
    # final check
    #
    if len(NRM) != len(TRM) or len(ARM1) != len(ARM2):
        print(len(NRM), len(TRM), len(ARM1), len(ARM2))
        print(" Something wrong with this specimen! Better fix it or delete it ")
        input(" press return to acknowledge message")
    # now do the ratio to "fix" NRM/TRM data
    TRM_ADJ = []
    for kk in range(len(TRM)):
        step = TRM[kk][0]
        for k in range(len(ARM1)):
            if ARM1[k][0] == step:
                TRM_ADJ.append([step, TRM[kk][1] * ARM1[k][1] / ARM2[k][1]])
                break

    shawblock = (NRM, TRM, ARM1, ARM2, TRM_ADJ)
    return shawblock, field
[ "def", "sortshaw", "(", "s", ",", "datablock", ")", ":", "for", "rec", "in", "datablock", ":", "methcodes", "=", "rec", "[", "\"magic_method_codes\"", "]", ".", "split", "(", "\":\"", ")", "step", "=", "float", "(", "rec", "[", "\"treatment_ac_field\"", "]", ")", "str", "=", "float", "(", "rec", "[", "\"measurement_magn_moment\"", "]", ")", "if", "\"LT-NO\"", "in", "methcodes", ":", "NRM", ".", "append", "(", "[", "0", ",", "str", "]", ")", "if", "\"LT-T-I\"", "in", "methcodes", ":", "TRM", ".", "append", "(", "[", "0", ",", "str", "]", ")", "field", "=", "float", "(", "rec", "[", "\"treatment_dc_field\"", "]", ")", "if", "\"LT-AF-I\"", "in", "methcodes", ":", "ARM1", ".", "append", "(", "[", "0", ",", "str", "]", ")", "if", "\"LT-AF-I-2\"", "in", "methcodes", ":", "ARM2", ".", "append", "(", "[", "0", ",", "str", "]", ")", "if", "\"LT-AF-Z\"", "in", "methcodes", ":", "if", "\"LP-ARM-AFD\"", "in", "methcodes", ":", "ARM1", ".", "append", "(", "[", "step", ",", "str", "]", ")", "elif", "\"LP-TRM-AFD\"", "in", "methcodes", ":", "TRM", ".", "append", "(", "[", "step", ",", "str", "]", ")", "elif", "\"LP-ARM2-AFD\"", "in", "methcodes", ":", "ARM2", ".", "append", "(", "[", "step", ",", "str", "]", ")", "else", ":", "NRM", ".", "append", "(", "[", "step", ",", "str", "]", ")", "cont", "=", "1", "while", "cont", "==", "1", ":", "if", "len", "(", "NRM", ")", "!=", "len", "(", "TRM", ")", ":", "print", "(", "\"Uneven NRM/TRM steps: \"", ")", "NRM", ",", "TRM", ",", "cont", "=", "cleanup", "(", "TRM", ",", "NRM", ")", "else", ":", "cont", "=", "0", "cont", "=", "1", "while", "cont", "==", "1", ":", "if", "len", "(", "ARM1", ")", "!=", "len", "(", "ARM2", ")", ":", "print", "(", "\"Uneven ARM1/ARM2 steps: \"", ")", "ARM1", ",", "ARM2", ",", "cont", "=", "cleanup", "(", "ARM2", ",", "ARM1", ")", "else", ":", "cont", "=", "0", "#", "# final check", "#", "if", "len", "(", "NRM", ")", "!=", "len", "(", "TRM", ")", "or", "len", "(", "ARM1", ")", "!=", "len", "(", "ARM2", ")", ":", "print", "(", "len", "(", "NRM", ")", ",", "len", "(", "TRM", ")", ",", "len", "(", "ARM1", ")", ",", "len", "(", "ARM2", ")", ")", "print", "(", "\" Something wrong with this specimen! Better fix it or delete it \"", ")", "input", "(", "\" press return to acknowledge message\"", ")", "# now do the ratio to \"fix\" NRM/TRM data", "# a", "TRM_ADJ", "=", "[", "]", "for", "kk", "in", "range", "(", "len", "(", "TRM", ")", ")", ":", "step", "=", "TRM", "[", "kk", "]", "[", "0", "]", "for", "k", "in", "range", "(", "len", "(", "ARM1", ")", ")", ":", "if", "ARM1", "[", "k", "]", "[", "0", "]", "==", "step", ":", "TRM_ADJ", ".", "append", "(", "[", "step", ",", "TRM", "[", "kk", "]", "[", "1", "]", "*", "ARM1", "[", "k", "]", "[", "1", "]", "/", "ARM2", "[", "k", "]", "[", "1", "]", "]", ")", "break", "shawblock", "=", "(", "NRM", ",", "TRM", ",", "ARM1", ",", "ARM2", ",", "TRM_ADJ", ")", "return", "shawblock", ",", "field" ]
sorts data block in to ARM1,ARM2 NRM,TRM,ARM1,ARM2=[],[],[],[] stick first zero field stuff into first_Z
[ "sorts", "data", "block", "in", "to", "ARM1", "ARM2", "NRM", "TRM", "ARM1", "ARM2", "=", "[]", "[]", "[]", "[]", "stick", "first", "zero", "field", "stuff", "into", "first_Z" ]
python
train
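A tiny worked example of the ARM-ratio correction at the end of sortshaw, with made-up moments: each TRM step is rescaled by ARM1/ARM2 at the matching AF step:

TRM = [(0.0, 1.00), (10.0, 0.80)]   # (AF step, moment) - made-up values
ARM1 = [(0.0, 0.50), (10.0, 0.40)]
ARM2 = [(0.0, 0.55), (10.0, 0.38)]

arm_ratio = {step: a1 / a2 for (step, a1), (_, a2) in zip(ARM1, ARM2)}
TRM_ADJ = [(step, m * arm_ratio[step]) for step, m in TRM]
print(TRM_ADJ)  # [(0.0, 0.9090...), (10.0, 0.8421...)]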
RIPE-NCC/ripe.atlas.sagan
ripe/atlas/sagan/traceroute.py
https://github.com/RIPE-NCC/ripe.atlas.sagan/blob/f0e57221cf0ba3504baddd3ea460fc955bc41cc6/ripe/atlas/sagan/traceroute.py#L175-L180
def set_last_hop_responded(self, last_hop): """Sets the flag if last hop responded.""" for packet in last_hop.packets: if packet.rtt: self.last_hop_responded = True break
[ "def", "set_last_hop_responded", "(", "self", ",", "last_hop", ")", ":", "for", "packet", "in", "last_hop", ".", "packets", ":", "if", "packet", ".", "rtt", ":", "self", ".", "last_hop_responded", "=", "True", "break" ]
Sets the flag if last hop responded.
[ "Sets", "the", "flag", "if", "last", "hop", "responded", "." ]
python
train
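A minimal stand-in showing the flag logic above; Hop and Packet here are hypothetical placeholders, not the real sagan classes:

class Packet:
    def __init__(self, rtt):
        self.rtt = rtt

class Hop:
    def __init__(self, packets):
        self.packets = packets

last_hop = Hop([Packet(None), Packet(23.4)])
responded = any(p.rtt for p in last_hop.packets)  # equivalent to the loop-and-break above
print(responded)  # True - at least one packet carried an RTT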
gdoermann/voicebase
voicebase/api/media.py
https://github.com/gdoermann/voicebase/blob/53cb4735327898a7a284dea3a60ace0b3956a8ec/voicebase/api/media.py#L153-L161
def prediction_model_dict(self):
        """
        Converts the list of prediction models passed in into a properly formatted dictionary
        :return: formatted prediction model dict, keyed by model name
        """
        models = {}
        for model in self.predictions_models:
            models[model.name] = model.keywords
        return models
[ "def", "prediction_model_dict", "(", "self", ")", ":", "models", "=", "{", "}", "for", "model", "in", "self", ".", "predictions_models", ":", "models", "[", "model", ".", "name", "]", "=", "model", ".", "keywords", "return", "models" ]
Converts the list of prediction models passed in into a properly formatted dictionary :return: formatted prediction model dict, keyed by model name
[ "Converts", "the", "list", "of", "prediction_models", "passed", "in", "into", "properly", "formatted", "dictionaries", ":", "return", ":", "formatted", "prediction", "model", "dict" ]
python
train
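A sketch of the mapping prediction_model_dict builds, using a hypothetical namedtuple stand-in for the prediction-model objects (only the name and keywords attributes are assumed):

from collections import namedtuple

Model = namedtuple('Model', ['name', 'keywords'])
models_in = [Model('topics', ['sales', 'support']),
             Model('actions', ['refund', 'escalate'])]

models = {m.name: m.keywords for m in models_in}
print(models)  # {'topics': ['sales', 'support'], 'actions': ['refund', 'escalate']}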
mongolab/dex
dex/dex.py
https://github.com/mongolab/dex/blob/f6dc27321028ef1ffdb3d4b1165fdcce7c8f20aa/dex/dex.py#L231-L239
def analyze_logfile(self, logfile_path):
        """Analyzes queries from a given log file"""
        self._run_stats['logSource'] = logfile_path
        with open(logfile_path) as obj:
            self.analyze_logfile_object(obj)

        self._output_aggregated_report(sys.stdout)

        return 0
[ "def", "analyze_logfile", "(", "self", ",", "logfile_path", ")", ":", "self", ".", "_run_stats", "[", "'logSource'", "]", "=", "logfile_path", "with", "open", "(", "logfile_path", ")", "as", "obj", ":", "self", ".", "analyze_logfile_object", "(", "obj", ")", "self", ".", "_output_aggregated_report", "(", "sys", ".", "stdout", ")", "return", "0" ]
Analyzes queries from a given log file
[ "Analyzes", "queries", "from", "a", "given", "log", "file" ]
python
train
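A hedged usage sketch for analyze_logfile; the Dex constructor arguments vary by version, so treat the signature and log path below as illustrative only:

from dex.dex import Dex

dex = Dex()  # assumed no-arg construction; the real signature may differ
exit_code = dex.analyze_logfile('/var/log/mongodb/mongod.log')  # placeholder path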