max_stars_repo_path
stringlengths
4
277
max_stars_repo_name
stringlengths
4
130
max_stars_count
int64
0
191k
id
stringlengths
1
8
content
stringlengths
1
996k
score
float64
-1.25
4.06
int_score
int64
0
4
public_data/serializers.py
MTES-MCT/sparte
0
0
from rest_framework_gis import serializers from rest_framework import serializers as s from .models import ( Artificialisee2015to2018, Artificielle2018, CommunesSybarval, CouvertureSol, EnveloppeUrbaine2018, Ocsge, Renaturee2018to2015, Sybarval, Voirie2018, ZonesBaties2018, UsageSol, ) def get_label(code="", label=""): if code is None: code = "-" if label is None: label = "inconnu" return f"{code} {label[:30]}" class Artificialisee2015to2018Serializer(serializers.GeoFeatureModelSerializer): usage_2015 = s.SerializerMethodField() usage_2018 = s.SerializerMethodField() couverture_2015 = s.SerializerMethodField() couverture_2018 = s.SerializerMethodField() def get_usage_2015(self, obj): return get_label(code=obj.us_2015, label=obj.us_2015_label) def get_usage_2018(self, obj): return get_label(code=obj.us_2018, label=obj.us_2018_label) def get_couverture_2015(self, obj): return get_label(code=obj.cs_2015, label=obj.cs_2015_label) def get_couverture_2018(self, obj): return get_label(code=obj.cs_2018, label=obj.cs_2018_label) class Meta: fields = ( "id", "surface", "usage_2015", "usage_2018", "couverture_2015", "couverture_2018", ) geo_field = "mpoly" model = Artificialisee2015to2018 class Artificielle2018Serializer(serializers.GeoFeatureModelSerializer): couverture = s.SerializerMethodField() def get_couverture(self, obj): return get_label(code=obj.couverture, label=obj.couverture_label) class Meta: fields = ( "id", "surface", "couverture", ) geo_field = "mpoly" model = Artificielle2018 class CommunesSybarvalSerializer(serializers.GeoFeatureModelSerializer): """Marker GeoJSON serializer.""" class Meta: """Marker serializer meta class.""" fields = ( "nom", "code_insee", "surface", ) geo_field = "mpoly" model = CommunesSybarval class EnveloppeUrbaine2018Serializer(serializers.GeoFeatureModelSerializer): couverture = s.SerializerMethodField() def get_couverture(self, obj): return get_label(code=obj.couverture, label=obj.couverture_label) class Meta: fields = ( "id", "couverture", "surface", ) geo_field = "mpoly" model = EnveloppeUrbaine2018 class OcsgeSerializer(serializers.GeoFeatureModelSerializer): couverture = s.SerializerMethodField() usage = s.SerializerMethodField() def get_couverture(self, obj): return get_label(code=obj.couverture, label=obj.couverture_label) def get_usage(self, obj): return get_label(code=obj.usage, label=obj.usage_label) class Meta: fields = ( "id", "couverture", "usage", "millesime", "map_color", "year", ) geo_field = "mpoly" model = Ocsge class Renaturee2018to2015Serializer(serializers.GeoFeatureModelSerializer): usage_2015 = s.SerializerMethodField() usage_2018 = s.SerializerMethodField() couverture_2015 = s.SerializerMethodField() couverture_2018 = s.SerializerMethodField() def get_usage_2015(self, obj): return get_label(code=obj.us_2015, label=obj.us_2015_label) def get_usage_2018(self, obj): return get_label(code=obj.us_2018, label=obj.us_2018_label) def get_couverture_2015(self, obj): return get_label(code=obj.cs_2015, label=obj.cs_2015_label) def get_couverture_2018(self, obj): return get_label(code=obj.cs_2018, label=obj.cs_2018_label) class Meta: fields = ( "id", "surface", "usage_2015", "usage_2018", "couverture_2015", "couverture_2018", ) geo_field = "mpoly" model = Renaturee2018to2015 class SybarvalSerializer(serializers.GeoFeatureModelSerializer): class Meta: fields = ( "id", "surface", ) geo_field = "mpoly" model = Sybarval class Voirie2018Serializer(serializers.GeoFeatureModelSerializer): couverture = s.SerializerMethodField() usage = 
s.SerializerMethodField() def get_couverture(self, obj): return get_label(code=obj.couverture, label=obj.couverture_label) def get_usage(self, obj): return get_label(code=obj.usage, label=obj.usage_label) class Meta: fields = ( "id", "surface", "couverture", "usage", ) geo_field = "mpoly" model = Voirie2018 class ZonesBaties2018Serializer(serializers.GeoFeatureModelSerializer): couverture = s.SerializerMethodField() usage = s.SerializerMethodField() def get_couverture(self, obj): return get_label(code=obj.couverture, label=obj.couverture_label) def get_usage(self, obj): return get_label(code=obj.usage, label=obj.usage_label) class Meta: fields = ( "id", "couverture", "usage", "surface", ) geo_field = "mpoly" model = ZonesBaties2018 class CouvertureSolSerializer(serializers.ModelSerializer): class Meta: fields = ( "id", "parent", "code", "label", "is_artificial", ) model = CouvertureSol class UsageSolSerializer(serializers.ModelSerializer): class Meta: fields = ( "id", "parent", "code", "label", ) model = UsageSol
1.257813
1
lib/variables/latent_variables/__init__.py
joelouismarino/variational_rl
15
8
from .fully_connected import FullyConnectedLatentVariable from .convolutional import ConvolutionalLatentVariable
0.408203
0
app/views/web/homestack.py
geudrik/hautomation
0
16
#! /usr/bin/env python2.7 # -*- coding: latin-1 -*- from flask import Blueprint from flask import current_app from flask import render_template from flask_login import login_required homestack = Blueprint("homestack", __name__, url_prefix="/homestack") @homestack.route("/", methods=["GET"]) @login_required def home(): return render_template("homestack/home.html")
1.242188
1
src/transformers/models/mmbt/modeling_mmbt.py
MaximovaIrina/transformers
1
32
# coding=utf-8 # Copyright (c) Facebook, Inc. and its affiliates. # Copyright (c) HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """PyTorch MMBT model. """ import torch from torch import nn from torch.nn import CrossEntropyLoss, MSELoss from ...file_utils import add_start_docstrings, add_start_docstrings_to_model_forward, replace_return_docstrings from ...modeling_outputs import BaseModelOutputWithPooling, SequenceClassifierOutput from ...modeling_utils import ModuleUtilsMixin from ...utils import logging logger = logging.get_logger(__name__) _CONFIG_FOR_DOC = "MMBTConfig" class ModalEmbeddings(nn.Module): """Generic Modal Embeddings which takes in an encoder, and a transformer embedding.""" def __init__(self, config, encoder, embeddings): super().__init__() self.config = config self.encoder = encoder self.proj_embeddings = nn.Linear(config.modal_hidden_size, config.hidden_size) self.position_embeddings = embeddings.position_embeddings self.token_type_embeddings = embeddings.token_type_embeddings self.word_embeddings = embeddings.word_embeddings self.LayerNorm = embeddings.LayerNorm self.dropout = nn.Dropout(p=config.hidden_dropout_prob) def forward(self, input_modal, start_token=None, end_token=None, position_ids=None, token_type_ids=None): token_embeddings = self.proj_embeddings(self.encoder(input_modal)) seq_length = token_embeddings.size(1) if start_token is not None: start_token_embeds = self.word_embeddings(start_token) seq_length += 1 token_embeddings = torch.cat([start_token_embeds.unsqueeze(1), token_embeddings], dim=1) if end_token is not None: end_token_embeds = self.word_embeddings(end_token) seq_length += 1 token_embeddings = torch.cat([token_embeddings, end_token_embeds.unsqueeze(1)], dim=1) if position_ids is None: position_ids = torch.arange(seq_length, dtype=torch.long, device=input_modal.device) position_ids = position_ids.unsqueeze(0).expand(input_modal.size(0), seq_length) if token_type_ids is None: token_type_ids = torch.zeros( (input_modal.size(0), seq_length), dtype=torch.long, device=input_modal.device ) position_embeddings = self.position_embeddings(position_ids) token_type_embeddings = self.token_type_embeddings(token_type_ids) embeddings = token_embeddings + position_embeddings + token_type_embeddings embeddings = self.LayerNorm(embeddings) embeddings = self.dropout(embeddings) return embeddings MMBT_START_DOCSTRING = r""" MMBT model was proposed in [Supervised Multimodal Bitransformers for Classifying Images and Text](https://github.com/facebookresearch/mmbt) by <NAME>, <NAME>, <NAME>, <NAME>. It's a supervised multimodal bitransformer model that fuses information from text and other image encoders, and obtain state-of-the-art performance on various multimodal classification benchmark tasks. This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.) 
This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior. Parameters: config ([`MMBTConfig`]): Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. transformer (:class: *~nn.Module*): A text transformer that is used by MMBT. It should have embeddings, encoder, and pooler attributes. encoder (:class: *~nn.Module*): Encoder for the second modality. It should take in a batch of modal inputs and return k, n dimension embeddings. """ MMBT_INPUTS_DOCSTRING = r""" Args: input_modal (`torch.FloatTensor` of shape `(batch_size, ***)`): The other modality data. It will be the shape that the encoder for that type expects. e.g. With an Image Encoder, the shape would be (batch_size, channels, height, width) input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`): Indices of input sequence tokens in the vocabulary. It does not expect [CLS] token to be added as it's appended to the end of other modality embeddings. Indices can be obtained using [`BertTokenizer`]. See [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details. [What are input IDs?](../glossary#input-ids) modal_start_tokens (`torch.LongTensor` of shape `(batch_size,)`, *optional*): Optional start token to be added to Other Modality Embedding. [CLS] Most commonly used for classification tasks. modal_end_tokens (`torch.LongTensor` of shape `(batch_size,)`, *optional*): Optional end token to be added to Other Modality Embedding. [SEP] Most commonly used. attention_mask (*optional*) `torch.FloatTensor` of shape `(batch_size, sequence_length)`: Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`: - 1 for tokens that are **not masked**, - 0 for tokens that are **masked**. [What are attention masks?](../glossary#attention-mask) token_type_ids (*optional*) `torch.LongTensor` of shape `(batch_size, sequence_length)`: Segment token indices to indicate first and second portions of the inputs. Indices are selected in `[0, 1]`: - 0 corresponds to a *sentence A* token, - 1 corresponds to a *sentence B* token. [What are token type IDs?](../glossary#token-type-ids) modal_token_type_ids (*optional*) `torch.LongTensor` of shape `(batch_size, modal_sequence_length)`: Segment token indices to indicate different portions of the non-text modality. The embeddings from these tokens will be summed with the respective token embeddings for the non-text modality. position_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0, config.max_position_embeddings - 1]`. [What are position IDs?](../glossary#position-ids) modal_position_ids (`torch.LongTensor` of shape `(batch_size, modal_sequence_length)`, *optional*): Indices of positions of each input sequence tokens in the position embeddings for the non-text modality. Selected in the range `[0, config.max_position_embeddings - 1]`. [What are position IDs?](../glossary#position-ids) head_mask (`torch.FloatTensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*): Mask to nullify selected heads of the self-attention modules. 
Mask values selected in `[0, 1]`: - 1 indicates the head is **not masked**, - 0 indicates the head is **masked**. inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, embedding_dim)`, *optional*): Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert `input_ids` indices into associated vectors than the model's internal embedding lookup matrix. encoder_hidden_states (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*): Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention if the model is configured as a decoder. encoder_attention_mask (`torch.FloatTensor` of shape `(batch_size, sequence_length)`, *optional*): Mask to avoid performing attention on the padding token indices of the encoder input. This mask is used in the cross-attention if the model is configured as a decoder. Mask values selected in `[0, 1]`: - 1 for tokens that are **not masked**, - 0 for tokens that are **masked**. output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more detail. output_hidden_states (`bool`, *optional*): Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for more detail. return_dict (`bool`, *optional*): Whether or not to return a [`~file_utils.ModelOutput`] instead of a plain tuple. """ @add_start_docstrings( "The bare MMBT Model outputting raw hidden-states without any specific head on top.", MMBT_START_DOCSTRING, ) class MMBTModel(nn.Module, ModuleUtilsMixin): def __init__(self, config, transformer, encoder): super().__init__() self.config = config self.transformer = transformer self.modal_encoder = ModalEmbeddings(config, encoder, transformer.embeddings) @add_start_docstrings_to_model_forward(MMBT_INPUTS_DOCSTRING) @replace_return_docstrings(output_type=BaseModelOutputWithPooling, config_class=_CONFIG_FOR_DOC) def forward( self, input_modal, input_ids=None, modal_start_tokens=None, modal_end_tokens=None, attention_mask=None, token_type_ids=None, modal_token_type_ids=None, position_ids=None, modal_position_ids=None, head_mask=None, inputs_embeds=None, encoder_hidden_states=None, encoder_attention_mask=None, output_attentions=None, output_hidden_states=None, return_dict=None, ): r""" Returns: Examples:: # For example purposes. Not runnable. 
transformer = BertModel.from_pretrained('bert-base-uncased') encoder = ImageEncoder(args) mmbt = MMBTModel(config, transformer, encoder) """ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) return_dict = return_dict if return_dict is not None else self.config.use_return_dict if input_ids is not None and inputs_embeds is not None: raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time") elif input_ids is not None: input_txt_shape = input_ids.size() elif inputs_embeds is not None: input_txt_shape = inputs_embeds.size()[:-1] else: raise ValueError("You have to specify either input_ids or inputs_embeds") device = input_ids.device if input_ids is not None else inputs_embeds.device modal_embeddings = self.modal_encoder( input_modal, start_token=modal_start_tokens, end_token=modal_end_tokens, position_ids=modal_position_ids, token_type_ids=modal_token_type_ids, ) input_modal_shape = modal_embeddings.size()[:-1] if token_type_ids is None: token_type_ids = torch.ones(input_txt_shape, dtype=torch.long, device=device) txt_embeddings = self.transformer.embeddings( input_ids=input_ids, position_ids=position_ids, token_type_ids=token_type_ids, inputs_embeds=inputs_embeds ) embedding_output = torch.cat([modal_embeddings, txt_embeddings], 1) input_shape = embedding_output.size()[:-1] if attention_mask is None: attention_mask = torch.ones(input_shape, device=device) else: attention_mask = torch.cat( [torch.ones(input_modal_shape, device=device, dtype=torch.long), attention_mask], dim=1 ) if encoder_attention_mask is None: encoder_attention_mask = torch.ones(input_shape, device=device) else: encoder_attention_mask = torch.cat( [torch.ones(input_modal_shape, device=device), encoder_attention_mask], dim=1 ) extended_attention_mask = self.get_extended_attention_mask(attention_mask, input_shape, self.device) encoder_extended_attention_mask = self.invert_attention_mask(encoder_attention_mask) head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers) encoder_outputs = self.transformer.encoder( embedding_output, attention_mask=extended_attention_mask, head_mask=head_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_extended_attention_mask, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) sequence_output = encoder_outputs[0] pooled_output = self.transformer.pooler(sequence_output) if not return_dict: return (sequence_output, pooled_output) + encoder_outputs[1:] return BaseModelOutputWithPooling( last_hidden_state=sequence_output, pooler_output=pooled_output, hidden_states=encoder_outputs.hidden_states, attentions=encoder_outputs.attentions, ) def get_input_embeddings(self): return self.embeddings.word_embeddings def set_input_embeddings(self, value): self.embeddings.word_embeddings = value @add_start_docstrings( """ MMBT Model with a sequence classification/regression head on top (a linear layer on top of the pooled output) """, MMBT_START_DOCSTRING, MMBT_INPUTS_DOCSTRING, ) class MMBTForClassification(nn.Module): r""" **labels**: (*optional*) `torch.LongTensor` of shape `(batch_size,)`: Labels for computing the sequence classification/regression loss. Indices should be in `[0, ..., config.num_labels - 1]`. 
If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If `config.num_labels > 1` a classification loss is computed (Cross-Entropy). Returns: *Tuple* comprising various elements depending on the configuration (config) and inputs: **loss**: (*optional*, returned when `labels` is provided) `torch.FloatTensor` of shape `(1,)`: Classification (or regression if config.num_labels==1) loss. **logits**: `torch.FloatTensor` of shape `(batch_size, config.num_labels)` Classification (or regression if config.num_labels==1) scores (before SoftMax). **hidden_states**: (*optional*, returned when `output_hidden_states=True`) list of `torch.FloatTensor` (one for the output of each layer + the output of the embeddings) of shape `(batch_size, sequence_length, hidden_size)`: Hidden-states of the model at the output of each layer plus the initial embedding outputs. **attentions**: (*optional*, returned when `output_attentions=True`) list of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, sequence_length)`: Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. Examples: ```python # For example purposes. Not runnable. transformer = BertModel.from_pretrained('bert-base-uncased') encoder = ImageEncoder(args) model = MMBTForClassification(config, transformer, encoder) outputs = model(input_modal, input_ids, labels=labels) loss, logits = outputs[:2] ```""" def __init__(self, config, transformer, encoder): super().__init__() self.num_labels = config.num_labels self.mmbt = MMBTModel(config, transformer, encoder) self.dropout = nn.Dropout(config.hidden_dropout_prob) self.classifier = nn.Linear(config.hidden_size, config.num_labels) def forward( self, input_modal, input_ids=None, modal_start_tokens=None, modal_end_tokens=None, attention_mask=None, token_type_ids=None, modal_token_type_ids=None, position_ids=None, modal_position_ids=None, head_mask=None, inputs_embeds=None, labels=None, return_dict=None, ): return_dict = return_dict if return_dict is not None else self.config.use_return_dict outputs = self.mmbt( input_modal=input_modal, input_ids=input_ids, modal_start_tokens=modal_start_tokens, modal_end_tokens=modal_end_tokens, attention_mask=attention_mask, token_type_ids=token_type_ids, modal_token_type_ids=modal_token_type_ids, position_ids=position_ids, modal_position_ids=modal_position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, return_dict=return_dict, ) pooled_output = outputs[1] pooled_output = self.dropout(pooled_output) logits = self.classifier(pooled_output) loss = None if labels is not None: if self.num_labels == 1: # We are doing regression loss_fct = MSELoss() loss = loss_fct(logits.view(-1), labels.view(-1)) else: loss_fct = CrossEntropyLoss() loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1)) if not return_dict: output = (logits,) + outputs[2:] return ((loss,) + output) if loss is not None else output return SequenceClassifierOutput( loss=loss, logits=logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions, )
1.515625
2
garaged/src/garage/tf/regressors/gaussian_mlp_regressor_model.py
artberryx/LSD
7
48
"""GaussianMLPRegressorModel.""" import numpy as np import tensorflow as tf import tensorflow_probability as tfp from garage.experiment import deterministic from garage.tf.models import GaussianMLPModel class GaussianMLPRegressorModel(GaussianMLPModel): """GaussianMLPRegressor based on garage.tf.models.Model class. This class can be used to perform regression by fitting a Gaussian distribution to the outputs. Args: input_shape (tuple[int]): Input shape of the training data. output_dim (int): Output dimension of the model. name (str): Model name, also the variable scope. hidden_sizes (list[int]): Output dimension of dense layer(s) for the MLP for mean. For example, (32, 32) means the MLP consists of two hidden layers, each with 32 hidden units. hidden_nonlinearity (callable): Activation function for intermediate dense layer(s). It should return a tf.Tensor. Set it to None to maintain a linear activation. hidden_w_init (callable): Initializer function for the weight of intermediate dense layer(s). The function should return a tf.Tensor. hidden_b_init (callable): Initializer function for the bias of intermediate dense layer(s). The function should return a tf.Tensor. output_nonlinearity (callable): Activation function for output dense layer. It should return a tf.Tensor. Set it to None to maintain a linear activation. output_w_init (callable): Initializer function for the weight of output dense layer(s). The function should return a tf.Tensor. output_b_init (callable): Initializer function for the bias of output dense layer(s). The function should return a tf.Tensor. learn_std (bool): Is std trainable. init_std (float): Initial value for std. adaptive_std (bool): Is std a neural network. If False, it will be a parameter. std_share_network (bool): Boolean for whether mean and std share the same network. std_hidden_sizes (list[int]): Output dimension of dense layer(s) for the MLP for std. For example, (32, 32) means the MLP consists of two hidden layers, each with 32 hidden units. min_std (float): If not None, the std is at least the value of min_std, to avoid numerical issues. max_std (float): If not None, the std is at most the value of max_std, to avoid numerical issues. std_hidden_nonlinearity (callable): Nonlinearity for each hidden layer in the std network. std_hidden_w_init (callable): Initializer function for the weight of intermediate dense layer(s) in the std network. std_hidden_b_init (callable): Initializer function for the bias of intermediate dense layer(s) in the std network. std_output_nonlinearity (callable): Activation function for output dense layer in the std network. It should return a tf.Tensor. Set it to None to maintain a linear activation. std_output_w_init (callable): Initializer function for the weight of output dense layer(s) in the std network. std_parameterization (str): How the std should be parametrized. There are two options: - exp: the logarithm of the std will be stored, and applied a exponential transformation - softplus: the std will be computed as log(1+exp(x)) layer_normalization (bool): Bool for using layer normalization or not. 
""" def __init__(self, input_shape, output_dim, name='GaussianMLPRegressorModel', hidden_sizes=(32, 32), hidden_nonlinearity=tf.nn.tanh, hidden_w_init=tf.initializers.glorot_uniform( seed=deterministic.get_tf_seed_stream()), hidden_b_init=tf.zeros_initializer(), output_nonlinearity=None, output_w_init=tf.initializers.glorot_uniform( seed=deterministic.get_tf_seed_stream()), output_b_init=tf.zeros_initializer(), learn_std=True, adaptive_std=False, std_share_network=False, init_std=1.0, min_std=1e-6, max_std=None, std_hidden_sizes=(32, 32), std_hidden_nonlinearity=tf.nn.tanh, std_hidden_w_init=tf.initializers.glorot_uniform( seed=deterministic.get_tf_seed_stream()), std_hidden_b_init=tf.zeros_initializer(), std_output_nonlinearity=None, std_output_w_init=tf.initializers.glorot_uniform( seed=deterministic.get_tf_seed_stream()), std_parameterization='exp', layer_normalization=False): super().__init__(output_dim=output_dim, name=name, hidden_sizes=hidden_sizes, hidden_nonlinearity=hidden_nonlinearity, hidden_w_init=hidden_w_init, hidden_b_init=hidden_b_init, output_nonlinearity=output_nonlinearity, output_w_init=output_w_init, output_b_init=output_b_init, learn_std=learn_std, adaptive_std=adaptive_std, std_share_network=std_share_network, init_std=init_std, min_std=min_std, max_std=max_std, std_hidden_sizes=std_hidden_sizes, std_hidden_nonlinearity=std_hidden_nonlinearity, std_output_nonlinearity=std_output_nonlinearity, std_parameterization=std_parameterization, layer_normalization=layer_normalization) self._input_shape = input_shape def network_output_spec(self): """Network output spec. Return: list[str]: List of key(str) for the network outputs. """ return [ 'normalized_dist', 'normalized_mean', 'normalized_log_std', 'dist', 'mean', 'log_std', 'x_mean', 'x_std', 'y_mean', 'y_std' ] def _build(self, state_input, name=None): """Build model given input placeholder(s). Args: state_input (tf.Tensor): Place holder for state input. name (str): Inner model name, also the variable scope of the inner model, if exist. One example is garage.tf.models.Sequential. Return: tfp.distributions.MultivariateNormalDiag: Normlizaed distribution. tf.Tensor: Normalized mean. tf.Tensor: Normalized log_std. tfp.distributions.MultivariateNormalDiag: Vanilla distribution. tf.Tensor: Vanilla mean. tf.Tensor: Vanilla log_std. tf.Tensor: Mean for data. tf.Tensor: log_std for data. tf.Tensor: Mean for label. tf.Tensor: log_std for label. 
""" with tf.compat.v1.variable_scope('normalized_vars'): x_mean_var = tf.compat.v1.get_variable( name='x_mean', shape=(1, ) + self._input_shape, dtype=np.float32, initializer=tf.zeros_initializer(), trainable=False) x_std_var = tf.compat.v1.get_variable( name='x_std_var', shape=(1, ) + self._input_shape, dtype=np.float32, initializer=tf.ones_initializer(), trainable=False) y_mean_var = tf.compat.v1.get_variable( name='y_mean_var', shape=(1, self._output_dim), dtype=np.float32, initializer=tf.zeros_initializer(), trainable=False) y_std_var = tf.compat.v1.get_variable( name='y_std_var', shape=(1, self._output_dim), dtype=np.float32, initializer=tf.ones_initializer(), trainable=False) normalized_xs_var = (state_input - x_mean_var) / x_std_var _, normalized_dist_mean, normalized_dist_log_std = super()._build( normalized_xs_var) # Since regressor expects [N, *dims], we need to squeeze the extra # dimension normalized_dist_log_std = tf.squeeze(normalized_dist_log_std, 1) with tf.name_scope('mean_network'): means_var = normalized_dist_mean * y_std_var + y_mean_var with tf.name_scope('std_network'): log_stds_var = normalized_dist_log_std + tf.math.log(y_std_var) normalized_dist = tfp.distributions.MultivariateNormalDiag( loc=normalized_dist_mean, scale_diag=tf.exp(normalized_dist_log_std)) vanilla_dist = tfp.distributions.MultivariateNormalDiag( loc=means_var, scale_diag=tf.exp(log_stds_var)) return (normalized_dist, normalized_dist_mean, normalized_dist_log_std, vanilla_dist, means_var, log_stds_var, x_mean_var, x_std_var, y_mean_var, y_std_var) def clone(self, name): """Return a clone of the model. It copies the configuration and parameters of the primitive. Args: name (str): Name of the newly created model. It has to be different from source model if cloned under the same computational graph. Returns: garage.tf.policies.GaussianMLPModel: Newly cloned model. """ new_regressor = self.__class__( name=name, input_shape=self._input_shape, output_dim=self._output_dim, hidden_sizes=self._hidden_sizes, hidden_nonlinearity=self._hidden_nonlinearity, hidden_w_init=self._hidden_w_init, hidden_b_init=self._hidden_b_init, output_nonlinearity=self._output_nonlinearity, output_w_init=self._output_w_init, output_b_init=self._output_b_init, learn_std=self._learn_std, adaptive_std=self._adaptive_std, std_share_network=self._std_share_network, init_std=self._init_std, min_std=self._min_std, max_std=self._max_std, std_hidden_sizes=self._std_hidden_sizes, std_hidden_nonlinearity=self._std_hidden_nonlinearity, std_hidden_w_init=self._std_hidden_w_init, std_hidden_b_init=self._std_hidden_b_init, std_output_nonlinearity=self._std_output_nonlinearity, std_output_w_init=self._std_output_w_init, std_parameterization=self._std_parameterization, layer_normalization=self._layer_normalization) new_regressor.parameters = self.parameters return new_regressor
2.671875
3
src/biotite/copyable.py
danijoo/biotite
208
56
# This source code is part of the Biotite package and is distributed # under the 3-Clause BSD License. Please see 'LICENSE.rst' for further # information. __name__ = "biotite" __author__ = "<NAME>" __all__ = ["Copyable"] import abc class Copyable(metaclass=abc.ABCMeta): """ Base class for all objects, that should be copyable. The public method `copy()` first creates a fresh instance of the class of the instance, that is copied via the `__copy_create__()` method. All variables, that could not be set via the constructor, are then copied via `__copy_fill__()`, starting with the method in the uppermost base class and ending with the class of the instance to be copied. This approach solves the problem of encapsulated variables in superclasses. """ def copy(self): """ Create a deep copy of this object. Returns ------- copy A copy of this object. """ clone = self.__copy_create__() self.__copy_fill__(clone) return clone def __copy_create__(self): """ Instantiate a new object of this class. Only the constructor should be called in this method. All further attributes, that need to be copied are handled in `__copy_fill__()` Do not call the `super()` method here. This method must be overridden, if the constructor takes parameters. Returns ------- copy A freshly instantiated copy of *self*. """ return type(self)() def __copy_fill__(self, clone): """ Copy all necessary attributes to the new object. Always call the `super()` method as first statement. Parameters ---------- clone The freshly instantiated copy of *self*. """ pass
3.078125
3
uhd_restpy/testplatform/sessions/ixnetwork/quicktest/learnframes_58e01d83db5d99bcabff902f5cf6ec51.py
OpenIxia/ixnetwork_restpy
20
64
# MIT LICENSE # # Copyright 1997 - 2020 by IXIA Keysight # # Permission is hereby granted, free of charge, to any person obtaining a copy # of this software and associated documentation files (the "Software"), # to deal in the Software without restriction, including without limitation # the rights to use, copy, modify, merge, publish, distribute, sublicense, # and/or sell copies of the Software, and to permit persons to whom the # Software is furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice shall be included in # all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN # THE SOFTWARE. from uhd_restpy.base import Base from uhd_restpy.files import Files from typing import List, Any, Union class LearnFrames(Base): """The learning frames that IxNetwork sends during the test. The LearnFrames class encapsulates a required learnFrames resource which will be retrieved from the server every time the property is accessed. """ __slots__ = () _SDM_NAME = 'learnFrames' _SDM_ATT_MAP = { 'FastPathEnable': 'fastPathEnable', 'FastPathLearnFrameSize': 'fastPathLearnFrameSize', 'FastPathNumFrames': 'fastPathNumFrames', 'FastPathRate': 'fastPathRate', 'LearnFrameSize': 'learnFrameSize', 'LearnFrequency': 'learnFrequency', 'LearnNumFrames': 'learnNumFrames', 'LearnRate': 'learnRate', 'LearnSendMacOnly': 'learnSendMacOnly', 'LearnSendRouterSolicitation': 'learnSendRouterSolicitation', 'LearnWaitTime': 'learnWaitTime', 'LearnWaitTimeBeforeTransmit': 'learnWaitTimeBeforeTransmit', } _SDM_ENUM_MAP = { 'learnFrequency': ['never', 'onBinaryIteration', 'oncePerFramesize', 'oncePerTest', 'onTrial'], } def __init__(self, parent, list_op=False): super(LearnFrames, self).__init__(parent, list_op) @property def FastPathEnable(self): # type: () -> bool """ Returns ------- - bool: If true, enables fast path transmit. """ return self._get_attribute(self._SDM_ATT_MAP['FastPathEnable']) @FastPathEnable.setter def FastPathEnable(self, value): # type: (bool) -> None self._set_attribute(self._SDM_ATT_MAP['FastPathEnable'], value) @property def FastPathLearnFrameSize(self): # type: () -> int """ Returns ------- - number: Specifies the size of the learning frames in the fast path. """ return self._get_attribute(self._SDM_ATT_MAP['FastPathLearnFrameSize']) @FastPathLearnFrameSize.setter def FastPathLearnFrameSize(self, value): # type: (int) -> None self._set_attribute(self._SDM_ATT_MAP['FastPathLearnFrameSize'], value) @property def FastPathNumFrames(self): # type: () -> int """ Returns ------- - number: Specifies the number of learn frames that IxNetwork sends through fast path. """ return self._get_attribute(self._SDM_ATT_MAP['FastPathNumFrames']) @FastPathNumFrames.setter def FastPathNumFrames(self, value): # type: (int) -> None self._set_attribute(self._SDM_ATT_MAP['FastPathNumFrames'], value) @property def FastPathRate(self): # type: () -> int """ Returns ------- - number: Specifies the rate at which IxNetwork sends learn frames through fast path. 
""" return self._get_attribute(self._SDM_ATT_MAP['FastPathRate']) @FastPathRate.setter def FastPathRate(self, value): # type: (int) -> None self._set_attribute(self._SDM_ATT_MAP['FastPathRate'], value) @property def LearnFrameSize(self): # type: () -> int """ Returns ------- - number: Specifies the size of the learning frames. """ return self._get_attribute(self._SDM_ATT_MAP['LearnFrameSize']) @LearnFrameSize.setter def LearnFrameSize(self, value): # type: (int) -> None self._set_attribute(self._SDM_ATT_MAP['LearnFrameSize'], value) @property def LearnFrequency(self): # type: () -> str """ Returns ------- - str(never | onBinaryIteration | oncePerFramesize | oncePerTest | onTrial): Allows to choose how frequently IxNetwork sends learning frames during the test. """ return self._get_attribute(self._SDM_ATT_MAP['LearnFrequency']) @LearnFrequency.setter def LearnFrequency(self, value): # type: (str) -> None self._set_attribute(self._SDM_ATT_MAP['LearnFrequency'], value) @property def LearnNumFrames(self): # type: () -> int """ Returns ------- - number: Specifies the number of learning frames that IxNetwork sends for each address. """ return self._get_attribute(self._SDM_ATT_MAP['LearnNumFrames']) @LearnNumFrames.setter def LearnNumFrames(self, value): # type: (int) -> None self._set_attribute(self._SDM_ATT_MAP['LearnNumFrames'], value) @property def LearnRate(self): # type: () -> int """ Returns ------- - number: Specifies the rate at which IxNetwork sends learn frames to the DUT. """ return self._get_attribute(self._SDM_ATT_MAP['LearnRate']) @LearnRate.setter def LearnRate(self, value): # type: (int) -> None self._set_attribute(self._SDM_ATT_MAP['LearnRate'], value) @property def LearnSendMacOnly(self): # type: () -> bool """ Returns ------- - bool: Sends learning frames to MAC address only. """ return self._get_attribute(self._SDM_ATT_MAP['LearnSendMacOnly']) @LearnSendMacOnly.setter def LearnSendMacOnly(self, value): # type: (bool) -> None self._set_attribute(self._SDM_ATT_MAP['LearnSendMacOnly'], value) @property def LearnSendRouterSolicitation(self): # type: () -> bool """ Returns ------- - bool: Sends router solicitation messages. """ return self._get_attribute(self._SDM_ATT_MAP['LearnSendRouterSolicitation']) @LearnSendRouterSolicitation.setter def LearnSendRouterSolicitation(self, value): # type: (bool) -> None self._set_attribute(self._SDM_ATT_MAP['LearnSendRouterSolicitation'], value) @property def LearnWaitTime(self): # type: () -> int """ Returns ------- - number: Specifies the length of time in ms that IxNetwork pauses before sending all the learning frames from all the ports. 
""" return self._get_attribute(self._SDM_ATT_MAP['LearnWaitTime']) @LearnWaitTime.setter def LearnWaitTime(self, value): # type: (int) -> None self._set_attribute(self._SDM_ATT_MAP['LearnWaitTime'], value) @property def LearnWaitTimeBeforeTransmit(self): # type: () -> int """ Returns ------- - number: Specifies the length of time in ms that IxNetwork pauses before sending all the """ return self._get_attribute(self._SDM_ATT_MAP['LearnWaitTimeBeforeTransmit']) @LearnWaitTimeBeforeTransmit.setter def LearnWaitTimeBeforeTransmit(self, value): # type: (int) -> None self._set_attribute(self._SDM_ATT_MAP['LearnWaitTimeBeforeTransmit'], value) def update(self, FastPathEnable=None, FastPathLearnFrameSize=None, FastPathNumFrames=None, FastPathRate=None, LearnFrameSize=None, LearnFrequency=None, LearnNumFrames=None, LearnRate=None, LearnSendMacOnly=None, LearnSendRouterSolicitation=None, LearnWaitTime=None, LearnWaitTimeBeforeTransmit=None): # type: (bool, int, int, int, int, str, int, int, bool, bool, int, int) -> LearnFrames """Updates learnFrames resource on the server. Args ---- - FastPathEnable (bool): If true, enables fast path transmit. - FastPathLearnFrameSize (number): Specifies the size of the learning frames in the fast path. - FastPathNumFrames (number): Specifies the number of learn frames that IxNetwork sends through fast path. - FastPathRate (number): Specifies the rate at which IxNetwork sends learn frames through fast path. - LearnFrameSize (number): Specifies the size of the learning frames. - LearnFrequency (str(never | onBinaryIteration | oncePerFramesize | oncePerTest | onTrial)): Allows to choose how frequently IxNetwork sends learning frames during the test. - LearnNumFrames (number): Specifies the number of learning frames that IxNetwork sends for each address. - LearnRate (number): Specifies the rate at which IxNetwork sends learn frames to the DUT. - LearnSendMacOnly (bool): Sends learning frames to MAC address only. - LearnSendRouterSolicitation (bool): Sends router solicitation messages. - LearnWaitTime (number): Specifies the length of time in ms that IxNetwork pauses before sending all the learning frames from all the ports. - LearnWaitTimeBeforeTransmit (number): Specifies the length of time in ms that IxNetwork pauses before sending all the Raises ------ - ServerError: The server has encountered an uncategorized error condition """ return self._update(self._map_locals(self._SDM_ATT_MAP, locals())) def Apply(self, *args, **kwargs): # type: (*Any, **Any) -> None """Executes the apply operation on the server. Applies the specified Quick Test. apply(async_operation=bool) --------------------------- - async_operation (bool=False): True to execute the operation asynchronously. Any subsequent rest api calls made through the Connection class will block until the operation is complete. Raises ------ - NotFoundError: The requested resource does not exist on the server - ServerError: The server has encountered an uncategorized error condition """ payload = { "Arg1": self.href } for i in range(len(args)): payload['Arg%s' % (i + 2)] = args[i] for item in kwargs.items(): payload[item[0]] = item[1] return self._execute('apply', payload=payload, response_object=None) def ApplyAsync(self, *args, **kwargs): # type: (*Any, **Any) -> None """Executes the applyAsync operation on the server. applyAsync(async_operation=bool) -------------------------------- - async_operation (bool=False): True to execute the operation asynchronously. 
Any subsequent rest api calls made through the Connection class will block until the operation is complete. Raises ------ - NotFoundError: The requested resource does not exist on the server - ServerError: The server has encountered an uncategorized error condition """ payload = { "Arg1": self.href } for i in range(len(args)): payload['Arg%s' % (i + 2)] = args[i] for item in kwargs.items(): payload[item[0]] = item[1] return self._execute('applyAsync', payload=payload, response_object=None) def ApplyAsyncResult(self, *args, **kwargs): # type: (*Any, **Any) -> Union[bool, None] """Executes the applyAsyncResult operation on the server. applyAsyncResult(async_operation=bool)bool ------------------------------------------ - async_operation (bool=False): True to execute the operation asynchronously. Any subsequent rest api calls made through the Connection class will block until the operation is complete. - Returns bool: Raises ------ - NotFoundError: The requested resource does not exist on the server - ServerError: The server has encountered an uncategorized error condition """ payload = { "Arg1": self.href } for i in range(len(args)): payload['Arg%s' % (i + 2)] = args[i] for item in kwargs.items(): payload[item[0]] = item[1] return self._execute('applyAsyncResult', payload=payload, response_object=None) def ApplyITWizardConfiguration(self, *args, **kwargs): # type: (*Any, **Any) -> None """Executes the applyITWizardConfiguration operation on the server. Applies the specified Quick Test. applyITWizardConfiguration(async_operation=bool) ------------------------------------------------ - async_operation (bool=False): True to execute the operation asynchronously. Any subsequent rest api calls made through the Connection class will block until the operation is complete. Raises ------ - NotFoundError: The requested resource does not exist on the server - ServerError: The server has encountered an uncategorized error condition """ payload = { "Arg1": self.href } for i in range(len(args)): payload['Arg%s' % (i + 2)] = args[i] for item in kwargs.items(): payload[item[0]] = item[1] return self._execute('applyITWizardConfiguration', payload=payload, response_object=None) def GenerateReport(self, *args, **kwargs): # type: (*Any, **Any) -> Union[str, None] """Executes the generateReport operation on the server. Generate a PDF report for the last succesfull test run. generateReport(async_operation=bool)string ------------------------------------------ - async_operation (bool=False): True to execute the operation asynchronously. Any subsequent rest api calls made through the Connection class will block until the operation is complete. - Returns str: This method is asynchronous and has no return value. Raises ------ - NotFoundError: The requested resource does not exist on the server - ServerError: The server has encountered an uncategorized error condition """ payload = { "Arg1": self.href } for i in range(len(args)): payload['Arg%s' % (i + 2)] = args[i] for item in kwargs.items(): payload[item[0]] = item[1] return self._execute('generateReport', payload=payload, response_object=None) def Run(self, *args, **kwargs): # type: (*Any, **Any) -> Union[List[str], None] """Executes the run operation on the server. Starts the specified Quick Test and waits for its execution to finish. The IxNetwork model allows for multiple method Signatures with the same name while python does not. 
run(async_operation=bool)list ----------------------------- - async_operation (bool=False): True to execute the operation asynchronously. Any subsequent rest api calls made through the Connection class will block until the operation is complete. - Returns list(str): This method is synchronous and returns the result of the test. run(InputParameters=string, async_operation=bool)list ----------------------------------------------------- - InputParameters (str): The input arguments of the test. - async_operation (bool=False): True to execute the operation asynchronously. Any subsequent rest api calls made through the Connection class will block until the operation is complete. - Returns list(str): This method is synchronous and returns the result of the test. Raises ------ - NotFoundError: The requested resource does not exist on the server - ServerError: The server has encountered an uncategorized error condition """ payload = { "Arg1": self.href } for i in range(len(args)): payload['Arg%s' % (i + 2)] = args[i] for item in kwargs.items(): payload[item[0]] = item[1] return self._execute('run', payload=payload, response_object=None) def Start(self, *args, **kwargs): # type: (*Any, **Any) -> None """Executes the start operation on the server. Starts the specified Quick Test. The IxNetwork model allows for multiple method Signatures with the same name while python does not. start(async_operation=bool) --------------------------- - async_operation (bool=False): True to execute the operation asynchronously. Any subsequent rest api calls made through the Connection class will block until the operation is complete. start(InputParameters=string, async_operation=bool) --------------------------------------------------- - InputParameters (str): The input arguments of the test. - async_operation (bool=False): True to execute the operation asynchronously. Any subsequent rest api calls made through the Connection class will block until the operation is complete. Raises ------ - NotFoundError: The requested resource does not exist on the server - ServerError: The server has encountered an uncategorized error condition """ payload = { "Arg1": self.href } for i in range(len(args)): payload['Arg%s' % (i + 2)] = args[i] for item in kwargs.items(): payload[item[0]] = item[1] return self._execute('start', payload=payload, response_object=None) def Stop(self, *args, **kwargs): # type: (*Any, **Any) -> None """Executes the stop operation on the server. Stops the currently running Quick Test. stop(async_operation=bool) -------------------------- - async_operation (bool=False): True to execute the operation asynchronously. Any subsequent rest api calls made through the Connection class will block until the operation is complete. Raises ------ - NotFoundError: The requested resource does not exist on the server - ServerError: The server has encountered an uncategorized error condition """ payload = { "Arg1": self.href } for i in range(len(args)): payload['Arg%s' % (i + 2)] = args[i] for item in kwargs.items(): payload[item[0]] = item[1] return self._execute('stop', payload=payload, response_object=None) def WaitForTest(self, *args, **kwargs): # type: (*Any, **Any) -> Union[List[str], None] """Executes the waitForTest operation on the server. Waits for the execution of the specified Quick Test to be completed. waitForTest(async_operation=bool)list ------------------------------------- - async_operation (bool=False): True to execute the operation asynchronously. 
Any subsequent rest api calls made through the Connection class will block until the operation is complete. - Returns list(str): This method is synchronous and returns the result of the test. Raises ------ - NotFoundError: The requested resource does not exist on the server - ServerError: The server has encountered an uncategorized error condition """ payload = { "Arg1": self.href } for i in range(len(args)): payload['Arg%s' % (i + 2)] = args[i] for item in kwargs.items(): payload[item[0]] = item[1] return self._execute('waitForTest', payload=payload, response_object=None)
1.296875
1
factory_generator/management/commands/generate_factories.py
gamabounty/django-factory-generator
10
88
import os from django.apps import apps from django.core.management.base import BaseCommand from factory_generator.generator import FactoryAppGenerator class Command(BaseCommand): help = 'Create model factories for all installed apps' def handle(self, *args, **options): created_files = [] for app in apps.get_app_configs(): factory_app_generator = FactoryAppGenerator(app) created_files += factory_app_generator.create_files() self.stdout.write(self.style.SUCCESS('Successfully created factories:')) for created_file in created_files: self.stdout.write(self.style.SUCCESS('- ' + created_file))
1.578125
2
bin/ticker.py
aleasoluciones/infrabbitmq
0
96
# -*- coding: utf-8 -*- import time import puka import argparse import logging from infcommon import utils from infrabbitmq import factory as infrabbitmq_factory from infrabbitmq.rabbitmq import RabbitMQError from infrabbitmq.events_names import ( TICK_1_SECOND, TICK_1_MINUTE, TICK_2_MINUTES, TICK_5_MINUTES, TICK_60_MINUTES, ) def publish_event(publisher, event, network, secs, mins): logging.info("publish event {} {}".format(event, secs)) publisher.publish(event, network, data={'tick': secs, 'mins': mins}) def main(network): publisher = infrabbitmq_factory.event_publisher_json_serializer() secs = 0 mins = 0 rabbitmq_exceptions = (RabbitMQError, puka.AMQPError, KeyError,) while True: time.sleep(1) secs += 1 utils.do_stuff_with_exponential_backoff(rabbitmq_exceptions, publish_event, publisher, TICK_1_SECOND, network, secs, mins) if secs % 60 == 0: mins += 1 secs = 0 utils.do_stuff_with_exponential_backoff(rabbitmq_exceptions, publish_event, publisher, TICK_1_MINUTE, network, secs, mins) if mins % 2 == 0: utils.do_stuff_with_exponential_backoff(rabbitmq_exceptions, publish_event, publisher, TICK_2_MINUTES, network, secs, mins) if mins % 5 == 0: utils.do_stuff_with_exponential_backoff(rabbitmq_exceptions, publish_event, publisher, TICK_5_MINUTES, network, secs, mins) if mins % 60 == 0: utils.do_stuff_with_exponential_backoff(rabbitmq_exceptions, publish_event, publisher, TICK_60_MINUTES, network, secs, mins) if __name__ == '__main__': try: parser = argparse.ArgumentParser() parser.add_argument('-n', '--network', action='store', required=True, help='Network name (ilo, c2k, ...)') args = parser.parse_args() network = args.network.split('-')[0] main(network) except Exception as exc: logging.critical("Ticker Fails: {}".format(exc))
1.539063
2
tests/test_process.py
confluentinc/utils-core
0
112
import pytest from utils.process import run, silent_run, RunError from utils.fs import in_temp_dir def test_run(capsys): with in_temp_dir(): assert run('echo hello > hello.txt; echo world >> hello.txt', shell=True) out = run('ls', return_output=True) assert out == 'hello.txt\n' out = run(['cat', 'hello.txt'], return_output=True) assert out == 'hello\nworld\n' with pytest.raises(RunError): run('blah') assert not run('blah', raises=False) assert silent_run('ls -l') out, _ = capsys.readouterr() assert out == ''
1.039063
1
tests/test_model/test_recognizer/test_shufflenetv1.py
YinAoXiong/ZCls
0
128
# -*- coding: utf-8 -*- """ @date: 2021/5/16 下午10:22 @file: test_shufflenetv1.py @author: zj @description: """ import torch from zcls.config import cfg from zcls.config.key_word import KEY_OUTPUT from zcls.model.recognizers.build import build_recognizer def test_data(model): data = torch.randn(1, 3, 224, 224) outputs = model(data)[KEY_OUTPUT] print(outputs.shape) assert outputs.shape == (1, 1000) def test_shufflenet(): cfg.merge_from_file('configs/benchmarks/shufflenet/shufflenet_v1_3g2x_zcls_imagenet_224.yaml') print(cfg) model = build_recognizer(cfg, torch.device('cpu')) print(model) test_data(model) if __name__ == '__main__': test_shufflenet()
1.210938
1
algo/vigenere.py
dkushche/Crypto
3
144
import crypto_tools from itertools import cycle def vigenere_little_doc(): return "encrypt/decrypt using vigenere cypher" def vigenere_full_doc(): return """ Advanced caesar we change dict on each char """ def vigenere_str_to_list(string, vigenere_dict): result = list() for char in string: try: result.append(vigenere_dict.index(char)) except ValueError: err_msg = f"There is no {key[inx]} in alphabet" raise ValueError(err_msg) return result def vigenere_processing(data, key, lang, encrypt): vigenere_dict = crypto_tools.get_param_json_data("alphabets.json", lang) num_data = vigenere_str_to_list(data, vigenere_dict) num_key = vigenere_str_to_list(key, vigenere_dict) dict_size = len(vigenere_dict) num_key = cycle(num_key) if (encrypt == "encrypt"): num_result = [(a + b) % dict_size for a, b in zip(num_data, num_key)] else: num_result = [ (a + dict_size - b) % dict_size for a, b in zip(num_data, num_key) ] result_str = "" for val in num_result: result_str += vigenere_dict[val] return result_str @crypto_tools.file_manipulation() def vigenere(data): lang = crypto_tools.cterm('input', 'Data language: ', 'ans') key = crypto_tools.cterm('input', 'Enter key(str): ', 'ans') encrypt = crypto_tools.cterm('input', 'You want encrypt or decrypt: ', 'ans') if encrypt != "encrypt" and encrypt != "decrypt": raise ValueError("Incorrect action") data = crypto_tools.utf_decoder(data) return vigenere_processing(data, key, lang, encrypt) vigenere.little_doc = vigenere_little_doc vigenere.full_doc = vigenere_full_doc
2.1875
2
met/metadataparser/models/entity_type.py
z1digitalstudio/met
11
184
################################################################# # MET v2 Metadate Explorer Tool # # This Software is Open Source. See License: https://github.com/TERENA/met/blob/master/LICENSE.md # Copyright (c) 2012, TERENA All rights reserved. # # This Software is based on MET v1 developed for TERENA by Yaco Sistemas, http://www.yaco.es/ # MET v2 was developed for TERENA by <NAME>, DAASI International GmbH, http://www.daasi.de # Current version of MET has been revised for performance improvements by <NAME>, # Consortium GARR, http://www.garr.it ########################################################################## from django.db import models from django.utils.translation import ugettext_lazy as _ class EntityType(models.Model): """ Model describing the type of an entity. """ name = models.CharField(blank=False, max_length=20, unique=True, verbose_name=_(u'Name'), db_index=True) xmlname = models.CharField(blank=False, max_length=20, unique=True, verbose_name=_(u'Name in XML'), db_index=True) def __unicode__(self): return self.name
1.273438
1
deduplicate.py
Ghostofapacket/NewsGrabber-Deduplicate
0
192
import sys sys.path.append('/usr/local/lib/python3.4/site-packages/') from warc_dedup import deduplicate def main(): if len(sys.argv) == 1: raise Exception('Please provide the WARC file as argument.') deduplicate.Warc(*sys.argv[1:]).deduplicate() if __name__ == '__main__': main()
0.882813
1
gcp-python-fn/main.py
FuriKuri/faas-playground
1
216
def hello_world(request): request_json = request.get_json() name = 'World' if request_json and 'name' in request_json: name = request_json['name'] headers = { 'Access-Control-Allow-Origin': 'https://furikuri.net', 'Access-Control-Allow-Methods': 'GET, POST', 'Access-Control-Allow-Headers': 'Content-Type' } return ('Hello ' + name + '! From GCP + Python', 200, headers)
1.1875
1
setup.py
dantas/wifi
1
272
#!/usr/bin/env python from setuptools import setup import os __doc__ = """ Command line tool and library wrappers around iwlist and /etc/network/interfaces. """ def read(fname): return open(os.path.join(os.path.dirname(__file__), fname)).read() install_requires = [ 'setuptools', 'pbkdf2', ] try: import argparse except: install_requires.append('argparse') version = '1.0.0' setup( name='wifi', version=version, author='<NAME>, <NAME>', author_email='<EMAIL>', description=__doc__, long_description=read('README.rst'), packages=['wifi'], scripts=['bin/wifi'], test_suite='tests', platforms=["Debian"], license='BSD', install_requires=install_requires, classifiers=[ "License :: OSI Approved :: BSD License", "Topic :: System :: Networking", "Operating System :: POSIX :: Linux", "Environment :: Console", "Programming Language :: Python", "Programming Language :: Python :: 2.6", "Programming Language :: Python :: 2.7", "Programming Language :: Python :: 3.3", ], data_files=[ ('/etc/bash_completion.d/', ['extras/wifi-completion.bash']), ] )
1.117188
1
Chapter07/library/check_user_py3.py
djouani/Learning-Ansible-2.X-Third-Edition
22
280
#!/usr/bin/env python

import pwd
from ansible.module_utils.basic import AnsibleModule


class User:
    def __init__(self, user):
        self.user = user

    # Check if user exists
    def check_if_user_exists(self):
        try:
            user = pwd.getpwnam(self.user)
            success = True
            ret_msg = 'User %s exists' % self.user
        except KeyError:
            success = False
            ret_msg = 'User %s does not exist' % self.user
        return success, ret_msg


def main():
    # Parsing argument file
    module = AnsibleModule(
        argument_spec = dict(
            user = dict(required=True)
        )
    )
    user = module.params.get('user')
    chkusr = User(user)
    success, ret_msg = chkusr.check_if_user_exists()

    # Error handling and JSON return
    if success:
        module.exit_json(msg=ret_msg)
    else:
        module.fail_json(msg=ret_msg)


if __name__ == "__main__":
    main()
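The User helper can also be exercised outside an Ansible run; a hedged sketch follows. The account name nobody is only an example, the import path assumes the module file is on sys.path, and ansible must still be installed because AnsibleModule is imported at module load time.

# Direct use of the User helper, outside of an Ansible task.
from check_user_py3 import User

ok, msg = User("nobody").check_if_user_exists()
print(ok, msg)  # e.g. True 'User nobody exists' on most Linux systems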
1.71875
2
unit_13/26-Data_Structures/4_Merge_Sort_and_Linked_Lists/3_linked_list_merge_sort.py
duliodenis/python_master_degree
19
288
#
# Data Structures: Linked List Merge Sort: The Conquer Step
# Python Techdegree
#
# Created by <NAME> on 3/24/19.
# Copyright (c) 2019 ddApps. All rights reserved.
# ------------------------------------------------

from linked_list import Node, LinkedList


def merge_sort(linked_list):
    '''
    Sorts a linked list in ascending order.
    - Recursively divide the linked list into sublists containing a single node
    - Repeatedly merge the sublists to produce sorted sublists until one remains

    Returns a sorted linked list.
    Runs in O(kn log n) time.
    '''
    if linked_list.size() == 1:
        return linked_list
    elif linked_list.is_empty():
        return linked_list

    left_half, right_half = split(linked_list)
    left = merge_sort(left_half)
    right = merge_sort(right_half)

    return merge(left, right)


def split(linked_list):
    '''
    Divide the unsorted list at the midpoint into sublists.
    Takes O(k log n) quasilinear time.
    '''
    if linked_list == None or linked_list.head == None:
        left_half = linked_list
        right_half = None
        return left_half, right_half
    else:  # non-empty linked lists
        size = linked_list.size()
        midpoint = size // 2
        mid_node = linked_list.node_at_index(midpoint - 1)

        left_half = linked_list
        right_half = LinkedList()
        # Attach the second half of the chain as the head of the new list
        right_half.head = mid_node.next_node
        mid_node.next_node = None

        return left_half, right_half


def merge(left, right):
    '''
    Merges two linked lists, sorting by data in nodes.
    Returns a new, merged list.
    Runs in O(n) linear time.
    '''
    # Create a new linked list that contains nodes from
    # merging left and right
    merged = LinkedList()

    # Add a fake head that is discarded later to simplify code
    merged.add(0)

    # Set current to the head of the linked list
    current = merged.head

    # Obtain head nodes for left and right linked lists
    left_head = left.head
    right_head = right.head

    # Iterate over left and right until we reach the tail node
    # of either
    while left_head or right_head:
        # If the head node of the left is None, we're past the tail
        # Add the node from right to merged linked list
        if left_head is None:
            current.next_node = right_head
            # Call next on right to set loop condition to False
            right_head = right_head.next_node
        # If the head node of right is None, we're past the tail
        # Add the tail node from left to merged linked list
        elif right_head is None:
            current.next_node = left_head
            # Call next on left to set loop condition to False
            left_head = left_head.next_node
        else:
            # Not at either tail node
            # Obtain node data to perform comparison operations
            left_data = left_head.data
            right_data = right_head.data
            # If data on left is less than right, set current to left node
            if left_data < right_data:
                current.next_node = left_head
                # Move left head to next node
                left_head = left_head.next_node
            # If data on left is greater than right, set current to right node
            else:
                current.next_node = right_head
                # Move right head to next node
                right_head = right_head.next_node
        # Move current to next node
        current = current.next_node

    # Discard fake head and set first merged node as head
    head = merged.head.next_node
    merged.head = head

    return merged


l = LinkedList()
l.add(10)
l.add(2)
l.add(44)
l.add(15)
l.add(200)
print(l)

sorted_linked_list = merge_sort(l)
print(sorted_linked_list)
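A quick sanity check for merge_sort against Python's built-in sorted(), intended to be appended to the file above; to_pylist is a helper added here for illustration and assumes only the Node/LinkedList attributes already used in the lesson (head, next_node, data, add).

# Sanity check: the merged result should hold the same values, in
# ascending order, as sorted() applied to the raw data.
def to_pylist(linked_list):
    values = []
    node = linked_list.head
    while node:
        values.append(node.data)
        node = node.next_node
    return values


data = [10, 2, 44, 15, 200]
ll = LinkedList()
for value in data:
    ll.add(value)

result = merge_sort(ll)
assert to_pylist(result) == sorted(data)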
3.484375
3
examples/calc.py
manatlan/htag
1
328
import os,sys; sys.path.insert(0,os.path.dirname(os.path.dirname(__file__))) from htag import Tag """ This example show you how to make a "Calc App" (with physical buttons + keyboard events) There is no work for rendering the layout ;-) Can't be simpler ! """ class Calc(Tag.div): statics=[Tag.H.style(""" .mycalc *,button {font-size:2em;font-family: monospace} """)] def init(self): self.txt="" self.aff = Tag.Div("&nbsp;",_style="border:1px solid black") self["class"]="mycalc" self <= self.aff self <= Tag.button("C", _onclick=self.bind( self.clean) ) self <= [Tag.button(i, _onclick=self.bind( self.press, i) ) for i in "0123456789+-x/."] self <= Tag.button("=", _onclick=self.bind( self.compute ) ) #-/-/-/-/-/-/-/-/-/-/-/-/-/-/-/-/-/-/-/-/-/-/-/-/-/-/-/-/-/-/-/ with real keyboard self["onkeyup"] = self.bind( self.presskey, b"event.key" ) def presskey(self,key): if key in "0123456789+-*/.": self.press(key) elif key=="Enter": self.compute() elif key in ["Delete","Backspace"]: self.clean() #-/-/-/-/-/-/-/-/-/-/-/-/-/-/-/-/-/-/-/-/-/-/-/-/-/-/-/-/-/-/-/ def press(self,val): self.txt += val self.aff.set( self.txt ) def compute(self): try: self.txt = str(eval(self.txt.replace("x","*"))) self.aff.set( self.txt ) except: self.txt = "" self.aff.set( "Error" ) def clean(self): self.txt="" self.aff.set("&nbsp;") if __name__=="__main__": # import logging # logging.basicConfig(format='[%(levelname)-5s] %(name)s: %(message)s',level=logging.DEBUG) # logging.getLogger("htag.tag").setLevel( logging.INFO ) # and execute it in a pywebview instance from htag.runners import * # here is another runner, in a simple browser (thru ajax calls) BrowserHTTP( Calc ).run() # PyWebWiew( Calc ).run()
1.625
2
libsaas/services/twilio/applications.py
MidtownFellowship/libsaas
155
344
from libsaas import http, parsers from libsaas.services import base from libsaas.services.twilio import resource class ApplicationsBase(resource.TwilioResource): path = 'Applications' class Application(ApplicationsBase): def create(self, *args, **kwargs): raise base.MethodNotSupported() class Applications(ApplicationsBase): @base.apimethod def get(self, FriendlyName=None, Page=None, PageSize=None, AfterSid=None): """ Fetch the Applications belonging to an account. :var FriendlyName: Only return the Account resources with friendly names that exactly match this name. :vartype FriendlyName: str :var Page: The current page number. Zero-indexed, so the first page is 0. :vartype Page: int :var PageSize: How many resources to return in each list page. The default is 50, and the maximum is 1000. :vartype PageSize: int :var AfterSid: The last Sid returned in the previous page, used to avoid listing duplicated resources if new ones are created while paging. :vartype AfterSid: str """ params = resource.get_params(None, locals()) request = http.Request('GET', self.get_url(), params) return request, parsers.parse_json def update(self, *args, **kwargs): raise base.MethodNotSupported() def delete(self, *args, **kwargs): raise base.MethodNotSupported() class ConnectAppsBase(resource.TwilioResource): path = 'ConnectApps' def create(self, *args, **kwargs): raise base.MethodNotSupported() def delete(self, *args, **kwargs): raise base.MethodNotSupported() class ConnectApp(ConnectAppsBase): pass class ConnectApps(ConnectAppsBase): @base.apimethod def get(self, Page=None, PageSize=None, AfterSid=None): """ Fetch the Connect Apps belonging to an account. :var Page: The current page number. Zero-indexed, so the first page is 0. :vartype Page: int :var PageSize: How many resources to return in each list page. The default is 50, and the maximum is 1000. :vartype PageSize: int :var AfterSid: The last Sid returned in the previous page, used to avoid listing duplicated resources if new ones are created while paging. :vartype AfterSid: str """ params = resource.get_params(None, locals()) request = http.Request('GET', self.get_url(), params) return request, parsers.parse_json def update(self, *args, **kwargs): raise base.MethodNotSupported() class AuthorizedConnectAppsBase(resource.TwilioResource): path = 'AuthorizedConnectApps' def create(self, *args, **kwargs): raise base.MethodNotSupported() def update(self, *args, **kwargs): raise base.MethodNotSupported() def delete(self, *args, **kwargs): raise base.MethodNotSupported() class AuthorizedConnectApp(AuthorizedConnectAppsBase): pass class AuthorizedConnectApps(AuthorizedConnectAppsBase): @base.apimethod def get(self, Page=None, PageSize=None, AfterSid=None): """ Fetch the Authorized Connect Apps belonging to an account. :var Page: The current page number. Zero-indexed, so the first page is 0. :vartype Page: int :var PageSize: How many resources to return in each list page. The default is 50, and the maximum is 1000. :vartype PageSize: int :var AfterSid: The last Sid returned in the previous page, used to avoid listing duplicated resources if new ones are created while paging. :vartype AfterSid: str """ params = resource.get_params(None, locals()) request = http.Request('GET', self.get_url(), params) return request, parsers.parse_json
1.71875
2
MarkReport/MarkReport.py
dedukun/MarkReport
0
360
#!/usr/bin/env python3 # Command line flags import os import glob import re import pyinotify import subprocess from sys import stdout, stderr from time import time, sleep from tempfile import gettempdir from distutils.dir_util import copy_tree from shutil import copyfile from weasyprint import HTML import argparse parser = argparse.ArgumentParser( description='Converts Markdown to elegant PDF reports') parser.add_argument('--basic', dest='basic', action='store_true', help='Do not enrich HTML with LaTeX and syntax highlighting (faster builds)') parser.add_argument('--watch', dest='watch', action='store_true', help='Watch the current folder for changes and rebuild automatically') parser.add_argument('--quiet', dest='quiet', action='store_true', help='Do not output any information') parser.add_argument("--timeout", type=int, default=2, help='Page generation timeout') parser.add_argument("--base-html", type=str, default="", help='The path to the base HTML file') parser.set_defaults(watch=False) args = parser.parse_args() # Check directory ok = False for file in os.listdir("."): if file.endswith(".md"): ok = True break if not ok: stderr.write("No markdown file found in the current folder") exit(1) if args.base_html != "": if not os.path.isfile(args.base_html): stderr.write("The given base HTML file doesn't exist") exit(1) script_path = os.path.dirname(os.path.realpath(__file__)) # Temp dir timestamp = str(int(time())) tmp_dir = gettempdir() + "/" + timestamp + "_md-report/" os.makedirs(tmp_dir, exist_ok=True) # Headless browser if not args.basic: from selenium import webdriver from selenium.webdriver.firefox.options import Options from selenium.webdriver.common.desired_capabilities import DesiredCapabilities options = Options() options.headless = True options.log.level = "trace" d = DesiredCapabilities.FIREFOX d['loggingPrefs'] = {'browser': 'ALL'} driver = webdriver.Firefox(options=options, capabilities=d) driver.set_page_load_timeout(args.timeout) prev_compile_time = 0 def recompile(notifier): if notifier is not None and (notifier.maskname != "IN_MODIFY" or notifier.pathname.endswith(".pdf")): return global prev_compile_time if time() - prev_compile_time < 1: return prev_compile_time = time() if not args.quiet: stdout.write("\rBuilding the PDF file...") stdout.flush() files = glob.glob(tmp_dir + '/*.md') for f in files: os.remove(f) if args.base_html == "": copyfile(script_path + "/base.html", tmp_dir + "/base.html") else: copyfile(args.base_html, tmp_dir + "/base.html") if not os.path.islink(tmp_dir + "/src"): os.symlink(script_path + "/src", tmp_dir + "/src") copy_tree(".", tmp_dir) # Markdown parsing subprocess.check_output(script_path + "/md-parsing " + tmp_dir, shell=True).decode('utf-8') html_file_name = tmp_dir + "output.html" # Interpret JS code if not args.basic: driver.get("file:///" + html_file_name) sleep(2) elem = driver.find_element_by_xpath("//*") interpreted_html = elem.get_attribute("outerHTML") with open(html_file_name, "w") as html_out_file: html_out_file.write(interpreted_html) # Create final PDF file pdf = HTML(html_file_name).write_pdf() f = open("output.pdf", 'wb') f.write(pdf) if not args.quiet: stdout.write("\rDone. ") stdout.flush() recompile(None) if not args.watch: if not args.basic: driver.quit() exit(0) watch_manager = pyinotify.WatchManager() event_notifier = pyinotify.Notifier(watch_manager, recompile) watch_manager.add_watch(os.path.abspath("."), pyinotify.ALL_EVENTS, rec=True) event_notifier.loop() if not args.basic: driver.quit()
1.65625
2
beansdbadmin/core/client.py
ariesdevil/beansdbadmin
11
400
#!/usr/bin/python # encoding: utf-8 '''a rich client 1. for one server (instead of multi like in libmc.Client) 2. encapsulate @, ?, gc ... use is instead of libmc.Client ''' import telnetlib import logging import libmc import string import urllib import itertools import warnings from collections import defaultdict from beansdbadmin.core.hint import parse_new_hint_body from beansdbadmin.core.data import parse_records from beansdbadmin.core.hash import get_khash64 def get_url_content(url): return urllib.urlopen(url).read() def check_bucket(bucket): assert 0 <= bucket < 16 def dir_to_dict(dir_str): d = dict() if dir_str: for line in [x for x in dir_str.split('\n') if x]: key_or_bucket, _hash, ver_or_count = line.split(' ') d[key_or_bucket] = int(_hash) & 0xffff, int(ver_or_count) return d def get_bucket_keys_count(store, bucket, depth=1): cmd = "@" sub = bucket if depth == 2: cmd = "@%x" % (bucket/16) sub = bucket % 16 result = store.get(cmd) if result: lines = result.split('\n') for line in lines: if len(line) == 0: continue d, _, c = line.split() if d.endswith('/'): bucket_ = int(d[0], 16) if bucket_ == sub: return int(c) raise Exception('get %s from %s, reply = [%s], bucket %x not found' % (cmd, store, result, bucket)) def get_buckets_keys_count(store): """ return dict: buckets -> count """ st = {} try: for line in (store.get('@') or '').split('\n'): if line: d, _, c = line.split(' ') if not d.endswith('/'): continue st[int(d[0], 16)] = int(c) return st except IOError: raise Exception("cannot get @ from %s" % (store)) def get_primary_buckets(store): """ return possible primary buckets, might be wrong on temporary nodes, result is list of buckets in integer """ ss = get_buckets_keys_count(store) bucket_list = ss.items() bucket_list = [x for x in bucket_list if x[1] > 0] if not bucket_list: return None bucket_list.sort(lambda a, b: cmp(a[1], b[1]), reverse=True) result = [bucket_list[0]] for i in bucket_list[1:]: if result[-1][1] / i[1] >= 2: break result.append(i) return [x[0] for x in result] def get_key_info_disk(store, key): '''return ver, vhash, flag, vsz, ts, fid, pos''' info = store.get('??' + key) if info: return [int(x) for x in info.split()] def is_gc_running(ip, port): s = get_gc_status(ip, port) if s and s.find('running') >= 0: return True return False def get_gc_status(ip, port): t = telnetlib.Telnet(ip, port) t.write('optimize_stat\r\n') out = t.read_until('\n') t.write('quit\r\n') t.close() return out.strip("\r\n") def connect(server, **kwargs): comp_threshold = kwargs.pop('comp_threshold', 0) prefix = kwargs.pop('prefix', None) if prefix is not None: warnings.warn('"prefix" is deprecated. 
' 'use douban.wrapper.Prefix instead.') c = libmc.Client([server], do_split=0, comp_threshold=comp_threshold, prefix=prefix) c.config(libmc.MC_CONNECT_TIMEOUT, 300) # 0.3s c.config(libmc.MC_POLL_TIMEOUT, 3000) # 3s c.config(libmc.MC_RETRY_TIMEOUT, 5) # 5s return c class MCStore(object): IGNORED_LIBMC_RET = frozenset([ libmc.MC_RETURN_OK, libmc.MC_RETURN_INVALID_KEY_ERR ]) def __init__(self, addr): self.addr = addr self.host, port = addr.split(":") self.port = int(port) self.mc = connect(addr) def __repr__(self): return '<MCStore(addr=%s)>' % repr(self.addr) def __str__(self): return self.addr def set(self, key, data, rev=0): return bool(self.mc.set(key, data, rev)) def set_raw(self, key, data, rev=0, flag=0): if rev < 0: raise Exception(str(rev)) return self.mc.set_raw(key, data, rev, flag) def set_multi(self, values, return_failure=False): return self.mc.set_multi(values, return_failure=return_failure) def _check_last_error(self): last_err = self.mc.get_last_error() if last_err not in self.IGNORED_LIBMC_RET: raise IOError(last_err, self.mc.get_last_strerror()) def get(self, key): try: r = self.mc.get(key) if r is None: self._check_last_error() return r except ValueError: self.mc.delete(key) def get_raw(self, key): r, flag = self.mc.get_raw(key) if r is None: self._check_last_error() return r, flag def get_multi(self, keys): r = self.mc.get_multi(keys) self._check_last_error() return r def delete(self, key): return bool(self.mc.delete(key)) def delete_multi(self, keys, return_failure=False): return self.mc.delete_multi(keys, return_failure=return_failure) def exists(self, key): return bool(self.mc.get('?' + key)) def incr(self, key, value): return self.mc.incr(key, int(value)) class DBClient(MCStore): def __init__(self, addr): MCStore.__init__(self, addr) self._is_old = None def stats(self): stats = self.mc.stats() return stats.values()[0] if stats else None def is_old(self): if self._is_old is None: ver = self.get_server_version() self._is_old = (ver.strip().split(".")[0] == "0") return self._is_old def get_collision_summary(self, bucket): check_bucket(bucket) raw = self.get("@collision_%x" % bucket) if raw is None: return None count, hcount, khash, data_size = raw.split() return (int(count), int(hcount), int(khash, 16), int(data_size)) def get_collision(self, bucket): check_bucket(bucket) collisions = defaultdict(dict) hint_data = self.get("@collision_all_%x" % bucket) if hint_data is None: return dict() for key, meta, _ in parse_new_hint_body(hint_data): khash_str, _, ver, vhash = meta collisions[khash_str][key] = (vhash, ver) return dict(collisions) def get_records_by_khash_raw(self, khash): if self.is_old(): return [] if not isinstance(khash, str): khash = "%016x" % khash return self.get("@@" + khash) def get_records_by_khash(self, khash_str): raw = self.get_records_by_khash_raw(khash_str) if raw: return parse_records(raw, False) else: return [] def start_gc(self, bucket='', start_fid=0, end_fid=None): """ bucket must be in 0 or 00 string """ if bucket: assert isinstance(bucket, basestring) and len(bucket) <= 2 t = telnetlib.Telnet(self.host, self.port) tree = '@%s' % bucket if end_fid is None: gc_cmd = 'gc {} {}\n'.format(tree, start_fid) else: gc_cmd = 'gc {} {} {}\n'.format(tree, start_fid, end_fid) t.write(gc_cmd) out = t.read_until('\n').strip('\r\n') assert out == 'OK' t.write('quit\n') t.close() def start_gc_all_buckets(self, db_depth): hex_digits = string.digits + 'abcdef' buckets_iter = itertools.product(*[hex_digits for _ in range(db_depth)]) buckets = [''.join(i) for i in 
buckets_iter] self.start_gc_buckets(buckets) def start_gc_buckets(self, buckets): for b in buckets: self.start_gc(bucket=b) while True: status = self.get_gc_status() if status.find('running') >= 0: continue elif status == 'success': print "bucket %s gc done" % b break elif status == 'fail': return self.fail("optimize_stat = fail") else: self.fail(status) def get_gc_status(self): return get_gc_status(self.host, self.port) def get_version(self, key): meta = self.get("?" + key) if meta: return int(meta.split()[0]) def item_count(self): s = self.stats() if s is None: return None return int(s['total_items']) def get_key_info_mem(self, key, khash64=None): ''' return (vhash, ver) or None''' if khash64 is None: khash64 = get_khash64(key) khash32_str = "@%08x" % (khash64 >> 32) _dir = self.get_dir(khash32_str) if self.is_old(): return _dir.get(key, None) else: return _dir.get("%016x" % khash64, None) def get_khash_info_mem(self, khash): ''' return [(key, (vhash, ver))], key is "" for v2.''' khash32 = "@%08x" % (khash >> 32) _dir = self.get_dir(khash32) ret = [] if self.is_old(): for k, (vhash, ver) in _dir.iteritems(): if get_khash64(k) == khash: ret.append((k, (vhash, ver))) else: for k, (vhash, ver) in _dir.iteritems(): if int(k, 16) == khash: return [("", (int(vhash), ver))] return ret def get_server_version(self): try: st = self.stats() if st: return st["version"] except IOError: logging.error("fail to get version %s", self) except KeyError: logging.error("fail to get version %s %s", self, st) def get_dir(self, path): ''' return dict case1: map dir(0-f) to (hash, count), like {'0/': (1471, 27784005), ... }, case2: map key(or khash) to (vhash, version), like {'3000000377e9c2ad': (22212, 1), ... }''' try: content = self.get(path) except IOError: content = '' return dir_to_dict(content) def list_dir(self, d): # FIXME: d should not need prefix @? '''list all KEY in the dir! not use it if dir is large!''' for path, (vhash, ver) in sorted(self.get_dir(d).items()): if path.endswith('/') and len(path) == 2: for v in self.list_dir(d + path[:-1]): yield v else: yield path, int(vhash), int(ver) def get_bucket_keys_count(self, bucket, depth=1): return get_bucket_keys_count(self, bucket, depth) def get_key_info_disk(self, key): '''return ver, vhash, flag, vsz, ts, fid, pos''' return get_key_info_disk(self, key) def prepare(self, data): return libmc.encode_value(data, self.mc.comp_threshold) def close(self): pass def test_new(addr, bucket): b = bucket c = DBClient(addr) print "stats:", c.stats() print 'version:', c.get_server_version() print "isold:", c.is_old() print "dir root:", c.get_dir("@") print "bucket key count:", c.get_bucket_keys_count(int(b)) print "item_count:", c.item_count() print "primary_buckets", get_primary_buckets(c) leaf = c.get_dir("@" + b + "000000") print "a dir leaf:", leaf khash_str = list(leaf)[0] print "a khash_str", khash_str r = c.get_records_by_khash(khash_str)[0] k = r[0] print "key, len(value), (flag, tstamp, ver):", k, r[1], r[3:] print "key info mem:", c.get_key_info_mem(k) print "key info disk(ver, vhash, flag, vsz, ts, fid, pos):", \ c.get_key_info_disk(k) print "key version:", c.get_version(k) print "collision_summary", c.get_collision_summary(int(b)) print "gc status:", c.get_gc_status() if __name__ == '__main__': test_new("rosa3a:7900", '3')
1.554688
2
TransactionBook/gui_kivy/generic/MultiSelectPopUp.py
LukHad/AccountBook
0
408
from kivy.uix.gridlayout import GridLayout from kivy.uix.label import Label from kivy.uix.textinput import TextInput from kivy.garden.matplotlib.backend_kivyagg import FigureCanvasKivyAgg from kivy.uix.anchorlayout import AnchorLayout from kivy.uix.boxlayout import BoxLayout from kivy.uix.button import Button import matplotlib.pyplot as plt import matplotlib import datetime from TransactionBook.model.Filter import Filter from datetime import datetime from kivy.uix.popup import Popup from kivy.properties import NumericProperty, ReferenceListProperty from kivy.uix.checkbox import CheckBox from kivy.core.window import Window class MultiSelectPopUp(Popup): pHint_x = NumericProperty(0.7) pHint_y = NumericProperty(0.7) pHint = ReferenceListProperty(pHint_x, pHint_y) def __init__(self, title, option_list, option_init=None, callback=None, multiselect=True, **kwargs): super().__init__(**kwargs) self.title = title self.callback = callback self.main_layout = AnchorLayout() if option_init is None: option_init = [True] * len(option_list) self.grid = GridLayout(cols=1) self.opt_boxes = [] self.labels = [] for i, opt in enumerate(option_list): box = BoxLayout(orientation='horizontal') check_box = CheckBox(active=option_init[i]) if not multiselect: check_box.group = "Single_Select_Only_Group" label = Label(text=str(opt)) self.opt_boxes.append(check_box) self.labels.append(label) box.add_widget(check_box) box.add_widget(label) self.grid.add_widget(box) cancel_button = Button(text="Cancel") cancel_button.bind(on_press=self.cancel_callback) ok_button = Button(text="Ok") ok_button.bind(on_press=self.ok_callback) box = BoxLayout(orientation='horizontal') box.add_widget(cancel_button) box.add_widget(ok_button) self.grid.add_widget(box) self.main_layout.add_widget(self.grid) self.content = self.main_layout self.size_hint = self.pHint Window.release_all_keyboards() self.open() def ok_callback(self, _): selection = [] for i, check_box in enumerate(self.opt_boxes): if check_box.active: selection.append(self.labels[i].text) self.callback(selection) self.dismiss() def cancel_callback(self, _): self.dismiss() if __name__ == "__main__": from kivy.base import runTouchApp def cb(list_of_selection): print(list_of_selection) c = MultiSelectPopUp(title="Test", option_list=["Item1", "Item2", "Item3"], callback=cb, option_init=[True, False, True]) runTouchApp(c)
1.8125
2
microbepy/plot/mutation_plot.py
ScienceStacks/MicrobEPy
1
416
"""Provides plots of mutations for Isolates and Lines.""" from microbepy.common import constants as cn from microbepy.common.dataframe_sorter import DataframeSorter from microbepy.common.isolate import Isolate from microbepy.common import util from microbepy.correlation import genome_correlation from microbepy.data.model_data_provider import ModelDataProvider from microbepy.data import util_data from microbepy.plot.mutation_cofraction import MutationCofraction from microbepy.plot.util_plot import PlotParms import matplotlib.pyplot as plt import numpy as np import pandas as pd import seaborn as sns COLORS = ['red', 'green', 'blue'] SPECIES = {cn.SPECIES_MIX_DVH: "DVH", cn.SPECIES_MIX_MMP: "MMP", None: "both"} FONTSIZE_TITLE = 16 FONTSIZE_LABEL = 8 MAX_LINES = 9 MIN_FRACTION = 0.25 THRESHOLD_FRAC = 0.2 MAX_SIGLVL = 0.01 COLORBAR_MIN = 1.0 COLORBAR_MAX = 4.0 class MutationLinePlot(object): """ Plot mutations by occurrences within Lines. """ def __init__(self, mutation_column=cn.GGENE_ID, species=None, is_plot=True): """ :param str mutation_column: :param bool is_plot: """ self._mutation_column = mutation_column self._is_plot = is_plot self._species = species self.cofraction = MutationCofraction(species=self._species, mutation_column=mutation_column) def plotTransfers(self, parms=PlotParms(is_initialize=False), is_unit_fraction = False, is_cluster_mutations=True): """ Does a stacked bar plot of mutation frequency for all transfers. :param bool is_unit_fraction: round fraction to 1 :param bool is_cluster_mutations: Group similar mutations together :return pd.DataFrame: row=mutation, col=line + transfer, value is fraction """ permitted_mutations = self.cofraction.ordered_mutations transfers = self.cofraction.transfers num_transfers = len(transfers) fig, axes = plt.subplots(nrows=num_transfers, ncols=1) dfs = [] for idx, transfer in enumerate(transfers): parms[cn.PLT_YTICKLABELS] = True if self._species is None: parms[cn.PLT_TITLE] = "%d" % transfer else: parms[cn.PLT_TITLE] = "%s, %d" % (self._species, transfer) if idx == 0: parms[cn.PLT_YLABEL] = True else: parms[cn.PLT_YLABEL] = False if idx < num_transfers - 1: parms[cn.PLT_LEGEND] = False parms[cn.PLT_XLABEL] = False parms[cn.PLT_XTICKLABELS] = False else: parms[cn.PLT_LEGEND] = True parms[cn.PLT_XLABEL] = True parms[cn.PLT_XTICKLABELS] = True df = self.plotLine(transfer, parms=parms, is_plot=False, ax=axes[idx], permitted_mutations=permitted_mutations, is_unit_fraction=is_unit_fraction) df[cn.TRANSFER] = transfer dfs.append(df) if self._is_plot: plt.show() return pd.concat(dfs) def plotLine(self, transfer, parms=PlotParms(is_initialize=False), is_unit_fraction=False, is_plot=None, ax=None, permitted_mutations=None): """ Does a stacked bar plot of mutation frequency by line with colors :params int transfer: :params PlotParms parms: :params Axis ax: axis to use in plot :param list-str permitted_mutations: to use and how they are ordered if None, then use alphabetical order :param bool is_unit_fraction: round non-zero fraction to 1 :return pd.DataFrame: row=mutation, col=line, value is fraction """ if is_plot is None: is_plot = self._is_plot parms.setTrueIfAbsent(cn.PLT_XLABEL) parms.setTrueIfAbsent(cn.PLT_XTICKLABELS) # df_plot = self.cofraction.makeLineDF( permitted_mutations=permitted_mutations, transfer=transfer) if is_unit_fraction: df_plot = df_plot.applymap( lambda v: 1 if v> MIN_FRACTION else v) # Do the plot if not cn.PLT_FIGSIZE in parms: parms[cn.PLT_FIGSIZE] = (12, 8) if ax is None: ax = df_plot.plot(kind='bar', stacked=True, 
figsize=parms[cn.PLT_FIGSIZE], legend=None) else: df_plot.plot(kind='bar', stacked=True, legend=None, ax=ax, figsize=parms[cn.PLT_FIGSIZE]) ax.set_xlabel("", fontsize=FONTSIZE_LABEL) # Eliminate implicit label if parms.isFalse(cn.PLT_XTICKLABELS): labels = ax.get_xticklabels() new_labels = np.repeat("", len(labels)) ax.set_xticklabels(new_labels) if parms.isFalse(cn.PLT_YTICKLABELS): labels = ax.get_yticklabels() new_labels = np.repeat("", len(labels)) ax.set_yticklabels(new_labels) if cn.PLT_TITLE in parms: title = parms[cn.PLT_TITLE] else: title = "%s Mutations" % SPECIES[self._species] xpos = int(len(df_plot)*0.5) ypos = MAX_LINES - 3 ax.text(xpos, ypos, title, fontsize=FONTSIZE_TITLE) ax.set_ylim([0, MAX_LINES]) if parms.isTrue(cn.PLT_YLABEL): if is_unit_fraction: label = "No. Lines" else: label = "Fraction" ax.set_ylabel(label , fontsize=FONTSIZE_LABEL) if parms.isTrue(cn.PLT_XLABEL): ax.set_xlabel(self._mutation_column, fontsize=FONTSIZE_LABEL) if parms.isTrue(cn.PLT_LEGEND): ax.legend(loc=(1,2)) #ax.legend() if is_plot: plt.show() return df_plot def _makeMutationSiglvlMatrix(self, transfer=cn.TRANSFER_DEFAULT, other_transfer=None, min_fraction=MIN_FRACTION): """ Creates a significance level matrix for mutations. :param int transfer: transfer time for row mutations :param int other_transfer: transfer time for column mutations :param float min_fraction: minimum fractional occurrence of a mutation within a line for it to be considered :return pd.DataFrame: row index and columns are mutations """ def makeDF(transfer): df_line = self.cofraction.makeLineDF(transfer=transfer) df_binary = df_line.applymap( lambda v: 0 if np.isnan(v) else v) df_binary = df_line.applymap( lambda v: 1.0 if v > min_fraction else 0) return df_binary.transpose() # if other_transfer is None: other_transfer = transfer # df_binary_rows = makeDF(transfer) df_binary_columns = makeDF(other_transfer) df_matrix = genome_correlation.makeSiglvlDF(df_binary_rows, df_other=df_binary_columns) return df_matrix def _plotSiglvlDF(self, transfer=cn.TRANSFER_DEFAULT, other_transfer=None, max_siglvl=MAX_SIGLVL): """ Constructs a the dataframe used for heatmap. :param int transfer: :param float max_siglvl: :return pd.DataFrame: mutations, mutations, values are -log10 significance level """ df_matrix = self._makeMutationSiglvlMatrix(transfer=transfer, other_transfer=other_transfer) sorter = DataframeSorter(df_matrix) df_sort = sorter.orderBoth() # df_transformed = df_sort.applymap(lambda v: np.log10(v)) df_transformed = df_transformed.applymap(lambda v: -v) ubound = -np.log10(max_siglvl) df_plot = df_transformed.applymap( lambda v: np.nan if v < ubound else v) sorter = DataframeSorter(df_plot) df_plot = sorter.deleteNanRowsAndColumns() return df_plot def plotCofractions(self, is_time_lag=False, threshold_frac=THRESHOLD_FRAC, is_difference_frac=False, is_differenced=False, is_compress=False, parms=PlotParms(), **kwargs): """ Does a subplots of the fraction of lines in which mutations co-occur. 
:param bool is_time_lag: construct time lag subplots :param bool is_differenced: Computes the difference in count fractions :param dict kwargs: non-transfer parameters passed to next level :return dict: key is pair of transfers, value is data_frame """ def funcDF(transfer, other_transfer): if is_differenced: df = self.cofraction.makeCofractionDifferencedDF( transfer=transfer, other_transfer=other_transfer, threshold_frac=threshold_frac) else: df = self.cofraction.makeCofractionDF(transfer=transfer, is_difference_frac=is_difference_frac, other_transfer=other_transfer) if is_compress: df.dropna(axis=0, how='all', inplace=True) df.dropna(axis=1, how='all', inplace=True) return df # return self._plotTransfers(funcDF, is_time_lag, parms=parms, heat_range=[0, 1.0], **kwargs) def plotSiglvls(self, is_time_lag=False, max_siglvl=MAX_SIGLVL, parms=PlotParms(), **kwargs): """ Does a subplots of mutation correlation significance levels. :param bool is_time_lag: construct time lag subplots :param dict kwargs: non-transfer parameters passed to next level :return dict: key is pair of transfers, value is data_frame """ def funcDF(transfer, other_transfer): return self._plotSiglvlDF(transfer=transfer, max_siglvl=max_siglvl, other_transfer=other_transfer) # return self._plotTransfers(funcDF, is_time_lag, parms=parms, heat_range = [COLORBAR_MIN, COLORBAR_MAX], **kwargs) def _plotTransfers(self, funcDF, is_time_lag, parms=PlotParms(), **kwargs): """ Does a subplots of mutation mutations over transfers. :param Function funcDF: has kwargs transfer, other_transfer; returns a dataframe of mutations as columns and index; values are used in the heatmap. :param bool is_time_lag: construct time lag subplots :param dict kwargs: non-transfer parameters passed to next level :return dict: key is pair of transfers, value is data_frame """ NCOLS = 3 plot_pos = {1:1, 2:3, 3:4, 4:6} NPLOTS = 6 transfers = self.cofraction.transfers if is_time_lag: pairs = [p for p in zip(transfers[:-1], transfers[1:])] else: pairs = [p for p in zip(transfers[:-1], transfers[:-1])] # # Calculate the column order df = funcDF(transfer=cn.TRANSFER_1000G, other_transfer=cn.TRANSFER_1000G) df = df.fillna(0) # Set up for plots nrows = 2 if (len(pairs) == 4) else 3 fig = plt.figure(figsize=parms[cn.PLT_FIGSIZE]) result = {} for idx, pair in enumerate(pairs): idx += 1 ax = fig.add_subplot(nrows, NCOLS, plot_pos[idx]) if idx < len(pairs): is_plot = False else: is_plot = True if idx in [1, 2, 5]: parms[cn.PLT_XAXISTICKTOP] = True else: parms[cn.PLT_XAXISTICKTOP] = False if idx == 4: parms[cn.PLT_COLORBAR] = True else: parms[cn.PLT_COLORBAR] = False transfer = pair[0] other_transfer = pair[1] df = funcDF(transfer=transfer, other_transfer=other_transfer) df = df.applymap(lambda v: np.nan if v == 0 else v) self._plotTransferCompare(df, transfer=transfer, other_transfer=other_transfer, ordered_columns=self.cofraction.ordered_mutations, is_center_colorbar=True, fig=fig, ax=ax, parms=parms, is_plot=is_plot, **kwargs) result[pair] = df return result def plotSiglvl(self, max_siglvl=MAX_SIGLVL, transfer=cn.TRANSFER_DEFAULT, other_transfer=None, is_center_colorbar = True, **kwargs): """ Constructs a heatmap of the mutation coocurrence significance levels. 
:param float max_siglvl: maximum significance level :return pd.DataFrame: columns, rows are mutations """ df_plot = self._plotSiglvlDF(transfer=transfer, other_transfer=other_transfer, max_siglvl=max_siglvl) self._plotTransferCompare(df_plot, heat_range = [COLORBAR_MIN, COLORBAR_MAX], ordered_mutations=self.cofraction.ordered_mutations, transfer=transfer, other_transfer=other_transfer, is_center_colorbar=is_center_colorbar, **kwargs) return df_plot def plotCofraction(self, threshold_frac=THRESHOLD_FRAC, transfer=cn.TRANSFER_DEFAULT, other_transfer=None, is_difference_frac=False, is_differenced=False, is_center_colorbar=True, is_compress=False, parms=PlotParms(), **kwargs): """ Constructs a heatmap of the mutation coocurrence fractions. :param int transfer: Transfer for which plot is done :param bool is_differenced: Computes the difference in count fractions :param bool is_compress: Eliminate rows/columns with 0 values :return pd.DataFrame: columns, rows are mutations """ if is_differenced: df = self.cofraction.makeCofractionDifferencedDF( threshold_frac=threshold_frac, transfer=transfer, other_transfer=other_transfer, **kwargs) df = df.applymap(lambda v: np.nan if np.abs(v) < threshold_frac else v) else: df = self.cofraction.makeCofractionDF(transfer=transfer, is_difference_frac=is_difference_frac, other_transfer=other_transfer, **kwargs) df = df.applymap(lambda v: np.nan if v < threshold_frac else v) if is_compress: df.dropna(axis=0, how='all', inplace=True) df.dropna(axis=1, how='all', inplace=True) is_include_missing_mutations = False else: is_include_missing_mutations = True ordered_columns = self.cofraction.ordered_mutations self._plotTransferCompare(df, heat_range=[0, 1.0], ordered_columns=ordered_columns, parms=parms, transfer=transfer, other_transfer=other_transfer, is_center_colorbar=is_center_colorbar, is_include_missing_mutations=is_include_missing_mutations, **kwargs) return df def _plotTransferCompare(self, df_plot, heat_range, ordered_columns=None, is_center_colorbar=True, transfer=cn.TRANSFER_DEFAULT, other_transfer=None, ax=None, fig=None, is_include_missing_mutations=True, parms=PlotParms(), is_plot=None): """ Constructs a heatmap comparing values for mutations from two transfers. 
:param pd.DataFrame df_plot: index and columns are mutations; values are plotted on the heatmap :param list-str ordered_columns: order in which columns appear :param bool is_center_colorbar: center the colorbar in the plot :param float, float: values on the heatmap range :param int transfer: :param int other_transfer: Allow comparisons across time :param Matplotlib.Axes ax: :param PlotParms parms: Parameters for the plot :param bool is_plot: Overrides constructor plotting directive :param bool is_include_missing_mutations: """ def makeLabel(transfer, column, is_include_column=False): if is_include_column: label = "%d-%s" % (transfer, column) else: label = "%d" % transfer return label def setValue(a_dict, key, default): if not key in a_dict.keys(): a_dict[key] = default # if is_plot is None: is_plot = self._is_plot elif not self._is_plot: is_plot = self._is_plot if ordered_columns is None: ordered_columns = list(set(df_plot.columns.tolist()).union( df_plot.index)) # Do the plot if not cn.PLT_COLORBAR in parms: parms[cn.PLT_COLORBAR] = True if other_transfer is None: other_transfer = transfer if ax is None: if fig is None: fig = plt.figure(figsize=parms[cn.PLT_FIGSIZE]) ax = fig.add_subplot(1, 1, 1) # Order the columns if is_include_missing_mutations: columns = df_plot.columns.tolist() missing_columns = set(ordered_columns).difference(columns) extended_ordered_columns = list(ordered_columns) extended_ordered_columns.extend( set(columns).difference(ordered_columns)) for col in missing_columns: df_plot[col] = np.nan df_plot.loc[col, :] = np.nan df_plot = df_plot.reindex(extended_ordered_columns) df_plot = df_plot[extended_ordered_columns] rows = df_plot.columns.tolist() columns = df_plot.columns.tolist() else: extended_ordered_columns = ordered_columns rows = df_plot.index.tolist() columns = df_plot.columns.tolist() mutations = df_plot.columns.tolist() # Set up plot information parms[cn.PLT_XLABEL] = "" setValue(parms, cn.PLT_COLORBAR, True) xpos = 1.05*len(columns) ypos = -0.05*len(rows) parms[cn.PLT_XLABEL] = "" xlabel = makeLabel(other_transfer, self._mutation_column) parms[cn.PLT_YLABEL] = makeLabel( transfer, self._mutation_column) ax.text(xpos, ypos, xlabel, fontsize=parms.fontsize_label) # # Construct the plot plot = ax.pcolor(df_plot, cmap='jet', vmin=heat_range[0], vmax=heat_range[1]) if parms.isTrue(cn.PLT_COLORBAR): if is_center_colorbar: # Colorbar positions: left, bottom, width, height cbaxes = fig.add_axes([.45, 0.2, 0.01, 0.5]) cb = fig.colorbar(plot, cax = cbaxes, cmap='jet') cb.ax.tick_params(labelsize=parms.fontsize_label) else: cb = fig.colorbar(plot, cmap='jet') cb.ax.tick_params(labelsize=parms.fontsize_label) row_labels = df_plot.columns.tolist() col_labels = df_plot.index.tolist() if parms.isTrue(cn.PLT_XAXISTICKTOP): ax.xaxis.tick_top() ax.set_xticks(np.arange(0.5, len(row_labels))) ax.set_xticklabels(row_labels, rotation=90, fontsize=parms.fontsize_label) ax.set_yticks(np.arange(0.5, len(col_labels))) ax.set_yticklabels(col_labels, fontsize=parms.fontsize_label) #parms[cn.PLT_YLABEL] = "" parms.do(is_plot=False) if is_plot: parms[cn.PLT_YLABEL] = "" parms.do(is_plot=False) ylabel = makeLabel(transfer, self._mutation_column) xpos = -3 ypos = 0.5*len(rows) ypos = -1 ax.set_ylabel(ylabel, fontsize=parms.fontsize_label, x=xpos, y=ypos) #plt.show() parms.do(is_plot=is_plot) else: parms.do(is_plot=is_plot)
2.34375
2
machine.py
yukti07/Dell_Hire_hack
0
448
import pandas as pd import matplotlib.pyplot as plt import seaborn as sns from sklearn.model_selection import train_test_split from sklearn.preprocessing import LabelEncoder from sklearn.ensemble import RandomForestClassifier from sklearn.metrics import accuracy_score from flask import flash import numpy as np def check(X, clf): # print("TTTTTTTTTTTTThis is XXXXXXXXXXXX") # print(X) X = np.array(X) labelencoder_X_1 = LabelEncoder() X[:, 1] = labelencoder_X_1.fit_transform(X[:, 1]) labelencoder_X_2 = LabelEncoder() X[:, 2] = labelencoder_X_2.fit_transform(X[:, 2]) labelencoder_X_5 = LabelEncoder() X[:, 5] = labelencoder_X_5.fit_transform(X[:, 5]) labelencoder_X_6 = LabelEncoder() X[:, 6] = labelencoder_X_6.fit_transform(X[:, 6]) labelencoder_X_7 = LabelEncoder() X[:, 7] = labelencoder_X_7.fit_transform(X[:, 7]) labelencoder_X_9 = LabelEncoder() X[:, 9] = labelencoder_X_9.fit_transform(X[:, 9]) labelencoder_X_12 = LabelEncoder() X[:, 12] = labelencoder_X_12.fit_transform(X[:, 12]) p = clf.predict(X) t = () for x in p: if x == 0: a = 'No' else: a = 'Yes' t = t+(a,) return t def analyze(df, clf): feature_importances = pd.DataFrame(clf.feature_importances_, index=['Age', 'BusinessTravel', 'Department', 'DistanceFromHome', 'Education', 'EducationField', 'Gender', 'JobRole', 'JobSatisfaction', 'MaritalStatus', 'MonthlyIncome', 'NumCompaniesWorked', 'OverTime', 'PercentSalaryHike', 'YearsInCurrentRole', 'YearsSinceLastPromotion'],columns=['importance']).sort_values('importance',ascending=False) feature_importances['x1'] = feature_importances.index ax = feature_importances.plot.bar(x='x1', y='importance', rot=90) plt.savefig('templates/graphs/raw/feature_importances.png', frameon=True) intervals = [x for x in range(0, 22000, 2000)] categories = ['<'+str(x) for x in range(2000, 22000, 2000)] df1 = df df1['Income_Categories'] = pd.cut(df.MonthlyIncome, intervals, labels=categories) ax = sns.countplot(x="Income_Categories", hue="Attrition", palette="Set1", data=df1) ax.set(title="Monthly Income vs Attrition", xlabel="Income group", ylabel="Total") plt.xticks(rotation=-30) plt.savefig('templates/graphs/raw/MIvsAttr.png') intervals = [x for x in range(18,63,3)] categories = ['<'+str(x) for x in range(21,63,3)] df1 = df df1['Age_Categories'] = pd.cut(df.Age, intervals, labels=categories) ax = sns.countplot(x="Age_Categories", hue="Attrition", palette="Set1", data=df1) ax.set(title="Age vs Attrition", xlabel="Age group", ylabel="Total") plt.xticks(rotation=-30) plt.savefig('templates/graphs/raw/AgevsAttr.png') intervals = [x for x in range(0,32,2)] categories = ['<'+str(x) for x in range(2,32,2)] df1 = df df1['Distance_from_home'] = pd.cut(df.DistanceFromHome, intervals, labels=categories) ax = sns.countplot(x="Distance_from_home", hue="Attrition", palette="Set1", data=df1) ax.set(title="Distance from home vs Attrition", xlabel="Distance", ylabel="Total") plt.xticks(rotation=-30) plt.savefig('templates/graphs/raw/DistanceFromHomevsAttr.png') ax = sns.countplot(x="PercentSalaryHike", hue="Attrition", palette="Set1", data=df1) ax.set(title="Salary Hike Percentage vs Attrition", xlabel="Salary Hike Percentage", ylabel="Total") plt.savefig('templates/graphs/raw/PercentSalaryHikevsAttr.png') ax = sns.countplot(x="NumCompaniesWorked", hue="Attrition", palette="Set1", data=df1) ax.set(title="Number Of Previously Worked Companies vs Attrition", xlabel="Number Of Previously Worked Companies", ylabel="Total") plt.savefig('templates/graphs/raw/NPWCvsAttr.png') intervals = [x for x in range(0,22,2)] categories = 
['<'+str(x) for x in range(2,22,2)] df1 = df df1['Current_Role'] = pd.cut(df.YearsInCurrentRole, intervals, labels=categories) ax = sns.countplot(x="Current_Role", hue="Attrition", palette="Set1", data=df1) ax.set(title="Number Of Years in Current Role vs Attrition", xlabel="Number Of Years in Current Role", ylabel="Total") plt.xticks(rotation=-30) plt.savefig('templates/graphs/raw/YICRvsAttr.png') ax = sns.countplot(x="OverTime", hue="Attrition", palette="Set1", data=df1) ax.set(title="Over Time vs Attrition", xlabel="Over Time", ylabel="Total") plt.savefig('templates/graphs/raw/OverTimevsAttr.png') ax = sns.countplot(x="JobRole", hue="Attrition", palette="Set1", data=df1) ax.set(title="Job Role vs Attrition", xlabel="Job Role", ylabel="Total") plt.xticks(rotation=70) plt.savefig('templates/graphs/raw/JobRolevsAttr.png') intervals = [x for x in range(0,18,2)] categories = ['<'+str(x) for x in range(2,18,2)] df1 = df df1['Promotion'] = pd.cut(df.YearsSinceLastPromotion, intervals, labels=categories) ax = sns.countplot(x="Promotion", hue="Attrition", palette="Set1", data=df1) ax.set(title="Number of Years since Promotion vs Attrition", xlabel="Number of Years since Promotion", ylabel="Total") plt.xticks(rotation=-30) plt.savefig('templates/graphs/raw/YSCPvsAttr.png') ax = sns.countplot(x="MaritalStatus", hue="Attrition", palette="Set1", data=df1) ax.set(title="Marital Status vs Attrition", xlabel="Marital Status", ylabel="Total") plt.savefig('templates/graphs/raw/MSvsAttr.png') def run(data): df = pd.read_csv('original_dataset.csv') skills = df['Skills'].tolist() # print("SKKKKKKKKKKKKKKKILLLLLLLLLLLLLLLS") # print(skills) df = df.drop(['DailyRate', 'EmployeeCount', 'YearsAtCompany', 'TotalWorkingYears', 'JobLevel', 'HourlyRate', 'MonthlyRate', 'Over18', 'StandardHours', 'EnvironmentSatisfaction', 'JobInvolvement', 'PerformanceRating', 'TrainingTimesLastYear', 'RelationshipSatisfaction', 'StockOptionLevel', 'WorkLifeBalance', 'YearsWithCurrManager'], axis=1) df = df[['Attrition', 'Age', 'BusinessTravel', 'Department', 'DistanceFromHome', 'Education', 'EducationField', 'Gender', 'JobRole', 'JobSatisfaction', 'MaritalStatus', 'MonthlyIncome', 'NumCompaniesWorked', 'OverTime', 'PercentSalaryHike', 'YearsInCurrentRole', 'YearsSinceLastPromotion']] #print("These re SKILSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSS") #print(skills) X = df.iloc[:, 1:].values y = df.iloc[:, 0].values labelencoder_X_1 = LabelEncoder() X[:, 1] = labelencoder_X_1.fit_transform(X[:, 1]) labelencoder_X_2 = LabelEncoder() X[:, 2] = labelencoder_X_2.fit_transform(X[:, 2]) labelencoder_X_5 = LabelEncoder() X[:, 5] = labelencoder_X_5.fit_transform(X[:, 5]) labelencoder_X_6 = LabelEncoder() X[:, 6] = labelencoder_X_6.fit_transform(X[:, 6]) labelencoder_X_7 = LabelEncoder() X[:, 7] = labelencoder_X_7.fit_transform(X[:, 7]) labelencoder_X_9 = LabelEncoder() X[:, 9] = labelencoder_X_9.fit_transform(X[:, 9]) labelencoder_X_12 = LabelEncoder() X[:, 12] = labelencoder_X_12.fit_transform(X[:, 12]) X = X.astype(float) labelencoder_y = LabelEncoder() y = labelencoder_y.fit_transform(y) X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.40,random_state=0) clf = RandomForestClassifier(n_estimators=200) clf.fit(X_train,y_train) p = clf.predict(X_test) acc = accuracy_score(y_test,p)*100 flash(acc) X = [list(elem) for elem in data] [r.pop(0) for r in X] #print("####### THIS IS XXXX##########") #print(X) att = check(X, clf) skills = skills[:(len(att)):] print("LLLLLLLLLLLLLLLENGHT" + str(len(att)) +" " + str(len(skills))) i = 0 
for row in att: X[i].insert(0, row) i = i+1 df1 = pd.DataFrame(X) df1.columns=['Attrition', 'Age', 'BusinessTravel', 'Department', 'DistanceFromHome', 'Education', 'EducationField', 'Gender', 'JobRole', 'JobSatisfaction', 'MaritalStatus', 'MonthlyIncome', 'NumCompaniesWorked', 'OverTime', 'PercentSalaryHike', 'YearsInCurrentRole', 'YearsSinceLastPromotion'] analyze(df, clf) df1.to_csv('dataset1.csv') return att, skills
2.0625
2
scripts/get_file_name_as_variable.py
amin-henteti/airflow-dags
0
472
import inspect


def foo():
    # inspect.stack()[0] is the current frame record; index 3 is the
    # function name ("foo"), not the file name.
    print(inspect.stack()[0][3])


foo()
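Given the script's name, get_file_name_as_variable.py, a hedged sketch of how the file name itself (rather than the function name) could be obtained from the same frame record; current_file() is an illustrative helper, not part of the original script.

# The frame record exposes the file name at index 1; __file__ gives the
# same information at module level.
import inspect
import os


def current_file():
    return os.path.basename(inspect.stack()[0][1])


print(current_file())               # name of this script
print(os.path.basename(__file__))   # same information at module level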
0.917969
1
examples/cmrc2018_example/main.trainer.py
fangd123/TextBrewer
1,121
480
import logging logging.basicConfig( format='%(asctime)s - %(levelname)s - %(name)s - %(message)s', datefmt='%Y/%m/%d %H:%M:%S', level=logging.INFO, ) logger = logging.getLogger("Main") import os,random import numpy as np import torch from processing import convert_examples_to_features, read_squad_examples from processing import ChineseFullTokenizer from pytorch_pretrained_bert.my_modeling import BertConfig from optimization import BERTAdam import config from utils import read_and_convert, divide_parameters from modeling import BertForQASimple, BertForQASimpleAdaptorTraining from textbrewer import DistillationConfig, TrainingConfig, BasicTrainer from torch.utils.data import TensorDataset, DataLoader, RandomSampler from functools import partial from train_eval import predict def args_check(args): if os.path.exists(args.output_dir) and os.listdir(args.output_dir): logger.warning("Output directory () already exists and is not empty.") if args.gradient_accumulation_steps < 1: raise ValueError("Invalid gradient_accumulation_steps parameter: {}, should be >= 1".format( args.gradient_accumulation_steps)) if not args.do_train and not args.do_predict: raise ValueError("At least one of `do_train` or `do_predict` must be True.") if args.local_rank == -1 or args.no_cuda: device = torch.device("cuda" if torch.cuda.is_available() and not args.no_cuda else "cpu") n_gpu = torch.cuda.device_count() if not args.no_cuda else 0 else: device = torch.device("cuda", args.local_rank) n_gpu = 1 torch.distributed.init_process_group(backend='nccl') logger.info("device %s n_gpu %d distributed training %r", device, n_gpu, bool(args.local_rank != -1)) args.n_gpu = n_gpu args.device = device return device, n_gpu def main(): #parse arguments config.parse() args = config.args for k,v in vars(args).items(): logger.info(f"{k}:{v}") #set seeds torch.manual_seed(args.random_seed) torch.cuda.manual_seed_all(args.random_seed) np.random.seed(args.random_seed) random.seed(args.random_seed) #arguments check device, n_gpu = args_check(args) os.makedirs(args.output_dir, exist_ok=True) forward_batch_size = int(args.train_batch_size / args.gradient_accumulation_steps) args.forward_batch_size = forward_batch_size #load bert config bert_config_S = BertConfig.from_json_file(args.bert_config_file_S) assert args.max_seq_length <= bert_config_S.max_position_embeddings #read data train_examples = None train_features = None eval_examples = None eval_features = None num_train_steps = None tokenizer = ChineseFullTokenizer(vocab_file=args.vocab_file, do_lower_case=args.do_lower_case) convert_fn = partial(convert_examples_to_features, tokenizer=tokenizer, max_seq_length=args.max_seq_length, doc_stride=args.doc_stride, max_query_length=args.max_query_length) if args.do_train: train_examples,train_features = read_and_convert(args.train_file,is_training=True, do_lower_case=args.do_lower_case, read_fn=read_squad_examples,convert_fn=convert_fn) if args.fake_file_1: fake_examples1,fake_features1 = read_and_convert(args.fake_file_1,is_training=True, do_lower_case=args.do_lower_case, read_fn=read_squad_examples,convert_fn=convert_fn) train_examples += fake_examples1 train_features += fake_features1 if args.fake_file_2: fake_examples2, fake_features2 = read_and_convert(args.fake_file_2,is_training=True, do_lower_case=args.do_lower_case, read_fn=read_squad_examples,convert_fn=convert_fn) train_examples += fake_examples2 train_features += fake_features2 num_train_steps = int(len(train_features)/args.train_batch_size) * args.num_train_epochs if 
args.do_predict: eval_examples,eval_features = read_and_convert(args.predict_file,is_training=False, do_lower_case=args.do_lower_case, read_fn=read_squad_examples,convert_fn=convert_fn) #Build Model and load checkpoint model_S = BertForQASimple(bert_config_S,args) #Load student if args.load_model_type=='bert': assert args.init_checkpoint_S is not None state_dict_S = torch.load(args.init_checkpoint_S, map_location='cpu') state_weight = {k[5:]:v for k,v in state_dict_S.items() if k.startswith('bert.')} missing_keys,_ = model_S.bert.load_state_dict(state_weight,strict=False) assert len(missing_keys)==0 elif args.load_model_type=='all': assert args.tuned_checkpoint_S is not None state_dict_S = torch.load(args.tuned_checkpoint_S,map_location='cpu') model_S.load_state_dict(state_dict_S) else: logger.info("Model is randomly initialized.") model_S.to(device) if args.local_rank != -1 or n_gpu > 1: if args.local_rank != -1: raise NotImplementedError elif n_gpu > 1: model_S = torch.nn.DataParallel(model_S) #,output_device=n_gpu-1) if args.do_train: #parameters params = list(model_S.named_parameters()) all_trainable_params = divide_parameters(params, lr=args.learning_rate) logger.info("Length of all_trainable_params: %d", len(all_trainable_params)) optimizer = BERTAdam(all_trainable_params,lr=args.learning_rate, warmup=args.warmup_proportion,t_total=num_train_steps,schedule=args.schedule, s_opt1=args.s_opt1, s_opt2=args.s_opt2, s_opt3=args.s_opt3) logger.info("***** Running training *****") logger.info(" Num orig examples = %d", len(train_examples)) logger.info(" Num split examples = %d", len(train_features)) logger.info(" Forward batch size = %d", forward_batch_size) logger.info(" Num backward steps = %d", num_train_steps) ########### DISTILLATION ########### train_config = TrainingConfig( gradient_accumulation_steps = args.gradient_accumulation_steps, ckpt_frequency = args.ckpt_frequency, log_dir = args.output_dir, output_dir = args.output_dir, device = args.device) distiller = BasicTrainer(train_config = train_config, model = model_S, adaptor = BertForQASimpleAdaptorTraining) all_input_ids = torch.tensor([f.input_ids for f in train_features], dtype=torch.long) all_input_mask = torch.tensor([f.input_mask for f in train_features], dtype=torch.long) all_doc_mask = torch.tensor([f.doc_mask for f in train_features], dtype=torch.float) all_segment_ids = torch.tensor([f.segment_ids for f in train_features], dtype=torch.long) all_start_positions = torch.tensor([f.start_position for f in train_features], dtype=torch.long) all_end_positions = torch.tensor([f.end_position for f in train_features], dtype=torch.long) train_dataset = TensorDataset(all_input_ids, all_segment_ids, all_input_mask, all_doc_mask, all_start_positions, all_end_positions) if args.local_rank == -1: train_sampler = RandomSampler(train_dataset) else: raise NotImplementedError train_dataloader = DataLoader(train_dataset, sampler=train_sampler, batch_size=args.forward_batch_size,drop_last=True) callback_func = partial(predict, eval_examples=eval_examples, eval_features=eval_features, args=args) with distiller: distiller.train(optimizer, scheduler=None, dataloader=train_dataloader, num_epochs=args.num_train_epochs, callback=callback_func) if not args.do_train and args.do_predict: res = predict(model_S,eval_examples,eval_features,step=0,args=args) print (res) if __name__ == "__main__": main()
1.765625
2
tests/test_models/test_components/test_discriminators/test_light_cnn.py
ChenShuwei1001/mmediting
0
488
import pytest import torch from mmedit.models.builder import build_component from mmedit.models.components.discriminators.light_cnn import MaxFeature def test_max_feature(): # cpu conv2d = MaxFeature(16, 16, filter_type='conv2d') x1 = torch.rand(3, 16, 16, 16) y1 = conv2d(x1) assert y1.shape == (3, 16, 16, 16) linear = MaxFeature(16, 16, filter_type='linear') x2 = torch.rand(3, 16) y2 = linear(x2) assert y2.shape == (3, 16) # gpu if torch.cuda.is_available(): x1 = x1.cuda() x2 = x2.cuda() conv2d = conv2d.cuda() linear = linear.cuda() y1 = conv2d(x1) assert y1.shape == (3, 16, 16, 16) y2 = linear(x2) assert y2.shape == (3, 16) # filter_type should be conv2d or linear with pytest.raises(ValueError): MaxFeature(12, 12, filter_type='conv1d') def test_light_cnn(): cfg = dict(type='LightCNN', in_channels=3) net = build_component(cfg) net.init_weights(pretrained=None) # cpu inputs = torch.rand((2, 3, 128, 128)) output = net(inputs) assert output.shape == (2, 1) # gpu if torch.cuda.is_available(): net.init_weights(pretrained=None) net = net.cuda() output = net(inputs.cuda()) assert output.shape == (2, 1) # pretrained should be str or None with pytest.raises(TypeError): net.init_weights(pretrained=[1])
1.492188
1
basic_stats.py/basic_stats.py
RahmB/basic_stats
0
520
# Import the matplotlib module here. No other modules should be used.
# Import plotting library
import matplotlib.pyplot as plt

# Import Numpy
import numpy as np


def mean(my_list):  # This is the definition in the head.
    i = 0
    my_sum = 0
    for number in my_list:
        my_sum = my_sum + my_list[i]
        i += 1
    mu = my_sum / i
    print('mean = ' + str(mu))
    return mu


def sd(my_list):
    j = 0
    sigma = 0
    my_sumsd = 0
    mu = mean(my_list)
    for number in my_list:
        my_sumsd = my_sumsd + (my_list[j] - mu)**2
        j += 1
    sigma = (my_sumsd/j)**(.5)
    print('standard deviation = ' + str(sigma))
    return sigma


def norm(my_list):
    k = 0
    l = 0
    mu = mean(my_list)
    sigma = sd(my_list)
    for number in my_list:
        if abs(my_list[l] - mu) < sigma:
            k += 1
            l += 1
        else:
            l += 1
    dist = k / l
    return dist


def is_norm(my_list):
    dist = norm(my_list)
    if 0.66 < dist < 0.70:
        print('Data is normally distributed')
        return True
    else:
        print('Data is not normally distributed')
        return False


def is_skew(my_list):
    m = 0
    skew = 0
    sumsk = 0
    mu = mean(my_list)
    sigma = sd(my_list)
    for numbers in my_list:
        sumsk = (my_list[m] - mu)**3 + sumsk
        m += 1
    skew = sumsk / (len(my_list)*sigma**3)
    print('skewness = ' + str(skew))
    if skew == 0:
        print('skewness = 0, therefore sample is normally distributed')
    else:
        print('skewness =/= 0, therefore sample is not normally distributed')


def graph(my_list):
    plt.hist(my_list, density=True, facecolor='b')
    sigma = sd(my_list)  # stores standard deviation
    mu = mean(my_list)  # stores mean
    plt.title('my_list Histogram')
    plt.xlabel('Number')
    plt.ylabel('Probability')
    plt.xlim(mu - 4*sigma, mu + 4*sigma)
    plt.grid(True)
    plt.show()


def stats(my_list):
    mu = mean(my_list)
    std = sd(my_list)
    dist = norm(my_list)
    graph(my_list)
    is_norm(my_list)
    is_skew(my_list)
    return (mu, std, dist)
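A hedged usage sketch for the module above, assuming it is importable as basic_stats; the sample values are arbitrary and only show how stats() prints the summary figures, draws the histogram, and returns the mean, standard deviation, and fraction of values within one standard deviation.

# Example run; the sample values are arbitrary.
from basic_stats import stats

sample = [2, 4, 4, 4, 5, 5, 7, 9]
mu, std, dist = stats(sample)
print(mu, std, dist)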
3.03125
3
starry/_core/ops/lib/include/oblate/tests/test_derivs.py
rodluger/starry
116
560
import oblate
import numpy as np
import pytest

# TODO!
0.445313
0
process_script/stat.py
vitorebatista/AVEMH
2
616
import numpy as np import pandas as pd import sys markets = ["hangseng", "dax", "ftse", "sp", "nikkei"] market = markets[int(sys.argv[1])-1] # read GD data file dat = pd.read_csv("./num_res/{}.GD.csv".format(market)) # split into two experiments exp1_GD = dat[dat.columns[:5]] exp2_GD = dat[dat.columns[5:]] # calculate statistics stat1_GD = pd.DataFrame([exp1_GD.min(), exp1_GD.median(), exp1_GD.std()]) stat1_GD.index = ["Best", "Median", "Std."] stat2_GD = pd.DataFrame([exp2_GD.min(), exp2_GD.median(), exp2_GD.std()]) stat2_GD.index = ["Best", "Median", "Std."] # find best and second best algorithm meds1_GD = stat1_GD.loc["Median"].sort_values() best1_GD = list(meds1_GD.index[:2]) meds2_GD = stat2_GD.loc["Median"].sort_values() best2_GD = list(meds2_GD.index[:2]) print("{}.GD:".format(market), best1_GD[0], best1_GD[1]) # print("{}.GD:".format(market), best2_GD[0], best2_GD[1]) # TODO: check error # read Spacing data file dat = pd.read_csv("./num_res/{}.Spacing.csv".format(market)) # split into two experiments exp1_Spacing = dat[dat.columns[:5]] exp2_Spacing = dat[dat.columns[5:]] # calculate statistics stat1_Spacing = pd.DataFrame( [exp1_Spacing.min(), exp1_Spacing.median(), exp1_Spacing.std()]) stat1_Spacing.index = ["Best", "Median", "Std."] stat2_Spacing = pd.DataFrame( [exp2_Spacing.min(), exp2_Spacing.median(), exp2_Spacing.std()]) stat2_Spacing.index = ["Best", "Median", "Std."] # find best and second best algorithm meds1_Spacing = stat1_Spacing.loc["Median"].sort_values() best1_Spacing = list(meds1_Spacing.index[:2]) meds2_Spacing = stat2_Spacing.loc["Median"].sort_values() best2_Spacing = list(meds2_Spacing.index[:2]) print("{}.Spacing:".format(market), best1_Spacing[0], best1_Spacing[1]) # print("{}.Spacing:".format(market), best2_Spacing[0], best2_Spacing[1]) # TODO: check error # read MaxSpread data file dat = pd.read_csv("./num_res/{}.MaxSpread.csv".format(market)) # split into two experiments exp1_MaxSpread = dat[dat.columns[:5]] exp2_MaxSpread = dat[dat.columns[5:]] # calculate statistics stat1_MaxSpread = pd.DataFrame( [exp1_MaxSpread.max(), exp1_MaxSpread.median(), exp1_MaxSpread.std()]) stat1_MaxSpread.index = ["Best", "Median", "Std."] stat2_MaxSpread = pd.DataFrame( [exp2_MaxSpread.max(), exp2_MaxSpread.median(), exp2_MaxSpread.std()]) stat2_MaxSpread.index = ["Best", "Median", "Std."] # find best and second best algorithm meds1_MaxSpread = stat1_MaxSpread.loc["Median"].sort_values(ascending=False) best1_MaxSpread = list(meds1_MaxSpread.index[:2]) meds2_MaxSpread = stat2_MaxSpread.loc["Median"].sort_values(ascending=False) best2_MaxSpread = list(meds2_MaxSpread.index[:2]) print("{}.MaxSpread:".format(market), best1_MaxSpread[0], best1_MaxSpread[1]) # print("{}.MaxSpread:".format(market), best2_MaxSpread[0], best2_MaxSpread[1]) # TODO: check error # read Delta data file dat = pd.read_csv("./num_res/{}.Delta.csv".format(market)) # split into two experiments exp1_Delta = dat[dat.columns[:5]] exp2_Delta = dat[dat.columns[5:]] # calculate statistics stat1_Delta = pd.DataFrame( [exp1_Delta.min(), exp1_Delta.median(), exp1_Delta.std()]) stat1_Delta.index = ["Best", "Median", "Std."] stat2_Delta = pd.DataFrame( [exp2_Delta.min(), exp2_Delta.median(), exp2_Delta.std()]) stat2_Delta.index = ["Best", "Median", "Std."] # find best and second best algorithm meds1_Delta = stat1_Delta.loc["Median"].sort_values() best1_Delta = list(meds1_Delta.index[:2]) meds2_Delta = stat2_Delta.loc["Median"].sort_values() best2_Delta = list(meds2_Delta.index[:2]) print("{}.Delta:".format(market), 
best1_Delta[0], best1_Delta[1]) # print("{}.Delta:".format(market), best2_Delta[0], best2_Delta[1]) # TODO: check error # read IGD data file dat = pd.read_csv("./num_res/{}.IGD.csv".format(market)) # split into two experiments exp1_IGD = dat[dat.columns[:5]] exp2_IGD = dat[dat.columns[5:]] # calculate statistics stat1_IGD = pd.DataFrame([exp1_IGD.min(), exp1_IGD.median(), exp1_IGD.std()]) stat1_IGD.index = ["Best", "Median", "Std."] stat2_IGD = pd.DataFrame([exp2_IGD.min(), exp2_IGD.median(), exp2_IGD.std()]) stat2_IGD.index = ["Best", "Median", "Std."] # find best and second best algorithm meds1_IGD = stat1_IGD.loc["Median"].sort_values() best1_IGD = list(meds1_IGD.index[:2]) meds2_IGD = stat2_IGD.loc["Median"].sort_values() best2_IGD = list(meds2_IGD.index[:2]) print("{}.IGD:".format(market), best1_IGD[0], best1_IGD[1]) # print("{}.IGD:".format(market), best2_IGD[0], best2_IGD[1]) # TODO: check error # read Hypervolume data file dat = pd.read_csv("./num_res/{}.Hypervolume.csv".format(market)) # split into two experiments exp1_Hypervolume = dat[dat.columns[:5]] exp2_Hypervolume = dat[dat.columns[5:]] # calculate statistics stat1_Hypervolume = pd.DataFrame( [exp1_Hypervolume.max(), exp1_Hypervolume.median(), exp1_Hypervolume.std()]) stat1_Hypervolume.index = ["Best", "Median", "Std."] stat2_Hypervolume = pd.DataFrame( [exp2_Hypervolume.max(), exp2_Hypervolume.median(), exp2_Hypervolume.std()]) stat2_Hypervolume.index = ["Best", "Median", "Std."] # find best and second best algorithm meds1_Hypervolume = stat1_Hypervolume.loc["Median"].sort_values( ascending=False) best1_Hypervolume = list(meds1_Hypervolume.index[:2]) meds2_Hypervolume = stat2_Hypervolume.loc["Median"].sort_values( ascending=False) best2_Hypervolume = list(meds2_Hypervolume.index[:2]) print("{}.Hypervolume:".format(market), best1_Hypervolume[0], best1_Hypervolume[1]) # print("{}.Hypervolume:".format(market), # best2_Hypervolume[0], best2_Hypervolume[1]) # TODO: check error print("{}\n----------------------------------------------".format(market)) pd.options.display.float_format = '{:.2e}'.format stat1_overall = pd.concat( [stat1_GD, stat1_Spacing, stat1_MaxSpread, stat1_Delta, stat1_IGD, stat1_Hypervolume]) stat2_overall = pd.concat( [stat2_GD, stat2_Spacing, stat2_MaxSpread, stat2_Delta, stat2_IGD, stat2_Hypervolume]) arrays = [["GD", "GD", "GD", "Spacing", "Spacing", "Spacing", "MaxSpread", "MaxSpread", "MaxSpread", "Delta", "Delta", "Delta", "IGD", "IGD", "IGD", "Hypervolume", "Hypervolume", "Hypervolume"], stat1_overall.index ] index = pd.MultiIndex.from_arrays(arrays, names=["Metric", ""]) stat1_overall.index = index stat2_overall.index = index print(stat1_overall) print("----------------------------------------------") print(stat2_overall)
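The six per-metric blocks in stat.py above all repeat the same read / summarize / rank-by-median pattern, differing only in whether "Best" is the minimum or the maximum. A minimal sketch of that pattern factored into one helper; the function name and return shape are illustrative, not part of the original file.

import pandas as pd

def summarize_metric(market, metric, best="min"):
    # read the metric file and split it into the two experiments
    dat = pd.read_csv("./num_res/{}.{}.csv".format(market, metric))
    experiments = [dat[dat.columns[:5]], dat[dat.columns[5:]]]
    stats, bests = [], []
    for exp in experiments:
        extreme = exp.max() if best == "max" else exp.min()
        stat = pd.DataFrame([extreme, exp.median(), exp.std()])
        stat.index = ["Best", "Median", "Std."]
        stats.append(stat)
        # rank algorithms by median; MaxSpread and Hypervolume rank descending
        meds = stat.loc["Median"].sort_values(ascending=(best != "max"))
        bests.append(list(meds.index[:2]))
    return stats, bests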
2.421875
2
app/rss_feeder_api/migrations/0003_auto_20200813_1623.py
RSaab/rss-scraper
0
632
# Generated by Django 3.1 on 2020-08-13 16:23

from django.db import migrations, models
import django.utils.timezone


class Migration(migrations.Migration):

    dependencies = [
        ('rss_feeder_api', '0002_feed_subtitle'),
    ]

    operations = [
        migrations.AlterModelOptions(
            name='entry',
            options={'ordering': ('-updated_at',), 'verbose_name_plural': 'entries'},
        ),
        migrations.AlterModelOptions(
            name='feed',
            options={'ordering': ('-updated_at',), 'verbose_name': 'Feed', 'verbose_name_plural': 'Feeds'},
        ),
        migrations.AddField(
            model_name='entry',
            name='created_at',
            field=models.DateTimeField(auto_now_add=True, default=django.utils.timezone.now),
            preserve_default=False,
        ),
        migrations.AddField(
            model_name='entry',
            name='updated_at',
            field=models.DateTimeField(auto_now=True),
        ),
        migrations.AlterUniqueTogether(
            name='entry',
            unique_together={('guid',)},
        ),
    ]
1.015625
1
httprunner/compat.py
panyuan209/httprunner
0
656
""" This module handles compatibility issues between testcase format v2 and v3. 解决httprunner2 和 3 之间测试用例兼容性问题 """ import os import sys from typing import List, Dict, Text, Union, Any from loguru import logger from httprunner import exceptions from httprunner.loader import load_project_meta, convert_relative_project_root_dir from httprunner.parser import parse_data from httprunner.utils import sort_dict_by_custom_order def convert_variables( raw_variables: Union[Dict, List, Text], test_path: Text ) -> Dict[Text, Any]: if isinstance(raw_variables, Dict): return raw_variables if isinstance(raw_variables, List): # [{"var1": 1}, {"var2": 2}] variables: Dict[Text, Any] = {} for var_item in raw_variables: if not isinstance(var_item, Dict) or len(var_item) != 1: raise exceptions.TestCaseFormatError( f"Invalid variables format: {raw_variables}" ) variables.update(var_item) return variables elif isinstance(raw_variables, Text): # get variables by function, e.g. ${get_variables()} project_meta = load_project_meta(test_path) variables = parse_data(raw_variables, {}, project_meta.functions) return variables else: raise exceptions.TestCaseFormatError( f"Invalid variables format: {raw_variables}" ) def _convert_jmespath(raw: Text) -> Text: if not isinstance(raw, Text): raise exceptions.TestCaseFormatError(f"Invalid jmespath extractor: {raw}") # content.xx/json.xx => body.xx if raw.startswith("content"): raw = f"body{raw[len('content'):]}" elif raw.startswith("json"): raw = f"body{raw[len('json'):]}" raw_list = [] for item in raw.split("."): if "-" in item: # add quotes for field with separator # e.g. headers.Content-Type => headers."Content-Type" item = item.strip('"') raw_list.append(f'"{item}"') elif item.isdigit(): # convert lst.0.name to lst[0].name if len(raw_list) == 0: logger.error(f"Invalid jmespath: {raw}") sys.exit(1) last_item = raw_list.pop() item = f"{last_item}[{item}]" raw_list.append(item) else: raw_list.append(item) return ".".join(raw_list) def _convert_extractors(extractors: Union[List, Dict]) -> Dict: """ convert extract list(v2) to dict(v3) Args: extractors: [{"varA": "content.varA"}, {"varB": "json.varB"}] Returns: {"varA": "body.varA", "varB": "body.varB"} """ v3_extractors: Dict = {} if isinstance(extractors, List): # [{"varA": "content.varA"}, {"varB": "json.varB"}] for extractor in extractors: if not isinstance(extractor, Dict): logger.error(f"Invalid extractor: {extractors}") sys.exit(1) for k, v in extractor.items(): v3_extractors[k] = v elif isinstance(extractors, Dict): # {"varA": "body.varA", "varB": "body.varB"} v3_extractors = extractors else: logger.error(f"Invalid extractor: {extractors}") sys.exit(1) for k, v in v3_extractors.items(): v3_extractors[k] = _convert_jmespath(v) return v3_extractors def _convert_validators(validators: List) -> List: for v in validators: if "check" in v and "expect" in v: # format1: {"check": "content.abc", "assert": "eq", "expect": 201} v["check"] = _convert_jmespath(v["check"]) elif len(v) == 1: # format2: {'eq': ['status_code', 201]} comparator = list(v.keys())[0] v[comparator][0] = _convert_jmespath(v[comparator][0]) return validators def _sort_request_by_custom_order(request: Dict) -> Dict: custom_order = [ "method", "url", "params", "headers", "cookies", "data", "json", "files", "timeout", "allow_redirects", "proxies", "verify", "stream", "auth", "cert", ] return sort_dict_by_custom_order(request, custom_order) def _sort_step_by_custom_order(step: Dict) -> Dict: custom_order = [ "name", "variables", "request", "testcase", 
"setup_hooks", "teardown_hooks", "extract", "validate", "validate_script", ] return sort_dict_by_custom_order(step, custom_order) def _ensure_step_attachment(step: Dict) -> Dict: test_dict = { "name": step["name"], } if "variables" in step: test_dict["variables"] = step["variables"] if "setup_hooks" in step: test_dict["setup_hooks"] = step["setup_hooks"] if "teardown_hooks" in step: test_dict["teardown_hooks"] = step["teardown_hooks"] if "extract" in step: test_dict["extract"] = _convert_extractors(step["extract"]) if "export" in step: test_dict["export"] = step["export"] if "validate" in step: if not isinstance(step["validate"], List): raise exceptions.TestCaseFormatError( f'Invalid teststep validate: {step["validate"]}' ) test_dict["validate"] = _convert_validators(step["validate"]) if "validate_script" in step: test_dict["validate_script"] = step["validate_script"] return test_dict def ensure_testcase_v3_api(api_content: Dict) -> Dict: logger.info("convert api in v2 to testcase format v3") teststep = { "request": _sort_request_by_custom_order(api_content["request"]), } teststep.update(_ensure_step_attachment(api_content)) teststep = _sort_step_by_custom_order(teststep) config = {"name": api_content["name"]} extract_variable_names: List = list(teststep.get("extract", {}).keys()) if extract_variable_names: config["export"] = extract_variable_names return { "config": config, "teststeps": [teststep], } def ensure_testcase_v3(test_content: Dict) -> Dict: logger.info("ensure compatibility with testcase format v2") v3_content = {"config": test_content["config"], "teststeps": []} if "teststeps" not in test_content: logger.error(f"Miss teststeps: {test_content}") sys.exit(1) if not isinstance(test_content["teststeps"], list): logger.error( f'teststeps should be list type, got {type(test_content["teststeps"])}: {test_content["teststeps"]}' ) sys.exit(1) for step in test_content["teststeps"]: teststep = {} if "request" in step: teststep["request"] = _sort_request_by_custom_order(step.pop("request")) elif "api" in step: teststep["testcase"] = step.pop("api") elif "testcase" in step: teststep["testcase"] = step.pop("testcase") else: raise exceptions.TestCaseFormatError(f"Invalid teststep: {step}") teststep.update(_ensure_step_attachment(step)) teststep = _sort_step_by_custom_order(teststep) v3_content["teststeps"].append(teststep) return v3_content def ensure_cli_args(args: List) -> List: """ ensure compatibility with deprecated cli args in v2 """ # remove deprecated --failfast if "--failfast" in args: logger.warning(f"remove deprecated argument: --failfast") args.pop(args.index("--failfast")) # convert --report-file to --html if "--report-file" in args: logger.warning(f"replace deprecated argument --report-file with --html") index = args.index("--report-file") args[index] = "--html" args.append("--self-contained-html") # keep compatibility with --save-tests in v2 if "--save-tests" in args: logger.warning( f"generate conftest.py keep compatibility with --save-tests in v2" ) args.pop(args.index("--save-tests")) _generate_conftest_for_summary(args) return args def _generate_conftest_for_summary(args: List): for arg in args: if os.path.exists(arg): test_path = arg # FIXME: several test paths maybe specified break else: logger.error(f"No valid test path specified! \nargs: {args}") sys.exit(1) conftest_content = '''# NOTICE: Generated By HttpRunner. 
import json import os import time import pytest from loguru import logger from httprunner.utils import get_platform, ExtendJSONEncoder @pytest.fixture(scope="session", autouse=True) def session_fixture(request): """setup and teardown each task""" logger.info(f"start running testcases ...") start_at = time.time() yield logger.info(f"task finished, generate task summary for --save-tests") summary = { "success": True, "stat": { "testcases": {"total": 0, "success": 0, "fail": 0}, "teststeps": {"total": 0, "failures": 0, "successes": 0}, }, "time": {"start_at": start_at, "duration": time.time() - start_at}, "platform": get_platform(), "details": [], } for item in request.node.items: testcase_summary = item.instance.get_summary() summary["success"] &= testcase_summary.success summary["stat"]["testcases"]["total"] += 1 summary["stat"]["teststeps"]["total"] += len(testcase_summary.step_datas) if testcase_summary.success: summary["stat"]["testcases"]["success"] += 1 summary["stat"]["teststeps"]["successes"] += len( testcase_summary.step_datas ) else: summary["stat"]["testcases"]["fail"] += 1 summary["stat"]["teststeps"]["successes"] += ( len(testcase_summary.step_datas) - 1 ) summary["stat"]["teststeps"]["failures"] += 1 testcase_summary_json = testcase_summary.dict() testcase_summary_json["records"] = testcase_summary_json.pop("step_datas") summary["details"].append(testcase_summary_json) summary_path = r"{{SUMMARY_PATH_PLACEHOLDER}}" summary_dir = os.path.dirname(summary_path) os.makedirs(summary_dir, exist_ok=True) with open(summary_path, "w", encoding="utf-8") as f: json.dump(summary, f, indent=4, ensure_ascii=False, cls=ExtendJSONEncoder) logger.info(f"generated task summary: {summary_path}") ''' project_meta = load_project_meta(test_path) project_root_dir = project_meta.RootDir conftest_path = os.path.join(project_root_dir, "conftest.py") test_path = os.path.abspath(test_path) logs_dir_path = os.path.join(project_root_dir, "logs") test_path_relative_path = convert_relative_project_root_dir(test_path) if os.path.isdir(test_path): file_foder_path = os.path.join(logs_dir_path, test_path_relative_path) dump_file_name = "all.summary.json" else: file_relative_folder_path, test_file = os.path.split(test_path_relative_path) file_foder_path = os.path.join(logs_dir_path, file_relative_folder_path) test_file_name, _ = os.path.splitext(test_file) dump_file_name = f"{test_file_name}.summary.json" summary_path = os.path.join(file_foder_path, dump_file_name) conftest_content = conftest_content.replace( "{{SUMMARY_PATH_PLACEHOLDER}}", summary_path ) dir_path = os.path.dirname(conftest_path) if not os.path.exists(dir_path): os.makedirs(dir_path) with open(conftest_path, "w", encoding="utf-8") as f: f.write(conftest_content) logger.info("generated conftest.py to generate summary.json") def ensure_path_sep(path: Text) -> Text: """ ensure compatibility with different path separators of Linux and Windows """ if "/" in path: path = os.sep.join(path.split("/")) if "\\" in path: path = os.sep.join(path.split("\\")) return path
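A quick check of the extractor-conversion helpers defined in the compat module above, assuming it is importable as httprunner.compat; the sample expressions are illustrative, and the underscore-prefixed functions are module-private helpers, so this is for illustration rather than public API use.

from httprunner.compat import _convert_jmespath, _convert_extractors

assert _convert_jmespath("content.person.name") == "body.person.name"
assert _convert_jmespath("headers.Content-Type") == 'headers."Content-Type"'
assert _convert_jmespath("json.users.0.id") == "body.users[0].id"
assert _convert_extractors([{"token": "content.token"}]) == {"token": "body.token"}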
1.75
2
tests/test_mr_uplift.py
Ibotta/mr_uplift
48
680
import numpy as np import pytest from mr_uplift.dataset.data_simulation import get_no_noise_data, get_simple_uplift_data, get_observational_uplift_data_1 from mr_uplift.mr_uplift import MRUplift, get_t_data from mr_uplift.keras_model_functionality import prepare_data_optimized_loss import sys import pandas as pd class TestMRUplift(object): def test_get_t_data(self): num_obs_1 = 10 num_obs_2 = 3 test_1 = get_t_data(0, num_obs_1) test_2 = get_t_data(np.array([0, 1]), num_obs_2) test_1_values = np.zeros(num_obs_1).reshape(-1, 1) test_2_values = np.concatenate([np.zeros(num_obs_2).reshape(-1, 1), np.ones(num_obs_2).reshape(-1, 1)], axis=1) assert np.mean(test_1 == test_1_values) == 1 assert np.mean(test_2 == test_2_values) == 1 def test_model_mean_outputs(self): true_ATE = np.array([[0, 0], [1, .5]]) rmse_tolerance = .05 num_obs = 10000 y_no_noise, x_no_noise, tmt_no_noise = get_no_noise_data(num_obs) uplift_model = MRUplift() uplift_model.fit(x_no_noise, y_no_noise, tmt_no_noise.reshape(-1, 1), n_jobs=1) oos_ice = uplift_model.predict_ice(response_transformer = True) assert np.sqrt(np.mean((oos_ice.mean(axis=1) -true_ATE)**2)) < rmse_tolerance def test_model_pred_oos_shapes(self): num_obs = 1000 y, x, t = get_simple_uplift_data(num_obs) t = np.concatenate([t.reshape(-1, 1), np.random.binomial(1, .5, num_obs).reshape(-1, 1)], axis=1) param_grid = dict(num_nodes=[8], dropout=[.1], activation=[ 'relu'], num_layers=[1], epochs=[1], batch_size=[1000]) uplift_model = MRUplift() uplift_model.fit(x, y, t, param_grid = param_grid, n_jobs=1) x_1 = x.copy() x_1 = pd.DataFrame(x_1) x_1.columns = ['var_'+str(x) for x in range(x.shape[1])] y_1 = y.copy() y_1 = pd.DataFrame(y_1) y_1.columns = ['var_'+str(x) for x in range(y.shape[1])] uplift_model_named = MRUplift() uplift_model_named.fit(x_1, y_1, t, param_grid = param_grid, n_jobs=1) assert uplift_model.predict_ice().shape == ( np.unique(t, axis=0).shape[0], num_obs * .7, y.shape[1]) assert uplift_model.predict_ice(x=x).shape == (np.unique(t,axis=0).shape[0], num_obs, y.shape[1]) assert uplift_model.get_erupt_curves() assert uplift_model.get_erupt_curves(x = x, y = y, t = t) assert uplift_model_named.get_erupt_curves() def test_model_pred_oos_shapes_single_col_tmt(self): num_obs = 1000 y, x, t = get_simple_uplift_data(num_obs) t = t.reshape(-1, 1) param_grid = dict(num_nodes=[8], dropout=[.1], activation=[ 'relu'], num_layers=[1], epochs=[1], batch_size=[1000]) uplift_model = MRUplift() uplift_model.fit(x, y, t, param_grid = param_grid, n_jobs=1) assert uplift_model.predict_ice().shape == ( np.unique(t, axis=0).shape[0], num_obs * .7, y.shape[1]) assert uplift_model.predict_ice(x=x).shape == (np.unique(t,axis=0).shape[0], num_obs, y.shape[1]) assert uplift_model.get_erupt_curves() assert uplift_model.get_erupt_curves(x = x, y = y, t = t) def test_model_pred_oos_shapes_single_col_tmt_propensity(self): num_obs = 1000 y, x, t = get_simple_uplift_data(num_obs) t = t.reshape(-1, 1) param_grid = dict(num_nodes=[8], dropout=[.1], activation=['relu'], num_layers=[2], epochs=[1], batch_size=[100], alpha = [.5], copy_several_times = [1]) uplift_model = MRUplift() uplift_model.fit(x, y, t, param_grid = param_grid, n_jobs=1, optimized_loss = True, use_propensity = True) assert uplift_model.predict_ice().shape == ( np.unique(t, axis=0).shape[0], num_obs * .7, y.shape[1]) assert uplift_model.predict_ice(x=x).shape == (np.unique(t,axis=0).shape[0], num_obs, y.shape[1]) assert uplift_model.get_erupt_curves() assert uplift_model.get_erupt_curves(x = x, y = y, t = t) def 
test_prepare_data_optimized_loss_one_col_tmt(self): num_obs = 1000 y, x, t = get_simple_uplift_data(num_obs) t = t.reshape(len(t),1) unique_treatments = np.unique(t, axis = 0) masks = np.ones(num_obs).reshape(num_obs,1) x, utility_weights, missing_utility, missing_y_mat, masks, weights = prepare_data_optimized_loss(x,y,t, masks ,unique_treatments) assert(utility_weights.shape == (num_obs, y.shape[1])) assert(missing_y_mat.shape == (num_obs, unique_treatments.shape[0], y.shape[1])) for q in range(unique_treatments.shape[0]): assert( ((missing_utility[:,q]==0) == (missing_y_mat[:,q,0] == -999)).mean() ==1 ) def test_prepare_data_optimized_loss_two_col_tmt(self): num_obs = 1000 y, x, t = get_simple_uplift_data(num_obs) t = np.concatenate([t.reshape(-1, 1), np.random.binomial(1, .5, num_obs).reshape(-1, 1)], axis=1) unique_treatments = np.unique(t, axis = 0) masks = np.ones(num_obs*len(unique_treatments)).reshape(num_obs,len(unique_treatments)) x, utility_weights, missing_utility, missing_y_mat, masks, weights = prepare_data_optimized_loss(x,y,t,masks, unique_treatments) assert(utility_weights.shape == (num_obs, y.shape[1])) assert(missing_y_mat.shape == (num_obs, unique_treatments.shape[0], y.shape[1])) for q in range(unique_treatments.shape[0]): assert( ((missing_utility[:,q]==0) == (missing_y_mat[:,q,0] == -999)).mean() ==1 ) def test_model_optim_mean_outputs(self): true_ATE = np.array([[0, 0], [1, .5]]) rmse_tolerance = .05 num_obs = 10000 param_grid = dict(num_nodes=[8], dropout=[.1], activation=['relu'], num_layers=[2], epochs=[30], batch_size=[100]) y_no_noise, x_no_noise, tmt_no_noise = get_no_noise_data(num_obs) uplift_model = MRUplift() uplift_model.fit(x_no_noise, y_no_noise, tmt_no_noise.reshape(-1, 1), n_jobs=1, param_grid = param_grid, optimized_loss = False) oos_ice = uplift_model.predict_ice(response_transformer = True) assert np.sqrt(np.mean((oos_ice.mean(axis=1) - true_ATE)**2)) < rmse_tolerance def test_model_get_random_erupts(self): true_ATE = np.array([[0, 0], [1, .5]]) rmse_tolerance = .05 num_obs = 10000 param_grid = dict(num_nodes=[8], dropout=[.1], activation=['relu'], num_layers=[2], epochs=[30], batch_size=[100], alpha = [.5], copy_several_times = [2]) y_no_noise, x_no_noise, tmt_no_noise = get_no_noise_data(num_obs) uplift_model = MRUplift() uplift_model.fit(x_no_noise, y_no_noise, tmt_no_noise.reshape(-1, 1), n_jobs=1, param_grid = param_grid, optimized_loss = True) oos_re = uplift_model.get_random_erupts() uplift_model_propensity = MRUplift() uplift_model_propensity.fit(x_no_noise, y_no_noise, tmt_no_noise.reshape(-1, 1), n_jobs=1, param_grid = param_grid, optimized_loss = True, use_propensity = True) oos_re_propensity = uplift_model_propensity.get_random_erupts() assert oos_re['mean'].iloc[0] > 0 assert oos_re_propensity['mean'].iloc[0] > 0 def test_varimp(self): num_obs = 10000 param_grid = dict(num_nodes=[8], dropout=[.1], activation=['relu'], num_layers=[2], epochs=[30], batch_size=[100]) y, x, t = get_simple_uplift_data(num_obs) uplift_model = MRUplift() uplift_model.fit(x, y, t.reshape(-1, 1), n_jobs=1, param_grid = param_grid) varimp = uplift_model.permutation_varimp(objective_weights = np.array([.7,-.3,0]).reshape(1,-1)) param_grid = dict(num_nodes=[8], dropout=[.1], activation=['relu'], num_layers=[2], epochs=[30], batch_size=[100], alpha = [.5], copy_several_times = [2]) uplift_model_propensity = MRUplift() uplift_model_propensity.fit(x, y, t.reshape(-1, 1), n_jobs=1, param_grid = param_grid, optimized_loss = True, use_propensity = True) 
varimp_propensity = uplift_model_propensity.permutation_varimp(objective_weights = np.array([.7,-.3,0]).reshape(1,-1)) assert varimp['permutation_varimp_metric'].iloc[0]>varimp['permutation_varimp_metric'].iloc[1] assert varimp_propensity['permutation_varimp_metric'].iloc[0]>varimp_propensity['permutation_varimp_metric'].iloc[1] def test_model_propensity(self): num_obs = 10000 TOLERANCE = .98 y, x, t, rule_assignment = get_observational_uplift_data_1(num_obs) param_grid = dict(num_nodes=[8], dropout=[.1], activation=['relu'], num_layers=[1], epochs=[20], batch_size=[512], alpha = [.9999,.99], copy_several_times = [1]) uplift_model = MRUplift() uplift_model.fit(x, y[:,0].reshape(-1,1), t, param_grid = param_grid, n_jobs=1, optimized_loss = True, use_propensity = True, test_size = 0) uplift_model.best_params_net y_test, x_test, t_test, rule_assignment_test = get_observational_uplift_data_1(num_obs) experiment_groups = np.zeros(num_obs)+2 experiment_groups[np.where(x_test[:,-2]<.5)[0]] = 1 experiment_groups[np.where(x_test[:,-2]<.33)[0]] = 0 experiment_groups[np.where(x_test[:,-1]>.8)[0]] = 3 optim_treatments_no_cuttoff = uplift_model.predict_optimal_treatments(x = x_test, use_propensity_score_cutoff = False) optim_treatments_cuttoff = uplift_model.predict_optimal_treatments(x = x_test, use_propensity_score_cutoff = True) optim_treatments_cuttoff_cat = optim_treatments_cuttoff.argmax(axis = 1) optim_treatments_no_cuttoff_cat = optim_treatments_no_cuttoff.argmax(axis = 1) correct_tmts_1 = np.array([x in [0,1] for x in optim_treatments_cuttoff_cat[np.where(experiment_groups == 0)[0]] ]).mean() correct_tmts_2 = np.array([x in [1,2] for x in optim_treatments_cuttoff_cat[np.where(experiment_groups == 1)[0]] ]).mean() correct_tmts_3 = np.array([x in [0,2] for x in optim_treatments_cuttoff_cat[np.where(experiment_groups == 2)[0]] ]).mean() correct_tmts_4 = np.array([x in [0] for x in optim_treatments_cuttoff_cat[np.where(experiment_groups == 3)[0]] ]).mean() correct_tmts_experiment_groups_1 = ((optim_treatments_cuttoff_cat[np.where(experiment_groups == 1)[0]] == 1) == x_test[np.where(experiment_groups == 1)[0],0]).mean() correct_tmts_no_cutoff = np.mean((optim_treatments_no_cuttoff_cat==1 ) == x_test[:,0]) assert correct_tmts_1>TOLERANCE assert correct_tmts_2>TOLERANCE assert correct_tmts_3>TOLERANCE assert correct_tmts_4>TOLERANCE assert correct_tmts_experiment_groups_1>TOLERANCE assert np.array_equal(optim_treatments_cuttoff_cat,optim_treatments_no_cuttoff_cat) is False assert correct_tmts_no_cutoff>TOLERANCE
1.671875
2
examples/pylab_examples/image_masked.py
pierre-haessig/matplotlib
16
688
#!/usr/bin/env python
'''imshow with masked array input and out-of-range colors.

The second subplot illustrates the use of BoundaryNorm to
get a filled contour effect.
'''
from pylab import *
from numpy import ma
import matplotlib.colors as colors

delta = 0.025
x = y = arange(-3.0, 3.0, delta)
X, Y = meshgrid(x, y)
Z1 = bivariate_normal(X, Y, 1.0, 1.0, 0.0, 0.0)
Z2 = bivariate_normal(X, Y, 1.5, 0.5, 1, 1)
Z = 10 * (Z2-Z1)  # difference of Gaussians

# Set up a colormap:
palette = cm.gray
palette.set_over('r', 1.0)
palette.set_under('g', 1.0)
palette.set_bad('b', 1.0)
# Alternatively, we could use
# palette.set_bad(alpha = 0.0)
# to make the bad region transparent.  This is the default.
# If you comment out all the palette.set* lines, you will see
# all the defaults; under and over will be colored with the
# first and last colors in the palette, respectively.
Zm = ma.masked_where(Z > 1.2, Z)

# By setting vmin and vmax in the norm, we establish the
# range to which the regular palette color scale is applied.
# Anything above that range is colored based on palette.set_over, etc.

subplot(1,2,1)
im = imshow(Zm, interpolation='bilinear',
            cmap=palette,
            norm = colors.Normalize(vmin = -1.0, vmax = 1.0, clip = False),
            origin='lower', extent=[-3,3,-3,3])
title('Green=low, Red=high, Blue=bad')
colorbar(im, extend='both', orientation='horizontal', shrink=0.8)

subplot(1,2,2)
im = imshow(Zm, interpolation='nearest',
            cmap=palette,
            norm = colors.BoundaryNorm([-1, -0.5, -0.2, 0, 0.2, 0.5, 1],
                                       ncolors=256, clip = False),
            origin='lower', extent=[-3,3,-3,3])
title('With BoundaryNorm')
colorbar(im, extend='both', spacing='proportional',
         orientation='horizontal', shrink=0.8)

show()
2.21875
2
Sec_10_expr_lambdas_fun_integradas/f_generators.py
PauloAlexSilva/Python
0
736
"""" Generator Expression Em aulas anteriores foi abordado: - List Comprehension; - Dictionary Comprehension; - Set Comprehension. Não foi abordado: - Tuple Comprehension ... porque elas se chamam Generators nomes = ['Carlos', 'Camila', 'Carla', 'Cristiana', 'Cristina', 'Vanessa'] print(any8[nomes[0] == 'C' for nome in nomes]) # Poderia ter sido feito usando os Generators nomes = ['Carlos', 'Camila', 'Carla', 'Cristiana', 'Cristina', 'Vanessa'] print(any(nome[0] == 'C' for nome in nomes)) # List Comprehension res = [nome[0] == 'C' for nome in nomes] print(type(res)) print(res) # [True, True, True, True, True, False] # Generator - mais efeciente res2 = (nome[0] == 'C' for nome in nomes) print(type(res2)) print(res2) # O que faz a função de getsizeof()? -> retorna a quantidade de bytes em memória do elemento # passado como parâmetro from sys import getsizeof # Mostra quantos bytes a string 'Paulo' está ocupando em memória. # Quanto maior a string mais espaço ocupa. print(getsizeof('Paulo')) print(getsizeof('Quanto maior a string mais espaço ocupa.')) print(getsizeof(9)) print(getsizeof(91)) print(getsizeof(12345667890)) print(getsizeof(True)) from sys import getsizeof # Gerando uma lista de números com List Comprehension list_comp = getsizeof([x * 10 for x in range(1000)]) # Gerando uma lista de números com Set Comprehension set_comp = getsizeof({x * 10 for x in range(1000)}) # Gerando uma lista de números com Dictionary Comprehension dic_comp = getsizeof({x: x * 10 for x in range(1000)}) # Gerando uma lista de números com Generator gen = getsizeof(x * 10 for x in range(1000)) print('Para fazer a mesma gastamos em memória: ') print(f'List Comprehension: {list_comp} bytes!') print(f'Set Comprehension: {set_comp} bytes!') print(f'Dictionary Comprehension: {dic_comp} bytes!') print(f'Generator Expression: {gen} bytes!') Para fazer a mesma gastamos em memória: List Comprehension: 8856 bytes! Set Comprehension: 32984 bytes! Dictionary Comprehension: 36960 bytes! Generator Expression: 112 bytes! """ # Posso iterar no Generator Expression? Sim gen = (x * 10 for x in range(1000)) print(gen) print(type(gen)) for num in gen: print(num)
2.65625
3
advanced/itertools_funcs.py
ariannasg/python3-essential-training
1
776
#!usr/bin/env python3
import itertools

# itertools is a module that's not technically a set of built-in functions but
# it is part of the standard library that comes with python.
# it's useful for creating and using iterators.


def main():
    print('some infinite iterators')
    # cycle iterator can be used to cycle over a collection over and over
    seq1 = ["Joe", "John", "Mike"]
    cycle1 = itertools.cycle(seq1)
    print(next(cycle1))
    print(next(cycle1))
    print(next(cycle1))
    print(next(cycle1))
    print(next(cycle1))

    # use count to create a simple counter
    count1 = itertools.count(100, 3)
    print(next(count1))
    print(next(count1))
    print(next(count1))

    print('some non-infinite iterators')
    values = [10, 5, 20, 30, 40, 50, 40, 30]

    # accumulate creates an iterator that accumulates/aggregates values
    print(list(itertools.accumulate(values)))  # this defaults to addition
    print(list(itertools.accumulate(values, max)))
    print(list(itertools.accumulate(values, min)))

    # use chain to connect sequences together
    x = itertools.chain('ABCD', '1234')
    print(list(x))

    # dropwhile and takewhile will return values until
    # a certain condition is met that stops them. they are similar to the
    # filter built-in function.
    # dropwhile will drop the values from the sequence as long as the
    # condition of the function is true and then returns the rest of values
    print(list(itertools.dropwhile(is_less_than_forty, values)))
    # takewhile will keep the values from the sequence as long as the
    # condition of the function is true and then stops giving data
    print(list(itertools.takewhile(is_less_than_forty, values)))


def is_less_than_forty(x):
    return x < 40


if __name__ == "__main__":
    main()


# CONSOLE OUTPUT:
# some infinite iterators
# Joe
# John
# Mike
# Joe
# John
# 100
# 103
# 106
# some non-infinite iterators
# [10, 15, 35, 65, 105, 155, 195, 225]
# [10, 10, 20, 30, 40, 50, 50, 50]
# [10, 5, 5, 5, 5, 5, 5, 5]
# ['A', 'B', 'C', 'D', '1', '2', '3', '4']
# [40, 50, 40, 30]
# [10, 5, 20, 30]
3.5
4
python/tvm/contrib/nvcc.py
ntanhbk44/tvm
0
784
# Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. # pylint: disable=invalid-name """Utility to invoke nvcc compiler in the system""" from __future__ import absolute_import as _abs import subprocess import os import warnings import tvm._ffi from tvm.runtime import ndarray as nd from . import utils from .._ffi.base import py_str def compile_cuda(code, target="ptx", arch=None, options=None, path_target=None): """Compile cuda code with NVCC from env. Parameters ---------- code : str The cuda code. target : str The target format arch : str The architecture options : str or list of str The additional options path_target : str, optional Output file. Return ------ cubin : bytearray The bytearray of the cubin """ temp = utils.tempdir() if target not in ["cubin", "ptx", "fatbin"]: raise ValueError("target must be in cubin, ptx, fatbin") temp_code = temp.relpath("my_kernel.cu") temp_target = temp.relpath("my_kernel.%s" % target) with open(temp_code, "w") as out_file: out_file.write(code) if arch is None: if nd.gpu(0).exist: # auto detect the compute arch argument arch = "sm_" + "".join(nd.gpu(0).compute_version.split(".")) else: raise ValueError("arch(sm_xy) is not passed, and we cannot detect it from env") file_target = path_target if path_target else temp_target cmd = ["nvcc"] cmd += ["--%s" % target, "-O3"] if isinstance(arch, list): cmd += arch else: cmd += ["-arch", arch] if options: if isinstance(options, str): cmd += [options] elif isinstance(options, list): cmd += options else: raise ValueError("options must be str or list of str") cmd += ["-o", file_target] cmd += [temp_code] # NOTE: ccbin option can be used to tell nvcc where to find the c++ compiler # just in case it is not in the path. On Windows it is not in the path by default. # However, we cannot use TVM_CXX_COMPILER_PATH because the runtime env. # Because it is hard to do runtime compiler detection, we require nvcc is configured # correctly by default. # if cxx_compiler_path != "": # cmd += ["-ccbin", cxx_compiler_path] proc = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT) (out, _) = proc.communicate() if proc.returncode != 0: msg = code msg += "\nCompilation error:\n" msg += py_str(out) raise RuntimeError(msg) data = bytearray(open(file_target, "rb").read()) if not data: raise RuntimeError("Compilation error: empty result is generated") return data def find_cuda_path(): """Utility function to find cuda path Returns ------- path : str Path to cuda root. 
""" if "CUDA_PATH" in os.environ: return os.environ["CUDA_PATH"] cmd = ["which", "nvcc"] proc = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT) (out, _) = proc.communicate() out = py_str(out) if proc.returncode == 0: return os.path.realpath(os.path.join(str(out).strip(), "../..")) cuda_path = "/usr/local/cuda" if os.path.exists(os.path.join(cuda_path, "bin/nvcc")): return cuda_path raise RuntimeError("Cannot find cuda path") def get_cuda_version(cuda_path): """Utility function to get cuda version Parameters ---------- cuda_path : str Path to cuda root. Returns ------- version : float The cuda version """ version_file_path = os.path.join(cuda_path, "version.txt") if not os.path.exists(version_file_path): # Debian/Ubuntu repackaged CUDA path version_file_path = os.path.join(cuda_path, "lib", "cuda", "version.txt") try: with open(version_file_path) as f: version_str = f.readline().replace("\n", "").replace("\r", "") return float(version_str.split(" ")[2][:2]) except FileNotFoundError: pass cmd = [os.path.join(cuda_path, "bin", "nvcc"), "--version"] proc = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT) (out, _) = proc.communicate() out = py_str(out) if proc.returncode == 0: release_line = [l for l in out.split("\n") if "release" in l][0] release_fields = [s.strip() for s in release_line.split(",")] release_version = [f[1:] for f in release_fields if f.startswith("V")][0] major_minor = ".".join(release_version.split(".")[:2]) return float(major_minor) raise RuntimeError("Cannot read cuda version file") @tvm._ffi.register_func("tvm_callback_libdevice_path") def find_libdevice_path(arch): """Utility function to find libdevice Parameters ---------- arch : int The compute architecture in int Returns ------- path : str Path to libdevice. """ cuda_path = find_cuda_path() lib_path = os.path.join(cuda_path, "nvvm/libdevice") if not os.path.exists(lib_path): # Debian/Ubuntu repackaged CUDA path lib_path = os.path.join(cuda_path, "lib/nvidia-cuda-toolkit/libdevice") selected_ver = 0 selected_path = None cuda_ver = get_cuda_version(cuda_path) if cuda_ver in (9.0, 9.1, 10.0, 10.1, 10.2, 11.0, 11.1, 11.2): path = os.path.join(lib_path, "libdevice.10.bc") else: for fn in os.listdir(lib_path): if not fn.startswith("libdevice"): continue ver = int(fn.split(".")[-3].split("_")[-1]) if selected_ver < ver <= arch: selected_ver = ver selected_path = fn if selected_path is None: raise RuntimeError("Cannot find libdevice for arch {}".format(arch)) path = os.path.join(lib_path, selected_path) return path def callback_libdevice_path(arch): try: return find_libdevice_path(arch) except RuntimeError: warnings.warn("Cannot find libdevice path") return "" def get_target_compute_version(target=None): """Utility function to get compute capability of compilation target. Looks for the arch in three different places, first in the target attributes, then the global scope, and finally the GPU device (if it exists). Parameters ---------- target : tvm.target.Target, optional The compilation target Returns ------- compute_version : str compute capability of a GPU (e.g. "8.0") """ # 1. Target if target: if "arch" in target.attrs: compute_version = target.attrs["arch"] major, minor = compute_version.split("_")[1] return major + "." + minor # 2. Global scope from tvm.autotvm.env import AutotvmGlobalScope # pylint: disable=import-outside-toplevel if AutotvmGlobalScope.current.cuda_target_arch: major, minor = AutotvmGlobalScope.current.cuda_target_arch.split("_")[1] return major + "." 
+ minor # 3. GPU if tvm.gpu(0).exist: return tvm.gpu(0).compute_version warnings.warn( "No CUDA architecture was specified or GPU detected." "Try specifying it by adding '-arch=sm_xx' to your target." ) return None def parse_compute_version(compute_version): """Parse compute capability string to divide major and minor version Parameters ---------- compute_version : str compute capability of a GPU (e.g. "6.0") Returns ------- major : int major version number minor : int minor version number """ split_ver = compute_version.split(".") try: major = int(split_ver[0]) minor = int(split_ver[1]) return major, minor except (IndexError, ValueError) as err: # pylint: disable=raise-missing-from raise RuntimeError("Compute version parsing error: " + str(err)) def have_fp16(compute_version): """Either fp16 support is provided in the compute capability or not Parameters ---------- compute_version: str compute capability of a GPU (e.g. "6.0") """ major, minor = parse_compute_version(compute_version) # fp 16 support in reference to: # https://docs.nvidia.com/cuda/cuda-c-programming-guide/#arithmetic-instructions if major == 5 and minor == 3: return True if major >= 6: return True return False def have_int8(compute_version): """Either int8 support is provided in the compute capability or not Parameters ---------- compute_version : str compute capability of a GPU (e.g. "6.1") """ major, _ = parse_compute_version(compute_version) if major >= 6: return True return False def have_tensorcore(compute_version=None, target=None): """Either TensorCore support is provided in the compute capability or not Parameters ---------- compute_version : str, optional compute capability of a GPU (e.g. "7.0"). target : tvm.target.Target, optional The compilation target, will be used to determine arch if compute_version isn't specified. """ if compute_version is None: if tvm.gpu(0).exist: compute_version = tvm.gpu(0).compute_version else: if target is None or "arch" not in target.attrs: warnings.warn( "Tensorcore will be disabled due to no CUDA architecture specified." "Try specifying it by adding '-arch=sm_xx' to your target." ) return False compute_version = target.attrs["arch"] # Compute version will be in the form "sm_{major}{minor}" major, minor = compute_version.split("_")[1] compute_version = major + "." + minor major, _ = parse_compute_version(compute_version) if major >= 7: return True return False def have_cudagraph(): """Either CUDA Graph support is provided""" try: cuda_path = find_cuda_path() cuda_ver = get_cuda_version(cuda_path) if cuda_ver < 10.0: return False return True except RuntimeError: return False def have_bf16(compute_version): """Either bf16 support is provided in the compute capability or not Parameters ---------- compute_version : str compute capability of a GPU (e.g. "8.0") """ major, _ = parse_compute_version(compute_version) if major >= 8: return True return False
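A small illustration of the compute-capability helpers defined in the nvcc module above, assuming it is importable as tvm.contrib.nvcc; the version strings are examples, not measured output.

from tvm.contrib.nvcc import parse_compute_version, have_fp16, have_int8

major, minor = parse_compute_version("7.5")  # -> (7, 5)
assert have_fp16("7.5")  # the helper returns True for 5.3 and for any major version >= 6
assert have_int8("7.5")  # the helper returns True for any major version >= 6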
1.398438
1
tensorflow/contrib/data/python/kernel_tests/optimization/map_and_filter_fusion_test.py
Smokrow/tensorflow
1
792
# Copyright 2018 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests for the MapAndFilterFusion optimization.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function from absl.testing import parameterized from tensorflow.contrib.data.python.ops import optimization from tensorflow.python.data.ops import dataset_ops from tensorflow.python.framework import constant_op from tensorflow.python.framework import dtypes from tensorflow.python.framework import errors from tensorflow.python.ops import math_ops from tensorflow.python.platform import test class MapAndFilterFusionTest(test.TestCase, parameterized.TestCase): @staticmethod def map_functions(): identity = lambda x: x increment = lambda x: x + 1 def increment_and_square(x): y = x + 1 return y * y functions = [identity, increment, increment_and_square] tests = [] for i, fun1 in enumerate(functions): for j, fun2 in enumerate(functions): tests.append(( "Test{}{}".format(i, j), [fun1, fun2], )) for k, fun3 in enumerate(functions): tests.append(( "Test{}{}{}".format(i, j, k), [fun1, fun2, fun3], )) swap = lambda x, n: (n, x) tests.append(( "Swap1", [lambda x: (x, 42), swap], )) tests.append(( "Swap2", [lambda x: (x, 42), swap, swap], )) return tuple(tests) @parameterized.named_parameters(*map_functions.__func__()) def testMapFusion(self, functions): dataset = dataset_ops.Dataset.range(5).apply( optimization.assert_next(["Map", "Prefetch"])) for function in functions: dataset = dataset.map(function) dataset = dataset.prefetch(0).apply(optimization.optimize(["map_fusion"])) iterator = dataset.make_one_shot_iterator() get_next = iterator.get_next() with self.test_session() as sess: for x in range(5): result = sess.run(get_next) r = x for function in functions: if isinstance(r, tuple): r = function(*r) # Pass tuple as multiple arguments. 
else: r = function(r) self.assertAllEqual(r, result) with self.assertRaises(errors.OutOfRangeError): sess.run(get_next) @staticmethod def map_and_filter_functions(): identity = lambda x: x increment = lambda x: x + 1 minus_five = lambda x: x - 5 def increment_and_square(x): y = x + 1 return y * y take_all = lambda x: constant_op.constant(True) is_zero = lambda x: math_ops.equal(x, 0) is_odd = lambda x: math_ops.equal(x % 2, 0) greater = lambda x: math_ops.greater(x + 5, 0) functions = [identity, increment, minus_five, increment_and_square] filters = [take_all, is_zero, is_odd, greater] tests = [] for x, fun in enumerate(functions): for y, predicate in enumerate(filters): tests.append(("Mixed{}{}".format(x, y), fun, predicate)) # Multi output tests.append(("Multi1", lambda x: (x, x), lambda x, y: constant_op.constant(True))) tests.append( ("Multi2", lambda x: (x, 2), lambda x, y: math_ops.equal(x * math_ops.cast(y, dtypes.int64), 0))) return tuple(tests) @parameterized.named_parameters(*map_and_filter_functions.__func__()) def testMapFilterFusion(self, function, predicate): dataset = dataset_ops.Dataset.range(10).apply( optimization.assert_next( ["Map", "FilterByLastComponent"])).map(function).filter(predicate).apply( optimization.optimize(["map_and_filter_fusion"])) self._testMapAndFilter(dataset, function, predicate) def _testMapAndFilter(self, dataset, function, predicate): iterator = dataset.make_one_shot_iterator() get_next = iterator.get_next() with self.test_session() as sess: for x in range(10): r = function(x) if isinstance(r, tuple): b = predicate(*r) # Pass tuple as multiple arguments. else: b = predicate(r) if sess.run(b): result = sess.run(get_next) self.assertAllEqual(r, result) with self.assertRaises(errors.OutOfRangeError): sess.run(get_next) def testAdditionalInputs(self): a = constant_op.constant(3, dtype=dtypes.int64) b = constant_op.constant(4, dtype=dtypes.int64) some_tensor = math_ops.mul(a, b) function = lambda x: x * x def predicate(y): return math_ops.less(math_ops.cast(y, dtypes.int64), some_tensor) # We are currently not supporting functions with additional inputs. 
dataset = dataset_ops.Dataset.range(10).apply( optimization.assert_next( ["Map", "Filter"])).map(function).filter(predicate).apply( optimization.optimize(["map_and_filter_fusion"])) self._testMapAndFilter(dataset, function, predicate) @staticmethod def filter_functions(): take_all = lambda x: constant_op.constant(True) is_zero = lambda x: math_ops.equal(x, 0) greater = lambda x: math_ops.greater(x + 5, 0) tests = [] filters = [take_all, is_zero, greater] identity = lambda x: x for x, predicate_1 in enumerate(filters): for y, predicate_2 in enumerate(filters): tests.append(("Mixed{}{}".format(x, y), identity, [predicate_1, predicate_2])) for z, predicate_3 in enumerate(filters): tests.append(("Mixed{}{}{}".format(x, y, z), identity, [predicate_1, predicate_2, predicate_3])) take_all_multiple = lambda x, y: constant_op.constant(True) # Multi output tests.append(("Multi1", lambda x: (x, x), [take_all_multiple, take_all_multiple])) tests.append(("Multi2", lambda x: (x, 2), [ take_all_multiple, lambda x, y: math_ops.equal(x * math_ops.cast(y, dtypes.int64), 0) ])) return tuple(tests) @parameterized.named_parameters(*filter_functions.__func__()) def testFilterFusion(self, map_function, predicates): dataset = dataset_ops.Dataset.range(5).apply( optimization.assert_next(["Map", "Filter", "Prefetch"])).map(map_function) for predicate in predicates: dataset = dataset.filter(predicate) dataset = dataset.prefetch(0).apply( optimization.optimize(["filter_fusion"])) iterator = dataset.make_one_shot_iterator() get_next = iterator.get_next() with self.test_session() as sess: for x in range(5): r = map_function(x) filtered = False for predicate in predicates: if isinstance(r, tuple): b = predicate(*r) # Pass tuple as multiple arguments. else: b = predicate(r) if not sess.run(b): filtered = True break if not filtered: result = sess.run(get_next) self.assertAllEqual(r, result) with self.assertRaises(errors.OutOfRangeError): sess.run(get_next) if __name__ == "__main__": test.main()
2.015625
2
feed/serializers/extensions.py
cul-it/arxiv-rss
4
800
"""Classes derived from the Feedgen extension classes.""" from typing import Dict, List, Optional from lxml import etree from lxml.etree import Element from flask import current_app from feedgen.ext.base import BaseEntryExtension, BaseExtension from feed.domain import Author, Media class ArxivExtension(BaseExtension): """Extension of the Feedgen class to allow us to change its behavior.""" def extend_atom(self: BaseExtension, atom_feed: Element) -> Element: """Allow the extension to modify the initial feed tree for Atom. Parameters ---------- atom_feed : Element The feed's root element. Returns ------- atom_feed : Element The feed's root element. """ return atom_feed def extend_rss(self: BaseExtension, rss_feed: Element) -> Element: """Allow the extension to modify the initial feed tree for RSS. Parameters ---------- rss_feed : Element The feed's root element. Returns ------- rss_feed : Element The feed's root element. """ return rss_feed def extend_ns(self: BaseExtension) -> Dict[str, str]: """ Define the feed's namespaces. Returns ------- namespaces : Dict[str, str] Definitions of the "arxiv" namespaces. """ return { "arxiv": "http://arxiv.org/schemas/atom", "content": "http://purl.org/rss/1.0/modules/content/", "taxo": "http://purl.org/rss/1.0/modules/taxonomy/", "syn": "http://purl.org/rss/1.0/modules/syndication/", "admin": "http://webns.net/mvcb/", "media": "http://search.yahoo.com/mrss", } class ArxivAtomExtension(BaseEntryExtension): """Atom only extension.""" def extend_ns(self: BaseExtension) -> Dict[str, str]: """ Define the feed's namespaces. Returns ------- namespaces : Dict[str, str] Definitions of the "arxiv" namespaces. """ return { "arxiv": "http://arxiv.org/schemas/atom", } class ArxivEntryExtension(BaseEntryExtension): """Extension of the Entry class to allow us to change its behavior.""" def __init__(self: BaseEntryExtension): """Initialize the member values to all be empty.""" self.__arxiv_authors: List[Author] = [] self.__arxiv_media: List[Media] = [] self.__arxiv_comment: Optional[str] = None self.__arxiv_primary_category: Optional[str] = None self.__arxiv_doi: Optional[dict] = None self.__arxiv_affiliation: Optional[str] = None self.__arxiv_journal_ref: Optional[str] = None self.__arxiv_affiliations: Dict = {} def __add_media(self, entry: Element) -> None: for media in self.__arxiv_media: group = etree.SubElement( entry, "{http://search.yahoo.com/mrss}group" ) title = etree.SubElement( group, "{http://search.yahoo.com/mrss}title" ) title.text = media.title etree.SubElement( group, "{http://search.yahoo.com/mrss}content", attrib={"url": media.url, "type": media.type}, ) def extend_atom(self, entry: Element) -> Element: """ Allow the extension to modify the entry element for Atom serialization. Parameters ---------- entry : Element The FeedEntry to modify. Returns ------- entry : Element The modified entry. 
""" if self.__arxiv_comment: comment_element = etree.SubElement( entry, "{http://arxiv.org/schemas/atom}comment" ) comment_element.text = self.__arxiv_comment if self.__arxiv_primary_category: etree.SubElement( entry, "{http://arxiv.org/schemas/atom}primary_category", attrib=self.__arxiv_primary_category, ) if self.__arxiv_journal_ref: journal_ref_element = etree.SubElement( entry, "{http://arxiv.org/schemas/atom}journal_ref" ) journal_ref_element.text = self.__arxiv_journal_ref if self.__arxiv_doi: for doi in self.__arxiv_doi: doi_element = etree.SubElement( entry, "{http://arxiv.org/schemas/atom}doi" ) doi_element.text = doi # Check each of the entry's author nodes for entry_child in entry: if entry_child.tag == "author": author = entry_child for author_child in author: # If the author's name is in the affiliation dictionary, # add Elements for all of its affiliations. if author_child.tag == "name": name = author_child.text affiliations = self.__arxiv_affiliations.get(name, []) for affiliation in affiliations: element = etree.SubElement( author, "{http://arxiv.org/schemas/atom}affiliation", ) element.text = affiliation self.__add_media(entry=entry) return entry def extend_rss(self, entry: Element) -> Element: """Allow the extension to modify the entry element for RSS. Parameters ---------- entry : Element The FeedEntry to modify. Returns ------- entry : Element The modified entry. """ base_server: str = current_app.config["BASE_SERVER"] for entry_child in entry: if entry_child.tag == "description": description = "<p>Authors: " first = True for author in self.__arxiv_authors: if first: first = False else: description += ", " name = ( f"{author.last_name}," f"+{author.initials.replace(' ', '+')}" ) description += ( f'<a href="http://{base_server}/search/?query={name}&' f'searchtype=author">{author.full_name}</a>' ) description += f"</p><p>{entry_child.text}</p>" entry_child.text = description self.__add_media(entry=entry) return entry def author(self, author: Author) -> None: """Add an author value to this entry. Parameters ---------- author : Author Paper author. """ self.__arxiv_authors.append(author) def media(self, media: Media) -> None: """Add a media item. Parameters ---------- media: Dict[str, str] Dictionary with url and type attributes. """ self.__arxiv_media.append(media) def comment(self, text: str) -> None: """Assign the comment value to this entry. Parameters ---------- text : str The new comment text. """ self.__arxiv_comment = text def primary_category(self, text: str) -> None: """Assign the primary_category value to this entry. Parameters ---------- text : str The new primary_category name. """ self.__arxiv_primary_category = text def journal_ref(self, text: str) -> None: """Assign the journal_ref value to this entry. Parameters ---------- text : str The new journal_ref value. """ self.__arxiv_journal_ref = text def doi(self, doi_list: Dict[str, str]) -> None: """Assign the set of DOI definitions for this entry. Parameters ---------- doi_list : Dict[str, str] A dictionary of DOI assignments. """ self.__arxiv_doi = doi_list def affiliation(self, full_name: str, affiliations: List[str]) -> None: """Assign an affiliation for one author of this entry. Parameters ---------- full_name : str An author's full name. affiliations : List[str] The code for the author's affiliated institution. """ self.__arxiv_affiliations[full_name] = affiliations
1.453125
1
minus80/RawFile.py
brohammer/Minus80
0
808
import gzip #pragma: no cover
import bz2 #pragma: no cover
import lzma #pragma: no cover


class RawFile(object): #pragma: no cover
    def __init__(self, filename):
        self.filename = filename
        if filename.endswith('.gz'):
            self.handle = gzip.open(filename, 'rt')
        elif filename.endswith('bz2'):
            self.handle = bz2.open(filename, 'rt')
        elif filename.endswith('xz'):
            self.handle = lzma.open(filename, 'rt')
        else:
            self.handle = open(filename, 'r')

    def __enter__(self):
        return self.handle

    def __exit__(self, dtype, value, traceback):
        self.handle.close()
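A minimal usage sketch for RawFile; the filename is a placeholder, and the class simply picks the right opener from the extension and hands back the text handle as a context manager.

# Hypothetical path: .gz, .bz2, .xz or plain text all work the same way.
with RawFile("variants.vcf.gz") as handle:
    for line in handle:
        print(line.rstrip())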
1.804688
2
pybyte/session.py
ms7m/py-byte
4
816
import requests


class ByteSession(object):
    def __init__(self, token, providedSession=False):
        self._userToken = token
        if providedSession == False:
            self._session = requests.session()
        else:
            self._session = providedSession
        self._session.headers = {
            "Authorization": token,
            "User-Agent": "byte/0.2 (co.byte.video; build:145; iOS 13.3.0) Alamofire/4.9.1"
        }

    def session(self):
        return self._session
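A minimal usage sketch, assuming a valid byte token; the commented-out request target is a placeholder rather than a documented endpoint.

session = ByteSession("YOUR_TOKEN").session()
print(session.headers["User-Agent"])  # byte/0.2 (...) Alamofire/4.9.1
# response = session.get("https://example.invalid/some-byte-endpoint")  # placeholder URL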
1.3125
1
authors/apps/notifications/views.py
andela/ah-backend-spaces-
2
824
from rest_framework import status
from rest_framework.generics import (
    RetrieveUpdateAPIView, CreateAPIView, RetrieveUpdateDestroyAPIView
)
from rest_framework.permissions import AllowAny, IsAuthenticated
from rest_framework.response import Response
from rest_framework.views import APIView

from ..authentication.backends import JWTAuthentication
from ..authentication.models import User
from .models import Notifications
from .renderers import (
    NotificationsJSONRenderer
)
from .serializers import (
    NotificationsAPIViewSerializer, GetNotificationsAPIViewSerializer
)


class NotificationsAPIView(RetrieveUpdateAPIView):
    permission_classes = (IsAuthenticated,)
    renderer_classes = (NotificationsJSONRenderer,)

    def put(self, request):
        """Mark a user's notifications as read."""
        serializer_class = NotificationsAPIViewSerializer
        notification = request.data.get('notification', {})
        user_data = JWTAuthentication().authenticate(request)

        # append user_id from the token to the notification payload for later validations in serializers
        notification["user_id"] = user_data[1]

        serializer = serializer_class(data=notification)
        serializer.is_valid(raise_exception=True)

        # update the notification read status to True
        serializer.update_read_status(serializer.data["notifications"])

        return Response(serializer.data, status=status.HTTP_201_CREATED)

    def get(self, request):
        """Retrieve all notifications of a user."""
        # decode the user's authentication token
        user_data = JWTAuthentication().authenticate(request)

        # get the user's notification details from the Notifications table in the database
        notifications = Notifications.objects.filter(notification_owner=user_data[1]).values(
            "id", "article_id", "notification_title", "notification_body", "notification_owner", "read_status"
        )

        # build a plain list of notifications from the queryset
        list_of_notifications = [i for i in notifications]

        return Response({"notifications": list_of_notifications}, status=status.HTTP_200_OK)
1.695313
2
v1/status_updates/urls.py
DucPhamTV/Bank
94
864
from rest_framework.routers import SimpleRouter from .views.upgrade_notice import UpgradeNoticeViewSet router = SimpleRouter(trailing_slash=False) router.register('upgrade_notice', UpgradeNoticeViewSet, basename='upgrade_notice')
0.722656
1
AGC004/AGC004a.py
VolgaKurvar/AtCoder
0
880
# AGC004a def main(): import sys input = sys.stdin.readline sys.setrecursionlimit(10**6) a, b, c = map(int, input().split()) if a % 2 == 0 or b % 2 == 0 or c % 2 == 0: print(0) exit(0) print(min(a*b, b*c, c*a)) if __name__ == '__main__': main()
1.890625
2
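The heart of the AGC004a script above is a two-line decision: print 0 if any of the three values is even, otherwise the minimum pairwise product. The restatement below pulls that logic into a pure function so it can be spot-checked without stdin; the function name and the sample values are mine, not part of the original submission.

def min_pairwise_or_zero(a: int, b: int, c: int) -> int:
    # Mirrors the condition in the script above: any even value gives 0,
    # otherwise the minimum of the three pairwise products.
    if a % 2 == 0 or b % 2 == 0 or c % 2 == 0:
        return 0
    return min(a * b, b * c, c * a)

assert min_pairwise_or_zero(3, 3, 3) == 9
assert min_pairwise_or_zero(2, 3, 5) == 0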
fairseq/models/bart/model.py
samsontmr/fairseq
172
888
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. """ BART: Denoising Sequence-to-Sequence Pre-training for Natural Language Generation, Translation, and Comprehension """ import torch.nn as nn from fairseq import utils from fairseq.models import ( register_model, register_model_architecture, ) from fairseq.models.transformer import TransformerModel from fairseq.modules.transformer_sentence_encoder import init_bert_params from .hub_interface import BARTHubInterface @register_model('bart') class BARTModel(TransformerModel): @classmethod def hub_models(cls): return { 'bart.large': 'http://dl.fbaipublicfiles.com/fairseq/models/bart.large.tar.gz', 'bart.large.mnli': 'http://dl.fbaipublicfiles.com/fairseq/models/bart.large.mnli.tar.gz', } def __init__(self, args, encoder, decoder): super().__init__(args, encoder, decoder) # We follow BERT's random weight initialization self.apply(init_bert_params) self.classification_heads = nn.ModuleDict() @staticmethod def add_args(parser): super(BARTModel, BARTModel).add_args(parser) parser.add_argument( '--max-source-positions', default=1024, type=int, metavar='N', help='max number of tokens in the source sequence' ) parser.add_argument( '--max-target-positions', default=1024, type=int, metavar='N', help='max number of tokens in the target sequence' ) parser.add_argument( '--pooler-dropout', type=float, metavar='D', help='dropout probability in the masked_lm pooler layers' ) parser.add_argument( '--pooler-activation-fn', choices=utils.get_available_activation_fns(), help='activation function to use for pooler layer' ) @property def supported_targets(self): return {'self'} def forward( self, src_tokens, src_lengths, prev_output_tokens, features_only=False, classification_head_name=None, **kwargs ): if classification_head_name is not None: features_only = True encoder_out = self.encoder( src_tokens, src_lengths=src_lengths, **kwargs, ) x, extra = self.decoder( prev_output_tokens, encoder_out=encoder_out, features_only=features_only, **kwargs, ) if classification_head_name is not None: sentence_representation = x[ src_tokens.eq(self.encoder.dictionary.eos()), : ].view(x.size(0), -1, x.size(-1))[:, -1, :] x = self.classification_heads[classification_head_name]( sentence_representation ) return x, extra @classmethod def from_pretrained( cls, model_name_or_path, checkpoint_file='model.pt', data_name_or_path='.', bpe='gpt2', **kwargs, ): from fairseq import hub_utils x = hub_utils.from_pretrained( model_name_or_path, checkpoint_file, data_name_or_path, archive_map=cls.hub_models(), bpe=bpe, load_checkpoint_heads=True, **kwargs, ) return BARTHubInterface(x['args'], x['task'], x['models'][0]) def register_classification_head(self, name, num_classes=None, inner_dim=None, **kwargs): """Register a classification head.""" print("Registering classification head: {0}".format(name)) if name in self.classification_heads: prev_num_classes = self.classification_heads[name].out_proj.out_features prev_inner_dim = self.classification_heads[name].dense.out_features if num_classes != prev_num_classes or inner_dim != prev_inner_dim: print( 'WARNING: re-registering head "{}" with num_classes {} (prev: {}) ' 'and inner_dim {} (prev: {})'.format( name, num_classes, prev_num_classes, inner_dim, prev_inner_dim ) ) self.classification_heads[name] = BARTClassificationHead( self.args.encoder_embed_dim, inner_dim or self.args.encoder_embed_dim, num_classes, 
self.args.pooler_activation_fn, self.args.pooler_dropout, ) def upgrade_state_dict_named(self, state_dict, name): super().upgrade_state_dict_named(state_dict, name) prefix = name + '.' if name != '' else '' current_head_names = [] if not hasattr(self, 'classification_heads') else \ self.classification_heads.keys() # Handle new classification heads present in the state dict. keys_to_delete = [] for k in state_dict.keys(): if not k.startswith(prefix + 'classification_heads.'): continue head_name = k[len(prefix + 'classification_heads.'):].split('.')[0] num_classes = state_dict[prefix + 'classification_heads.' + head_name + '.out_proj.weight'].size(0) inner_dim = state_dict[prefix + 'classification_heads.' + head_name + '.dense.weight'].size(0) if getattr(self.args, 'load_checkpoint_heads', False): if head_name not in current_head_names: self.register_classification_head(head_name, num_classes, inner_dim) else: if head_name not in current_head_names: print( 'WARNING: deleting classification head ({}) from checkpoint ' 'not present in current model: {}'.format(head_name, k) ) keys_to_delete.append(k) elif ( num_classes != self.classification_heads[head_name].out_proj.out_features or inner_dim != self.classification_heads[head_name].dense.out_features ): print( 'WARNING: deleting classification head ({}) from checkpoint ' 'with different dimensions than current model: {}'.format(head_name, k) ) keys_to_delete.append(k) for k in keys_to_delete: del state_dict[k] # Copy any newly-added classification heads into the state dict # with their current weights. if hasattr(self, 'classification_heads'): cur_state = self.classification_heads.state_dict() for k, v in cur_state.items(): if prefix + 'classification_heads.' + k not in state_dict: print('Overwriting', prefix + 'classification_heads.' + k) state_dict[prefix + 'classification_heads.' 
+ k] = v class BARTClassificationHead(nn.Module): """Head for sentence-level classification tasks.""" def __init__( self, input_dim, inner_dim, num_classes, activation_fn, pooler_dropout, ): super().__init__() self.dense = nn.Linear(input_dim, inner_dim) self.activation_fn = utils.get_activation_fn(activation_fn) self.dropout = nn.Dropout(p=pooler_dropout) self.out_proj = nn.Linear(inner_dim, num_classes) def forward(self, features, **kwargs): x = features x = self.dropout(x) x = self.dense(x) x = self.activation_fn(x) x = self.dropout(x) x = self.out_proj(x) return x @register_model_architecture('bart', 'bart_large') def bart_large_architecture(args): args.encoder_embed_path = getattr(args, 'encoder_embed_path', None) args.encoder_embed_dim = getattr(args, 'encoder_embed_dim', 1024) args.encoder_ffn_embed_dim = getattr(args, 'encoder_ffn_embed_dim', 4*1024) args.encoder_layers = getattr(args, 'encoder_layers', 12) args.encoder_attention_heads = getattr(args, 'encoder_attention_heads', 16) args.encoder_normalize_before = getattr(args, 'encoder_normalize_before', False) args.encoder_learned_pos = getattr(args, 'encoder_learned_pos', True) args.decoder_embed_path = getattr(args, 'decoder_embed_path', None) args.decoder_embed_dim = getattr(args, 'decoder_embed_dim', args.encoder_embed_dim) args.decoder_ffn_embed_dim = getattr(args, 'decoder_ffn_embed_dim', args.encoder_ffn_embed_dim) args.decoder_layers = getattr(args, 'decoder_layers', 12) args.decoder_attention_heads = getattr(args, 'decoder_attention_heads', 16) args.decoder_normalize_before = getattr(args, 'decoder_normalize_before', False) args.decoder_learned_pos = getattr(args, 'decoder_learned_pos', True) args.attention_dropout = getattr(args, 'attention_dropout', 0.) args.relu_dropout = getattr(args, 'relu_dropout', 0.) args.dropout = getattr(args, 'dropout', 0.1) args.max_target_positions = getattr(args, 'max_target_positions', 1024) args.max_source_positions = getattr(args, 'max_source_positions', 1024) args.adaptive_softmax_cutoff = getattr(args, 'adaptive_softmax_cutoff', None) args.adaptive_softmax_dropout = getattr(args, 'adaptive_softmax_dropout', 0) args.share_decoder_input_output_embed = getattr(args, 'share_decoder_input_output_embed', True) args.share_all_embeddings = getattr(args, 'share_all_embeddings', True) args.decoder_output_dim = getattr(args, 'decoder_output_dim', args.decoder_embed_dim) args.decoder_input_dim = getattr(args, 'decoder_input_dim', args.decoder_embed_dim) args.no_scale_embedding = getattr(args, 'no_scale_embedding', True) args.layernorm_embedding = getattr(args, 'layernorm_embedding', True) args.activation_fn = getattr(args, 'activation_fn', 'gelu') args.pooler_activation_fn = getattr(args, 'pooler_activation_fn', 'tanh') args.pooler_dropout = getattr(args, 'pooler_dropout', 0.0)
2.375
2
wce_triage/ops/create_image_runner.py
pfrouleau/wce-triage-v2
3
912
#!/usr/bin/env python3
#
# Create disk image
#
import re, sys, traceback

from .tasks import task_fetch_partitions, task_refresh_partitions, task_mount, task_remove_persistent_rules, task_remove_logs, task_fsck, task_shrink_partition, task_expand_partition, task_unmount
from .partclone_tasks import task_create_disk_image
from .ops_ui import console_ui
from ..components.disk import create_storage_instance
from .runner import Runner
from ..lib.disk_images import make_disk_image_name
from .json_ui import json_ui
from ..lib.util import init_triage_logger, is_block_device

# "Waiting", "Prepare", "Preflight", "Running", "Success", "Failed"]
my_messages = {
    "Waiting": "Saving disk is waiting.",
    "Prepare": "Saving disk is preparing.",
    "Preflight": "Saving disk is preparing.",
    "Running": "{step} of {steps}: Running {task}",
    "Success": "Saving disk completed successfully.",
    "Failed": "Saving disk failed."
}

#
class ImageDiskRunner(Runner):
    '''Runner for creating a disk image. Runs fsck, shrinks the partition, creates the disk image
       and resizes the file system back to the maximum.
       For now, this only deals with the EXT4 Linux partition.
    '''
    # FIXME: If I want to make this into a generic clone app, I need to deal with all of the partitions on the disk.
    # One step at a time.

    def __init__(self, ui, runner_id, disk, destdir, suggestedname=None, partition_id='Linux'):
        super().__init__(ui, runner_id)
        self.time_estimate = 600
        self.disk = disk
        self.partition_id = partition_id
        self.destdir = destdir
        self.imagename = make_disk_image_name(destdir, suggestedname)
        pass

    def prepare(self):
        super().prepare()
        # self.tasks.append(task_mount_nfs_destination(self, "Mount the destination volume"))
        self.tasks.append(task_fetch_partitions("Fetch partitions", self.disk))
        self.tasks.append(task_refresh_partitions("Refresh partition information", self.disk))
        self.tasks.append(task_mount("Mount the target disk", disk=self.disk, partition_id=self.partition_id))
        self.tasks.append(task_remove_persistent_rules("Remove persistent rules", disk=self.disk, partition_id=self.partition_id))
        self.tasks.append(task_remove_logs("Remove/Clean Logs", disk=self.disk, partition_id=self.partition_id))
        task = task_unmount("Unmount target", disk=self.disk, partition_id=self.partition_id)
        task.set_teardown_task()
        self.tasks.append(task)
        self.tasks.append(task_fsck("fsck partition", disk=self.disk, partition_id=self.partition_id))
        self.tasks.append(task_shrink_partition("Shrink partition to smallest", disk=self.disk, partition_id=self.partition_id))
        self.tasks.append(task_create_disk_image("Create disk image", disk=self.disk, partition_id=self.partition_id, imagename=self.imagename))
        task = task_expand_partition("Expand the partition back", disk=self.disk, partition_id=self.partition_id)
        task.set_teardown_task()
        self.tasks.append(task)
        pass
    pass


if __name__ == "__main__":
    tlog = init_triage_logger()
    if len(sys.argv) == 1:
        print('Usage: devicename part destdir')
        sys.exit(0)
        # NOTREACHED
        pass
    devname = sys.argv[1]
    if not is_block_device(devname):
        print('%s is not a block device.' % devname)
        sys.exit(1)
        # NOTREACHED
        pass
    part = sys.argv[2]      # This is a partition id
    destdir = sys.argv[3]   # Destination directory
    disk = create_storage_instance(devname)

    # Preflight is for me to see the tasks. The http server runs this with json_ui.
    do_it = True
    if destdir == "preflight":
        ui = console_ui()
        do_it = False
        pass
    elif destdir == "testflight":
        ui = console_ui()
        do_it = True
        pass
    else:
        ui = json_ui(wock_event="saveimage", message_catalog=my_messages)
        pass

    if re.match(r'\d+', part):
        part = int(part)
        pass

    runner_id = disk.device_name
    runner = ImageDiskRunner(ui, runner_id, disk, destdir, partition_id=part)
    try:
        runner.prepare()
        runner.preflight()
        runner.explain()
        runner.run()
        sys.exit(0)
        # NOTREACHED
    except Exception as exc:
        sys.stderr.write(traceback.format_exc() + "\n")
        sys.exit(1)
        # NOTREACHED
        pass
    pass
2.03125
2
bin/focus_scan.py
desihub/desicmx
3
928
#!/usr/bin/env python import astropy.io.fits as fits import numpy as np import os import matplotlib.pyplot as plt import argparse def _fname(expid, night, basedir='/n/home/datasystems/users/ameisner/reduced/focus', ccds=False): fname = basedir + '/' + night + '/' + str(expid).zfill(8) + '/gfa-' + str(expid).zfill(8) + '_psfs.fits' if ccds: fname = fname.replace('_psfs.fits', '_ccds.fits') return fname def _actual_expid_list(expids, night, basedir='/n/home/datasystems/users/ameisner/reduced/focus'): keep = [] for i, expid in enumerate(expids): fname = _fname(expid, night, basedir=basedir, ccds=True) if not os.path.exists(fname): continue tab = fits.getdata(fname) # try to handle case where observer accidentally lists the 'setup focus scan' # 1 second exposure as the start of the focus scan if (i == 0) & (tab[0]['EXPTIME'] < 1.1): print('SKIPPING DUMMY SETUP EXPOSURE') continue program = tab[0]['PROGRAM'].strip() if program != 'focus scan': break keep.append(expid) return keep def focus_plots(night, expids, basedir='/n/home/datasystems/users/ameisner/reduced/focus', outdir='/n/home/desiobserver/focus_scan_pngs', no_popups=False): expids = _actual_expid_list(expids, night, basedir=basedir) if len(expids) == 0: print('NO FOCUS SCAN EXPOSURES TO ANALYZE ??') assert(False) plt.figure(1, figsize=(12.0*(len(expids)/7.0), 9)) extnames = ['GUIDE0', 'GUIDE2', 'GUIDE3', 'GUIDE5', 'GUIDE7', 'GUIDE8'] focus_z = [] fwhm_pix = [] # PSF stamps plot plt.subplots_adjust(hspace=0.01, wspace=0.01) for i, expid in enumerate(expids): fname = _fname(expid, night, basedir=basedir) print(fname) fname_ccds = _fname(expid, night, basedir=basedir, ccds=True) if not os.path.exists(fname): continue ccds = fits.getdata(fname_ccds) if np.sum(np.isfinite(ccds['PSF_FWHM_PIX'])) != 0: fwhm_pix.append(np.median(ccds['PSF_FWHM_PIX'][np.isfinite(ccds['PSF_FWHM_PIX'])])) focus_z.append(float(ccds[0]['FOCUS'].split(',')[2])) hdul = fits.open(fname) extnames_present = [hdu.header['EXTNAME'] for hdu in hdul] for j, extname in enumerate(extnames): if extname not in extnames_present: continue print(i, j) plt.subplot(6, len(expids), len(expids)*j + i + 1) plt.xticks([]) plt.yticks([]) im = fits.getdata(fname, extname=extname) plt.imshow(im, interpolation='nearest', origin='lower', cmap='gray_r', vmin=0.01) plt.text(5, 44, str(expid) + '; ' + extname, color='r', fontsize=9) plt.text(10, 3.5, 'z = ' + str(int(float(ccds[0]['FOCUS'].split(',')[2]))), color='r') if np.isfinite(ccds[j]['XCENTROID_PSF']) and np.isfinite(ccds[j]['YCENTROID_PSF']): plt.scatter([ccds[j]['XCENTROID_PSF']], [ccds[j]['YCENTROID_PSF']], marker='.', c='r') expid_min = int(np.min(expids)) print(focus_z) print(fwhm_pix) plt.savefig(os.path.join(outdir, 'stamps_focus_scan-' + str(expid_min).zfill(8)+'.png'), bbox_inches='tight') #plt.cla() plt.figure(200) asec_per_pix = 0.205 focus_z = np.array(focus_z) fwhm_asec = np.array(fwhm_pix)*asec_per_pix plt.scatter(focus_z, fwhm_asec) plt.xlabel('focus z (micron)') plt.ylabel('FWHM (asec)') coeff = np.polyfit(focus_z, fwhm_asec, 2) xsamp = np.arange(np.min(focus_z), np.max(focus_z)) ysamp = coeff[0]*(np.power(xsamp, 2)) + coeff[1]*xsamp + coeff[2] plt.title('focus scan starting with EXPID = ' + str(expid_min)) plt.plot(xsamp, ysamp) zmin = -coeff[1]/(2*coeff[0]) min_fwhm_fit_asec = coeff[0]*(zmin**2) + coeff[1]*zmin + coeff[2] yrange = [np.min(fwhm_asec), np.max(fwhm_asec)] plt.text(focus_z[2], yrange[0] + 0.8*(yrange[1]-yrange[0]), 'best FWHM (meas) : ' + '{:.2f}'.format(np.min(fwhm_asec))) plt.text(focus_z[2], yrange[0] + 
0.7*(yrange[1]-yrange[0]), 'best FWHM (fit) : ' + '{:.2f}'.format(min_fwhm_fit_asec)) plt.text(focus_z[2], yrange[0] + 0.9*(yrange[1]-yrange[0]), 'best focus : ' + str(int(np.round(zmin)))) plt.savefig(os.path.join(outdir, 'fit_focus_scan-' + str(expid_min).zfill(8) + '.png'), bbox_inches='tight') if not no_popups: plt.show() def _test(): night = '20200131' expids = 45446 + np.arange(7) focus_plots(night, expids, basedir='/project/projectdirs/desi/users/ameisner/GFA/run/psf_flux_weighted_centroid', outdir='.') def _test_missing_cam(): night = '20200131' expids = 45485 + np.arange(7) focus_plots(night, expids, basedir='/project/projectdirs/desi/users/ameisner/GFA/run/psf_flux_weighted_centroid') if __name__ == "__main__": descr = 'GFA focus sequence plots/analysis' parser = argparse.ArgumentParser(description=descr) parser.add_argument('first_expid', type=int, nargs=1) parser.add_argument('night', type=str, nargs=1) parser.add_argument('--basedir', default='/n/home/datasystems/users/ameisner/reduced/focus', type=str, help='base directory for GFA reductions') parser.add_argument('--outdir', default='/n/home/desiobserver/focus_scan_pngs', type=str, help='output directory for plot PNGs') parser.add_argument('--no_popups', default=False, action='store_true', help='write PNGs without popping up plot windows') args = parser.parse_args() expids = args.first_expid + np.arange(16, dtype=int) print(expids) print(args.night[0]) print(args.basedir) outdir = args.outdir if os.path.exists(args.outdir) else '.' focus_plots(args.night[0], expids, basedir=args.basedir, outdir=outdir, no_popups=args.no_popups)
1.546875
2
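The best-focus estimate in focus_plots boils down to fitting FWHM(z) with a quadratic and taking the vertex z_min = -b / (2a). The snippet below is a self-contained illustration of that step on synthetic numbers (not real GFA data), using the same np.polyfit call.

import numpy as np

focus_z = np.array([-300.0, -200.0, -100.0, 0.0, 100.0, 200.0, 300.0])
fwhm_asec = 1.0 + 2e-6 * (focus_z - 50.0) ** 2   # fake focus curve with its minimum at z = 50

a, b, c = np.polyfit(focus_z, fwhm_asec, 2)
z_min = -b / (2 * a)                              # vertex of the parabola = best focus
best_fwhm = a * z_min ** 2 + b * z_min + c

print(round(z_min, 1), round(best_fwhm, 3))       # ~50.0 and ~1.0 for this synthetic curve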
src/unicon/plugins/confd/csp/__init__.py
tahigash/unicon.plugins
1
936
from unicon.plugins.confd import ConfdServiceList, ConfdConnection, ConfdConnectionProvider from .statemachine import CspStateMachine from .settings import CspSettings from . import service_implementation as csp_svc class CspServiceList(ConfdServiceList): def __init__(self): super().__init__() delattr(self, 'cli_style') self.reload = csp_svc.Reload class CspSingleRPConnection(ConfdConnection): os = 'confd' series = 'csp' chassis_type = 'single_rp' state_machine_class = CspStateMachine connection_provider_class = ConfdConnectionProvider subcommand_list = CspServiceList settings = CspSettings()
0.855469
1
main.py
BenG49/sudoku
0
944
from sudoku import Sudoku def main(): s = Sudoku.parse( ''' ------------- | |2 | | | | 6 |4 3| | | 5| 7 | ------------- | 7 | 2|8 | |51 | 4|9 | | 9| 3| | ------------- | | 9| | | 2| | 98| | 83|1 |2 | ------------- ''' ) print(s) print(s.solve()) if __name__ == '__main__': main()
1.945313
2
tests/model/test_guest.py
bcurnow/rfid-security-svc
0
968
import pytest from unittest.mock import patch import rfidsecuritysvc.model.guest as model from rfidsecuritysvc.model.color import Color from rfidsecuritysvc.model.guest import Guest from rfidsecuritysvc.model.sound import Sound from rfidsecuritysvc.exception import SoundNotFoundError def test_Guest(assert_model, default_sound, default_color): assert_model(_model(1, 'first', 'last', default_sound, default_color), Guest(1, 'first', 'last', default_sound, default_color)) @patch('rfidsecuritysvc.model.guest.table') def test_get(table): table.get.return_value = _default().test_to_row() assert model.get(1) == _default() table.get.assert_called_once_with(1) @patch('rfidsecuritysvc.model.guest.table') def test_get_notfound(table): table.get.return_value = None assert model.get(1) is None table.get.assert_called_once_with(1) @patch('rfidsecuritysvc.model.guest.table') def test_list(table): table.list.return_value = [ _default().test_to_row(), _default(2).test_to_row(), ] models = model.list() table.list.assert_called_once() assert models == [_default(), _default(2)] @patch('rfidsecuritysvc.model.guest.table') def test_list_noresults(table): table.list.return_value = [] models = model.list() table.list.assert_called_once() assert models == [] @patch('rfidsecuritysvc.model.guest.sound_model') @patch('rfidsecuritysvc.model.guest.table') def test_create(table, sound, default_sound): sound.get.return_value = default_sound table.create.return_value = None assert model.create('first', 'last', default_sound.id, 0xABCDEF) is None sound.get.assert_called_once_with(default_sound.id) table.create.assert_called_once_with('first', 'last', default_sound.id, 0xABCDEF) @patch('rfidsecuritysvc.model.guest.sound_model') @patch('rfidsecuritysvc.model.guest.table') def test_create_SoundNotFoundError(table, sound, default_sound): sound.get.return_value = None with pytest.raises(SoundNotFoundError): model.create('first', 'last', default_sound.id, 0xABCDEF) sound.get.assert_called_once_with(default_sound.id) table.create.assert_not_called() @patch('rfidsecuritysvc.model.guest.sound_model') @patch('rfidsecuritysvc.model.guest.table') def test_create_no_prefs(table, sound, default_sound): table.create.return_value = None assert model.create('first', 'last', None, None) is None sound.get.assert_not_called() table.create.assert_called_once_with('first', 'last', None, None) @patch('rfidsecuritysvc.model.guest.table') def test_delete(table): table.delete.return_value = 1 assert model.delete(1) == 1 table.delete.assert_called_with(1) @patch('rfidsecuritysvc.model.guest.sound_model') @patch('rfidsecuritysvc.model.guest.table') def test_update(table, sound, default_sound): sound.get.return_value = default_sound table.update.return_value = 1 assert model.update(1, 'first', 'last', default_sound.id, 0xABCDEF) == 1 sound.get.assert_called_once_with(default_sound.id) table.update.assert_called_once_with(1, 'first', 'last', default_sound.id, 0xABCDEF) @patch('rfidsecuritysvc.model.guest.sound_model') @patch('rfidsecuritysvc.model.guest.table') def test_update_no_prefs(table, sound, default_sound): table.update.return_value = 1 assert model.update(1, 'first', 'last', None, None) == 1 sound.get.assert_not_called() table.update.assert_called_once_with(1, 'first', 'last', None, None) @patch('rfidsecuritysvc.model.guest.sound_model') @patch('rfidsecuritysvc.model.guest.table') def test_update_SoundNotFoundError(table, sound, default_sound): table.update.return_value = 1 sound.get.return_value = None with pytest.raises(SoundNotFoundError): 
model.update(1, 'first', 'last', default_sound.id, 0xABCDEF) sound.get.assert_called_once_with(default_sound.id) table.update.assert_not_called() def test__model_no_color(creatable_guest): row = creatable_guest.test_to_row() row['color'] = None g = model.__model(row) assert g.color is None def test__model_no_sound(creatable_guest): row = creatable_guest.test_to_row() row['sound'] = None g = model.__model(row) assert g.sound is None def _default(index=1): return _model(index, f'first {index}', f'last {index}', Sound(index, f'sound_name {index}', '2021-09-25 23:13:25'), Color(0xABCDEF)) def _model(id, first_name, last_name, sound, color): return Guest(id, first_name, last_name, sound, color)
1.367188
1
metrics.py
AndreasLH/Image-Colourization
1
992
from math import log10, sqrt
import cv2
import numpy as np


def PSNR(original, compressed):
    '''
    Calculates the peak signal-to-noise ratio between a ground-truth image and a predicted image.
    See https://www.geeksforgeeks.org/python-peak-signal-to-noise-ratio-psnr/ for reference.

    Parameters
    ----------
    original : cv2 image
        true image
    compressed : cv2 image
        predicted image

    Returns
    -------
    PSNR score
    '''
    # cast to float so the uint8 subtraction cannot wrap around
    mse = np.mean((original.astype(np.float64) - compressed.astype(np.float64)) ** 2)
    if mse == 0:
        # An MSE of zero means no noise is present in the signal,
        # so the PSNR is unbounded; treat it as a perfect match.
        return 100
    max_pixel = 255.0
    psnr = 20 * log10(max_pixel / sqrt(mse))
    return psnr


def colourfulnessMetric(img):
    """
    Created on Mon Nov 15 10:55:16 2021

    @author: Yucheng

    Parameters
    ----------
    img : cv2 RGB image

    Returns
    -------
    M : colourfulness metric
        -----------------------------
        |not colourful        |   0 |
        |slightly colourful   |  15 |
        |moderately colourful |  33 |
        |averagely colourful  |  45 |
        |quite colourful      |  59 |
        |highly colourful     |  82 |
        |extremely colourful  | 109 |
        -----------------------------
    """
    # Get RGB components
    R, G, B = cv2.split(img.astype("float"))

    # colourfulness metric from Hasler et al., section 7
    rg = R - G
    yb = (1 / 2) * (R + G) - B
    sigma_rgyb = np.sqrt(np.var(rg) + np.var(yb))
    mu_rgyb = np.sqrt(np.mean(rg) ** 2 + np.mean(yb) ** 2)
    M = sigma_rgyb + 0.3 * mu_rgyb
    return M


def main():
    import matplotlib.pyplot as plt

    original = cv2.imread("test_imgs/original_image.png")
    compressed = cv2.imread("test_imgs/compressed_image1.png", 1)
    value = PSNR(original, compressed)
    print(f"PSNR value is {value} dB")

    img2 = cv2.imread("rainbow.jpg")  # opens as BGR
    img2 = cv2.cvtColor(img2, cv2.COLOR_BGR2RGB)
    plt.imshow(img2[:, :, :])
    plt.show()
    M = colourfulnessMetric(img2)
    print(M)


if __name__ == "__main__":
    main()
2.765625
3
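A quick, self-contained sanity check of the two metrics above on synthetic arrays. It assumes the file is saved as metrics.py (per this record's path) so the functions can be imported; the grey and colour-band images are made up for illustration.

import numpy as np
from metrics import PSNR, colourfulnessMetric   # assumes the module above is saved as metrics.py

grey = np.full((64, 64, 3), 128, dtype=np.uint8)   # flat grey image
rgb = np.zeros((64, 64, 3), dtype=np.uint8)
rgb[:, :21, 0] = 255                               # red, green and blue vertical bands
rgb[:, 21:42, 1] = 255
rgb[:, 42:, 2] = 255

print(PSNR(grey, grey))                        # 100: identical images are treated as a perfect match
print(round(colourfulnessMetric(grey), 2))     # 0.0: a flat grey image is "not colourful"
print(colourfulnessMetric(rgb) > 82)           # True: saturated colour bands score as highly colourful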
superset/typing.py
GodelTech/superset
7
1008
# Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. from datetime import datetime from typing import Any, Callable, Dict, List, Optional, Sequence, Tuple, Union from flask import Flask from flask_caching import Cache from werkzeug.wrappers import Response CacheConfig = Union[Callable[[Flask], Cache], Dict[str, Any]] DbapiDescriptionRow = Tuple[ str, str, Optional[str], Optional[str], Optional[int], Optional[int], bool ] DbapiDescription = Union[List[DbapiDescriptionRow], Tuple[DbapiDescriptionRow, ...]] DbapiResult = Sequence[Union[List[Any], Tuple[Any, ...]]] FilterValue = Union[datetime, float, int, str] FilterValues = Union[FilterValue, List[FilterValue], Tuple[FilterValue]] FormData = Dict[str, Any] Granularity = Union[str, Dict[str, Union[str, float]]] AdhocMetric = Dict[str, Any] Metric = Union[AdhocMetric, str] OrderBy = Tuple[Metric, bool] QueryObjectDict = Dict[str, Any] VizData = Optional[Union[List[Any], Dict[Any, Any]]] VizPayload = Dict[str, Any] # Flask response. Base = Union[bytes, str] Status = Union[int, str] Headers = Dict[str, Any] FlaskResponse = Union[ Response, Base, Tuple[Base, Status], Tuple[Base, Status, Headers], ]
1.21875
1
train/general_train_example/1_parse.py
ss433s/sosweety
0
1016
import os, sys
import json

# Get the current path and locate the project root via the anchor file
this_file_path = os.path.split(os.path.realpath(__file__))[0]
this_path = this_file_path
root_path = this_file_path
while this_path:
    if os.path.exists(os.path.join(this_path, 'sosweety_root_anchor.py')):
        root_path = this_path
        break
    par_path = os.path.dirname(this_path)
    # print(par_path)
    if par_path == this_path:
        break
    else:
        this_path = par_path
sys.path.append(root_path)

from modules.sParser.sParser import sParser
from modules.knowledgebase.kb import KnowledgeBase

train_dir = 'data/train_zh_wiki'
train_dir = os.path.join(root_path, train_dir)
if not os.path.exists(train_dir):
    os.makedirs(train_dir)

# Write out the parse result file
parse_result_dir = 'parse_result'
parse_result_dir = os.path.join(train_dir, parse_result_dir)
if not os.path.exists(parse_result_dir):
    os.makedirs(parse_result_dir)

pos_tags_file_name = 'pos_tags_file'
pos_tags_file_path = os.path.join(parse_result_dir, pos_tags_file_name)

KB = KnowledgeBase()
parser = sParser(KB)

with open(pos_tags_file_path, 'w') as pos_tags_file:
    # Open the corpus file
    file_path = 'data/corpus/zh_wiki/wiki_test'
    file_path = os.path.join(root_path, file_path)
    file = open(file_path)
    line = file.readline()
    count = 0
    while line:
        count += 1
        if count % 5000 == 0:
            print('parsed %s sentences' % count)
        text = line.strip()
        try:
            ss_pos_tags = parser.text2ss_pos_tags(text)
            for pos_tags in ss_pos_tags:
                pos_tags_file.write(json.dumps(pos_tags, ensure_ascii=False) + '\n')
        except Exception:
            print('line %s decode error' % count)
        line = file.readline()
    file.close()
1.710938
2
timm/models/layers/__init__.py
kkahatapitiya/pytorch-image-models
0
1040
from .activations import * from .adaptive_avgmax_pool import \ adaptive_avgmax_pool2d, select_adaptive_pool2d, AdaptiveAvgMaxPool2d, SelectAdaptivePool2d from .blur_pool import BlurPool2d from .classifier import ClassifierHead, create_classifier from .cond_conv2d import CondConv2d, get_condconv_initializer from .config import is_exportable, is_scriptable, is_no_jit, set_exportable, set_scriptable, set_no_jit,\ set_layer_config from .conv2d_same import Conv2dSame, conv2d_same from .conv_bn_act import ConvBnAct from .create_act import create_act_layer, get_act_layer, get_act_fn from .create_attn import get_attn, create_attn from .create_conv2d import create_conv2d from .create_norm_act import get_norm_act_layer, create_norm_act, convert_norm_act from .drop import DropBlock2d, DropPath, drop_block_2d, drop_path from .eca import EcaModule, CecaModule, EfficientChannelAttn, CircularEfficientChannelAttn from .evo_norm import EvoNormBatch2d, EvoNormSample2d from .gather_excite import GatherExcite from .global_context import GlobalContext from .helpers import to_ntuple, to_2tuple, to_3tuple, to_4tuple, make_divisible from .inplace_abn import InplaceAbn from .involution import Involution from .linear import Linear from .mixed_conv2d import MixedConv2d from .mlp import Mlp, GluMlp, GatedMlp, ConvMlpGeneral, ConvMlpGeneralv2 from .non_local_attn import NonLocalAttn, BatNonLocalAttn from .norm import GroupNorm, LayerNorm2d from .norm_act import BatchNormAct2d, GroupNormAct from .padding import get_padding, get_same_padding, pad_same from .patch_embed import PatchEmbed from .pool2d_same import AvgPool2dSame, create_pool2d from .squeeze_excite import SEModule, SqueezeExcite, EffectiveSEModule, EffectiveSqueezeExcite from .selective_kernel import SelectiveKernel from .separable_conv import SeparableConv2d, SeparableConvBnAct from .space_to_depth import SpaceToDepthModule from .split_attn import SplitAttn from .split_batchnorm import SplitBatchNorm2d, convert_splitbn_model from .std_conv import StdConv2d, StdConv2dSame, ScaledStdConv2d, ScaledStdConv2dSame from .test_time_pool import TestTimePoolHead, apply_test_time_pool from .weight_init import trunc_normal_, variance_scaling_, lecun_normal_
1.28125
1
sztuczna_inteligencja/3-lab/backtrackingSolve.py
Magikis/Uniwersity
12
1048
# import cProfile # import pstats # import io from picture import * # pr = cProfile.Profile() # pr.enable() def out(p): for i in range(2): print([len(x) for x in p.perms[i]]) if __name__ == '__main__': p = Picture() p.genPerms() p.detuctAll() p.backtrackLoop() p.saveOtput() # pr.disable() # s = io.StringIO() # sortby = 'cumulative' # ps = pstats.Stats(pr, stream=s).sort_stats(sortby) # ps.print_stats() # print(s.getvalue())
0.992188
1
algorithm_training/abc87.py
hirotosuzuki/algorithm_training
0
1064
class TaskA: def run(self): V, A, B, C = map(int, input().split()) pass class TaskB: def run(self): A = int(input()) B = int(input()) C = int(input()) X = int(input()) counter = 0 for a in range(A+1): for b in range(B+1): for c in range(C+1): total = 500 * a + 100 * b + 50 * c if total == X: counter += 1 print(counter) class TaskC: def run(self): pass if __name__ == "__main__": task = TaskB() task.run()
2.609375
3
implementations/python3/tests/CAPDU.py
sebastien-riou/SATL
4
1120
import os import pysatl from pysatl import CAPDU if __name__ == "__main__": def check(hexstr, expected): capdu = CAPDU.from_hexstr(hexstr) if capdu != expected: raise Exception("Mismatch for input '"+hexstr+"'\nActual: "+str(capdu)+"\nExpected: "+str(expected)) def gencase(* ,LC ,LE): assert(LC < 0x10000) assert(LE <= 0x10000) data = os.getrandom(LC) hexstr = "00112233" case4 = LC>0 and LE>0 case4e = case4 and (LC>0xFF or LE>0x100) if LC>0: if LC>0xFF or case4e: hexstr += "00%04X"%LC else: hexstr += "%02X" % LC hexstr += pysatl.Utils.hexstr(data, separator="") if LE>0: if case4e: if LE == 0x10000: hexstr += "0000" else: hexstr += "%04X"%LE elif LE == 0x10000: hexstr += "000000" elif LE>0x100: hexstr += "00%04X"%LE elif LE == 0x100: hexstr += "00" else: hexstr += "%02X" % LE expected = hexstr capdu = CAPDU(CLA=0x00, INS=0x11, P1=0x22, P2=0x33, DATA=data, LE=LE) hexstr = capdu.to_hexstr() if hexstr != expected: raise Exception("Mismatch for LC=%d, LE=%d"%(LC,LE)+"\nActual: "+hexstr+"\nExpected: "+expected) b = capdu.to_bytes() assert(type(b) is bytes) return (hexstr, capdu) #check __repr__ expected = "pysatl.CAPDU.from_hexstr('00112233015502')" capdu=None exec("capdu="+expected) assert(expected==repr(capdu)) #check well formed inputs check("00112233", CAPDU(CLA=0x00, INS=0x11, P1=0x22, P2=0x33)) check("00 11 22 33", CAPDU(CLA=0x00, INS=0x11, P1=0x22, P2=0x33)) check("0x00,0x11,0x22,0x33", CAPDU(CLA=0x00, INS=0x11, P1=0x22, P2=0x33)) #check we tolerate less well formed inputs check("00-11,22_33", CAPDU(CLA=0x00, INS=0x11, P1=0x22, P2=0x33)) check("""0x00 0x11 0x22 0x33""", CAPDU(CLA=0x00, INS=0x11, P1=0x22, P2=0x33)) check("1 2 304", CAPDU(CLA=0x01, INS=0x02, P1=0x03, P2=0x04)) LC_cases = [0,1,2,254,255,256,257,65534,65535] LE_cases = LC_cases + [65536] for LC in LC_cases: for LE in LE_cases: print(LC,LE) check(*gencase(LC=LC, LE=LE))
2.046875
2
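For orientation, the sketch below shows the two encodings that gencase exercises, using only calls that already appear in the test above (CAPDU and to_hexstr); the specific CLA/INS/P1/P2 values and lengths are arbitrary examples.

from pysatl import CAPDU

# Case 4 short: LC fits in one byte and LE <= 0x100, so both length fields are single bytes.
short = CAPDU(CLA=0x00, INS=0x11, P1=0x22, P2=0x33, DATA=bytes(2), LE=3)
print(short.to_hexstr())

# Case 4 extended: LE > 0x100 forces the extended form; LC is encoded as 00 plus two bytes and LE as two bytes.
extended = CAPDU(CLA=0x00, INS=0x11, P1=0x22, P2=0x33, DATA=bytes(2), LE=0x101)
print(extended.to_hexstr())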
Systerm/meta.py
ZytroCode/Systerm
1
1136
"""Meta is a module contains objects that will customize the behavior of python.""" from abc import ABC from abc import ABCMeta from abc import abstractmethod from typing import Any from typing import Callable import Systerm # Metaclass class Metaclass(ABCMeta): """A metaclass to customize the behavior of all classes.""" def __new__(self, name: str, bases: tuple[type, ...], attrs: dict[str, Any], **keys: Any) -> type: """The static constructor for the Metaclass. Parameters: name - str The name of the class bases - tuple[type, ...] A tuple of classes to inherit attrs - dict[str, Any] A dictionary of attributes **keys - Any Keyword arguments to pass in """ # Creating a new class cls = super().__new__(self, name, bases, dict(attrs), **keys) cls.__setattr__ = self.setattr # Custom magic methods cls.__namespaces__ = {} cls.__magics__ = {} cls.__attributes__ = {} cls.__publics__ = {} cls.__privates__ = {} cls.__protecteds__ = {} # Setting objects for name in dir(cls): value = getattr(cls, name) # Adds attributes to __magics__ if name.startswith("__") and name.endswith("__"): cls.__magics__[name] = value # Adds attributes to other namespace else: # Adds attributes to __privates__ if name.startswith("__"): cls.__privates__[name] = value # Adds attributes to __protecteds__ elif name.startswith("_"): cls.__protecteds__[name] = value # Adds attributes to __publics__ else: cls.__publics__[name] = value cls.__attributes__[name] = value # Adds attributes to namespace cls.__namespaces__[name] = value return cls def setattr(self, name: str, value: object) -> None: # Adds attributes to __magics__ if name.startswith("__") and name.endswith("__"): self.__magics__[name] = value # Adds attributes to other namespace else: # Adds attributes to __privates__ if name.startswith("__"): self.__privates__[name] = value # Adds attributes to __protecteds__ elif name.startswith("_"): self.__protecteds__[name] = value # Adds attributes to __publics__ else: self.__publics__[name] = value self.__attributes__[name] = value # Adds attributes to namespace self.__namespaces__[name] = value # Object class class Object(object, metaclass=Metaclass): pass # List class class List(list, metaclass=Metaclass): pass # Dictionary class class Dictionary(dict, metaclass=Metaclass): def __getattr__(self, name: str) -> None: try: return self[name] except KeyError as e: try: return super().__getattr__(name) except AttributeError: raise e def __setattr__(self, name: str, value: object) -> None: self[name] = value # Recreating ABC ABC = Metaclass(ABC.__name__, ABC.__bases__, {name: getattr(ABC, name) for name in dir(ABC)}) def get_namespaces(object: Object) -> Dictionary: """Gets the namespaces of an object.""" return object.__namespaces__ def get_magics(object: Object) -> Dictionary: """Gets the magic methods of an object.""" return object.__magics__ def get_attributes(object: Object) -> Dictionary: """Gets the attributes of an object.""" return object.__attributes__ def get_publics(object: Object) -> Dictionary: """Gets the public namespaces of an object.""" return object.__publics__ def get_privates(object: Object) -> Dictionary: """Gets the private namespaces of an object.""" return object.__privates__ def get_protecteds(object: Object) -> Dictionary: """Gets the protected namespaces of an object.""" return object.__protecteds__ # Initializing Systerm.module from Systerm._setup import init_module module = init_module() # MetaMod class class MetaMod(module.Module): pass module.modules[__name__].__class__ = MetaMod
2.890625
3
python/delta/tests/test_exceptions.py
vibhaska/delta
1
1144
# # Copyright (2020) The Delta Lake Project Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # import unittest import delta.exceptions as exceptions from delta.testing.utils import DeltaTestCase class DeltaExceptionTests(DeltaTestCase): def _raise_concurrent_exception(self, exception_type): e = exception_type("") self.spark.sparkContext._jvm.scala.util.Failure(e).get() def test_capture_concurrent_write_exception(self): e = self.spark._jvm.io.delta.exceptions.ConcurrentWriteException self.assertRaises(exceptions.ConcurrentWriteException, lambda: self._raise_concurrent_exception(e)) def test_capture_metadata_changed_exception(self): e = self.spark._jvm.io.delta.exceptions.MetadataChangedException self.assertRaises(exceptions.MetadataChangedException, lambda: self._raise_concurrent_exception(e)) def test_capture_protocol_changed_exception(self): e = self.spark._jvm.io.delta.exceptions.ProtocolChangedException self.assertRaises(exceptions.ProtocolChangedException, lambda: self._raise_concurrent_exception(e)) def test_capture_concurrent_append_exception(self): e = self.spark._jvm.io.delta.exceptions.ConcurrentAppendException self.assertRaises(exceptions.ConcurrentAppendException, lambda: self._raise_concurrent_exception(e)) def test_capture_concurrent_delete_read_exception(self): e = self.spark._jvm.io.delta.exceptions.ConcurrentDeleteReadException self.assertRaises(exceptions.ConcurrentDeleteReadException, lambda: self._raise_concurrent_exception(e)) def test_capture_concurrent_delete_delete_exception(self): e = self.spark._jvm.io.delta.exceptions.ConcurrentDeleteDeleteException self.assertRaises(exceptions.ConcurrentDeleteDeleteException, lambda: self._raise_concurrent_exception(e)) def test_capture_concurrent_transaction_exception(self): e = self.spark._jvm.io.delta.exceptions.ConcurrentTransactionException self.assertRaises(exceptions.ConcurrentTransactionException, lambda: self._raise_concurrent_exception(e)) if __name__ == "__main__": try: import xmlrunner testRunner = xmlrunner.XMLTestRunner(output='target/test-reports', verbosity=4) except ImportError: testRunner = None unittest.main(testRunner=testRunner, verbosity=4)
1.382813
1
soil/build/lib/soil/openstack/snapshot.py
JackDan9/soil
1
1152
# Copyright 2020 Soil, Inc. from soil.openstack.base import DataBase from soil.openstack.base import SourceBase class SnapshotData(DataBase): """A class for openstack snapshot data""" def __init__(self, data): self.data = data['snapshot'] class Snapshot(SourceBase): """A class for openstack snapshot""" def __init__(self, plugin, source_id): super(Snapshot, self).__init__(plugin, source_id) self._snapshot_obj = None @property def snapshot_obj(self): if self._snapshot_obj is not None: return self._snapshot_obj self._snapshot_obj = SnapshotData(self.show()) return self._snapshot_obj def show(self): return self.plugin.cinder.show_snapshot(self.source_id) def delete(self): self.plugin.cinder.delete_snapshot(self.source_id) def is_created(self): snapshot_info = self.show() status = snapshot_info['snapshot']['status'] if status in ('available', ): return True self._check_failed_status(status) return False def is_delete(self): pass
1.429688
1
etl/load/elasticsearch.py
bilalelhoudaigui/plant-brapi-etl-data-lookup-gnpis
3
1216
# Load json bulk files into elasticsearch import json import os import time import traceback import elasticsearch from etl.common.store import list_entity_files from etl.common.utils import get_folder_path, get_file_path, create_logger, first, replace_template class ElasticSearchException(Exception): pass # Init Elasticsearch and test connection def init_es_client(url, logger): es_client = elasticsearch.Elasticsearch([url]) try: info = es_client.info() logger.debug('Connected to node "{}" of cluster "{}" on "{}"'.format(info['name'], info['cluster_name'], url)) except elasticsearch.exceptions.ConnectionError as e: logger.error('Connection error: Elasticsearch unavailable on "{}".\nPlease check your configuration'.format(url)) raise e return es_client def check_error(response): if response.get('errors'): raise ElasticSearchException(response) def create_index(es_client, index_name, logger): logger.debug('Creating index "{}"...'.format(index_name)) check_error(es_client.indices.create(index_name)) def delete_index(es_client, index_name, logger): logger.debug('Deleting index "{}"...'.format(index_name)) check_error(es_client.indices.delete(index_name)) def create_template(es_client, es_config, document_type, base_index_name, logger): template_name = 'template_elixir_' + base_index_name template_pattern = base_index_name + '-d*' mapping = es_config['document-mappings'].get(document_type+"_mapping") if not mapping: return logger.debug('Creating template "{}" on pattern "{}"...'.format(template_name, template_pattern)) template_body = {'template': template_pattern, 'mappings': mapping} if 'index-settings' in es_config: template_body['settings'] = es_config['index-settings'] check_error(es_client.indices.put_template(name=template_name, body=template_body)) def bulk_index(es_client, index_name, file_path, logger): file_name = os.path.basename(file_path) logger.debug('Bulk indexing file "{}" in index "{}"...'.format(file_name, index_name)) with open(file_path, 'r') as file: check_error(es_client.bulk(index=index_name, body=file.read(), timeout='2000ms')) def create_alias(es_client, alias_name, base_index_name, logger): logger.debug('Creating alias "{}" for index "{}"'.format(alias_name, base_index_name)) check_error(es_client.indices.put_alias(alias_name, base_index_name)) def get_indices(es_client, base_index_name): indices = es_client.cat.indices(base_index_name + '-d*', params={'h': 'index'}) index_names = list(map(lambda i: i['index'], indices)) index_names.sort(reverse=True) return index_names def load_source(source, config, source_bulk_dir, log_dir): """ Full Elasticsearch documents indexing """ source_name = source['schema:identifier'] action = 'load-elasticsearch-' + source_name log_file = get_file_path([log_dir, action], ext='.log', recreate=True) logger = create_logger(source_name, log_file, config['options']['verbose']) load_config = config['load-elasticsearch'] es_client = init_es_client(load_config['url'], logger) logger.info("Loading '{}' into elasticsearch '{}'...".format(source_bulk_dir, load_config['url'])) try: if not os.path.exists(source_bulk_dir): raise FileNotFoundError( 'No such file or directory: \'{}\'.\n' 'Please make sure you have run the BrAPI extraction and Elasticsearch document transformation' ' before trying to launch the transformation process.' 
.format(source_bulk_dir)) bulk_files = list(list_entity_files(source_bulk_dir)) all_document_types = set(map(first, bulk_files)) document_types = load_config.get('document-types') or all_document_types document_types = document_types.intersection(all_document_types) index_by_document = dict() logger.info("Preparing index with template mapping...") timestamp = int(time.time()) for document_type in document_types: base_index_name = replace_template( load_config['index-template'], {'source': source['schema:identifier'], 'documentType': document_type} ).lower() create_template(es_client, load_config, document_type, base_index_name, logger) index_name = base_index_name + '-d' + str(timestamp) create_index(es_client, index_name, logger) index_by_document[document_type] = base_index_name, index_name logger.info("Bulk indexing...") for document_type, file_path in bulk_files: if document_type in index_by_document: base_index_name, index_name = index_by_document[document_type] bulk_index(es_client, index_name, file_path, logger) logger.info("Creating index aliases and deleting old indices...") for document_type, (base_index_name, index_name) in index_by_document.items(): create_alias(es_client, index_name, base_index_name, logger) new_index, *old_indices = get_indices(es_client, base_index_name) for old_index in old_indices[1:]: delete_index(es_client, old_index, logger) logger.info("SUCCEEDED Loading {}.".format(source_name)) except Exception as e: logger.debug(traceback.format_exc()) logger.debug(getattr(e, 'long_message', '')) logger.info("FAILED Loading {} Elasticsearch documents.\n" "=> Check the logs ({}) for more details." .format(source_name, log_file)) def main(config): log_dir = config['log-dir'] bulk_dir = os.path.join(config['data-dir'], 'json-bulk') if not os.path.exists(bulk_dir): raise Exception('No json bulk folder found in ' + bulk_dir) sources = config['sources'] for (source_name, source) in sources.items(): source_bulk_dir = get_folder_path([bulk_dir, source_name]) load_source(source, config, source_bulk_dir, log_dir)
1.609375
2
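bulk_index() above streams a pre-built file straight into es_client.bulk, so each bulk file has to follow Elasticsearch's NDJSON convention of alternating action and source lines. The sketch below writes such a file by hand; the document fields and the germplasm.json name are illustrative, not the real transformed BrAPI output.

import json

docs = [{"germplasmDbId": "g1", "name": "Example 1"},
        {"germplasmDbId": "g2", "name": "Example 2"}]

with open("germplasm.json", "w") as bulk_file:
    for doc in docs:
        bulk_file.write(json.dumps({"index": {"_id": doc["germplasmDbId"]}}) + "\n")  # action line
        bulk_file.write(json.dumps(doc) + "\n")                                       # source line

# bulk_index(es_client, index_name, "germplasm.json", logger) would then send the file body as-is.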
drybell/drybell_lfs_spark.py
jsnlp/snorkel-tutorials
315
1224
from pyspark.sql import Row from snorkel.labeling.lf import labeling_function from snorkel.labeling.lf.nlp_spark import spark_nlp_labeling_function from snorkel.preprocess import preprocessor from drybell_lfs import load_celebrity_knowledge_base ABSTAIN = -1 NEGATIVE = 0 POSITIVE = 1 @preprocessor() def combine_text(x): return Row(title=x.title, body=x.body, article=f"{x.title} {x.body}") @spark_nlp_labeling_function(text_field="article", pre=[combine_text]) def article_mentions_person(x): for ent in x.doc.ents: if ent.label_ == "PERSON": return ABSTAIN return NEGATIVE @spark_nlp_labeling_function( text_field="article", pre=[combine_text], resources=dict(celebrity_knowledge_base=load_celebrity_knowledge_base()), ) def person_in_db(x, celebrity_knowledge_base): for ent in x.doc.ents: if ent.label_ == "PERSON" and ent.text.lower() in celebrity_knowledge_base: return POSITIVE return ABSTAIN @labeling_function() def body_contains_fortune(x): return POSITIVE if "fortune" in x.body else ABSTAIN
1.875
2
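A minimal way to exercise the plain labeling function above outside Spark, assuming the module is importable as drybell_lfs_spark (per this record's path); the toy DataFrame is made up, and the two spark_nlp LFs are left out because they need a Spark DataFrame with a spaCy doc column.

import pandas as pd
from snorkel.labeling import PandasLFApplier
from drybell_lfs_spark import body_contains_fortune

df = pd.DataFrame({
    "title": ["A quiet week", "Rags to riches"],
    "body": ["Nothing happened.", "She made a fortune in tech."],
})

applier = PandasLFApplier(lfs=[body_contains_fortune])
L = applier.apply(df)   # one column per LF: ABSTAIN (-1) for the first row, POSITIVE (1) for the second
print(L)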
tests/test_structure_learning.py
thunderbug1/pyanom
0
1240
import io import unittest import numpy as np class TestGraphicalLasso(unittest.TestCase): """Basic test cases.""" def _getTarget(self): from pyanom.structure_learning import GraphicalLasso return GraphicalLasso def _makeOne(self, *args, **kwargs): return self._getTarget()(*args, **kwargs) @classmethod def setUpClass(self): self.X_normal = np.array([[0.975586009, -0.745997359, -0.229331244], [-0.460992487, -1.304668238, -0.599247488], [-0.503171745, -1.308368748, -1.451411048], [-0.904446243, -0.287837582, 0.197153592], [-1.106120624, 0.243612535, 1.051237763], [0.371920628, 1.690566027, -0.468645532], [-0.861682655, 1.472544046, -0.846863556], [0.632918214, 1.35895507, -1.217528827], [0.017011646, 1.556247275, -0.149119024], [-1.129336215, 0.486811944, 0.012272206], [0.498967152, -0.530065628, -2.14011938], [0.402460108, -0.474465633, -0.041584595], [-0.847994655, -1.281269721, -0.430338406], [-0.583857254, 0.228815073, -1.321443286], [0.963425438, -1.136873938, 0.990406269], [-1.342349795, -0.147133485, 1.286410605], [-0.546153552, 0.134343445, -0.380672316], [-2.264867999, 0.227795362, 1.477762968], [0.070095074, -0.770899782, 2.100831522], [0.425213005, 0.796156033, 1.676164975]]) self.X_error = np.array([[-0.273095586, 0.356336588, 1.595876828], [-0.708547003, -0.572139833, 0.858932219], [-1.125947228, -1.049026454, 0.35980022], [0.653070988, -0.052417831, 0.787284547], [-1.059131881, 1.621161051, -1.295306533], [0.499065038, -1.064179225, 1.243325767], [0.452740621, -0.737171777, 0.352807563], [0.626897927, -1.100559392, -0.905560876], [1.338835274, 2.083549348, -1.280796042], [0.264928015, 10, 2.544472412], [-0.754827534, -1.031919195, 1.227285333], [-0.774019674, 0.241245625, -0.989132941], [1.298381426, 0.19445334, 2.267355363], [1.46892843, 1.24946146, 0.322341667], [1.057265661, -0.846614104, -0.355396321], [0.810670486, -0.719804484, -0.943762163], [1.169028226, 0.492444331, 0.234015505], [-0.307091024, -1.56195639, 0.509095939], [0.849156845, 0.533674261, 0.069183014], [0.102812565, 8, 1.545239732]]) def test_outlier_analysis_score_shape(self): target = self._makeOne() target.fit(self.X_normal) pred = target.outlier_analysis_score(self.X_error) self.assertEqual(pred.shape, (20, 3)) def test_incorrect_feature_size(self): X_normal = np.array([-0.056523959, - 0.881470896, -0.249935965, 0.186624902, -0.30183287, 2.000815584, 0.710538188, 0.591089702, 0.099804538, 0.114730483]).reshape(-1, 1) X_error = np.array([0.660985506, -1.450512173, -1.27733756, -1.420294211, 0.737179562, 1.481425898, -0.170147132, -1.527687346, 0.580282631, -3.722489636]).reshape(-1, 1) target = self._makeOne() with self.assertRaises(ValueError): target.fit(X_normal) def test_anomaly_analysis_score_shape(self): target = self._makeOne() target.fit(self.X_normal) pred, pmatrix = target.anomaly_analysis_score(self.X_error) self.assertEqual(pred.shape, (3, )) self.assertEqual(pmatrix.shape, (3, 3)) if __name__ == '__main__': unittest.main()
1.390625
1
consumer/tests/test__index_handler.py
eHealthAfrica/aether-elasticsearch-consumer
0
1256
# Copyright (C) 2019 by eHealth Africa : http://www.eHealthAfrica.org # # See the NOTICE file distributed with this work for additional information # regarding copyright ownership. # # Licensed under the Apache License, Version 2.0 (the 'License'); # you may not use this file except in compliance with # the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. import json import pytest import requests import responses from time import sleep from elasticsearch.exceptions import NotFoundError from aet.logger import get_logger from app import index_handler from . import * # noqa # fixtures LOG = get_logger('TEST-IDX') # convenience function for jsonpath @responses.activate @pytest.mark.unit def test__handle_http(): responses.add( responses.GET, 'http://bad-url', json={'error': 'not found'}, status=404 ) res = requests.get('http://bad-url') with pytest.raises(requests.exceptions.HTTPError): index_handler.handle_http(res) @pytest.mark.unit def test__get_es_index_from_autoconfig(SubscriptionDefinition, ComplexSchema): es_options = SubscriptionDefinition.get('es_options') tenant = 'dev' name = 'a-topic' alias = es_options.get('alias_name') index = index_handler.get_es_index_from_subscription( es_options, name, tenant, ComplexSchema ) LOG.debug(json.dumps(index, indent=2)) assert(first('$.name', index) == f'{tenant}.{name}') geo_name = es_options['geo_point_name'] assert(first( f'$.body.mappings._doc.properties.{geo_name}', index) is not None) @pytest.mark.unit def test__get_index_for_topic(SubscriptionDefinition, ComplexSchema): name = 'Person' es_options = SubscriptionDefinition.get('es_options') geo_name = es_options.get('geo_point_name') auto_ts = es_options.get('auto_timestamp') index = index_handler.get_index_for_topic(name, geo_name, auto_ts, ComplexSchema) index = index.get('mappings', None) assert(len(index) == 1) assert(first('$._doc', index) is not None) assert(first(f'$._doc.properties.{geo_name}.type', index) == 'geo_point') assert(first(f'$._doc._meta.aet_auto_ts', index) == auto_ts) @pytest.mark.unit def test__get_es_types_from_schema(ComplexSchema): res = index_handler.get_es_types_from_schema(ComplexSchema) assert(first('$.beds.type', res) == 'integer') assert(first('$.username.type', res) == 'keyword') assert(first('$._start.type', res) == 'date') assert(first('$.geometry.type', res) == 'object') assert(first('$.meta.type', res) == 'object') assert(first('$.mandatory_date.type', res) == 'date') assert(first('$.mandatory_date.format', res) == 'date') assert(first('$.optional_dt.type', res) == 'date') assert(first('$.optional_dt.format', res) == 'epoch_millis') assert(len(list(res.keys())) == 55) @pytest.mark.unit def test__make_kibana_index(AutoGenSchema): name = 'kibana-index-name' res = index_handler.make_kibana_index(name, AutoGenSchema) assert(res.get('attributes', {}).get('title') == name) @pytest.mark.unit def test___find_timestamp(ComplexSchema): result = index_handler._find_timestamp(ComplexSchema) assert(result == 'timestamp') @pytest.mark.unit def test___format_lookups(ComplexSchema): formatted = index_handler._format_lookups(ComplexSchema) assert( json.dumps( formatted.get( 'operational_status'), sort_keys=True) == 
json.dumps( SAMPLE_FIELD_LOOKUP.get( 'operational_status'), sort_keys=True) ) @pytest.mark.unit def test___format_single_lookup(ComplexSchema): matching = ComplexSchema.get_node('MySurvey.operational_status') res = index_handler._format_single_lookup(matching) assert( json.dumps(res, sort_keys=True) == json.dumps(SAMPLE_FIELD_LOOKUP.get( 'operational_status'), sort_keys=True) ) @pytest.mark.unit def test__get_alias_from_namespace(): namespace = 'A_Gather_Form_V1' res = index_handler.get_alias_from_namespace(namespace) assert(res == 'A_Gather_Form') @pytest.mark.integration def test__update_es_index(TestElasticsearch, PolySchemaA, PolySchemaB): # register index with mapping es = TestElasticsearch.get_session() doc_id = 'poly-test-doc' doc = { 'id': doc_id, 'poly': '1001' } index_a = index_handler.get_es_index_from_subscription( es_options={}, name='test1', tenant='test-tenant', schema=PolySchemaA ) index_name = index_a.get('name') index_b = index_handler.get_es_index_from_subscription( es_options={}, name='test1', tenant='test-tenant', schema=PolySchemaB ) alias = index_handler.get_alias_from_namespace(PolySchemaA.name) # register schema A index_handler.update_es_index(es, index_a, 'test-tenant', alias) # put doc es.create( index=index_name, id=doc_id, body=doc ) es.indices.refresh(index=index_name) res = es.search(index=index_name, body={ "query": {"term": {"poly": "1001"}} }) assert(res.get('hits').get('max_score') < 1.0) # find imperfect by string res = es.search(index=index_name, body={ "query": {"term": {"poly": 1001}} }) assert(res.get('hits').get('max_score') < 1.0) # find imperfect by string # migrate to schema B index_handler.update_es_index(es, index_b, 'test-tenant', alias) es.indices.refresh(index=index_name) res = es.search(index=index_name, body={ "query": {"term": {"poly": "1001"}} }) assert(res.get('hits').get('max_score') == 1.0) # find by string res = es.search(index=index_name, body={ "query": {"term": {"poly": 1001}} }) assert(res.get('hits').get('max_score') == 1.0) # find by int
1.515625
2
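The eHealth Africa test module above checks index_handler results through a first(...) helper that arrives via `from . import *`; the stray "# convenience function for jsonpath" comment refers to it, but the helper itself is not part of this record. A minimal sketch of what such a helper could look like, assuming the jsonpath-ng package and that first() simply returns the first match value or None (the real fixture module may differ):

# Hypothetical jsonpath convenience helper; only illustrates the call pattern
# used above, e.g. first('$.name', index). Assumes the jsonpath-ng package.
from jsonpath_ng.ext import parse

def first(path, obj):
    # Return the value of the first jsonpath match, or None when nothing matches.
    matches = parse(path).find(obj)
    return matches[0].value if matches else None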
First_course/test5_base.py
laetrid/learning
0
1264
#!/usr/bin/env python sw1_show_cdp_neighbors = ''' SW1>show cdp neighbors Capability Codes: R - Router, T - Trans Bridge, B - Source Route Bridge S - Switch, H - Host, I - IGMP, r - Repeater, P - Phone Device ID Local Intrfce Holdtme Capability Platform Port ID R1 Fas 0/11 153 R S I 881 Fas 1 R2 Fas 0/12 123 R S I 881 Fas 1 R3 Fas 0/13 129 R S I 881 Fas 1 R4 Fas 0/14 173 R S I 881 Fas 1 R5 Fas 0/15 144 R S I 881 Fas 1 ''' sw1_show_cdp_neighbors_detail = ''' SW1> show cdp neighbors detail -------------------------- Device ID: R1 Entry address(es): IP address: 10.1.1.1 Platform: Cisco 881, Capabilities: Router Switch IGMP Interface: FastEthernet0/11, Port ID (outgoing port): FastEthernet1 Holdtime: 153 sec Version : Cisco IOS Software, C880 Software (C880DATA-UNIVERSALK9-M), Version 15.0(1)M4, RELEASE SOFTWARE (fc1) Technical Support: http://www.cisco.com/techsupport Copyright (c) 1986-2010 by Cisco Systems, Inc. Compiled Fri 29-Oct-10 00:02 by prod_rel_team advertisement version: 2 VTP Management Domain: '' Native VLAN: 1 Duplex: full Management address(es): -------------------------- Device ID: R2 Entry address(es): IP address: 10.1.1.2 Platform: Cisco 881, Capabilities: Router Switch IGMP Interface: FastEthernet0/12, Port ID (outgoing port): FastEthernet1 Holdtime: 123 sec Version : Cisco IOS Software, C880 Software (C880DATA-UNIVERSALK9-M), Version 15.0(1)M4, RELEASE SOFTWARE (fc1) Technical Support: http://www.cisco.com/techsupport Copyright (c) 1986-2010 by Cisco Systems, Inc. Compiled Fri 29-Oct-10 00:02 by prod_rel_team advertisement version: 2 VTP Management Domain: '' Native VLAN: 1 Duplex: full Management address(es): -------------------------- Device ID: R3 Entry address(es): IP address: 10.1.1.3 Platform: Cisco 881, Capabilities: Router Switch IGMP Interface: FastEthernet0/13, Port ID (outgoing port): FastEthernet1 Holdtime: 129 sec Version : Cisco IOS Software, C880 Software (C880DATA-UNIVERSALK9-M), Version 15.0(1)M4, RELEASE SOFTWARE (fc1) Technical Support: http://www.cisco.com/techsupport Copyright (c) 1986-2010 by Cisco Systems, Inc. Compiled Fri 29-Oct-10 00:02 by prod_rel_team advertisement version: 2 VTP Management Domain: '' Native VLAN: 1 Duplex: full Management address(es): -------------------------- Device ID: R4 Entry address(es): IP address: 10.1.1.4 Platform: Cisco 881, Capabilities: Router Switch IGMP Interface: FastEthernet0/14, Port ID (outgoing port): FastEthernet1 Holdtime: 173 sec Version : Cisco IOS Software, C880 Software (C880DATA-UNIVERSALK9-M), Version 15.0(1)M4, RELEASE SOFTWARE (fc1) Technical Support: http://www.cisco.com/techsupport Copyright (c) 1986-2010 by Cisco Systems, Inc. Compiled Fri 29-Oct-10 00:02 by prod_rel_team advertisement version: 2 VTP Management Domain: '' Native VLAN: 1 Duplex: full Management address(es): -------------------------- Device ID: R5 Entry address(es): IP address: 10.1.1.5 Platform: Cisco 881, Capabilities: Router Switch IGMP Interface: FastEthernet0/15, Port ID (outgoing port): FastEthernet1 Holdtime: 144 sec Version : Cisco IOS Software, C880 Software (C880DATA-UNIVERSALK9-M), Version 15.0(1)M4, RELEASE SOFTWARE (fc1) Technical Support: http://www.cisco.com/techsupport Copyright (c) 1986-2010 by Cisco Systems, Inc. 
Compiled Fri 29-Oct-10 00:02 by prod_rel_team advertisement version: 2 VTP Management Domain: '' Native VLAN: 1 Duplex: full Management address(es): ''' r1_show_cdp_neighbors = ''' R1>show cdp neighbors Capability Codes: R - Router, T - Trans Bridge, B - Source Route Bridge S - Switch, H - Host, I - IGMP, r - Repeater Device ID Local Intrfce Holdtme Capability Platform Port ID SW1 Fas 1 150 S I WS-C2950- Fas 0/11 ''' r1_show_cdp_neighbors_detail = ''' R1>show cdp neighbors detail ------------------------- Device ID: SW1 Entry address(es): IP address: 10.1.1.22 Platform: cisco WS-C2950-24, Capabilities: Switch IGMP Interface: FastEthernet1, Port ID (outgoing port): FastEthernet0/11 Holdtime : 145 sec Version : Cisco Internetwork Operating System Software IOS (tm) C2950 Software (C2950-I6Q4L2-M), Version 12.1(22)EA8a, RELEASE SOFTWARE (fc1) Copyright (c) 1986-2006 by cisco Systems, Inc. Compiled Fri 28-Jul-06 15:16 by weiliu advertisement version: 2 Protocol Hello: OUI=0x00000C, Protocol ID=0x0112; payload len=27, value=00000000FFFFFFFF010221FF0000000000000019E845CE80FF0000 VTP Management Domain: '' Native VLAN: 1 Duplex: full ''' r2_show_cdp_neighbors = ''' R2>show cdp neighbors Capability Codes: R - Router, T - Trans Bridge, B - Source Route Bridge S - Switch, H - Host, I - IGMP, r - Repeater Device ID Local Intrfce Holdtme Capability Platform Port ID SW1 Fas 1 150 S I WS-C2950- Fas 0/12 ''' r2_show_cdp_neighbors_detail = ''' R2>show cdp neighbors detail ------------------------- Device ID: SW1 Entry address(es): IP address: 10.1.1.22 Platform: cisco WS-C2950-24, Capabilities: Switch IGMP Interface: FastEthernet1, Port ID (outgoing port): FastEthernet0/12 Holdtime : 145 sec Version : Cisco Internetwork Operating System Software IOS (tm) C2950 Software (C2950-I6Q4L2-M), Version 12.1(22)EA8a, RELEASE SOFTWARE (fc1) Copyright (c) 1986-2006 by cisco Systems, Inc. Compiled Fri 28-Jul-06 15:16 by weiliu advertisement version: 2 Protocol Hello: OUI=0x00000C, Protocol ID=0x0112; payload len=27, value=00000000FFFFFFFF010221FF0000000000000019E845CE80FF0000 VTP Management Domain: '' Native VLAN: 1 Duplex: full ''' r3_show_cdp_neighbors = ''' R3>show cdp neighbors Capability Codes: R - Router, T - Trans Bridge, B - Source Route Bridge S - Switch, H - Host, I - IGMP, r - Repeater Device ID Local Intrfce Holdtme Capability Platform Port ID SW1 Fas 1 150 S I WS-C2950- Fas 0/13 ''' r3_show_cdp_neighbors_detail = ''' R3>show cdp neighbors detail ------------------------- Device ID: SW1 Entry address(es): IP address: 10.1.1.22 Platform: cisco WS-C2950-24, Capabilities: Switch IGMP Interface: FastEthernet1, Port ID (outgoing port): FastEthernet0/13 Holdtime : 145 sec Version : Cisco Internetwork Operating System Software IOS (tm) C2950 Software (C2950-I6Q4L2-M), Version 12.1(22)EA8a, RELEASE SOFTWARE (fc1) Copyright (c) 1986-2006 by cisco Systems, Inc. 
Compiled Fri 28-Jul-06 15:16 by weiliu advertisement version: 2 Protocol Hello: OUI=0x00000C, Protocol ID=0x0112; payload len=27, value=00000000FFFFFFFF010221FF0000000000000019E845CE80FF0000 VTP Management Domain: '' Native VLAN: 1 Duplex: full ''' r4_show_cdp_neighbors = ''' R4>show cdp neighbors Capability Codes: R - Router, T - Trans Bridge, B - Source Route Bridge S - Switch, H - Host, I - IGMP, r - Repeater Device ID Local Intrfce Holdtme Capability Platform Port ID SW1 Fas 1 150 S I WS-C2950- Fas 0/14 ''' r4_show_cdp_neighbors_detail = ''' R4>show cdp neighbors detail ------------------------- Device ID: SW1 Entry address(es): IP address: 10.1.1.22 Platform: cisco WS-C2950-24, Capabilities: Switch IGMP Interface: FastEthernet1, Port ID (outgoing port): FastEthernet0/14 Holdtime : 145 sec Version : Cisco Internetwork Operating System Software IOS (tm) C2950 Software (C2950-I6Q4L2-M), Version 12.1(22)EA8a, RELEASE SOFTWARE (fc1) Copyright (c) 1986-2006 by cisco Systems, Inc. Compiled Fri 28-Jul-06 15:16 by weiliu advertisement version: 2 Protocol Hello: OUI=0x00000C, Protocol ID=0x0112; payload len=27, value=00000000FFFFFFFF010221FF0000000000000019E845CE80FF0000 VTP Management Domain: '' Native VLAN: 1 Duplex: full ''' r5_show_cdp_neighbors = ''' R5>show cdp neighbors Capability Codes: R - Router, T - Trans Bridge, B - Source Route Bridge S - Switch, H - Host, I - IGMP, r - Repeater Device ID Local Intrfce Holdtme Capability Platform Port ID SW1 Fas 1 150 S I WS-C2950- Fas 0/15 ''' r5_show_cdp_neighbors_detail = ''' R5>show cdp neighbors detail ------------------------- Device ID: SW1 Entry address(es): IP address: 10.1.1.22 Platform: cisco WS-C2950-24, Capabilities: Switch IGMP Interface: FastEthernet1, Port ID (outgoing port): FastEthernet0/15 Holdtime : 145 sec Version : Cisco Internetwork Operating System Software IOS (tm) C2950 Software (C2950-I6Q4L2-M), Version 12.1(22)EA8a, RELEASE SOFTWARE (fc1) Copyright (c) 1986-2006 by cisco Systems, Inc. Compiled Fri 28-Jul-06 15:16 by weiliu advertisement version: 2 Protocol Hello: OUI=0x00000C, Protocol ID=0x0112; payload len=27, value=00000000FFFFFFFF010221FF0000000000000019E845CE80FF0000 VTP Management Domain: '' Native VLAN: 1 Duplex: full '''
1.078125
1
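The First_course/test5_base.py record above is pure fixture data: canned `show cdp neighbors` and `show cdp neighbors detail` outputs for SW1 and R1-R5, meant to be parsed by exercise code that is not part of this record. As a hedged illustration of how such fixtures are typically consumed, here is a small hypothetical parser that pulls the neighbor name, IP address, and local interface out of the detail output:

# Hypothetical consumer of the CDP fixtures above; the course's real exercise
# code is not shown in this record and may parse the output differently.
import re

DETAIL_RE = re.compile(
    r"Device ID: (?P<device>\S+).*?"
    r"IP address: (?P<ip>\S+).*?"
    r"Interface: (?P<local_if>[^,]+),",
    re.DOTALL,
)

def parse_cdp_detail(show_output):
    """Return (device, ip, local_interface) tuples, one per neighbor block."""
    return [
        (m.group("device"), m.group("ip"), m.group("local_if"))
        for m in DETAIL_RE.finditer(show_output)
    ]

# parse_cdp_detail(sw1_show_cdp_neighbors_detail)[0]
# -> ('R1', '10.1.1.1', 'FastEthernet0/11')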
racer/methods/genetic_programming/parameterized.py
max-eth/racer
1
1280
import copy import numpy as np from racer.utils import load_pickle from racer.methods.genetic_programming.program_tree import ProgramTree class ParameterizedTree(ProgramTree): # This makes the assumption that all children of the underlying tree are in a field .children and that the underlying tree has the field .name def __init__(self, underlying_tree, init_fct=None, _copy=True): if _copy: underlying_tree = copy.deepcopy(underlying_tree) # safety first if hasattr(underlying_tree, "children"): underlying_tree.children = [ ParameterizedTree(underlying_tree=child, _copy=False) for child in underlying_tree.children ] self.underlying_tree = underlying_tree if init_fct is None: self.set_params([1, 0]) else: self.set_params(init_fct()) def set_params(self, params): self.weight, self.bias = params self.name = self.underlying_tree.name + " * {} + {}".format( self.weight, self.bias ) def get_params(self): return [self.weight, self.bias] def __call__(self, *x): return self.underlying_tree(*x) * self.weight + self.bias def __len__(self): return len(self.underlying_tree) def display(self, prefix): res = prefix + self.name + "\n" if hasattr(self.underlying_tree, "children"): for child in self.underlying_tree.children: res += child.display(prefix=" " + prefix) return res def _set_dirty(self): raise Exception("Parameterized trees should not be mutated") def in_order(self): yield self if hasattr(self.underlying_tree, "children"): for child in self.underlying_tree.children: for node in child.in_order(): yield node class ParameterizedIndividual: def __init__(self, parameterized_trees): self.parameterized_trees = parameterized_trees @staticmethod def from_individual(ind): return ParameterizedIndividual( parameterized_trees=[ParameterizedTree(tree) for tree in ind.trees] ) @staticmethod def from_pickled_individual(fname): return ParameterizedIndividual.from_individual(load_pickle(fname)) def __call__(self, *x): return [tree(*x) for tree in self.parameterized_trees] def __len__(self): return sum(len(tree) for tree in self.parameterized_trees) def set_flat_parameters(self, params): n_used = 0 for tree in self.parameterized_trees: for node in tree.in_order(): node.set_params(list(params[n_used : n_used + 2])) n_used += 2 def get_flat_parameters(self): params = [] for tree in self.parameterized_trees: for node in tree.in_order(): params += node.get_params() return np.array(params)
2.09375
2
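ParameterizedTree and ParameterizedIndividual above wrap an existing genetic-programming tree so that every node carries a trainable weight/bias pair that can be read and written as one flat vector. A short usage sketch, assuming a minimal stand-in node with the .name / __call__ / __len__ interface the wrapper expects (the real trees come from racer's program_tree module):

# Minimal stand-in for a GP tree node: .name plus __call__/__len__ is enough
# for ParameterizedTree; real racer trees also expose .children.
class ConstNode:
    def __init__(self, value):
        self.name = "const({})".format(value)
        self.value = value

    def __call__(self, *x):
        return self.value

    def __len__(self):
        return 1


tree = ParameterizedTree(ConstNode(2.0))  # initial params are [1, 0] (identity)
print(tree(0.0))          # -> 2.0
tree.set_params([3.0, 1.0])
print(tree(0.0))          # -> 7.0, i.e. 2.0 * 3.0 + 1.0
print(tree.get_params())  # -> [3.0, 1.0]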
src/api/models/enums/apschedulerevents.py
jedicontributors/pythondataintegrator
14
1296
EVENT_SCHEDULER_STARTED = EVENT_SCHEDULER_START = 2 ** 0
EVENT_SCHEDULER_SHUTDOWN = 2 ** 1
EVENT_SCHEDULER_PAUSED = 2 ** 2
EVENT_SCHEDULER_RESUMED = 2 ** 3
EVENT_EXECUTOR_ADDED = 2 ** 4
EVENT_EXECUTOR_REMOVED = 2 ** 5
EVENT_JOBSTORE_ADDED = 2 ** 6
EVENT_JOBSTORE_REMOVED = 2 ** 7
EVENT_ALL_JOBS_REMOVED = 2 ** 8
EVENT_JOB_ADDED = 2 ** 9
EVENT_JOB_REMOVED = 2 ** 10
EVENT_JOB_MODIFIED = 2 ** 11
EVENT_JOB_EXECUTED = 2 ** 12
EVENT_JOB_ERROR = 2 ** 13
EVENT_JOB_MISSED = 2 ** 14
EVENT_JOB_SUBMITTED = 2 ** 15
EVENT_JOB_MAX_INSTANCES = 2 ** 16
EVENT_ALL = (EVENT_SCHEDULER_STARTED | EVENT_SCHEDULER_SHUTDOWN | EVENT_SCHEDULER_PAUSED |
             EVENT_SCHEDULER_RESUMED | EVENT_EXECUTOR_ADDED | EVENT_EXECUTOR_REMOVED |
             EVENT_JOBSTORE_ADDED | EVENT_JOBSTORE_REMOVED | EVENT_ALL_JOBS_REMOVED |
             EVENT_JOB_ADDED | EVENT_JOB_REMOVED | EVENT_JOB_MODIFIED | EVENT_JOB_EXECUTED |
             EVENT_JOB_ERROR | EVENT_JOB_MISSED | EVENT_JOB_SUBMITTED | EVENT_JOB_MAX_INSTANCES)
0.578125
1
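The module above re-declares APScheduler's bit-flag event codes, so a listener can be registered for an OR-combined subset of events rather than EVENT_ALL. A brief sketch of that usage, assuming a stock apscheduler BackgroundScheduler; the project's own scheduler wiring is not shown in this record:

# Sketch only: the flags match the values defined in the module above and in
# apscheduler.events; assumes the apscheduler package is installed.
from apscheduler.schedulers.background import BackgroundScheduler

EVENT_JOB_EXECUTED = 2 ** 12
EVENT_JOB_ERROR = 2 ** 13


def on_job_event(event):
    # JobExecutionEvent carries .exception only when the job failed.
    status = "failed" if getattr(event, "exception", None) else "ran"
    print("job {} {}".format(event.job_id, status))


scheduler = BackgroundScheduler()
# One listener, two event types: the mask is the bitwise OR of the flags.
scheduler.add_listener(on_job_event, EVENT_JOB_EXECUTED | EVENT_JOB_ERROR)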
visnav/algo/orig/tools.py
oknuutti/hw_visnav
0
1304
import math import time import numpy as np import numba as nb import quaternion # adds to numpy # noqa # pylint: disable=unused-import import sys import scipy from astropy.coordinates import SkyCoord from scipy.interpolate import RectBivariateSpline from scipy.interpolate import NearestNDInterpolator # from scipy.spatial.ckdtree import cKDTree from visnav.settings import * class PositioningException(Exception): pass class Stopwatch: # from https://www.safaribooksonline.com/library/view/python-cookbook-3rd/9781449357337/ch13s13.html def __init__(self, elapsed=0.0, func=time.perf_counter): self._elapsed = elapsed self._func = func self._start = None @property def elapsed(self): return self._elapsed + ((self._func() - self._start) if self.running else 0) def start(self): if self._start is not None: raise RuntimeError('Already started') self._start = self._func() def stop(self): if self._start is None: raise RuntimeError('Not started') end = self._func() self._elapsed += end - self._start self._start = None def reset(self): self._elapsed = 0.0 @property def running(self): return self._start is not None def __enter__(self): self.start() return self def __exit__(self, *args): self.stop() def sphere_angle_radius(loc, r): return np.arcsin(r / np.linalg.norm(loc, axis=1)) def dist_across_and_along_vect(A, b): """ A: array of vectors, b: axis vector """ lat, lon, r = cartesian2spherical(*b) q = ypr_to_q(lat, lon, 0).conj() R = quaternion.as_rotation_matrix(q) Ab = R.dot(A.T).T d = Ab[:, 0:1] r = np.linalg.norm(Ab[:, 1:3], axis=1).reshape((-1, 1)) return r, d def point_vector_dist(A, B, dist_along_v=False): """ A: point, B: vector """ # (length of b)**2 normB2 = (B ** 2).sum(-1).reshape((-1, 1)) # a dot b vector product (project a on b but also times length of b) diagAB = (A * B).sum(-1).reshape((-1, 1)) # A projected along B (projection = a dot b/||b|| * b/||b||) A_B = (diagAB / normB2) * B # vector from projected A to A, it is perpendicular to B AB2A = A - A_B # diff vector lengths normD = np.sqrt((AB2A ** 2).sum(-1)).reshape((-1, 1)) return (normD, diagAB / np.sqrt(normB2)) if dist_along_v else normD def sc_asteroid_max_shift_error(A, B): """ Calculate max error between two set of vertices when projected to camera, A = estimated vertex positions B = true vertex positions Error is a vector perpendicular to B, i.e. 
A - A|| """ # diff vector lengths normD = point_vector_dist(A, B) # max length of diff vectors return np.max(normD) @nb.njit(nb.f8[:](nb.f8[:], nb.f8[:])) def cross3d(left, right): # for short vectors cross product is faster in pure python than with numpy.cross x = ((left[1] * right[2]) - (left[2] * right[1])) y = ((left[2] * right[0]) - (left[0] * right[2])) z = ((left[0] * right[1]) - (left[1] * right[0])) return np.array((x, y, z)) def normalize_v(v): norm = np.linalg.norm(v) return v / norm if norm != 0 else v @nb.njit(nb.types.f8[:](nb.types.f8[:])) def normalize_v_f8(v): norm = np.linalg.norm(v) return v / norm if norm != 0 else v def generate_field_fft(shape, sd=(0.33, 0.33, 0.34), len_sc=(0.5, 0.5 / 4, 0.5 / 16)): from visnav.algo.image import ImageProc sds = sd if getattr(sd, '__len__', False) else [sd] len_scs = len_sc if getattr(len_sc, '__len__', False) else [len_sc] assert len(shape) == 2, 'only 2d shapes are valid' assert len(sds) == len(len_scs), 'len(sd) differs from len(len_sc)' n = np.prod(shape) kernel = np.sum( np.stack([1 / len_sc * sd * n * ImageProc.gkern2d(shape, 1 / len_sc) for sd, len_sc in zip(sds, len_scs)], axis=2), axis=2) f_img = np.random.normal(0, 1, shape) + np.complex(0, 1) * np.random.normal(0, 1, shape) f_img = np.real(np.fft.ifft2(np.fft.fftshift(kernel * f_img))) return f_img @nb.njit(nb.types.f8[:](nb.types.f8[:], nb.types.f8[:], nb.types.f8[:])) def _surf_normal(x1, x2, x3): # a, b, c = np.array(x1, dtype=np.float64), np.array(x2, dtype=np.float64), np.array(x3, dtype=np.float64) return normalize_v_f8(cross3d(x2-x1, x3-x1)) def surf_normal(x1, x2, x3): a, b, c = np.array(x1, dtype=np.float64), np.array(x2, dtype=np.float64), np.array(x3, dtype=np.float64) return _surf_normal(a, b, c) # return normalize_v_f8(cross3d(b-a, c-a)) def vector_projection(a, b): return a.dot(b) / b.dot(b) * b def vector_rejection(a, b): return a - vector_projection(a, b) def angle_between_v(v1, v2): # Notice: only returns angles between 0 and 180 deg try: v1 = np.reshape(v1, (1, -1)) v2 = np.reshape(v2, (-1, 1)) n1 = v1 / np.linalg.norm(v1) n2 = v2 / np.linalg.norm(v2) cos_angle = n1.dot(n2) except TypeError as e: raise Exception('Bad vectors:\n\tv1: %s\n\tv2: %s' % (v1, v2)) from e return math.acos(np.clip(cos_angle, -1, 1)) def angle_between_v_mx(a, B, normalize=True): Bn = B / np.linalg.norm(B, axis=1).reshape((-1, 1)) if normalize else B an = normalize_v(a).reshape((-1, 1)) if normalize else a return np.arccos(np.clip(Bn.dot(an), -1.0, 1.0)) def angle_between_mx(A, B): return angle_between_rows(A, B) def angle_between_rows(A, B, normalize=True): assert A.shape[1] == 3 and B.shape[1] == 3, 'matrices need to be of shape (n, 3) and (m, 3)' if A.shape[0] == B.shape[0]: # from https://stackoverflow.com/questions/50772176/calculate-the-angle-between-the-rows-of-two-matrices-in-numpy/50772253 cos_angles = np.einsum('ij,ij->i', A, B) if normalize: p2 = np.einsum('ij,ij->i', A, A) p3 = np.einsum('ij,ij->i', B, B) cos_angles /= np.sqrt(p2 * p3) else: if normalize: A = A / np.linalg.norm(A, axis=1).reshape((-1, 1)) B = B / np.linalg.norm(B, axis=1).reshape((-1, 1)) cos_angles = B.dot(A.T) return np.arccos(np.clip(cos_angles, -1.0, 1.0)) def rand_q(angle): r = normalize_v(np.random.normal(size=3)) return angleaxis_to_q(np.hstack((angle, r))) def angle_between_q(q1, q2): # from https://chrischoy.github.io/research/measuring-rotation/ qd = q1.conj() * q2 return abs(wrap_rads(2 * math.acos(qd.normalized().w))) def angle_between_q_arr(q1, q2): qd = quaternion.as_float_array(q1.conj() * 
q2) qd = qd / np.linalg.norm(qd, axis=1).reshape((-1, 1)) return np.abs(wrap_rads(2 * np.arccos(qd[:, 0]))) def angle_between_ypr(ypr1, ypr2): q1 = ypr_to_q(*ypr1) q2 = ypr_to_q(*ypr2) return angle_between_q(q1, q2) def distance_mx(A, B): assert A.shape[1] == B.shape[1], 'matrices must have same amount of columns' k = A.shape[1] O = np.repeat(A.reshape((-1, 1, k)), B.shape[0], axis=1) - np.repeat(B.reshape((1, -1, k)), A.shape[0], axis=0) D = np.linalg.norm(O, axis=2) return D def q_to_unitbase(q): U0 = quaternion.as_quat_array([[0, 1, 0, 0], [0, 0, 1, 0], [0, 0, 0, 1.]]) Uq = q * U0 * q.conj() return quaternion.as_float_array(Uq)[:, 1:] def equatorial_to_ecliptic(ra, dec): """ translate from equatorial ra & dec to ecliptic ones """ sc = SkyCoord(ra, dec, unit='deg', frame='icrs', obstime='J2000') \ .transform_to('barycentrictrueecliptic') return sc.lat.value, sc.lon.value def q_to_angleaxis(q, compact=False): theta = math.acos(np.clip(q.w, -1, 1)) * 2.0 v = normalize_v(np.array([q.x, q.y, q.z])) if compact: return theta * v else: return np.array((theta,) + tuple(v)) def angleaxis_to_q(rv): """ first angle, then axis """ if len(rv) == 4: theta = rv[0] v = normalize_v(np.array(rv[1:])) elif len(rv) == 3: theta = math.sqrt(sum(x ** 2 for x in rv)) v = np.array(rv) / (1 if theta == 0 else theta) else: raise Exception('Invalid angle-axis vector: %s' % (rv,)) w = math.cos(theta / 2) v = v * math.sin(theta / 2) return np.quaternion(w, *v).normalized() def ypr_to_q(lat, lon, roll): # Tait-Bryan angles, aka yaw-pitch-roll, nautical angles, cardan angles # intrinsic euler rotations z-y'-x'', pitch=-lat, yaw=lon return ( np.quaternion(math.cos(lon / 2), 0, 0, math.sin(lon / 2)) * np.quaternion(math.cos(-lat / 2), 0, math.sin(-lat / 2), 0) * np.quaternion(math.cos(roll / 2), math.sin(roll / 2), 0, 0) ) def eul_to_q(angles, order='xyz', reverse=False): assert len(angles) == len(order), 'len(angles) != len(order)' q = quaternion.one idx = {'x': 0, 'y': 1, 'z': 2} for angle, axis in zip(angles, order): w = math.cos(angle / 2) v = [0, 0, 0] v[idx[axis]] = math.sin(angle / 2) dq = np.quaternion(w, *v) q = (dq * q) if reverse else (q * dq) return q def q_to_ypr(q): # from https://math.stackexchange.com/questions/687964/getting-euler-tait-bryan-angles-from-quaternion-representation q0, q1, q2, q3 = quaternion.as_float_array(q) roll = np.arctan2(q2 * q3 + q0 * q1, .5 - q1 ** 2 - q2 ** 2) lat = -np.arcsin(np.clip(-2 * (q1 * q3 - q0 * q2), -1, 1)) lon = np.arctan2(q1 * q2 + q0 * q3, .5 - q2 ** 2 - q3 ** 2) return lat, lon, roll def mean_q(qs, ws=None): """ returns a (weighted) mean of a set of quaternions idea is to rotate a bit in the direction of new quaternion from the sum of previous rotations NOTE: not tested properly, might not return same mean quaternion if order of input changed """ wtot = 0 qtot = quaternion.one for q, w in zip(qs, np.ones((len(qs),)) if ws is None else ws): ddaa = q_to_angleaxis(qtot.conj() * q) ddaa[0] = wrap_rads(ddaa[0]) * w / (w + wtot) qtot = angleaxis_to_q(ddaa) * qtot wtot += w return qtot def q_times_v(q, v): qv = np.quaternion(0, *v) qv2 = q * qv * q.conj() return np.array([qv2.x, qv2.y, qv2.z]) def q_times_mx(q, mx): qqmx = q * mx2qmx(mx) * q.conj() aqqmx = quaternion.as_float_array(qqmx) return aqqmx[:, 1:] def mx2qmx(mx): qmx = np.zeros((mx.shape[0], 4)) qmx[:, 1:] = mx return quaternion.as_quat_array(qmx) def wrap_rads(a): return (a + math.pi) % (2 * math.pi) - math.pi def wrap_degs(a): return (a + 180) % 360 - 180 def eccentric_anomaly(eccentricity, mean_anomaly, 
tol=1e-6): # from http://www.jgiesen.de/kepler/kepler.html E = mean_anomaly if eccentricity < 0.8 else math.pi F = E - eccentricity * math.sin(mean_anomaly) - mean_anomaly; for i in range(30): if abs(F) < tol: break E = E - F / (1.0 - eccentricity * math.cos(E)) F = E - eccentricity * math.sin(E) - mean_anomaly return round(E / tol) * tol def solar_elongation(ast_v, sc_q): sco_x, sco_y, sco_z = q_to_unitbase(sc_q) if USE_ICRS: sc = SkyCoord(x=ast_v[0], y=ast_v[1], z=ast_v[2], frame='icrs', unit='m', representation_type='cartesian', obstime='J2000') \ .transform_to('hcrs') \ .represent_as('cartesian') ast_v = np.array([sc.x.value, sc.y.value, sc.z.value]) # angle between camera axis and the sun, 0: right ahead, pi: behind elong = angle_between_v(-ast_v, sco_x) # direction the sun is at when looking along camera axis nvec = np.cross(sco_x, ast_v) direc = angle_between_v(nvec, sco_z) # decide if direction needs to be negative or not if np.cross(nvec, sco_z).dot(sco_x) < 0: direc = -direc return elong, direc def find_nearest_lesser(array, value): I = np.where(array < value) idx = (np.abs(array - value)).argmin() return array[I[idx]], I[idx] def find_nearest_greater(array, value): I = np.where(array > value) idx = (np.abs(array - value)).argmin() return array[I[idx]], I[idx] def find_nearest(array, value): idx = (np.abs(array - value)).argmin() return array[idx], idx def find_nearest_arr(array, value, ord=None, fun=None): diff = array - value idx = np.linalg.norm(diff if fun is None else list(map(fun, diff)), ord=ord, axis=1).argmin() return array[idx], idx def find_nearest_n(array, value, r, ord=None, fun=None): diff = array - value d = np.linalg.norm(diff if fun is None else list(map(fun, diff)), ord=ord, axis=1) idxs = np.where(d < r) return idxs[0] def find_nearest_each(haystack, needles, ord=None): assert len(haystack.shape) == 2 and len(needles.shape) == 2 and haystack.shape[1] == needles.shape[1], \ 'wrong shapes for haystack and needles, %s and %s, respectively' % (haystack.shape, needles.shape) c = haystack.shape[1] diff_mx = np.repeat(needles.reshape((-1, 1, c)), haystack.shape[0], axis=1) - np.repeat( haystack.reshape((1, -1, c)), needles.shape[0], axis=0) norm_mx = np.linalg.norm(diff_mx, axis=2, ord=ord) idxs = norm_mx.argmin(axis=1) return haystack[idxs], idxs def cartesian2spherical(x, y, z): r = math.sqrt(x ** 2 + y ** 2 + z ** 2) theta = math.acos(z / r) phi = math.atan2(y, x) lat = math.pi / 2 - theta lon = phi return np.array([lat, lon, r]) def spherical2cartesian(lat, lon, r): theta = math.pi / 2 - lat phi = lon x = r * math.sin(theta) * math.cos(phi) y = r * math.sin(theta) * math.sin(phi) z = r * math.cos(theta) return np.array([x, y, z]) def spherical2cartesian_arr(A, r=None): theta = math.pi / 2 - A[:, 0] phi = A[:, 1] r = (r or A[:, 2]) x = r * np.sin(theta) y = x * np.sin(phi) x *= np.cos(phi) # x = r * np.sin(theta) * np.cos(phi) # y = r * np.sin(theta) * np.sin(phi) z = r * np.cos(theta) return np.vstack([x, y, z]).T def discretize_v(v, tol=None, lat_range=(-math.pi / 2, math.pi / 2), points=None): """ simulate feature database by giving closest light direction with given tolerance """ if tol is not None and points is not None or tol is None and points is None: assert False, 'Give either tol or points' elif tol is not None: points = bf2_lat_lon(tol, lat_range=lat_range) lat, lon, r = cartesian2spherical(*v) (nlat, nlon), idx = find_nearest_arr( points, np.array((lat, lon)), ord=2, fun=wrap_rads, ) ret = spherical2cartesian(nlat, nlon, r) return ret, idx def 
discretize_q(q, tol=None, lat_range=(-math.pi / 2, math.pi / 2), points=None): """ simulate feature database by giving closest lat & roll with given tolerance and set lon to zero as feature detectors are rotation invariant (in opengl coords) """ if tol is not None and points is not None or tol is None and points is None: assert False, 'Give either tol or points' elif tol is not None: points = bf2_lat_lon(tol, lat_range=lat_range) lat, lon, roll = q_to_ypr(q) (nlat, nroll), idx = find_nearest_arr( points, np.array((lat, roll)), ord=2, fun=wrap_rads, ) nq0 = ypr_to_q(nlat, 0, nroll) return nq0, idx def bf_lat_lon(tol, lat_range=(-math.pi / 2, math.pi / 2)): # tol**2 == (step/2)**2 + (step/2)**2 -- 7deg is quite nice in terms of len(lon)*len(lat) == 1260 step = math.sqrt(2) * tol lat_steps = np.linspace(*lat_range, num=math.ceil((lat_range[1] - lat_range[0]) / step), endpoint=False)[1:] lon_steps = np.linspace(-math.pi, math.pi, num=math.ceil(2 * math.pi / step), endpoint=False) return lat_steps, lon_steps def bf2_lat_lon(tol, lat_range=(-math.pi / 2, math.pi / 2)): # tol**2 == (step/2)**2 + (step/2)**2 -- 7deg is quite nice in terms of len(lon)*len(lat) == 1260 step = math.sqrt(2) * tol lat_steps = np.linspace(*lat_range, num=math.ceil((lat_range[1] - lat_range[0]) / step), endpoint=False)[1:] # similar to https://www.cmu.edu/biolphys/deserno/pdf/sphere_equi.pdf points = [] for lat in lat_steps: Mphi = math.ceil(2 * math.pi * math.cos(lat) / step) lon_steps = np.linspace(-math.pi, math.pi, num=Mphi, endpoint=False) points.extend(zip([lat] * len(lon_steps), lon_steps)) return points def robust_mean(arr, discard_percentile=0.2, ret_n=False, axis=None): J = np.logical_not(np.isnan(arr)) if axis is not None: J = np.all(J, axis=1 if axis == 0 else 0) if axis == 0: arr = arr[J, :] elif axis == 1: arr = arr[:, J] else: arr = arr[J] low = np.percentile(arr, discard_percentile, axis=axis) high = np.percentile(arr, 100 - discard_percentile, axis=axis) I = np.logical_and(low < arr, arr < high) if axis is not None: I = np.all(I, axis=1 if axis == 0 else 0) m = np.mean(arr[:, I] if axis == 1 else arr[I], axis=axis) return (m, np.sum(I, axis=axis)) if ret_n else m def robust_std(arr, discard_percentile=0.2, mean=None, axis=None): corr = 1 if mean is None: mean, n = robust_mean(arr, discard_percentile=discard_percentile, ret_n=True, axis=axis) corr = n / (n - 1) return np.sqrt(robust_mean((arr - mean) ** 2, discard_percentile=discard_percentile, axis=axis) * corr) def mv_normal(mean, cov=None, L=None, size=None): if size is None: final_shape = [] elif isinstance(size, (int, np.integer)): final_shape = [size] else: final_shape = size final_shape = list(final_shape[:]) final_shape.append(mean.shape[0]) if L is None and cov is None \ or L is not None and cov is not None: raise ValueError("you must provide either cov or L (cholesky decomp result)") if len(mean.shape) != 1: raise ValueError("mean must be 1 dimensional") if L is not None: if (len(L.shape) != 2) or (L.shape[0] != L.shape[1]): raise ValueError("L must be 2 dimensional and square") if mean.shape[0] != L.shape[0]: raise ValueError("mean and L must have same length") if cov is not None: if (len(cov.shape) != 2) or (cov.shape[0] != cov.shape[1]): raise ValueError("cov must be 2 dimensional and square") if mean.shape[0] != cov.shape[0]: raise ValueError("mean and cov must have same length") L = np.linalg.cholesky(cov) from numpy.random import standard_normal z = standard_normal(final_shape).reshape(mean.shape[0], -1) x = L.dot(z).T x += mean x.shape = 
tuple(final_shape) return x, L def point_cloud_vs_model_err(points: np.ndarray, model) -> np.ndarray: faces = np.array([f[0] for f in model.faces], dtype='uint') vertices = np.array(model.vertices) errs = get_model_errors(points, vertices, faces) return errs # @nb.njit(nb.f8[:](nb.f8[:, :], nb.f8[:, :]), nogil=True) @nb.njit(nb.f8(nb.f8[:, :], nb.f8[:, :]), nogil=True, cache=True) def poly_line_intersect(poly, line): # extend_line = True eps = 1e-6 none = np.inf # np.zeros(1) v0v1 = poly[1, :] - poly[0, :] v0v2 = poly[2, :] - poly[0, :] dir = line[1, :] - line[0, :] line_len = math.sqrt(np.sum(dir ** 2)) if line_len < eps: return none dir = dir / line_len pvec = cross3d(dir, v0v2).ravel() det = np.dot(v0v1, pvec) if abs(det) < eps: return none # backface culling if False and det < 0: return none # frontface culling if False and det > 0: return none inv_det = 1.0 / det tvec = line[0, :] - poly[0, :] u = tvec.dot(pvec) * inv_det if u + eps < 0 or u - eps > 1: return none qvec = cross3d(tvec, v0v1).ravel() v = dir.dot(qvec) * inv_det if v + eps < 0 or u + v - eps > 1: return none t = v0v2.dot(qvec) * inv_det if True: # return error directly return t - line_len else: # return actual 3d intersect point if not extend_line and t - eps > line_len: return none return line[0, :] + t * dir # INVESTIGATE: parallel = True does not speed up at all (or marginally) for some reason even though all cores are in use @nb.njit(nb.f8(nb.u4[:, :], nb.f8[:, :], nb.f8[:, :]), nogil=True, parallel=False, cache=True) def intersections(faces, vertices, line): # pts = np.zeros((10, 3)) # i = 0 min_err = np.ones(faces.shape[0]) * np.inf for k in nb.prange(1, faces.shape[0]): err = poly_line_intersect(vertices[faces[k, :], :], line) min_err[k] = err # if abs(err) < min_err: # min_err = err # if len(pt) == 3: # pts[i, :] = pt # i += 1 # if i >= pts.shape[0]: # print('too many intersects') # i -= 1 i = np.argmin(np.abs(min_err)) return min_err[i] # pts[0:i, :] # @nb.jit(nb.f8[:](nb.f8[:, :], nb.f8[:, :], nb.i4[:, :]), nogil=True, parallel=False) def get_model_errors(points, vertices, faces): count = len(points) show_progress(count // 10, 0) j = 0 devs = np.empty(points.shape[0]) for i in nb.prange(count): vx = points[i, :] err = intersections(faces, vertices, np.array(((0, 0, 0), vx))) if math.isinf(err): # len(pts) == 0: print('no intersections!') continue if False: idx = np.argmin([np.linalg.norm(pt - vx) for pt in pts]) err = np.linalg.norm(pts[idx]) - np.linalg.norm(vx) devs[i] = err if j < i // 10: show_progress(count // 10, i // 10) j = i // 10 return devs def crop_model(model, cam_v, cam_q, x_fov, y_fov): assert False, 'not implemented' def augment_model(model, multiplier=3, length_scales=(0, 0.1, 1), sds=(1e-5, 1.6e-4, 2.4e-4)): assert multiplier > 1 and multiplier % 1 == 0, 'multiplier must be integer and >1' from scipy.interpolate import LinearNDInterpolator try: from sklearn.gaussian_process.kernels import Matern, WhiteKernel except: print('Requires scikit-learn, install using "conda install scikit-learn"') sys.exit() points = np.array(model.vertices) max_rng = np.max(np.ptp(points, axis=0)) # white noise to ensure positive definite covariance matrix ls = dict(zip(length_scales, sds)) sd0 = ls.pop(0, 1e-5) kernel = WhiteKernel(noise_level=sd0 * max_rng) for l, s in ls.items(): kernel += s ** 2 * Matern(length_scale=l * max_rng, nu=1.5) assert False, 'not implemented' # TODO: how is the covariance mx constructed again? y_cov = kernel(points) # TODO: sample gp ??? 
how to tie existing points and generate the new points in between? aug_points, L = mv_normal(points, cov=y_cov) # TODO: how to interpolate faces? pass # interpolate texture # TODO: augment texture interp = LinearNDInterpolator(points, model.texcoords) aug_texcoords = interp(aug_points) data = model.as_dict() data['faces'] = aug_faces data['vertices'] = aug_points data['texcoords'] = aug_texcoords from visnav.iotools import objloader aug_model = objloader.ShapeModel(data=data) aug_model.recalc_norms() return aug_model, L def apply_noise(model, support=(None, None), L=(None, None), len_sc=SHAPE_MODEL_NOISE_LEN_SC, noise_lv=SHAPE_MODEL_NOISE_LV['lo'], only_z=False, tx_noise=0, tx_noise_len_sc=SHAPE_MODEL_NOISE_LEN_SC, tx_hf_noise=True): Sv, St = support Lv, Lt = L inplace = noise_lv == 0 and model.texfile is None if noise_lv > 0: noisy_points, avg_dev, Lv = points_with_noise(points=model.vertices, support=Sv, L=Lv, noise_lv=noise_lv, len_sc=len_sc, only_z=only_z) else: noisy_points, avg_dev, Lv = model.vertices, 0, None tex = model.tex if tx_noise > 0: if inplace: model.tex = np.ones(model.tex.shape) Lt = Lv if Lt is None and tx_noise == noise_lv and tx_noise_len_sc == len_sc else Lt tex, tx_avg_dev, Lt = texture_noise(model, support=St, L=Lt, noise_sd=tx_noise, len_sc=tx_noise_len_sc, hf_noise=tx_hf_noise) if inplace: model.tex = tex noisy_model = model else: data = model.as_dict() data['vertices'] = noisy_points if tx_noise > 0: data['tex'] = tex data['texfile'] = None from visnav.iotools import objloader noisy_model = objloader.ShapeModel(data=data) if noise_lv > 0: noisy_model.recalc_norms() else: noisy_model.normals = model.normals return noisy_model, avg_dev, (Lv, Lt) def texture_noise(model, support=None, L=None, noise_sd=SHAPE_MODEL_NOISE_LV['lo'], len_sc=SHAPE_MODEL_NOISE_LEN_SC, max_rng=None, max_n=1e4, hf_noise=True): tex = model.load_texture() if tex is None: print('tools.texture_noise: no texture loaded') return [None] * 3 r = np.sqrt(max_n / np.prod(tex.shape[:2])) ny, nx = (np.array(tex.shape[:2]) * r).astype(np.int) n = nx * ny tx_grid_xx, tx_grid_yy = np.meshgrid(np.linspace(0, 1, nx), np.linspace(0, 1, ny)) tx_grid = np.hstack((tx_grid_xx.reshape((-1, 1)), tx_grid_yy.reshape((-1, 1)))) support = support if support else model points = np.array(support.vertices) max_rng = np.max(np.ptp(points, axis=0)) if max_rng is None else max_rng # use vertices for distances, find corresponding vertex for each pixel y_cov = None if L is None: try: from sklearn.gaussian_process.kernels import Matern, WhiteKernel except: print('Requires scikit-learn, install using "conda install scikit-learn"') sys.exit() kernel = 1.0 * noise_sd * Matern(length_scale=len_sc * max_rng, nu=1.5) \ + 0.5 * noise_sd * Matern(length_scale=0.1 * len_sc * max_rng, nu=1.5) \ + WhiteKernel( noise_level=1e-5 * noise_sd * max_rng) # white noise for positive definite covariance matrix only # texture coordinates given so that x points left and *Y POINTS UP* tex_img_coords = np.array(support.texcoords) tex_img_coords[:, 1] = 1 - tex_img_coords[:, 1] _, idxs = find_nearest_each(haystack=tex_img_coords, needles=tx_grid) tx2vx = support.texture_to_vertex_map() y_cov = kernel(points[tx2vx[idxs], :] - np.mean(points, axis=0)) if 0: # for debugging distances import matplotlib.pyplot as plt import cv2 from visnav.algo.image import ImageProc orig_tx = cv2.imread(os.path.join(DATA_DIR, '67p+tex.png'), cv2.IMREAD_GRAYSCALE) gx, gy = np.gradient(points[tx2vx[idxs], :].reshape((ny, nx, 3)), axis=(1, 0)) gxy = np.linalg.norm(gx, 
axis=2) + np.linalg.norm(gy, axis=2) gxy = (gxy - np.min(gxy)) / (np.max(gxy) - np.min(gxy)) grad_img = cv2.resize((gxy * 255).astype('uint8'), orig_tx.shape) overlaid = ImageProc.merge((orig_tx, grad_img)) plt.figure(1) plt.imshow(overlaid) plt.show() # sample gp e0, L = mv_normal(np.zeros(n), cov=y_cov, L=L) e0 = e0.reshape((ny, nx)) # interpolate for final texture x = np.linspace(np.min(tx_grid_xx), np.max(tx_grid_xx), tex.shape[1]) y = np.linspace(np.min(tx_grid_yy), np.max(tx_grid_yy), tex.shape[0]) interp0 = RectBivariateSpline(tx_grid_xx[0, :], tx_grid_yy[:, 0], e0, kx=1, ky=1) err0 = interp0(x, y) if 0: import matplotlib.pyplot as plt import cv2 from visnav.algo.image import ImageProc orig_tx = cv2.imread(os.path.join(DATA_DIR, '67p+tex.png'), cv2.IMREAD_GRAYSCALE) err_ = err0 if 1 else e0 eimg = (err_ - np.min(err_)) / (np.max(err_) - np.min(err_)) eimg = cv2.resize((eimg * 255).astype('uint8'), orig_tx.shape) overlaid = ImageProc.merge((orig_tx, eimg)) plt.figure(1) plt.imshow(overlaid) plt.show() err1 = 0 if hf_noise: e1, L = mv_normal(np.zeros(n), L=L) e1 = e1.reshape((ny, nx)) interp1 = RectBivariateSpline(tx_grid_xx[0, :], tx_grid_yy[:, 0], e1, kx=1, ky=1) err_coef = interp1(x, y) lo, hi = np.min(err_coef), np.max(err_coef) err_coef = (err_coef - lo) / (hi - lo) len_sc = 10 err1 = generate_field_fft(tex.shape, (6 * noise_sd, 4 * noise_sd), (len_sc / 1000, len_sc / 4500)) if hf_noise else 0 err1 *= err_coef noisy_tex = tex + err0 + err1 noisy_tex /= np.max(noisy_tex) if 0: import matplotlib.pyplot as plt plt.figure(1) plt.imshow(noisy_tex) plt.figure(2) plt.imshow(err0) plt.figure(3) plt.imshow(err1) plt.show() return noisy_tex, np.std(err0 + err1), L class NearestKernelNDInterpolator(NearestNDInterpolator): def __init__(self, *args, k_nearest=None, kernel='gaussian', kernel_sc=None, kernel_eps=1e-12, query_eps=0.05, max_distance=None, **kwargs): """ Parameters ---------- kernel : one of the following functions of distance that give weight to neighbours: 'linear': (kernel_sc/(r + kernel_eps)) 'quadratic': (kernel_sc/(r + kernel_eps))**2 'cubic': (kernel_sc/(r + kernel_eps))**3 'gaussian': exp(-(r/kernel_sc)**2) k_nearest : if given, uses k_nearest neighbours for interpolation regardless of their distances """ choices = ('linear', 'quadratic', 'cubic', 'gaussian') assert kernel in choices, 'kernel must be one of %s' % (choices,) self._tree_options = kwargs.get('tree_options', {}) super(NearestKernelNDInterpolator, self).__init__(*args, **kwargs) if max_distance is None: if kernel_sc is None: d, _ = self.tree.query(self.points, k=k_nearest) kernel_sc = np.mean(d) * k_nearest / (k_nearest - 1) max_distance = kernel_sc * 3 assert kernel_sc is not None, 'kernel_sc need to be set' self.kernel = kernel self.kernel_sc = kernel_sc self.kernel_eps = kernel_eps self.k_nearest = k_nearest self.max_distance = max_distance self.query_eps = query_eps def _linear(self, r): if scipy.sparse.issparse(r): return self.kernel_sc / (r + self.kernel_eps) else: return self.kernel_sc / (r + self.kernel_eps) def _quadratic(self, r): if scipy.sparse.issparse(r): return np.power(self.kernel_sc / (r.data + self.kernel_eps), 2, out=r.data) else: return (self.kernel_sc / (r + self.kernel_eps)) ** 2 def _cubic(self, r): if scipy.sparse.issparse(r): return self.kernel_sc / (r + self.kernel_eps).power(3) else: return (self.kernel_sc / (r + self.kernel_eps)) ** 3 def _gaussian(self, r): if scipy.sparse.issparse(r): return np.exp((-r.data / self.kernel_sc) ** 2, out=r.data) else: return np.exp(-(r / 
self.kernel_sc) ** 2) def __call__(self, *args): """ Evaluate interpolator at given points. Parameters ---------- xi : ndarray of float, shape (..., ndim) Points where to interpolate data at. """ from scipy.interpolate.interpnd import _ndim_coords_from_arrays xi = _ndim_coords_from_arrays(args, ndim=self.points.shape[1]) xi = self._check_call_shape(xi) xi = self._scale_x(xi) r, idxs = self.tree.query(xi, self.k_nearest, eps=self.query_eps, distance_upper_bound=self.max_distance or np.inf) w = getattr(self, '_' + self.kernel)(r).reshape((-1, self.k_nearest, 1)) + self.kernel_eps w /= np.sum(w, axis=1).reshape((-1, 1, 1)) yt = np.vstack((self.values, [0])) # if idxs[i, j] == len(values), then i:th point doesnt have j:th match yi = np.sum(yt[idxs, :] * w, axis=1) return yi def points_with_noise(points, support=None, L=None, noise_lv=SHAPE_MODEL_NOISE_LV['lo'], len_sc=SHAPE_MODEL_NOISE_LEN_SC, max_rng=None, only_z=False): try: from sklearn.gaussian_process.kernels import Matern, WhiteKernel except: print('Requires scikit-learn, install using "conda install scikit-learn"') sys.exit() if support is None: support = points # [random.sample(list(range(len(points))), min(3000,len(points)))] n = len(support) mean = np.mean(points, axis=0) max_rng = np.max(np.ptp(points, axis=0)) if max_rng is None else max_rng y_cov = None if L is None: kernel = 0.6 * noise_lv * Matern(length_scale=len_sc * max_rng, nu=1.5) \ + 0.4 * noise_lv * Matern(length_scale=0.1 * len_sc * max_rng, nu=1.5) \ + WhiteKernel( noise_level=1e-5 * noise_lv * max_rng) # white noise for positive definite covariance matrix only y_cov = kernel(support - mean) # sample gp e0, L = mv_normal(np.zeros(n), cov=y_cov, L=L) err = np.exp(e0.astype(points.dtype)).reshape((-1, 1)) if len(err) == len(points): full_err = err if DEBUG: print('using orig gp sampled err') else: # interpolate sc = 0.05 * len_sc * max_rng interp = NearestKernelNDInterpolator(support - mean, err, k_nearest=12, kernel='gaussian', kernel_sc=sc, max_distance=sc * 6) full_err = interp(points - mean).astype(points.dtype) # maybe extrapolate nanidx = tuple(np.isnan(full_err).flat) if np.any(nanidx): assert False, 'shouldnt happen' # if DEBUG or not BATCH_MODE: # print('%sx nans'%np.sum(nanidx)) # naninterp = NearestNDInterpolator(support, err) # try: # full_err[nanidx,] = naninterp(points[nanidx, :]).astype(points.dtype) # except IndexError as e: # raise IndexError('%s,%s,%s'%(err.shape, full_err.shape, points.shape)) from e # extra high frequency noise # white_noise = 1 if True else np.exp(np.random.normal(scale=0.2*noise_lv*max_rng, size=(len(full_err),1))) if only_z: add_err_z = (max_rng / 2) * (full_err - 1) add_err = np.concatenate((np.zeros((len(full_err), 2)), add_err_z), axis=1) noisy_points = points + add_err devs = np.abs(noisy_points[:, 2] - points[:, 2]) / (max_rng / 2) assert np.isclose(devs.flatten(), np.abs(full_err - 1).flatten()).all(), 'something wrong' else: # noisy_points = (points-mean)*full_err*white_noise +mean # r = np.sqrt(np.sum((points - mean)**2, axis=-1)).reshape(-1, 1) # noisy_points = (points - mean) * (1 + np.log(full_err)/r) + mean noisy_points = (points - mean) * full_err + mean devs = np.sqrt(np.sum((noisy_points - points) ** 2, axis=-1) / np.sum((points - mean) ** 2, axis=-1)) if DEBUG or not BATCH_MODE: print('noise (lv=%.3f): %.3f, %.3f; avg=%.3f' % ( (noise_lv,) + tuple(np.percentile(devs, (68, 95))) + (np.mean(devs),))) if False: import matplotlib.pyplot as plt plt.figure(1, figsize=(8, 8)) # plt.plot(np.concatenate((points[:,0], 
err0[:,0], err[:,0], points[:,0]*err[:,0]))) plt.subplot(2, 2, 1) plt.plot(points[:, 0]) plt.title('original', fontsize=12) plt.subplot(2, 2, 2) plt.plot(err0[:, 0]) plt.title('norm-err', fontsize=12) plt.subplot(2, 2, 3) plt.plot(err[:, 0]) plt.title('exp-err', fontsize=12) plt.subplot(2, 2, 4) plt.plot(noisy_points[:, 0]) plt.title('noisy', fontsize=12) plt.tight_layout() plt.show() assert False, 'exiting' return noisy_points, np.mean(devs), L def foreground_idxs(array, max_val=None): iy, ix = np.where(array < max_val) idxs = np.concatenate(((iy,), (ix,)), axis=0).T return idxs def interp2(array, x, y, max_val=None, max_dist=30, idxs=None, discard_bg=False): assert y < array.shape[0] and x < array.shape[1], 'out of bounds %s: %s' % (array.shape, (y, x)) v = array[int(y):int(y) + 2, int(x):int(x) + 2] xf = x - int(x) yf = y - int(y) w = np.array(( ((1 - yf) * (1 - xf), (1 - yf) * xf), (yf * (1 - xf), yf * xf), )) # ignore background depths if max_val is not None: idx = v.reshape(1, -1) < max_val * 0.999 else: idx = ~np.isnan(v.reshape(1, -1)) w_sum = np.sum(w.reshape(1, -1)[idx]) if w_sum > 0: # ignore background values val = np.sum(w.reshape(1, -1)[idx] * v.reshape(1, -1)[idx]) / w_sum elif discard_bg: return float('nan') else: # no foreground values in 2x2 matrix, find nearest foreground value if idxs is None: idxs = foreground_idxs(array, max_val) fallback = len(idxs) == 0 if not fallback: dist = np.linalg.norm(idxs - np.array((y, x)), axis=1) i = np.argmin(dist) val = array[idxs[i, 0], idxs[i, 1]] # print('\n%s, %s, %s, %s, %s, %s, %s'%(v, x,y,dist[i],idxs[i,1],idxs[i,0],val)) fallback = dist[i] > max_dist if fallback: val = np.sum(w * v) / np.sum(w) return val def solve_rotation(src_q, dst_q): """ q*src_q*q.conj() == dst_q, solve for q """ # based on http://web.cs.iastate.edu/~cs577/handouts/quaternion.pdf # and https://en.wikipedia.org/wiki/Quaternions_and_spatial_rotation#Pairs_of_unit_quaternions_as_rotations_in_4D_space # NOTE: not certain if works.. 
M = np.zeros((4, 4)) for i in range(len(src_q)): si = src_q[i] Pi = np.array(( (si.w, -si.x, -si.y, -si.z), (si.x, si.w, si.z, -si.y), (si.y, -si.z, si.w, si.x), (si.z, si.y, -si.x, si.w), )) qi = dst_q[i] Qi = np.array(( (qi.w, -qi.x, -qi.y, -qi.z), (qi.x, qi.w, -qi.z, qi.y), (qi.y, qi.z, qi.w, -qi.x), (qi.z, -qi.y, qi.x, qi.w), )) M += Pi.T * Qi w, v = np.linalg.eig(M) i = np.argmax(w) res_q = np.quaternion(*v[:, i]) # alt = v.dot(w) # print('%s,%s'%(res_q, alt)) # res_q = np.quaternion(*alt).normalized() return res_q def solve_q_bf(src_q, dst_q): qs = [] d = [] for res_q in ( np.quaternion(0, 0, 0, 1).normalized(), np.quaternion(0, 0, 1, 0).normalized(), np.quaternion(0, 0, 1, 1).normalized(), np.quaternion(0, 0, -1, 1).normalized(), np.quaternion(0, 1, 0, 0).normalized(), np.quaternion(0, 1, 0, 1).normalized(), np.quaternion(0, 1, 0, -1).normalized(), np.quaternion(0, 1, 1, 0).normalized(), np.quaternion(0, 1, -1, 0).normalized(), np.quaternion(0, 1, 1, 1).normalized(), np.quaternion(0, 1, 1, -1).normalized(), np.quaternion(0, 1, -1, 1).normalized(), np.quaternion(0, 1, -1, -1).normalized(), np.quaternion(1, 0, 0, 1).normalized(), np.quaternion(1, 0, 0, -1).normalized(), np.quaternion(1, 0, 1, 0).normalized(), np.quaternion(1, 0, -1, 0).normalized(), np.quaternion(1, 0, 1, 1).normalized(), np.quaternion(1, 0, 1, -1).normalized(), np.quaternion(1, 0, -1, 1).normalized(), np.quaternion(1, 0, -1, -1).normalized(), np.quaternion(1, 1, 0, 0).normalized(), np.quaternion(1, -1, 0, 0).normalized(), np.quaternion(1, 1, 0, 1).normalized(), np.quaternion(1, 1, 0, -1).normalized(), np.quaternion(1, -1, 0, 1).normalized(), np.quaternion(1, -1, 0, -1).normalized(), np.quaternion(1, 1, 1, 0).normalized(), np.quaternion(1, 1, -1, 0).normalized(), np.quaternion(1, -1, 1, 0).normalized(), np.quaternion(1, -1, -1, 0).normalized(), np.quaternion(1, 1, 1, -1).normalized(), np.quaternion(1, 1, -1, 1).normalized(), np.quaternion(1, 1, -1, -1).normalized(), np.quaternion(1, -1, 1, 1).normalized(), np.quaternion(1, -1, 1, -1).normalized(), np.quaternion(1, -1, -1, 1).normalized(), np.quaternion(1, -1, -1, -1).normalized(), ): tq = res_q * src_q * res_q.conj() qs.append(res_q) # d.append(1-np.array((tq.w, tq.x, tq.y, tq.z)).dot(np.array((dst_q.w, dst_q.x, dst_q.y, dst_q.z)))**2) d.append(angle_between_q(tq, dst_q)) i = np.argmin(d) return qs[i] def hover_annotate(fig, ax, line, annotations): annot = ax.annotate("", xy=(0, 0), xytext=(-20, 20), textcoords="offset points", bbox=dict(boxstyle="round", fc="w"), arrowprops=dict(arrowstyle="->")) annot.set_visible(False) def update_annot(ind): idx = ind["ind"][0] try: # for regular plots x, y = line.get_data() annot.xy = (x[idx], y[idx]) except AttributeError: # for scatter plots annot.xy = tuple(line.get_offsets()[idx]) text = ", ".join([annotations[n] for n in ind["ind"]]) annot.set_text(text) annot.get_bbox_patch().set_alpha(0.4) def hover(event): vis = annot.get_visible() if event.inaxes == ax: cont, ind = line.contains(event) if cont: update_annot(ind) annot.set_visible(True) fig.canvas.draw_idle() else: if vis: annot.set_visible(False) fig.canvas.draw_idle() fig.canvas.mpl_connect("motion_notify_event", hover) def plot_vectors(pts3d, scatter=True, conseq=True, neg_z=True): import matplotlib.pyplot as plt from mpl_toolkits.mplot3d import Axes3D fig = plt.figure() ax = Axes3D(fig) if scatter: ax.scatter(pts3d[:, 0], pts3d[:, 1], pts3d[:, 2]) else: if conseq: ax.set_prop_cycle('color', map(lambda c: '%f' % c, np.linspace(1, 0, len(pts3d)))) for i, v1 in 
enumerate(pts3d): if v1 is not None: ax.plot((0, v1[0]), (0, v1[1]), (0, v1[2])) ax.set_xlabel('X') ax.set_ylabel('Y') ax.set_zlabel('Z') if neg_z: ax.view_init(90, -90) else: ax.view_init(-90, -90) plt.show() def numeric(s): try: float(s) except ValueError: return False return True def pseudo_huber_loss(a, delta): # from https://en.wikipedia.org/wiki/Huber_loss # first +1e-15 is to avoid divide by zero, second to avoid loss becoming zero if delta > 1e7 due to float precision return delta ** 2 * (np.sqrt(1 + a ** 2 / (delta ** 2 + 1e-15)) - 1 + 1e-15) def fixed_precision(val, precision, as_str=False): if val == 0: return ('%%.%df' % precision) % val if as_str else val d = math.ceil(math.log10(abs(val))) - precision c = 10 ** d fp_val = round(val / c) * c return ('%%.%df' % max(0, -d)) % fp_val if as_str else fp_val def plot_quats(quats, conseq=True, wait=True): import matplotlib.pyplot as plt from mpl_toolkits.mplot3d import Axes3D fig = plt.figure() ax = Axes3D(fig) ax.set_xlim(-1, 1) ax.set_ylim(-1, 1) ax.set_zlim(-1, 1) ax.set_xlabel('x') ax.set_ylabel('y') ax.set_zlabel('z') if conseq: ax.set_prop_cycle('color', map(lambda c: '%f' % c, np.linspace(1, 0, len(quats)))) for i, q in enumerate(quats): if q is not None: lat, lon, _ = q_to_ypr(q) v1 = spherical2cartesian(lat, lon, 1) v2 = (v1 + normalize_v(np.cross(np.cross(v1, np.array([0, 0, 1])), v1)) * 0.1) * 0.85 v2 = q_times_v(q, v2) ax.plot((0, v1[0], v2[0]), (0, v1[1], v2[1]), (0, v1[2], v2[2])) while (wait and not plt.waitforbuttonpress()): pass def plot_poses(poses, conseq=True, wait=True, arrow_len=1): import matplotlib.pyplot as plt from mpl_toolkits.mplot3d import Axes3D fig = plt.figure() ax = Axes3D(fig) ax.set_xlabel('x') ax.set_ylabel('y') ax.set_zlabel('z') if conseq: plt.hsv() # ax.set_prop_cycle('color', map(lambda c: '%f' % c, np.linspace(.7, 0, len(poses)))) for i, pose in enumerate(poses): if pose is not None: q = np.quaternion(*pose[3:]) lat, lon, _ = q_to_ypr(q) v1 = spherical2cartesian(lat, lon, 1) * arrow_len v2 = (v1 + normalize_v(np.cross(np.cross(v1, np.array([0, 0, 1])), v1)) * 0.1 * arrow_len) * 0.85 v2 = q_times_v(q, v2) ax.plot((pose[0], v1[0], v2[0]), (pose[1], v1[1], v2[1]), (pose[2], v1[2], v2[2])) while (wait and not plt.waitforbuttonpress()): pass # # Not sure if unitbase_to_q works, haven't deleted just in case still need: # # def unitbase_to_q(b_dst, b_src = [[1, 0, 0], [0, 1, 0], [0, 0, 1]]): # # based on http://stackoverflow.com/questions/16648452/calculating-\ # # quaternion-for-transformation-between-2-3d-cartesian-coordinate-syst # # , which is based on http://dx.doi.org/10.1117/12.57955 # # M = np.zeros((3, 3)) # # for i, v in enumerate(b_src): # x = np.matrix(np.outer(v, b_dst[i])) # M = M + x # # N11 = M[0, 0] + M[1, 1] + M[2, 2] # N22 = M[0, 0] - M[1, 1] - M[2, 2] # N33 = -M[0, 0] + M[1, 1] - M[2, 2] # N44 = -M[0, 0] - M[1, 1] + M[2, 2] # N12 = M[1, 2] - M[2, 1] # N13 = M[2, 0] - M[0, 2] # N14 = M[0, 1] - M[1, 0] # N21 = N12 # N23 = M[0, 1] + M[1, 0] # N24 = M[2, 0] + M[0, 2] # N31 = N13 # N32 = N23 # N34 = M[1, 2] + M[2, 1] # N41 = N14 # N42 = N24 # N43 = N34 # # N=np.matrix([[N11, N12, N13, N14],\ # [N21, N22, N23, N24],\ # [N31, N32, N33, N34],\ # [N41, N42, N43, N44]]) # # values, vectors = np.linalg.eig(N) # quat = vectors[:, np.argmax(values)] # #quat = np.array(quat).reshape(-1,).tolist() # # return np.quaternion(*quat) import tracemalloc import os import linecache def display_top(top_stats, key_type='lineno', limit=10): # snapshot = snapshot.filter_traces(( # tracemalloc.Filter(False, 
"<frozen importlib._bootstrap>"), # tracemalloc.Filter(False, "<unknown>"), # )) # top_stats = snapshot.statistics(key_type, cumulative=True) print("Top %s lines" % limit) for index, stat in enumerate(top_stats[:limit], 1): frame = stat.traceback[0] # replace "/path/to/module/file.py" with "module/file.py" filename = os.sep.join(frame.filename.split(os.sep)[-2:]) print("#%s: %s:%s: %.1f MB (x%.0f)" % (index, filename, frame.lineno, stat.size / 1024 / 1024, stat.count)) line = linecache.getline(frame.filename, frame.lineno).strip() if line: print(' %s' % line) other = top_stats[limit:] if other: size = sum(stat.size for stat in other) print("%s other: %.1f MB" % (len(other), size / 1024 / 1024)) total = sum(stat.size for stat in top_stats) print("Total allocated size: %.1f MB" % (total / 1024 / 1024)) def show_progress(tot, i): digits = int(math.ceil(math.log10(tot + 1))) if i == 0: print('%s/%d' % ('0' * digits, tot), end='', flush=True) else: print(('%s%0' + str(digits) + 'd/%d') % ('\b' * (digits * 2 + 1), i + 1, tot), end='', flush=True) def smooth1d(xt, x, Y, weight_fun=lambda d: 0.9 ** abs(d)): if xt.ndim != 1 or x.ndim != 1: raise ValueError("smooth1d only accepts 1 dimension arrays for location") if x.shape[0] != Y.shape[0]: raise ValueError("different lenght x and Y") D = np.repeat(np.expand_dims(xt, 1), len(x), axis=1) - np.repeat(np.expand_dims(x, 0), len(xt), axis=0) weights = np.array(list(map(weight_fun, D.flatten()))).reshape(D.shape) Yt = np.sum(Y * weights, axis=1) / np.sum(weights, axis=1) return Yt
2.203125
2
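The visnav tools.py record above collects geometry helpers built on numpy-quaternion: yaw-pitch-roll conversions, angle measures between vectors and quaternions, spherical/cartesian transforms, and shape-model noise utilities. A short round-trip sketch of the orientation helpers, assuming numpy and numpy-quaternion are installed and that the module is importable under the path shown in the record header:

# Round-trip sanity check for ypr_to_q / q_to_ypr / q_times_v defined above.
# Assumes the visnav package (and its settings module) is on the path.
import math
import numpy as np
from visnav.algo.orig.tools import ypr_to_q, q_to_ypr, q_times_v

lat, lon, roll = math.radians(10), math.radians(45), math.radians(-5)
q = ypr_to_q(lat, lon, roll)        # intrinsic z-y'-x'' rotation
lat2, lon2, roll2 = q_to_ypr(q)     # recovers the input angles
assert np.allclose([lat, lon, roll], [lat2, lon2, roll2])

v = q_times_v(q, np.array([1.0, 0.0, 0.0]))   # rotate the x axis
assert np.isclose(np.linalg.norm(v), 1.0)     # rotation preserves length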
tests/route_generator_test.py
CityPulse/dynamic-bus-scheduling
14
1312
#!/usr/local/bin/python # -*- coding: utf-8 -*- """ - LICENCE The MIT License (MIT) Copyright (c) 2016 <NAME> Ericsson AB (EU FP7 CityPulse Project) Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. - DESCRIPTION OF DOCUMENTS -- MongoDB Database Documents: address_document: { '_id', 'name', 'node_id', 'point': {'longitude', 'latitude'} } bus_line_document: { '_id', 'bus_line_id', 'bus_stops': [{'_id', 'osm_id', 'name', 'point': {'longitude', 'latitude'}}] } bus_stop_document: { '_id', 'osm_id', 'name', 'point': {'longitude', 'latitude'} } bus_stop_waypoints_document: { '_id', 'starting_bus_stop': {'_id', 'osm_id', 'name', 'point': {'longitude', 'latitude'}}, 'ending_bus_stop': {'_id', 'osm_id', 'name', 'point': {'longitude', 'latitude'}}, 'waypoints': [[edge_object_id]] } bus_vehicle_document: { '_id', 'bus_vehicle_id', 'maximum_capacity', 'routes': [{'starting_datetime', 'ending_datetime', 'timetable_id'}] } detailed_bus_stop_waypoints_document: { '_id', 'starting_bus_stop': {'_id', 'osm_id', 'name', 'point': {'longitude', 'latitude'}}, 'ending_bus_stop': {'_id', 'osm_id', 'name', 'point': {'longitude', 'latitude'}}, 'waypoints': [[edge_document]] } edge_document: { '_id', 'starting_node': {'osm_id', 'point': {'longitude', 'latitude'}}, 'ending_node': {'osm_id', 'point': {'longitude', 'latitude'}}, 'max_speed', 'road_type', 'way_id', 'traffic_density' } node_document: { '_id', 'osm_id', 'tags', 'point': {'longitude', 'latitude'} } point_document: { '_id', 'osm_id', 'point': {'longitude', 'latitude'} } timetable_document: { '_id', 'timetable_id', 'bus_line_id', 'bus_vehicle_id', 'timetable_entries': [{ 'starting_bus_stop': {'_id', 'osm_id', 'name', 'point': {'longitude', 'latitude'}}, 'ending_bus_stop': {'_id', 'osm_id', 'name', 'point': {'longitude', 'latitude'}}, 'departure_datetime', 'arrival_datetime', 'number_of_onboarding_passengers', 'number_of_deboarding_passengers', 'number_of_current_passengers', 'route': { 'total_distance', 'total_time', 'node_osm_ids', 'points', 'edges', 'distances_from_starting_node', 'times_from_starting_node', 'distances_from_previous_node', 'times_from_previous_node' } }], 'travel_requests': [{ '_id', 'client_id', 'bus_line_id', 'starting_bus_stop': {'_id', 'osm_id', 'name', 'point': {'longitude', 'latitude'}}, 'ending_bus_stop': {'_id', 'osm_id', 'name', 'point': {'longitude', 'latitude'}}, 'departure_datetime', 'arrival_datetime', 'starting_timetable_entry_index', 'ending_timetable_entry_index' }] } traffic_event_document: { '_id', 'event_id', 'event_type', 'event_level', 'point': {'longitude', 
'latitude'}, 'datetime' } travel_request_document: { '_id', 'client_id', 'bus_line_id', 'starting_bus_stop': {'_id', 'osm_id', 'name', 'point': {'longitude', 'latitude'}}, 'ending_bus_stop': {'_id', 'osm_id', 'name', 'point': {'longitude', 'latitude'}}, 'departure_datetime', 'arrival_datetime', 'starting_timetable_entry_index', 'ending_timetable_entry_index' } way_document: { '_id', 'osm_id', 'tags', 'references' } -- Route Generator Responses: get_route_between_two_bus_stops: { 'starting_bus_stop': {'_id', 'osm_id', 'name', 'point': {'longitude', 'latitude'}}, 'ending_bus_stop': {'_id', 'osm_id', 'name', 'point': {'longitude', 'latitude'}}, 'route': { 'total_distance', 'total_time', 'node_osm_ids', 'points', 'edges', 'distances_from_starting_node', 'times_from_starting_node', 'distances_from_previous_node', 'times_from_previous_node' } } get_route_between_multiple_bus_stops: [{ 'starting_bus_stop': {'_id', 'osm_id', 'name', 'point': {'longitude', 'latitude'}}, 'ending_bus_stop': {'_id', 'osm_id', 'name', 'point': {'longitude', 'latitude'}}, 'route': { 'total_distance', 'total_time', 'node_osm_ids', 'points', 'edges', 'distances_from_starting_node', 'times_from_starting_node', 'distances_from_previous_node', 'times_from_previous_node' } }] get_waypoints_between_two_bus_stops: { 'starting_bus_stop': {'_id', 'osm_id', 'name', 'point': {'longitude', 'latitude'}}, 'ending_bus_stop': {'_id', 'osm_id', 'name', 'point': {'longitude', 'latitude'}}, 'waypoints': [[{ '_id', 'starting_node': {'osm_id', 'point': {'longitude', 'latitude'}}, 'ending_node': {'osm_id', 'point': {'longitude', 'latitude'}}, 'max_speed', 'road_type', 'way_id', 'traffic_density' }]] } get_waypoints_between_multiple_bus_stops: [{ 'starting_bus_stop': {'_id', 'osm_id', 'name', 'point': {'longitude', 'latitude'}}, 'ending_bus_stop': {'_id', 'osm_id', 'name', 'point': {'longitude', 'latitude'}}, 'waypoints': [[{ '_id', 'starting_node': {'osm_id', 'point': {'longitude', 'latitude'}}, 'ending_node': {'osm_id', 'point': {'longitude', 'latitude'}}, 'max_speed', 'road_type', 'way_id', 'traffic_density' }]] }] """ import time import os import sys sys.path.append(os.path.join(os.path.dirname(__file__), '../')) from src.common.logger import log from src.common.parameters import testing_bus_stop_names from src.route_generator.route_generator_client import get_route_between_two_bus_stops, \ get_route_between_multiple_bus_stops, get_waypoints_between_two_bus_stops, get_waypoints_between_multiple_bus_stops __author__ = '<NAME>' __email__ = '<EMAIL>' __credits__ = [ '<NAME> (Senior Researcher at Ericsson AB) - email: <EMAIL>' '<NAME> (Senior Researcher at Ericsson AB) - email: <EMAIL>' ] def test_get_route_between_two_bus_stops(starting_bus_stop=None, ending_bus_stop=None, starting_bus_stop_name=None, ending_bus_stop_name=None): """ :param starting_bus_stop: bus_stop_document :param ending_bus_stop: bus_stop_document :param starting_bus_stop_name: string :param ending_bus_stop_name: string """ log(module_name='route_generator_test', log_type='INFO', log_message='get_route_between_two_bus_stops: starting') start_time = time.time() # response = { # 'starting_bus_stop': {'_id', 'osm_id', 'name', 'point': {'longitude', 'latitude'}}, # 'ending_bus_stop': {'_id', 'osm_id', 'name', 'point': {'longitude', 'latitude'}}, # 'route': { # 'total_distance', 'total_time', 'node_osm_ids', 'points', 'edges', # 'distances_from_starting_node', 'times_from_starting_node', # 'distances_from_previous_node', 'times_from_previous_node' # } # } response = 
get_route_between_two_bus_stops( starting_bus_stop=starting_bus_stop, ending_bus_stop=ending_bus_stop, starting_bus_stop_name=starting_bus_stop_name, ending_bus_stop_name=ending_bus_stop_name ) starting_bus_stop = response.get('starting_bus_stop') ending_bus_stop = response.get('ending_bus_stop') route = response.get('route') if route is not None: total_distance = route.get('total_distance') total_time = route.get('total_time') node_osm_ids = route.get('node_osm_ids') points = route.get('points') edges = route.get('edges') distances_from_starting_node = route.get('distances_from_starting_node') times_from_starting_node = route.get('times_from_starting_node') distances_from_previous_node = route.get('distances_from_previous_node') times_from_previous_node = route.get('times_from_previous_node') output = '\nstarting_bus_stop: ' + str(starting_bus_stop) + \ '\nending_bus_stop: ' + str(ending_bus_stop) + \ '\ntotal_distance: ' + str(total_distance) + \ '\ntotal_time: ' + str(total_time) + \ '\nnode_osm_ids: ' + str(node_osm_ids) + \ '\npoints: ' + str(points) + \ '\nedges: ' + str(edges) + \ '\ndistances_from_starting_node: ' + str(distances_from_starting_node) + \ '\ntimes_from_starting_node: ' + str(times_from_starting_node) + \ '\ndistances_from_previous_node: ' + str(distances_from_previous_node) + \ '\ntimes_from_previous_node: ' + str(times_from_previous_node) else: output = '\nstarting_bus_stop: ' + str(starting_bus_stop) + \ '\nending_bus_stop: ' + str(ending_bus_stop) + \ '\nroute: None' print output elapsed_time = time.time() - start_time time.sleep(0.1) log(module_name='route_generator_test', log_type='INFO', log_message='test_get_route_between_two_bus_stops: finished - elapsed_time = ' + str(elapsed_time) + ' sec') def test_get_route_between_multiple_bus_stops(bus_stops=None, bus_stop_names=None): """ :param bus_stops: [bus_stop_document] :param bus_stop_names: [string] """ log(module_name='route_generator_test', log_type='INFO', log_message='get_route_between_multiple_bus_stops: starting') start_time = time.time() route_distance = 0 route_traveling_time = 0 # response = [{ # 'starting_bus_stop': {'_id', 'osm_id', 'name', 'point': {'longitude', 'latitude'}}, # 'ending_bus_stop': {'_id', 'osm_id', 'name', 'point': {'longitude', 'latitude'}}, # 'route': { # 'total_distance', 'total_time', 'node_osm_ids', 'points', 'edges', # 'distances_from_starting_node', 'times_from_starting_node', # 'distances_from_previous_node', 'times_from_previous_node' # } # }] response = get_route_between_multiple_bus_stops( bus_stops=bus_stops, bus_stop_names=bus_stop_names ) for intermediate_response in response: starting_bus_stop = intermediate_response.get('starting_bus_stop') ending_bus_stop = intermediate_response.get('ending_bus_stop') intermediate_route = intermediate_response.get('route') if intermediate_route is not None: total_distance = intermediate_route.get('total_distance') route_distance += total_distance total_time = intermediate_route.get('total_time') route_traveling_time += total_time node_osm_ids = intermediate_route.get('node_osm_ids') points = intermediate_route.get('points') edges = intermediate_route.get('edges') distances_from_starting_node = intermediate_route.get('distances_from_starting_node') times_from_starting_node = intermediate_route.get('times_from_starting_node') distances_from_previous_node = intermediate_route.get('distances_from_previous_node') times_from_previous_node = intermediate_route.get('times_from_previous_node') output = '\nstarting_bus_stop: ' + 
str(starting_bus_stop) + \ '\nending_bus_stop: ' + str(ending_bus_stop) + \ '\ntotal_distance: ' + str(total_distance) + \ '\ntotal_time: ' + str(total_time) + \ '\nnode_osm_ids: ' + str(node_osm_ids) + \ '\npoints: ' + str(points) + \ '\nedges: ' + str(edges) + \ '\ndistances_from_starting_node: ' + str(distances_from_starting_node) + \ '\ntimes_from_starting_node: ' + str(times_from_starting_node) + \ '\ndistances_from_previous_node: ' + str(distances_from_previous_node) + \ '\ntimes_from_previous_node: ' + str(times_from_previous_node) else: output = '\nstarting_bus_stop: ' + str(starting_bus_stop) + \ '\nending_bus_stop: ' + str(ending_bus_stop) + \ '\nroute: None' print output route_average_speed = (route_distance / 1000) / (route_traveling_time / 3600) print '\nroute_distance: ' + str(route_distance / 1000) + \ ' - route_traveling_time: ' + str(route_traveling_time / 60) + \ ' - route_average_speed: ' + str(route_average_speed) elapsed_time = time.time() - start_time time.sleep(0.1) log(module_name='route_generator_test', log_type='INFO', log_message='test_get_route_between_multiple_bus_stops: finished - elapsed_time = ' + str(elapsed_time) + ' sec') def test_get_waypoints_between_two_bus_stops(starting_bus_stop=None, ending_bus_stop=None, starting_bus_stop_name=None, ending_bus_stop_name=None): """ :param starting_bus_stop: bus_stop_document :param ending_bus_stop: bus_stop_document :param starting_bus_stop_name: string :param ending_bus_stop_name: string """ log(module_name='route_generator_test', log_type='INFO', log_message='test_get_waypoints_between_two_bus_stops: starting') start_time = time.time() # response = { # 'starting_bus_stop': {'_id', 'osm_id', 'name', 'point': {'longitude', 'latitude'}}, # 'ending_bus_stop': {'_id', 'osm_id', 'name', 'point': {'longitude', 'latitude'}}, # 'waypoints': [[{ # '_id', 'starting_node': {'osm_id', 'point': {'longitude', 'latitude'}}, # 'ending_node': {'osm_id', 'point': {'longitude', 'latitude'}}, # 'max_speed', 'road_type', 'way_id', 'traffic_density' # }]] # } response = get_waypoints_between_two_bus_stops( starting_bus_stop=starting_bus_stop, ending_bus_stop=ending_bus_stop, starting_bus_stop_name=starting_bus_stop_name, ending_bus_stop_name=ending_bus_stop_name ) starting_bus_stop = response.get('starting_bus_stop') ending_bus_stop = response.get('ending_bus_stop') waypoints = response.get('waypoints') output = '\nstarting_bus_stop: ' + str(starting_bus_stop) + \ '\nending_bus_stop: ' + str(ending_bus_stop) print output for separate_waypoints in waypoints: print 'waypoints: ' + str(separate_waypoints) elapsed_time = time.time() - start_time time.sleep(0.1) log(module_name='route_generator_test', log_type='INFO', log_message='test_get_waypoints_between_two_bus_stops: finished - elapsed_time = ' + str(elapsed_time) + ' sec') def test_get_waypoints_between_multiple_bus_stops(bus_stops=None, bus_stop_names=None): """ :param bus_stops: [bus_stop_document] :param bus_stop_names: [string] """ log(module_name='route_generator_test', log_type='INFO', log_message='test_get_waypoints_between_multiple_bus_stops: starting') start_time = time.time() # response = [{ # 'starting_bus_stop': {'_id', 'osm_id', 'name', 'point': {'longitude', 'latitude'}}, # 'ending_bus_stop': {'_id', 'osm_id', 'name', 'point': {'longitude', 'latitude'}}, # 'waypoints': [[{ # '_id', 'starting_node': {'osm_id', 'point': {'longitude', 'latitude'}}, # 'ending_node': {'osm_id', 'point': {'longitude', 'latitude'}}, # 'max_speed', 'road_type', 'way_id', 'traffic_density' # }]] # 
}] response = get_waypoints_between_multiple_bus_stops( bus_stops=bus_stops, bus_stop_names=bus_stop_names ) for intermediate_response in response: starting_bus_stop = intermediate_response.get('starting_bus_stop') ending_bus_stop = intermediate_response.get('ending_bus_stop') waypoints = intermediate_response.get('waypoints') output = '\nstarting_bus_stop: ' + str(starting_bus_stop) + \ '\nending_bus_stop: ' + str(ending_bus_stop) print output for separate_waypoints in waypoints: print 'waypoints: ' + str(separate_waypoints) elapsed_time = time.time() - start_time time.sleep(0.1) log(module_name='route_generator_test', log_type='INFO', log_message='test_get_waypoints_between_multiple_bus_stops: finished - elapsed_time = ' + str(elapsed_time) + ' sec') if __name__ == '__main__': selection = '' while True: selection = raw_input( '\n0. exit' '\n1. test_get_route_between_two_bus_stops' '\n2. test_get_route_between_multiple_bus_stops' '\n3. test_get_waypoints_between_two_bus_stops' '\n4. test_get_waypoints_between_multiple_bus_stops' '\nSelection: ' ) if selection == '0': break elif selection == '1': test_get_route_between_two_bus_stops( starting_bus_stop_name=testing_bus_stop_names[0], ending_bus_stop_name=testing_bus_stop_names[1] ) elif selection == '2': test_get_route_between_multiple_bus_stops( bus_stop_names=testing_bus_stop_names ) elif selection == '3': test_get_waypoints_between_two_bus_stops( starting_bus_stop_name=testing_bus_stop_names[0], ending_bus_stop_name=testing_bus_stop_names[1] ) elif selection == '4': test_get_waypoints_between_multiple_bus_stops( bus_stop_names=testing_bus_stop_names ) else: print 'Invalid input'
1.335938
1
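The docstring of the route generator test above fixes the shape of bus_stop_document and of the get_route_between_two_bus_stops response. A minimal sketch of how a caller could drive the client with explicit stop documents instead of stop names follows; the ids, osm_ids and coordinates are invented placeholders, not values from the project.

    # Illustrative only: the stop documents below merely follow the schema in the docstring.
    from src.route_generator.route_generator_client import get_route_between_two_bus_stops

    starting_bus_stop = {
        '_id': None, 'osm_id': 1, 'name': 'stop_a',
        'point': {'longitude': 11.93, 'latitude': 57.70},
    }
    ending_bus_stop = {
        '_id': None, 'osm_id': 2, 'name': 'stop_b',
        'point': {'longitude': 11.95, 'latitude': 57.71},
    }

    response = get_route_between_two_bus_stops(
        starting_bus_stop=starting_bus_stop,
        ending_bus_stop=ending_bus_stop,
    )
    route = response.get('route')
    if route is not None:
        print(route.get('total_distance'), route.get('total_time'))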
preprocess.py
NNDEV1/NMTWithLuongAttention
4
1320
import os
import re
import time
import unicodedata

import contractions
import tensorflow as tf
import pandas as pd
import numpy as np
import rich
from rich.progress import track
import spacy

from config import params


# Preprocessing Text
class preprocess_text():

    def __init__(self):
        pass

    def remove_pattern(self, text, pattern=r'[^a-zA-Z0-9.!?, ]', replace_with=""):
        return re.sub(pattern, replace_with, text)

    def tokenize_sent(self, text, nlp):
        doc = nlp(text)
        return [sent.text for sent in doc.sents]

    def tokenize_words(self, text, nlp):
        doc = nlp(text)
        return " ".join(tok.text for tok in doc)

    def expand_contractions(self, text):
        return contractions.fix(text)

    def do_lemmatization(self, text, nlp):
        doc = nlp(text)
        return ' '.join(tok.lemma_ if tok.lemma_ != "-PRON-" else tok.text for tok in doc)

    def add_sos_eos(self, text, sos=False, eos=False):
        if (sos and eos):
            return "<sos> " + text + " <eos>"
        if eos:
            return text + " <eos>"
        if sos:
            return "<sos> " + text
        return text

    def remove_accents(self, text):
        return unicodedata.normalize('NFKD', text).encode('ascii', 'ignore').decode('UTF-8', 'ignore')


def call_preprocessing(df_col, nlp_en=True, lower_=True, remove_pattern_=False,
                       tokenize_words_=False, expand_contractions_=False,
                       do_lemmatization_=False, sos=False, eos=False, remove_accents_=False):
    nlp = spacy.load('en_core_web_sm') if nlp_en else spacy.load('de_core_news_sm')
    prep = preprocess_text()

    if expand_contractions_:
        df_col = df_col.map(lambda text: prep.expand_contractions(text))
    if remove_accents_:
        df_col = df_col.map(lambda text: prep.remove_accents(text))
    if do_lemmatization_:
        df_col = df_col.map(lambda text: prep.do_lemmatization(text, nlp))
    if tokenize_words_:
        df_col = df_col.map(lambda text: prep.tokenize_words(text, nlp))
    if remove_pattern_:
        df_col = df_col.map(lambda text: prep.remove_pattern(text))
    if eos or sos:
        df_col = df_col.map(lambda text: prep.add_sos_eos(text, sos, eos))
    if lower_:
        df_col = df_col.map(lambda text: text.lower())

    return df_col


def tokenizer(df_col, nlp_en=True):
    vocab = set()
    _ = [[vocab.update([tok]) for tok in text.split(" ")] for text in df_col]
    if not nlp_en:
        vocab.update(["<sos>"])
        vocab.update(["<eos>"])
    tokenize = dict(zip(vocab, range(1, 1 + len(vocab))))
    detokenize = dict(zip(range(1, 1 + len(vocab)), vocab))
    return tokenize, detokenize, len(vocab)


def padding(txt_toks, max_len):
    curr_ls = txt_toks.split(" ")
    len_ls = len(curr_ls)
    _ = [curr_ls.append("<pad>") for i in range(max_len - len_ls) if len(curr_ls) < max_len]
    return " ".join(curr_ls)


def make_minibatches(df, col1='rev_eng_tok', col2='teach_force_tok', col3='target_tok'):
    enc_seq = np.array([df[col1].values[i] for i in range(len(df[col1]))])
    enc_seq = tf.data.Dataset.from_tensor_slices(enc_seq).batch(params.batch_size)

    teach_force_seq = np.array([df[col2].values[i] for i in range(len(df[col2]))])
    teach_force_seq = tf.data.Dataset.from_tensor_slices(teach_force_seq).batch(params.batch_size)

    y = np.array([df[col3].values[i] for i in range(len(df[col3]))])
    y = tf.data.Dataset.from_tensor_slices(y).batch(params.batch_size)

    return enc_seq, teach_force_seq, y
1.90625
2
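A small usage sketch for the tokenizer and padding helpers defined in preprocess.py above. The two toy sentences are made up, and the spacy-dependent call_preprocessing step is skipped so the snippet stays self-contained; mapping "<pad>" to 0 via dict.get is an illustration choice, not part of the original module.

    import pandas as pd

    # Hypothetical toy corpus; real data would normally pass through call_preprocessing first.
    df_col = pd.Series(["<sos> i like tea <eos>", "<sos> i like green tea <eos>"])

    tokenize, detokenize, vocab_size = tokenizer(df_col, nlp_en=True)
    padded = df_col.map(lambda text: padding(text, max_len=8))
    # "<pad>" is not in the vocabulary built by tokenizer(), so fall back to 0 here.
    encoded = padded.map(lambda text: [tokenize.get(tok, 0) for tok in text.split(" ")])
    print(vocab_size, encoded[0])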
pymc/mc_enum.py
cherish-web/pymc
4
1328
# _*_ coding: utf-8 _*_
# @Time : 2021/3/29 08:57 AM
# @Author : cherish_peng
# @Email : <EMAIL>
# @File : cmd.py
# @Software : PyCharm

from enum import Enum


class EnumSubTitle(Enum):
    Request4e = 0x5400
    # Request
    Request = 0x5000
    # Response
    Respond = 0xD000
    Respond4e = 0xD400


class EnumEndCode(Enum):
    # Normal response
    Ok = 0x0000
    # Error response
    Err = 0x51C0


class EnumCmd(Enum):
    # Batch read
    ReadBatch = 0x0401
    # Batch write
    WriteBatch = 0x1401


class EnumSubCmd(Enum):
    # With memory extension module, b7=0, b6=0: other than random read / monitor data registration
    # Read/write by bit
    Bit = 0x0001
    # Read/write by word
    Word = 0x0000
    # With memory extension module, b7=1, b6=0: other than random read / monitor data registration
    # Read/write by bit
    BitEx = 0x0081
    # Read/write by word
    WordEx = 0x0080


class EnumType(Enum):
    # Bit type
    Bit = 0
    # Word type
    Word = 1
1.695313
2
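These enums are plain value maps for MC-protocol frame fields; a trivial illustration of reading the numeric codes back out, assuming the module is importable as pymc.mc_enum:

    from pymc.mc_enum import EnumSubTitle, EnumCmd, EnumSubCmd

    # Prints 0x5400, 0x401 and 0x0 respectively.
    print(hex(EnumSubTitle.Request4e.value))
    print(hex(EnumCmd.ReadBatch.value))
    print(hex(EnumSubCmd.Word.value))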
images/forms.py
mpgarate/OST-fauxra
1
1336
from django import forms
from django.forms import ModelForm

from images.models import Image


class ImageForm(ModelForm):
    class Meta:
        model = Image
0.691406
1
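Typical Django usage of such a ModelForm in a view, sketched with a hypothetical view function, URL name and template path; note that newer Django versions additionally require fields or exclude on the Meta class.

    from django.shortcuts import redirect, render

    def upload_image(request):
        if request.method == 'POST':
            # Bind posted data (and any uploaded file) to the form, validate, then save the model.
            form = ImageForm(request.POST, request.FILES)
            if form.is_valid():
                form.save()
                return redirect('image_list')  # hypothetical URL name
        else:
            form = ImageForm()
        return render(request, 'images/upload.html', {'form': form})  # hypothetical template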
tree/list/BinaryNode.py
EliHar/BinaryTree-ADT
0
1344
__author__ = '<NAME>'


class BinaryNode(object):

    def __init__(self, data, left, right):
        self.data = data
        self.left = left
        self.right = right

    def getData(self):
        return self.data

    def getLeft(self):
        return self.left

    def getRight(self):
        return self.right

    def setData(self, data):
        self.data = data

    def setLeft(self, aNode):
        self.left = aNode

    def setRight(self, aNode):
        self.right = aNode

    def hasLeft(self):
        return self.getLeft() is not None

    def hasRight(self):
        return self.getRight() is not None

    def isLeaf(self):
        return not(self.hasLeft() | self.hasRight())
2.390625
2
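A tiny sketch of wiring three BinaryNode instances into a tree and exercising the accessors; the data values are arbitrary examples.

    left = BinaryNode(1, None, None)
    right = BinaryNode(3, None, None)
    root = BinaryNode(2, left, right)

    print(root.hasLeft())             # True
    print(root.isLeaf())              # False
    print(left.isLeaf())              # True
    print(root.getRight().getData())  # 3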
modules/gitbox/files/asfgit/hooks/sync.py
Humbedooh/infrastructure-puppet
1
1360
#!/usr/local/bin/python

import json
import socket
import sys

import asfgit.cfg as cfg
import asfgit.git as git
import asfgit.log as log
import asfgit.util as util

import subprocess, os, time


def main():
    ghurl = "git@github:apache/%s.git" % cfg.repo_name
    os.chdir("/x1/repos/asf/%s.git" % cfg.repo_name)
    try:
        for ref in git.stream_refs(sys.stdin):
            if ref.is_rewrite():
                print("Syncing %s (FORCED)..." % ref.name)
                subprocess.check_call(["git", "push", "-f", ghurl, "%s:%s" % (ref.newsha, ref.name)])
            else:
                print("Syncing %s..." % ref.name)
                subprocess.check_call(["git", "push", ghurl, "%s:%s" % (ref.newsha, ref.name)])
    except subprocess.CalledProcessError as err:
        util.abort("Could not sync with GitHub: %s" % err.output)
1.25
1
cmdb-compliance/biz/handlers/asset_hipaa_data.py
zjj1002/aws-cloud-cmdb-system
0
1368
from sqlalchemy import or_
from websdk.db_context import DBContext

from libs.base_handler import BaseHandler
from libs.pagination import pagination_util
from models.hipaa_data import HipaaData, model_to_dict


class HipaaDataHandler(BaseHandler):

    @pagination_util
    def get(self, *args, **kwargs):
        key = self.get_argument('key', default=None, strip=True)
        hipaa_data_list = []
        with DBContext('r') as session:
            if key:
                # Fuzzy match across all searchable columns
                hipaa_data_info = session.query(HipaaData).filter(
                    or_(HipaaData.profile.like('%{}%'.format(key)),
                        HipaaData.result.like('%{}%'.format(key)),
                        HipaaData.level.like('%{}%'.format(key)),
                        HipaaData.region.like('%{}%'.format(key)),
                        HipaaData.account_id.like('%{}%'.format(key)),
                        HipaaData.group.like('%{}%'.format(key)),
                        HipaaData.check_title.like('%{}%'.format(key)),
                        HipaaData.check_output.like('%{}%'.format(key)))
                ).filter(
                    HipaaData.result != "PASS"
                ).all()
            else:
                hipaa_data_info = session.query(HipaaData).filter(
                    HipaaData.result != "PASS"
                ).all()

            for data in hipaa_data_info:
                data_dict = model_to_dict(data)
                hipaa_data_list.append(data_dict)

        return hipaa_data_list


hipaa_data_host_urls = [
    (r"/v1/cmdb/hipaa_data/", HipaaDataHandler),
]

if __name__ == '__main__':
    pass
1.226563
1
examples/nn_cudamat.py
cloudspectatordevelopment/cudamat
526
1376
# This file shows how to implement a single hidden layer neural network for
# performing binary classification on the GPU using cudamat.

from __future__ import division

import pdb
import time

import numpy as np
import cudamat as cm
from cudamat import learn as cl
import util

# initialize CUDA
cm.cublas_init()

# load data
util.load('mnist49.dat', globals())

# Put training data onto the GPU.
dat_train = dat_train/255.
dat_train = dat_train - (np.mean(dat_train, 1)+10**-8)[:, np.newaxis]
dev_train = cm.CUDAMatrix(dat_train)
dev_lbl = cm.CUDAMatrix(lbl_train)

# training parameters
epsilon = 0.01
momentum = 0.9

num_epochs = 30
batch_size = 128
num_batches = dat_train.shape[1]//batch_size

# model parameters
dim_in = dat_train.shape[0]
dim_out = 1
num_hid = 1024

# initialize weights
w_w1 = cm.CUDAMatrix(dim_in ** -0.5 * np.random.randn(dim_in, num_hid))
w_b1 = cm.CUDAMatrix(np.zeros((num_hid, 1)))
w_w2 = cm.CUDAMatrix(num_hid ** -0.5 * np.random.randn(num_hid, dim_out))
w_b2 = cm.CUDAMatrix(np.zeros((dim_out, 1)))

# initialize weight update matrices
wu_w1 = cm.empty(w_w1.shape).assign(0)
wu_b1 = cm.empty(w_b1.shape).assign(0)
wu_w2 = cm.empty(w_w2.shape).assign(0)
wu_b2 = cm.empty(w_b2.shape).assign(0)

# initialize temporary storage
h = cm.empty((num_hid, batch_size))
out = cm.empty((dim_out, batch_size))
delta = cm.empty((num_hid, batch_size))

# Train neural network.
start_time = time.time()
for epoch in range(num_epochs):
    print("Epoch %i" % (epoch + 1))
    err = []

    for batch in range(num_batches):
        # get current minibatch
        inp = dev_train.slice(batch*batch_size,(batch + 1)*batch_size)
        target = dev_lbl.slice(batch*batch_size,(batch + 1)*batch_size)

        # forward pass
        cm.dot(w_w1.T, inp, target = h)

        h.add_col_vec(w_b1)
        h.apply_sigmoid()

        cm.dot(w_w2.T, h, target = out)

        out.add_col_vec(w_b2)
        out.apply_sigmoid()

        # back prop errors
        out.subtract(target)  # compute error

        # gradients for w_w2 and w_b2
        wu_w2.add_dot(h, out.T, beta = momentum)
        wu_b2.add_sums(out, axis = 1, beta = momentum)

        # compute delta
        cm.dot(w_w2, out, target = delta)

        # delta = delta * h * (1 - h)
        cl.mult_by_sigmoid_deriv(delta, h)

        # gradients for w_w1 and w_b1
        wu_w1.add_dot(inp, delta.T, beta = momentum)
        wu_b1.add_sums(delta, axis = 1, beta = momentum)

        # update weights
        w_w1.subtract_mult(wu_w1, epsilon/batch_size)
        w_b1.subtract_mult(wu_b1, epsilon/batch_size)
        w_w2.subtract_mult(wu_w2, epsilon/batch_size)
        w_b2.subtract_mult(wu_b2, epsilon/batch_size)

        # calculate error on current minibatch
        err.append(np.abs(out.asarray())>0.5)

    print("Training misclassification rate: %f" % np.mean(err))
    print("Time: %f" % (time.time() - start_time))

# Evaluate neural network on test data.

# Load test data onto the GPU.
dat_test = dat_test/255.
dat_test = dat_test - np.mean(dat_test, 1)[:, np.newaxis]
dev_test = cm.CUDAMatrix(dat_test)
dev_lbl = cm.CUDAMatrix(lbl_test)

# Initialize temporary storage.
h = cm.empty((num_hid, dat_test.shape[1]))
out = cm.empty((dim_out, dat_test.shape[1]))

# forward pass
cm.dot(w_w1.T, dev_test, target = h)

h.add_col_vec(w_b1)
h.apply_sigmoid()

cm.dot(w_w2.T, h, target = out)

out.add_col_vec(w_b2)
out.apply_sigmoid()

# compute error
out.subtract(dev_lbl)

print("Testing misclassification rate: %f" % np.mean(np.abs(out.asarray())>0.5))

cm.cublas_shutdown()
2.484375
2
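The backprop step in nn_cudamat.py leans on cudamat's in-place API; for reference, a plain-NumPy sketch of the same hidden-layer delta computation (shapes follow the script: h is num_hid x batch, and out holds the output error after out.subtract(target)). This is an equivalent-math illustration, not part of the cudamat example itself.

    import numpy as np

    # NumPy equivalent of: cm.dot(w_w2, out, target=delta); cl.mult_by_sigmoid_deriv(delta, h)
    def hidden_delta(w_w2, out_err, h):
        delta = np.dot(w_w2, out_err)   # (num_hid, dim_out) @ (dim_out, batch) -> (num_hid, batch)
        return delta * h * (1.0 - h)    # multiply by the sigmoid derivative elementwise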
fts/fluxrss.py
AetherBlack/Veille-Informatique
0
1400
#!/usr/bin/python3 from urllib.parse import urlparse import feedparser import requests import asyncio import discord import hashlib import os from const import CHANNEL_RSS, WAIT_UNTIL_NEW_CHECK, \ SQLITE_FOLDER_NAME, SQLITE_FILE_NAME from fts.database import Database from fts.cleandatabase import CleanDatabase class FluxRSS: """ Class of FluxRSS. Get news of the feedrss url parse in args. """ def __init__(self, bot, cwd): """ Initialize class @param => DiscordBot: `bot`: Discord Bot Instance. @param => str: `cwd`: Current Working Directory of main.py file. """ # Discord self.bot = bot self.bot_username = self.bot.user.name self.rss_channel = self.bot.get_channel(CHANNEL_RSS) # Path self.cwd = cwd # Database self.db_path = os.path.join(self.cwd, SQLITE_FOLDER_NAME) self.database = Database(self.db_path, SQLITE_FILE_NAME) def get_news(self, url): """ Get the news of the rss feed. @param => str: `url`: url of the rss feed. Return dict with an int index key and title, description and link in a list for the value. """ dict_news = dict() # Get the content of the requests content = requests.get(url).text # Parse the content parser = feedparser.parse(content) # Set the root parser = parser["entries"] # Get the number of news news_number = len(parser) # Construct the dict for index in range(news_number): # Get the title title = parser[index]["title"] # Get the description description = parser[index]["description"] # Get the link link = parser[index]["links"][0]["href"] # Set list args = [ title, description, link ] # Add the list to the dict dict_news[str(index)] = args # Return the dict return dict_news def is_new(self, root, name, title, description, link): """ Return True if the news in the feed is new. @param => str: `title`: Title of the news. @param => str: `description`: Description of the news. @param => str: `link`: Link of the rss feed. """ # Hash description hash_description = hashlib.sha256(bytes(description, "utf-8", errors="ignore")).hexdigest() # Return the check of the query return not self.database.isNewsExists(root, name, title, hash_description, link) def embeded_msg(self, root, name, title, content, link, color): """ Create the embeded message and send it to discord. @param => str: `root`: Name of the Website. @param => str: `name`: Name set in const. Categorie of the news @param => str: `title`: Title of the news. @param => str: `content`: Content description of the news. @param => str: `link`: Link of the news. @param => discord.Color: `color`: Color for the left panel. """ # Set the Name, description and color on the left news = discord.Embed(title="{0} - {1}".format(root, name), description="News :", color=(color or 0x00ff00)) #Set bot name and profil picture news.set_author(name=self.bot_username, icon_url=self.bot.user.avatar_url) #Set the description and the link for the main message content = content + "\n" + link news.add_field(name=title, value=content[:1024], inline=False) #Show the bot username in footer news.set_footer(text="Generate by @{0}".format(self.bot_username)) # Return the final Discord embeded message return news async def feedrss(self, json_rss): """ Get the news and send it to the channel. @param => dict: `json_rss`: JSON data of the RSS Flux. 
""" # Show const for the format self.json_rss = json_rss # While the connection is not closed while not self.bot.is_closed(): # For each key for key, sections in self.json_rss.items(): # Get the root name set in const root = key # For each sections for index_section, section in enumerate(sections): # Check customization of the section if "custom" in section.keys(): # Check color if "color" in section["custom"].keys(): color = getattr(discord.Color, section["custom"]["color"])() else: color = False else: color = False # Get the name of the section name = section["name"] # Get the time until the cleaning of the database for the root and name given wait_time = section["clean"] # Check if the cleaning database is already launched if isinstance(wait_time, str): # Launch the function to clean the database Thread = CleanDatabase(root, name, wait_time, self.db_path, SQLITE_FILE_NAME) Thread.start() # Change the variable type of the clean line in json_rss to launch relaunch the requests self.json_rss[root][index_section]["clean"] = True # For each link in the section for link in section["link"]: # Get title, description and link in a dict dict_news = self.get_news(link) # Verify if the news already exists for value in dict_news.values(): # Get title title = value[0] # Get description description = value[1] # Get link link = value[2] # Check if the news is new if self.is_new(root, name, title, description, link): # Hash the description hash_description = hashlib.sha256(bytes(description, "utf-8", errors="ignore")).hexdigest() # write the news into the database self.database.AddNews(root, name, title, hash_description, link) #Create the discord message message = self.embeded_msg(root, name, title, description, link, color) #Send to discord await self.rss_channel.send(embed=message) # Wait until the next verification await asyncio.sleep(WAIT_UNTIL_NEW_CHECK)
2.15625
2
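FluxRSS.feedrss walks a JSON structure whose shape is only implied by the dictionary accesses in the code (root key, then a list of sections with name, clean, link and an optional custom.color). A configuration matching those accesses would look roughly like this; the site name, section name, interval string and URL are invented examples.

    json_rss = {
        "ExampleSite": [                      # root key: the website name
            {
                "name": "Security",           # section / category label
                "clean": "24h",               # a string here triggers the CleanDatabase thread once
                "link": ["https://example.com/feed.xml"],
                "custom": {"color": "red"},   # optional; must name a discord.Color classmethod
            }
        ]
    }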
kafka-rockset-integration/generate_customers_data.py
farkaskid/recipes
21
1416
"""Generate Customer Data""" import csv import random from config import MIN_CUSTOMER_ID, MAX_CUSTOMER_ID ACQUISITION_SOURCES = [ 'OrganicSearch', 'PaidSearch', 'Email', 'SocialMedia', 'Display', 'Affiliate' 'Referral' ] def main(): with open('customers.csv', 'w') as fout: writer = csv.DictWriter(fout, fieldnames=['CustomerID', 'AcquisitionSource']) writer.writeheader() for customer_id in range(MIN_CUSTOMER_ID, MAX_CUSTOMER_ID + 1): record = { 'CustomerID': int(customer_id), 'AcquisitionSource': random.choices(ACQUISITION_SOURCES).pop() } writer.writerow(record) if __name__ == '__main__': main()
2.5
2
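Reading the generated file back is symmetric; a short sketch that consumes customers.csv with the two columns written above (the aggregation step is left as a comment).

    import csv
    from collections import Counter

    counts = Counter()
    with open('customers.csv') as fin:
        for row in csv.DictReader(fin):
            customer_id = int(row['CustomerID'])
            counts[row['AcquisitionSource']] += 1
    # e.g. inspect how many customers came from each acquisition source
    print(counts)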
borze.py
AmitHasanShuvo/Programming
8
1424
a = input()
a = a.replace('--', '2')
a = a.replace('-.', '1')
a = a.replace('.', '0')
print(a)
1.882813
2
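The three replacements decode Borze ternary ('--' -> 2, '-.' -> 1, '.' -> 0); replacing '--' first matters, because otherwise '-.' would consume the first dash of a '--' pair. A small check written as a function, with worked examples:

    def decode_borze(code):
        # Same replacement order as above: the two-dash token first.
        return code.replace('--', '2').replace('-.', '1').replace('.', '0')

    assert decode_borze('.--.') == '020'
    assert decode_borze('--.') == '20'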
fanscribed/apps/transcripts/tests/test_transcripts.py
fanscribed/fanscribed
8
1440
from decimal import Decimal
import os

from django.test import TestCase
from unipath import Path

from ....utils import refresh
from ...media import tests
from ..models import Transcript, TranscriptMedia


MEDIA_TESTDATA_PATH = Path(tests.__file__).parent.child('testdata')

RAW_MEDIA_PATH = MEDIA_TESTDATA_PATH.child('raw').child(
    'NA-472-2012-12-23-Final-excerpt.mp3').absolute()


class TranscriptsTestCase(TestCase):

    def test_transcript_starts_out_with_unknown_length(self):
        transcript = Transcript.objects.create(title='test')
        self.assertEqual(transcript.length, None)

    def test_setting_transcript_length_creates_fragments_and_stitches(self):
        t = Transcript.objects.create(title='test')
        t.set_length('3.33')
        f0, = t.fragments.all()
        self.assertEqual(f0.start, Decimal('0.00'))
        self.assertEqual(f0.end, Decimal('3.33'))
        self.assertEqual(t.stitches.count(), 0)

        t = Transcript.objects.create(title='test')
        t.set_length('7.77')
        f0, = t.fragments.all()
        self.assertEqual(f0.start, Decimal('0.00'))
        self.assertEqual(f0.end, Decimal('7.77'))
        self.assertEqual(t.stitches.count(), 0)

        t = Transcript.objects.create(title='test')
        t.set_length('17.77')
        f0, f1, f2 = t.fragments.all()
        self.assertEqual(f0.start, Decimal('0.00'))
        self.assertEqual(f0.end, Decimal('5.00'))
        self.assertEqual(f1.start, Decimal('5.00'))
        self.assertEqual(f1.end, Decimal('10.00'))
        self.assertEqual(f2.start, Decimal('10.00'))
        self.assertEqual(f2.end, Decimal('17.77'))
        s0, s1 = t.stitches.all()
        self.assertEqual(s0.left, f0)
        self.assertEqual(s0.right, f1)
        self.assertEqual(s0.state, 'notready')
        self.assertEqual(s1.left, f1)
        self.assertEqual(s1.right, f2)
        self.assertEqual(s1.state, 'notready')


if os.environ.get('FAST_TEST') != '1':

    from django.core.files import File

    class SlowTranscriptsTestCase(TestCase):

        def test_transcript_with_processed_media_has_length(self):
            transcript = Transcript.objects.create(
                title='test transcript',
            )
            raw_media = TranscriptMedia(
                transcript=transcript,
                is_processed=False,
                is_full_length=True,
            )
            with open(RAW_MEDIA_PATH, 'rb') as f:
                raw_media.file.save('{transcript.id}_raw.mp3'.format(**locals()), File(f))
            raw_media.save()

            # Process raw media.
            raw_media.create_processed_task()
            transcript = refresh(transcript)

            # Check length.
            expected_length = 5 * 60  # 5 minutes.
            self.assertAlmostEqual(
                transcript.length, expected_length, delta=0.2)
1.320313
1
tests/test_masked_inference_wsi_dataset.py
HabibMrad/MONAI
1
1456
import os import unittest from unittest import skipUnless import numpy as np from numpy.testing import assert_array_equal from parameterized import parameterized from monai.apps.pathology.datasets import MaskedInferenceWSIDataset from monai.apps.utils import download_url from monai.utils import optional_import from tests.utils import skip_if_quick _, has_cim = optional_import("cucim") _, has_osl = optional_import("openslide") FILE_URL = "http://openslide.cs.cmu.edu/download/openslide-testdata/Generic-TIFF/CMU-1.tiff" FILE_PATH = os.path.join(os.path.dirname(__file__), "testing_data", os.path.basename(FILE_URL)) MASK1 = os.path.join(os.path.dirname(__file__), "testing_data", "tissue_mask1.npy") MASK2 = os.path.join(os.path.dirname(__file__), "testing_data", "tissue_mask2.npy") MASK4 = os.path.join(os.path.dirname(__file__), "testing_data", "tissue_mask4.npy") HEIGHT = 32914 WIDTH = 46000 def prepare_data(): mask = np.zeros((WIDTH // 2, HEIGHT // 2)) mask[100, 100] = 1 np.save(MASK1, mask) mask[100, 100:102] = 1 np.save(MASK2, mask) mask[100:102, 100:102] = 1 np.save(MASK4, mask) TEST_CASE_0 = [ { "data": [ {"image": FILE_PATH, "mask": MASK1}, ], "patch_size": 1, "image_reader_name": "cuCIM", }, [ { "image": np.array([[[243]], [[243]], [[243]]], dtype=np.uint8), "name": "CMU-1", "mask_location": [100, 100], }, ], ] TEST_CASE_1 = [ { "data": [{"image": FILE_PATH, "mask": MASK2}], "patch_size": 1, "image_reader_name": "cuCIM", }, [ { "image": np.array([[[243]], [[243]], [[243]]], dtype=np.uint8), "name": "CMU-1", "mask_location": [100, 100], }, { "image": np.array([[[243]], [[243]], [[243]]], dtype=np.uint8), "name": "CMU-1", "mask_location": [101, 100], }, ], ] TEST_CASE_2 = [ { "data": [{"image": FILE_PATH, "mask": MASK4}], "patch_size": 1, "image_reader_name": "cuCIM", }, [ { "image": np.array([[[243]], [[243]], [[243]]], dtype=np.uint8), "name": "CMU-1", "mask_location": [100, 100], }, { "image": np.array([[[243]], [[243]], [[243]]], dtype=np.uint8), "name": "CMU-1", "mask_location": [100, 101], }, { "image": np.array([[[243]], [[243]], [[243]]], dtype=np.uint8), "name": "CMU-1", "mask_location": [101, 100], }, { "image": np.array([[[243]], [[243]], [[243]]], dtype=np.uint8), "name": "CMU-1", "mask_location": [101, 101], }, ], ] TEST_CASE_3 = [ { "data": [ {"image": FILE_PATH, "mask": MASK1}, ], "patch_size": 2, "image_reader_name": "cuCIM", }, [ { "image": np.array( [ [[243, 243], [243, 243]], [[243, 243], [243, 243]], [[243, 243], [243, 243]], ], dtype=np.uint8, ), "name": "CMU-1", "mask_location": [100, 100], }, ], ] TEST_CASE_4 = [ { "data": [ {"image": FILE_PATH, "mask": MASK1}, {"image": FILE_PATH, "mask": MASK2}, ], "patch_size": 1, "image_reader_name": "cuCIM", }, [ { "image": np.array([[[243]], [[243]], [[243]]], dtype=np.uint8), "name": "CMU-1", "mask_location": [100, 100], }, { "image": np.array([[[243]], [[243]], [[243]]], dtype=np.uint8), "name": "CMU-1", "mask_location": [100, 100], }, { "image": np.array([[[243]], [[243]], [[243]]], dtype=np.uint8), "name": "CMU-1", "mask_location": [101, 100], }, ], ] TEST_CASE_OPENSLIDE_0 = [ { "data": [ {"image": FILE_PATH, "mask": MASK1}, ], "patch_size": 1, "image_reader_name": "OpenSlide", }, [ { "image": np.array([[[243]], [[243]], [[243]]], dtype=np.uint8), "name": "CMU-1", "mask_location": [100, 100], }, ], ] TEST_CASE_OPENSLIDE_1 = [ { "data": [{"image": FILE_PATH, "mask": MASK2}], "patch_size": 1, "image_reader_name": "OpenSlide", }, [ { "image": np.array([[[243]], [[243]], [[243]]], dtype=np.uint8), "name": "CMU-1", 
"mask_location": [100, 100], }, { "image": np.array([[[243]], [[243]], [[243]]], dtype=np.uint8), "name": "CMU-1", "mask_location": [101, 100], }, ], ] class TestMaskedInferenceWSIDataset(unittest.TestCase): def setUp(self): prepare_data() download_url(FILE_URL, FILE_PATH, "5a3cfd4fd725c50578ddb80b517b759f") @parameterized.expand( [ TEST_CASE_0, TEST_CASE_1, TEST_CASE_2, TEST_CASE_3, TEST_CASE_4, ] ) @skipUnless(has_cim, "Requires CuCIM") @skip_if_quick def test_read_patches_cucim(self, input_parameters, expected): dataset = MaskedInferenceWSIDataset(**input_parameters) self.compare_samples_expected(dataset, expected) @parameterized.expand( [ TEST_CASE_OPENSLIDE_0, TEST_CASE_OPENSLIDE_1, ] ) @skipUnless(has_osl, "Requires OpenSlide") @skip_if_quick def test_read_patches_openslide(self, input_parameters, expected): dataset = MaskedInferenceWSIDataset(**input_parameters) self.compare_samples_expected(dataset, expected) def compare_samples_expected(self, dataset, expected): for i in range(len(dataset)): self.assertTupleEqual(dataset[i][0]["image"].shape, expected[i]["image"].shape) self.assertIsNone(assert_array_equal(dataset[i][0]["image"], expected[i]["image"])) self.assertEqual(dataset[i][0]["name"], expected[i]["name"]) self.assertListEqual(dataset[i][0]["mask_location"], expected[i]["mask_location"]) if __name__ == "__main__": unittest.main()
1.476563
1
elastalert/alerts.py
dekhrekh/elastalert
0
1472
# -*- coding: utf-8 -*- import copy import datetime import json import logging import subprocess import sys import warnings from email.mime.text import MIMEText from email.utils import formatdate from smtplib import SMTP from smtplib import SMTP_SSL from smtplib import SMTPAuthenticationError from smtplib import SMTPException from socket import error import boto3 import requests import stomp from exotel import Exotel from jira.client import JIRA from jira.exceptions import JIRAError from requests.exceptions import RequestException from staticconf.loader import yaml_loader from texttable import Texttable from twilio.base.exceptions import TwilioRestException from twilio.rest import Client as TwilioClient from util import EAException from util import elastalert_logger from util import lookup_es_key from util import pretty_ts from util import ts_now from util import ts_to_dt class DateTimeEncoder(json.JSONEncoder): def default(self, obj): if hasattr(obj, 'isoformat'): return obj.isoformat() else: return json.JSONEncoder.default(self, obj) class BasicMatchString(object): """ Creates a string containing fields in match for the given rule. """ def __init__(self, rule, match): self.rule = rule self.match = match def _ensure_new_line(self): while self.text[-2:] != '\n\n': self.text += '\n' def _add_custom_alert_text(self): missing = '<MISSING VALUE>' alert_text = unicode(self.rule.get('alert_text', '')) if 'alert_text_args' in self.rule: alert_text_args = self.rule.get('alert_text_args') alert_text_values = [lookup_es_key(self.match, arg) for arg in alert_text_args] # Support referencing other top-level rule properties # This technically may not work if there is a top-level rule property with the same name # as an es result key, since it would have been matched in the lookup_es_key call above for i in xrange(len(alert_text_values)): if alert_text_values[i] is None: alert_value = self.rule.get(alert_text_args[i]) if alert_value: alert_text_values[i] = alert_value alert_text_values = [missing if val is None else val for val in alert_text_values] alert_text = alert_text.format(*alert_text_values) elif 'alert_text_kw' in self.rule: kw = {} for name, kw_name in self.rule.get('alert_text_kw').items(): val = lookup_es_key(self.match, name) # Support referencing other top-level rule properties # This technically may not work if there is a top-level rule property with the same name # as an es result key, since it would have been matched in the lookup_es_key call above if val is None: val = self.rule.get(name) kw[kw_name] = missing if val is None else val alert_text = alert_text.format(**kw) self.text += alert_text def _add_rule_text(self): self.text += self.rule['type'].get_match_str(self.match) def _add_top_counts(self): for key, counts in self.match.items(): if key.startswith('top_events_'): self.text += '%s:\n' % (key[11:]) top_events = counts.items() if not top_events: self.text += 'No events found.\n' else: top_events.sort(key=lambda x: x[1], reverse=True) for term, count in top_events: self.text += '%s: %s\n' % (term, count) self.text += '\n' def _add_match_items(self): match_items = self.match.items() match_items.sort(key=lambda x: x[0]) for key, value in match_items: if key.startswith('top_events_'): continue value_str = unicode(value) value_str.replace('\\n', '\n') if type(value) in [list, dict]: try: value_str = self._pretty_print_as_json(value) except TypeError: # Non serializable object, fallback to str pass self.text += '%s: %s\n' % (key, value_str) def _pretty_print_as_json(self, blob): try: 
return json.dumps(blob, cls=DateTimeEncoder, sort_keys=True, indent=4, ensure_ascii=False) except UnicodeDecodeError: # This blob contains non-unicode, so lets pretend it's Latin-1 to show something return json.dumps(blob, cls=DateTimeEncoder, sort_keys=True, indent=4, encoding='Latin-1', ensure_ascii=False) def __str__(self): self.text = '' if 'alert_text' not in self.rule: self.text += self.rule['name'] + '\n\n' self._add_custom_alert_text() self._ensure_new_line() if self.rule.get('alert_text_type') != 'alert_text_only': self._add_rule_text() self._ensure_new_line() if self.rule.get('top_count_keys'): self._add_top_counts() if self.rule.get('alert_text_type') != 'exclude_fields': self._add_match_items() return self.text class JiraFormattedMatchString(BasicMatchString): def _add_match_items(self): match_items = dict([(x, y) for x, y in self.match.items() if not x.startswith('top_events_')]) json_blob = self._pretty_print_as_json(match_items) preformatted_text = u'{{code:json}}{0}{{code}}'.format(json_blob) self.text += preformatted_text class Alerter(object): """ Base class for types of alerts. :param rule: The rule configuration. """ required_options = frozenset([]) def __init__(self, rule): self.rule = rule # pipeline object is created by ElastAlerter.send_alert() # and attached to each alerters used by a rule before calling alert() self.pipeline = None self.resolve_rule_references(self.rule) def resolve_rule_references(self, root): # Support referencing other top-level rule properties to avoid redundant copy/paste if type(root) == list: # Make a copy since we may be modifying the contents of the structure we're walking for i, item in enumerate(copy.copy(root)): if type(item) == dict or type(item) == list: self.resolve_rule_references(root[i]) else: root[i] = self.resolve_rule_reference(item) elif type(root) == dict: # Make a copy since we may be modifying the contents of the structure we're walking for key, value in root.copy().iteritems(): if type(value) == dict or type(value) == list: self.resolve_rule_references(root[key]) else: root[key] = self.resolve_rule_reference(value) def resolve_rule_reference(self, value): strValue = unicode(value) if strValue.startswith('$') and strValue.endswith('$') and strValue[1:-1] in self.rule: if type(value) == int: return int(self.rule[strValue[1:-1]]) else: return self.rule[strValue[1:-1]] else: return value def alert(self, match): """ Send an alert. Match is a dictionary of information about the alert. :param match: A dictionary of relevant information to the alert. """ raise NotImplementedError() def get_info(self): """ Returns a dictionary of data related to this alert. At minimum, this should contain a field type corresponding to the type of Alerter. """ return {'type': 'Unknown'} def create_title(self, matches): """ Creates custom alert title to be used, e.g. as an e-mail subject or JIRA issue summary. :param matches: A list of dictionaries of relevant information to the alert. 
""" if 'alert_subject' in self.rule: return self.create_custom_title(matches) return self.create_default_title(matches) def create_custom_title(self, matches): alert_subject = unicode(self.rule['alert_subject']) if 'alert_subject_args' in self.rule: alert_subject_args = self.rule['alert_subject_args'] alert_subject_values = [lookup_es_key(matches[0], arg) for arg in alert_subject_args] # Support referencing other top-level rule properties # This technically may not work if there is a top-level rule property with the same name # as an es result key, since it would have been matched in the lookup_es_key call above for i in xrange(len(alert_subject_values)): if alert_subject_values[i] is None: alert_value = self.rule.get(alert_subject_args[i]) if alert_value: alert_subject_values[i] = alert_value alert_subject_values = ['<MISSING VALUE>' if val is None else val for val in alert_subject_values] return alert_subject.format(*alert_subject_values) return alert_subject def create_alert_body(self, matches): body = self.get_aggregation_summary_text(matches) for match in matches: body += unicode(BasicMatchString(self.rule, match)) # Separate text of aggregated alerts with dashes if len(matches) > 1: body += '\n----------------------------------------\n' return body def get_aggregation_summary_text(self, matches): text = '' if 'aggregation' in self.rule and 'summary_table_fields' in self.rule: summary_table_fields = self.rule['summary_table_fields'] if not isinstance(summary_table_fields, list): summary_table_fields = [summary_table_fields] # Include a count aggregation so that we can see at a glance how many of each aggregation_key were encountered summary_table_fields_with_count = summary_table_fields + ['count'] text += "Aggregation resulted in the following data for summary_table_fields ==> {0}:\n\n".format( summary_table_fields_with_count ) text_table = Texttable() text_table.header(summary_table_fields_with_count) match_aggregation = {} # Maintain an aggregate count for each unique key encountered in the aggregation period for match in matches: key_tuple = tuple([unicode(lookup_es_key(match, key)) for key in summary_table_fields]) if key_tuple not in match_aggregation: match_aggregation[key_tuple] = 1 else: match_aggregation[key_tuple] = match_aggregation[key_tuple] + 1 for keys, count in match_aggregation.iteritems(): text_table.add_row([key for key in keys] + [count]) text += text_table.draw() + '\n\n' return unicode(text) def create_default_title(self, matches): return self.rule['name'] def get_account(self, account_file): """ Gets the username and password from an account file. :param account_file: Name of the file which contains user and password information. """ account_conf = yaml_loader(account_file) if 'user' not in account_conf or 'password' not in account_conf: raise EAException('Account file must have user and password fields') self.user = account_conf['user'] self.password = account_conf['password'] class StompAlerter(Alerter): """ The stomp alerter publishes alerts via stomp to a broker. 
""" required_options = frozenset(['stomp_hostname', 'stomp_hostport', 'stomp_login', 'stomp_password']) def alert(self, matches): alerts = [] qk = self.rule.get('query_key', None) fullmessage = {} for match in matches: if qk in match: elastalert_logger.info( 'Alert for %s, %s at %s:' % (self.rule['name'], match[qk], lookup_es_key(match, self.rule['timestamp_field']))) alerts.append( '1)Alert for %s, %s at %s:' % (self.rule['name'], match[qk], lookup_es_key(match, self.rule['timestamp_field'])) ) fullmessage['match'] = match[qk] else: elastalert_logger.info('Alert for %s at %s:' % (self.rule['name'], lookup_es_key(match, self.rule['timestamp_field']))) alerts.append( '2)Alert for %s at %s:' % (self.rule['name'], lookup_es_key(match, self.rule['timestamp_field'])) ) fullmessage['match'] = lookup_es_key(match, self.rule['timestamp_field']) elastalert_logger.info(unicode(BasicMatchString(self.rule, match))) fullmessage['alerts'] = alerts fullmessage['rule'] = self.rule['name'] fullmessage['matching'] = unicode(BasicMatchString(self.rule, match)) fullmessage['alertDate'] = datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S") fullmessage['body'] = self.create_alert_body(matches) self.stomp_hostname = self.rule.get('stomp_hostname', 'localhost') self.stomp_hostport = self.rule.get('stomp_hostport', '61613') self.stomp_login = self.rule.get('stomp_login', 'admin') self.stomp_password = self.rule.get('stomp_password', '<PASSWORD>') self.stomp_destination = self.rule.get('stomp_destination', '/queue/ALERT') conn = stomp.Connection([(self.stomp_hostname, self.stomp_hostport)]) conn.start() conn.connect(self.stomp_login, self.stomp_password) conn.send(self.stomp_destination, json.dumps(fullmessage)) conn.disconnect() def get_info(self): return {'type': 'stomp'} class DebugAlerter(Alerter): """ The debug alerter uses a Python logger (by default, alerting to terminal). 
""" def alert(self, matches): qk = self.rule.get('query_key', None) for match in matches: if qk in match: elastalert_logger.info( 'Alert for %s, %s at %s:' % (self.rule['name'], match[qk], lookup_es_key(match, self.rule['timestamp_field']))) else: elastalert_logger.info('Alert for %s at %s:' % (self.rule['name'], lookup_es_key(match, self.rule['timestamp_field']))) elastalert_logger.info(unicode(BasicMatchString(self.rule, match))) def get_info(self): return {'type': 'debug'} class EmailAlerter(Alerter): """ Sends an email alert """ required_options = frozenset(['email']) def __init__(self, *args): super(EmailAlerter, self).__init__(*args) self.smtp_host = self.rule.get('smtp_host', 'localhost') self.smtp_ssl = self.rule.get('smtp_ssl', False) self.from_addr = self.rule.get('from_addr', 'ElastAlert') self.smtp_port = self.rule.get('smtp_port') if self.rule.get('smtp_auth_file'): self.get_account(self.rule['smtp_auth_file']) self.smtp_key_file = self.rule.get('smtp_key_file') self.smtp_cert_file = self.rule.get('smtp_cert_file') # Convert email to a list if it isn't already if isinstance(self.rule['email'], basestring): self.rule['email'] = [self.rule['email']] # If there is a cc then also convert it a list if it isn't cc = self.rule.get('cc') if cc and isinstance(cc, basestring): self.rule['cc'] = [self.rule['cc']] # If there is a bcc then also convert it to a list if it isn't bcc = self.rule.get('bcc') if bcc and isinstance(bcc, basestring): self.rule['bcc'] = [self.rule['bcc']] add_suffix = self.rule.get('email_add_domain') if add_suffix and not add_suffix.startswith('@'): self.rule['email_add_domain'] = '@' + add_suffix def alert(self, matches): body = self.create_alert_body(matches) # Add JIRA ticket if it exists if self.pipeline is not None and 'jira_ticket' in self.pipeline: url = '%s/browse/%s' % (self.pipeline['jira_server'], self.pipeline['jira_ticket']) body += '\nJIRA ticket: %s' % (url) to_addr = self.rule['email'] if 'email_from_field' in self.rule: recipient = lookup_es_key(matches[0], self.rule['email_from_field']) if isinstance(recipient, basestring): if '@' in recipient: to_addr = [recipient] elif 'email_add_domain' in self.rule: to_addr = [recipient + self.rule['email_add_domain']] elif isinstance(recipient, list): to_addr = recipient if 'email_add_domain' in self.rule: to_addr = [name + self.rule['email_add_domain'] for name in to_addr] email_msg = MIMEText(body.encode('UTF-8'), _charset='UTF-8') email_msg['Subject'] = self.create_title(matches) email_msg['To'] = ', '.join(to_addr) email_msg['From'] = self.from_addr email_msg['Reply-To'] = self.rule.get('email_reply_to', email_msg['To']) email_msg['Date'] = formatdate() if self.rule.get('cc'): email_msg['CC'] = ','.join(self.rule['cc']) to_addr = to_addr + self.rule['cc'] if self.rule.get('bcc'): to_addr = to_addr + self.rule['bcc'] try: if self.smtp_ssl: if self.smtp_port: self.smtp = SMTP_SSL(self.smtp_host, self.smtp_port, keyfile=self.smtp_key_file, certfile=self.smtp_cert_file) else: self.smtp = SMTP_SSL(self.smtp_host, keyfile=self.smtp_key_file, certfile=self.smtp_cert_file) else: if self.smtp_port: self.smtp = SMTP(self.smtp_host, self.smtp_port) else: self.smtp = SMTP(self.smtp_host) self.smtp.ehlo() if self.smtp.has_extn('STARTTLS'): self.smtp.starttls(keyfile=self.smtp_key_file, certfile=self.smtp_cert_file) if 'smtp_auth_file' in self.rule: self.smtp.login(self.user, self.password) except (SMTPException, error) as e: raise EAException("Error connecting to SMTP host: %s" % (e)) except SMTPAuthenticationError 
as e: raise EAException("SMTP username/password rejected: %s" % (e)) self.smtp.sendmail(self.from_addr, to_addr, email_msg.as_string()) self.smtp.close() elastalert_logger.info("Sent email to %s" % (to_addr)) def create_default_title(self, matches): subject = 'ElastAlert: %s' % (self.rule['name']) # If the rule has a query_key, add that value plus timestamp to subject if 'query_key' in self.rule: qk = matches[0].get(self.rule['query_key']) if qk: subject += ' - %s' % (qk) return subject def get_info(self): return {'type': 'email', 'recipients': self.rule['email']} class JiraAlerter(Alerter): """ Creates a Jira ticket for each alert """ required_options = frozenset(['jira_server', 'jira_account_file', 'jira_project', 'jira_issuetype']) # Maintain a static set of built-in fields that we explicitly know how to set # For anything else, we will do best-effort and try to set a string value known_field_list = [ 'jira_account_file', 'jira_assignee', 'jira_bump_after_inactivity', 'jira_bump_in_statuses', 'jira_bump_not_in_statuses', 'jira_bump_tickets', 'jira_component', 'jira_components', 'jira_description', 'jira_ignore_in_title', 'jira_issuetype', 'jira_label', 'jira_labels', 'jira_max_age', 'jira_priority', 'jira_project', 'jira_server', 'jira_watchers', ] # Some built-in jira types that can be used as custom fields require special handling # Here is a sample of one of them: # {"id":"customfield_12807","name":"My Custom Field","custom":true,"orderable":true,"navigable":true,"searchable":true, # "clauseNames":["cf[12807]","My Custom Field"],"schema":{"type":"array","items":"string", # "custom":"com.atlassian.jira.plugin.system.customfieldtypes:multiselect","customId":12807}} # There are likely others that will need to be updated on a case-by-case basis custom_string_types_with_special_handling = [ 'com.atlassian.jira.plugin.system.customfieldtypes:multicheckboxes', 'com.atlassian.jira.plugin.system.customfieldtypes:multiselect', 'com.atlassian.jira.plugin.system.customfieldtypes:radiobuttons', ] def __init__(self, rule): super(JiraAlerter, self).__init__(rule) self.server = self.rule['jira_server'] self.get_account(self.rule['jira_account_file']) self.project = self.rule['jira_project'] self.issue_type = self.rule['jira_issuetype'] # We used to support only a single component. This allows us to maintain backwards compatibility # while also giving the user-facing API a more representative name self.components = self.rule.get('jira_components', self.rule.get('jira_component')) # We used to support only a single label. This allows us to maintain backwards compatibility # while also giving the user-facing API a more representative name self.labels = self.rule.get('jira_labels', self.rule.get('jira_label')) self.description = self.rule.get('jira_description', '') self.assignee = self.rule.get('jira_assignee') self.max_age = self.rule.get('jira_max_age', 30) self.priority = self.rule.get('jira_priority') self.bump_tickets = self.rule.get('jira_bump_tickets', False) self.bump_not_in_statuses = self.rule.get('jira_bump_not_in_statuses') self.bump_in_statuses = self.rule.get('jira_bump_in_statuses') self.bump_after_inactivity = self.rule.get('jira_bump_after_inactivity', self.max_age) self.watchers = self.rule.get('jira_watchers') if self.bump_in_statuses and self.bump_not_in_statuses: msg = 'Both jira_bump_in_statuses (%s) and jira_bump_not_in_statuses (%s) are set.' 
% \ (','.join(self.bump_in_statuses), ','.join(self.bump_not_in_statuses)) intersection = list(set(self.bump_in_statuses) & set(self.bump_in_statuses)) if intersection: msg = '%s Both have common statuses of (%s). As such, no tickets will ever be found.' % ( msg, ','.join(intersection)) msg += ' This should be simplified to use only one or the other.' logging.warning(msg) self.jira_args = {'project': {'key': self.project}, 'issuetype': {'name': self.issue_type}} if self.components: # Support single component or list if type(self.components) != list: self.jira_args['components'] = [{'name': self.components}] else: self.jira_args['components'] = [{'name': component} for component in self.components] if self.labels: # Support single label or list if type(self.labels) != list: self.labels = [self.labels] self.jira_args['labels'] = self.labels if self.watchers: # Support single watcher or list if type(self.watchers) != list: self.watchers = [self.watchers] if self.assignee: self.jira_args['assignee'] = {'name': self.assignee} try: self.client = JIRA(self.server, basic_auth=(self.user, self.password)) self.get_priorities() self.get_arbitrary_fields() except JIRAError as e: # JIRAError may contain HTML, pass along only first 1024 chars raise EAException("Error connecting to JIRA: %s" % (str(e)[:1024])) try: if self.priority is not None: self.jira_args['priority'] = {'id': self.priority_ids[self.priority]} except KeyError: logging.error("Priority %s not found. Valid priorities are %s" % (self.priority, self.priority_ids.keys())) def get_arbitrary_fields(self): # This API returns metadata about all the fields defined on the jira server (built-ins and custom ones) fields = self.client.fields() for jira_field, value in self.rule.iteritems(): # If we find a field that is not covered by the set that we are aware of, it means it is either: # 1. A built-in supported field in JIRA that we don't have on our radar # 2. A custom field that a JIRA admin has configured if jira_field.startswith('jira_') and jira_field not in self.known_field_list: # Remove the jira_ part. Convert underscores to spaces normalized_jira_field = jira_field[5:].replace('_', ' ').lower() # All jira fields should be found in the 'id' or the 'name' field. Therefore, try both just in case for identifier in ['name', 'id']: field = next((f for f in fields if normalized_jira_field == f[identifier].replace('_', ' ').lower()), None) if field: break if not field: # Log a warning to ElastAlert saying that we couldn't find that type? # OR raise and fail to load the alert entirely? Probably the latter... raise Exception("Could not find a definition for the jira field '{0}'".format(normalized_jira_field)) arg_name = field['id'] # Check the schema information to decide how to set the value correctly # If the schema information is not available, raise an exception since we don't know how to set it # Note this is only the case for two built-in types, id: issuekey and id: thumbnail if not ('schema' in field or 'type' in field['schema']): raise Exception("Could not determine schema information for the jira field '{0}'".format(normalized_jira_field)) arg_type = field['schema']['type'] # Handle arrays of simple types like strings or numbers if arg_type == 'array': # As a convenience, support the scenario wherein the user only provides # a single value for a multi-value field e.g. 
jira_labels: Only_One_Label if type(value) != list: value = [value] array_items = field['schema']['items'] # Simple string types if array_items in ['string', 'date', 'datetime']: # Special case for multi-select custom types (the JIRA metadata says that these are strings, but # in reality, they are required to be provided as an object. if 'custom' in field['schema'] and field['schema']['custom'] in self.custom_string_types_with_special_handling: self.jira_args[arg_name] = [{'value': v} for v in value] else: self.jira_args[arg_name] = value elif array_items == 'number': self.jira_args[arg_name] = [int(v) for v in value] # Also attempt to handle arrays of complex types that have to be passed as objects with an identifier 'key' elif array_items == 'option': self.jira_args[arg_name] = [{'value': v} for v in value] else: # Try setting it as an object, using 'name' as the key # This may not work, as the key might actually be 'key', 'id', 'value', or something else # If it works, great! If not, it will manifest itself as an API error that will bubble up self.jira_args[arg_name] = [{'name': v} for v in value] # Handle non-array types else: # Simple string types if arg_type in ['string', 'date', 'datetime']: # Special case for custom types (the JIRA metadata says that these are strings, but # in reality, they are required to be provided as an object. if 'custom' in field['schema'] and field['schema']['custom'] in self.custom_string_types_with_special_handling: self.jira_args[arg_name] = {'value': value} else: self.jira_args[arg_name] = value # Number type elif arg_type == 'number': self.jira_args[arg_name] = int(value) elif arg_type == 'option': self.jira_args[arg_name] = {'value': value} # Complex type else: self.jira_args[arg_name] = {'name': value} def get_priorities(self): """ Creates a mapping of priority index to id. """ priorities = self.client.priorities() self.priority_ids = {} for x in range(len(priorities)): self.priority_ids[x] = priorities[x].id def set_assignee(self, assignee): self.assignee = assignee if assignee: self.jira_args['assignee'] = {'name': assignee} elif 'assignee' in self.jira_args: self.jira_args.pop('assignee') def find_existing_ticket(self, matches): # Default title, get stripped search version if 'alert_subject' not in self.rule: title = self.create_default_title(matches, True) else: title = self.create_title(matches) if 'jira_ignore_in_title' in self.rule: title = title.replace(matches[0].get(self.rule['jira_ignore_in_title'], ''), '') # This is necessary for search to work. 
Other special characters and dashes # directly adjacent to words appear to be ok title = title.replace(' - ', ' ') title = title.replace('\\', '\\\\') date = (datetime.datetime.now() - datetime.timedelta(days=self.max_age)).strftime('%Y-%m-%d') jql = 'project=%s AND summary~"%s" and created >= "%s"' % (self.project, title, date) if self.bump_in_statuses: jql = '%s and status in (%s)' % (jql, ','.join(self.bump_in_statuses)) if self.bump_not_in_statuses: jql = '%s and status not in (%s)' % (jql, ','.join(self.bump_not_in_statuses)) try: issues = self.client.search_issues(jql) except JIRAError as e: logging.exception("Error while searching for JIRA ticket using jql '%s': %s" % (jql, e)) return None if len(issues): return issues[0] def comment_on_ticket(self, ticket, match): text = unicode(JiraFormattedMatchString(self.rule, match)) timestamp = pretty_ts(lookup_es_key(match, self.rule['timestamp_field'])) comment = "This alert was triggered again at %s\n%s" % (timestamp, text) self.client.add_comment(ticket, comment) def alert(self, matches): title = self.create_title(matches) if self.bump_tickets: ticket = self.find_existing_ticket(matches) if ticket: inactivity_datetime = ts_now() - datetime.timedelta(days=self.bump_after_inactivity) if ts_to_dt(ticket.fields.updated) >= inactivity_datetime: if self.pipeline is not None: self.pipeline['jira_ticket'] = None self.pipeline['jira_server'] = self.server return None elastalert_logger.info('Commenting on existing ticket %s' % (ticket.key)) for match in matches: try: self.comment_on_ticket(ticket, match) except JIRAError as e: logging.exception("Error while commenting on ticket %s: %s" % (ticket, e)) if self.pipeline is not None: self.pipeline['jira_ticket'] = ticket self.pipeline['jira_server'] = self.server return None self.jira_args['summary'] = title self.jira_args['description'] = self.create_alert_body(matches) try: self.issue = self.client.create_issue(**self.jira_args) # You can not add watchers on initial creation. Only as a follow-up action if self.watchers: for watcher in self.watchers: try: self.client.add_watcher(self.issue.key, watcher) except Exception as ex: # Re-raise the exception, preserve the stack-trace, and give some # context as to which watcher failed to be added raise Exception( "Exception encountered when trying to add '{0}' as a watcher. 
Does the user exist?\n{1}" .format( watcher, ex )), None, sys.exc_info()[2] except JIRAError as e: raise EAException("Error creating JIRA ticket using jira_args (%s): %s" % (self.jira_args, e)) elastalert_logger.info("Opened Jira ticket: %s" % (self.issue)) if self.pipeline is not None: self.pipeline['jira_ticket'] = self.issue self.pipeline['jira_server'] = self.server def create_alert_body(self, matches): body = self.description + '\n' body += self.get_aggregation_summary_text(matches) for match in matches: body += unicode(JiraFormattedMatchString(self.rule, match)) if len(matches) > 1: body += '\n----------------------------------------\n' return body def get_aggregation_summary_text(self, matches): text = super(JiraAlerter, self).get_aggregation_summary_text(matches) if text: text = u'{{noformat}}{0}{{noformat}}'.format(text) return text def create_default_title(self, matches, for_search=False): # If there is a query_key, use that in the title if 'query_key' in self.rule and lookup_es_key(matches[0], self.rule['query_key']): title = 'ElastAlert: %s matched %s' % (lookup_es_key(matches[0], self.rule['query_key']), self.rule['name']) else: title = 'ElastAlert: %s' % (self.rule['name']) if for_search: return title title += ' - %s' % (pretty_ts(matches[0][self.rule['timestamp_field']], self.rule.get('use_local_time'))) # Add count for spikes count = matches[0].get('spike_count') if count: title += ' - %s+ events' % (count) return title def get_info(self): return {'type': 'jira'} class CommandAlerter(Alerter): required_options = set(['command']) def __init__(self, *args): super(CommandAlerter, self).__init__(*args) self.last_command = [] self.shell = False if isinstance(self.rule['command'], basestring): self.shell = True if '%' in self.rule['command']: logging.warning('Warning! 
You could be vulnerable to shell injection!') self.rule['command'] = [self.rule['command']] self.new_style_string_format = False if 'new_style_string_format' in self.rule and self.rule['new_style_string_format']: self.new_style_string_format = True def alert(self, matches): # Format the command and arguments try: if self.new_style_string_format: command = [command_arg.format(match=matches[0]) for command_arg in self.rule['command']] else: command = [command_arg % matches[0] for command_arg in self.rule['command']] self.last_command = command except KeyError as e: raise EAException("Error formatting command: %s" % (e)) # Run command and pipe data try: subp = subprocess.Popen(command, stdin=subprocess.PIPE, shell=self.shell) if self.rule.get('pipe_match_json'): match_json = json.dumps(matches, cls=DateTimeEncoder) + '\n' stdout, stderr = subp.communicate(input=match_json) if self.rule.get("fail_on_non_zero_exit", False) and subp.wait(): raise EAException("Non-zero exit code while running command %s" % (' '.join(command))) except OSError as e: raise EAException("Error while running command %s: %s" % (' '.join(command), e)) def get_info(self): return {'type': 'command', 'command': ' '.join(self.last_command)} class SnsAlerter(Alerter): """ Send alert using AWS SNS service """ required_options = frozenset(['sns_topic_arn']) def __init__(self, *args): super(SnsAlerter, self).__init__(*args) self.sns_topic_arn = self.rule.get('sns_topic_arn', '') self.aws_access_key_id = self.rule.get('aws_access_key_id') self.aws_secret_access_key = self.rule.get('aws_secret_access_key') self.aws_region = self.rule.get('aws_region', 'us-east-1') self.profile = self.rule.get('boto_profile', None) # Deprecated self.profile = self.rule.get('aws_profile', None) def create_default_title(self, matches): subject = 'ElastAlert: %s' % (self.rule['name']) return subject def alert(self, matches): body = self.create_alert_body(matches) session = boto3.Session( aws_access_key_id=self.aws_access_key_id, aws_secret_access_key=self.aws_secret_access_key, region_name=self.aws_region, profile_name=self.profile ) sns_client = session.client('sns') sns_client.publish( TopicArn=self.sns_topic_arn, Message=body, Subject=self.create_title(matches) ) elastalert_logger.info("Sent sns notification to %s" % (self.sns_topic_arn)) class HipChatAlerter(Alerter): """ Creates a HipChat room notification for each alert """ required_options = frozenset(['hipchat_auth_token', 'hipchat_room_id']) def __init__(self, rule): super(HipChatAlerter, self).__init__(rule) self.hipchat_msg_color = self.rule.get('hipchat_msg_color', 'red') self.hipchat_message_format = self.rule.get('hipchat_message_format', 'html') self.hipchat_auth_token = self.rule['hipchat_auth_token'] self.hipchat_room_id = self.rule['hipchat_room_id'] self.hipchat_domain = self.rule.get('hipchat_domain', 'api.hipchat.com') self.hipchat_ignore_ssl_errors = self.rule.get('hipchat_ignore_ssl_errors', False) self.hipchat_notify = self.rule.get('hipchat_notify', True) self.hipchat_from = self.rule.get('hipchat_from', '') self.url = 'https://%s/v2/room/%s/notification?auth_token=%s' % ( self.hipchat_domain, self.hipchat_room_id, self.hipchat_auth_token) self.hipchat_proxy = self.rule.get('hipchat_proxy', None) def alert(self, matches): body = self.create_alert_body(matches) # HipChat sends 400 bad request on messages longer than 10000 characters if (len(body) > 9999): body = body[:9980] + '..(truncated)' # Use appropriate line ending for text/html if self.hipchat_message_format == 'html': 
body = body.replace('\n', '<br />') # Post to HipChat headers = {'content-type': 'application/json'} # set https proxy, if it was provided proxies = {'https': self.hipchat_proxy} if self.hipchat_proxy else None payload = { 'color': self.hipchat_msg_color, 'message': body, 'message_format': self.hipchat_message_format, 'notify': self.hipchat_notify, 'from': self.hipchat_from } try: if self.hipchat_ignore_ssl_errors: requests.packages.urllib3.disable_warnings() response = requests.post(self.url, data=json.dumps(payload, cls=DateTimeEncoder), headers=headers, verify=not self.hipchat_ignore_ssl_errors, proxies=proxies) warnings.resetwarnings() response.raise_for_status() except RequestException as e: raise EAException("Error posting to HipChat: %s" % e) elastalert_logger.info("Alert sent to HipChat room %s" % self.hipchat_room_id) def get_info(self): return {'type': 'hipchat', 'hipchat_room_id': self.hipchat_room_id} class MsTeamsAlerter(Alerter): """ Creates a Microsoft Teams Conversation Message for each alert """ required_options = frozenset(['ms_teams_webhook_url', 'ms_teams_alert_summary']) def __init__(self, rule): super(MsTeamsAlerter, self).__init__(rule) self.ms_teams_webhook_url = self.rule['ms_teams_webhook_url'] if isinstance(self.ms_teams_webhook_url, basestring): self.ms_teams_webhook_url = [self.ms_teams_webhook_url] self.ms_teams_proxy = self.rule.get('ms_teams_proxy', None) self.ms_teams_alert_summary = self.rule.get('ms_teams_alert_summary', 'ElastAlert Message') self.ms_teams_alert_fixed_width = self.rule.get('ms_teams_alert_fixed_width', False) self.ms_teams_theme_color = self.rule.get('ms_teams_theme_color', '') def format_body(self, body): body = body.encode('UTF-8') if self.ms_teams_alert_fixed_width: body = body.replace('`', "'") body = "```{0}```".format('```\n\n```'.join(x for x in body.split('\n'))).replace('\n``````', '') return body def alert(self, matches): body = self.create_alert_body(matches) body = self.format_body(body) # post to Teams headers = {'content-type': 'application/json'} # set https proxy, if it was provided proxies = {'https': self.ms_teams_proxy} if self.ms_teams_proxy else None payload = { '@type': 'MessageCard', '@context': 'http://schema.org/extensions', 'summary': self.ms_teams_alert_summary, 'title': self.create_title(matches), 'text': body } if self.ms_teams_theme_color != '': payload['themeColor'] = self.ms_teams_theme_color for url in self.ms_teams_webhook_url: try: response = requests.post(url, data=json.dumps(payload, cls=DateTimeEncoder), headers=headers, proxies=proxies) response.raise_for_status() except RequestException as e: raise EAException("Error posting to ms teams: %s" % e) elastalert_logger.info("Alert sent to MS Teams") def get_info(self): return {'type': 'ms_teams', 'ms_teams_webhook_url': self.ms_teams_webhook_url} class SlackAlerter(Alerter): """ Creates a Slack room message for each alert """ required_options = frozenset(['slack_webhook_url']) def __init__(self, rule): super(SlackAlerter, self).__init__(rule) self.slack_webhook_url = self.rule['slack_webhook_url'] if isinstance(self.slack_webhook_url, basestring): self.slack_webhook_url = [self.slack_webhook_url] self.slack_proxy = self.rule.get('slack_proxy', None) self.slack_username_override = self.rule.get('slack_username_override', 'elastalert') self.slack_channel_override = self.rule.get('slack_channel_override', '') self.slack_emoji_override = self.rule.get('slack_emoji_override', ':ghost:') self.slack_icon_url_override = self.rule.get('slack_icon_url_override', 
'') self.slack_msg_color = self.rule.get('slack_msg_color', 'danger') self.slack_parse_override = self.rule.get('slack_parse_override', 'none') self.slack_text_string = self.rule.get('slack_text_string', '') def format_body(self, body): # https://api.slack.com/docs/formatting body = body.encode('UTF-8') body = body.replace('&', '&amp;') body = body.replace('<', '&lt;') body = body.replace('>', '&gt;') return body def alert(self, matches): body = self.create_alert_body(matches) body = self.format_body(body) # post to slack headers = {'content-type': 'application/json'} # set https proxy, if it was provided proxies = {'https': self.slack_proxy} if self.slack_proxy else None payload = { 'username': self.slack_username_override, 'channel': self.slack_channel_override, 'parse': self.slack_parse_override, 'text': self.slack_text_string, 'attachments': [ { 'color': self.slack_msg_color, 'title': self.create_title(matches), 'text': body, 'mrkdwn_in': ['text', 'pretext'], 'fields': [] } ] } if self.slack_icon_url_override != '': payload['icon_url'] = self.slack_icon_url_override else: payload['icon_emoji'] = self.slack_emoji_override for url in self.slack_webhook_url: try: response = requests.post(url, data=json.dumps(payload, cls=DateTimeEncoder), headers=headers, proxies=proxies) response.raise_for_status() except RequestException as e: raise EAException("Error posting to slack: %s" % e) elastalert_logger.info("Alert sent to Slack") def get_info(self): return {'type': 'slack', 'slack_username_override': self.slack_username_override, 'slack_webhook_url': self.slack_webhook_url} class PagerDutyAlerter(Alerter): """ Create an incident on PagerDuty for each alert """ required_options = frozenset(['pagerduty_service_key', 'pagerduty_client_name']) def __init__(self, rule): super(PagerDutyAlerter, self).__init__(rule) self.pagerduty_service_key = self.rule['pagerduty_service_key'] self.pagerduty_client_name = self.rule['pagerduty_client_name'] self.pagerduty_incident_key = self.rule.get('pagerduty_incident_key', '') self.pagerduty_incident_key_args = self.rule.get('pagerduty_incident_key_args', None) self.pagerduty_proxy = self.rule.get('pagerduty_proxy', None) self.url = 'https://events.pagerduty.com/generic/2010-04-15/create_event.json' def alert(self, matches): body = self.create_alert_body(matches) # post to pagerduty headers = {'content-type': 'application/json'} payload = { 'service_key': self.pagerduty_service_key, 'description': self.create_title(matches), 'event_type': 'trigger', 'incident_key': self.get_incident_key(matches), 'client': self.pagerduty_client_name, 'details': { "information": body.encode('UTF-8'), }, } # set https proxy, if it was provided proxies = {'https': self.pagerduty_proxy} if self.pagerduty_proxy else None try: response = requests.post( self.url, data=json.dumps(payload, cls=DateTimeEncoder, ensure_ascii=False), headers=headers, proxies=proxies ) response.raise_for_status() except RequestException as e: raise EAException("Error posting to pagerduty: %s" % e) elastalert_logger.info("Trigger sent to PagerDuty") def get_incident_key(self, matches): if self.pagerduty_incident_key_args: incident_key_values = [lookup_es_key(matches[0], arg) for arg in self.pagerduty_incident_key_args] # Populate values with rule level properties too for i in range(len(incident_key_values)): if incident_key_values[i] is None: key_value = self.rule.get(self.pagerduty_incident_key_args[i]) if key_value: incident_key_values[i] = key_value incident_key_values = ['<MISSING VALUE>' if val is None 
else val for val in incident_key_values] return self.pagerduty_incident_key.format(*incident_key_values) else: return self.pagerduty_incident_key def get_info(self): return {'type': 'pagerduty', 'pagerduty_client_name': self.pagerduty_client_name} class ExotelAlerter(Alerter): required_options = frozenset(['exotel_account_sid', 'exotel_auth_token', 'exotel_to_number', 'exotel_from_number']) def __init__(self, rule): super(ExotelAlerter, self).__init__(rule) self.exotel_account_sid = self.rule['exotel_account_sid'] self.exotel_auth_token = self.rule['exotel_auth_token'] self.exotel_to_number = self.rule['exotel_to_number'] self.exotel_from_number = self.rule['exotel_from_number'] self.sms_body = self.rule.get('exotel_message_body', '') def alert(self, matches): client = Exotel(self.exotel_account_sid, self.exotel_auth_token) try: message_body = self.rule['name'] + self.sms_body response = client.sms(self.rule['exotel_from_number'], self.rule['exotel_to_number'], message_body) if response != 200: raise EAException("Error posting to Exotel, response code is %s" % response) except: raise EAException("Error posting to Exotel"), None, sys.exc_info()[2] elastalert_logger.info("Trigger sent to Exotel") def get_info(self): return {'type': 'exotel', 'exotel_account': self.exotel_account_sid} class TwilioAlerter(Alerter): required_options = frozenset(['twilio_account_sid', 'twilio_auth_token', 'twilio_to_number', 'twilio_from_number']) def __init__(self, rule): super(TwilioAlerter, self).__init__(rule) self.twilio_account_sid = self.rule['twilio_account_sid'] self.twilio_auth_token = self.rule['twilio_auth_token'] self.twilio_to_number = self.rule['twilio_to_number'] self.twilio_from_number = self.rule['twilio_from_number'] def alert(self, matches): client = TwilioClient(self.twilio_account_sid, self.twilio_auth_token) try: client.messages.create(body=self.rule['name'], to=self.twilio_to_number, from_=self.twilio_from_number) except TwilioRestException as e: raise EAException("Error posting to twilio: %s" % e) elastalert_logger.info("Trigger sent to Twilio") def get_info(self): return {'type': 'twilio', 'twilio_client_name': self.twilio_from_number} class VictorOpsAlerter(Alerter): """ Creates a VictorOps Incident for each alert """ required_options = frozenset(['victorops_api_key', 'victorops_routing_key', 'victorops_message_type']) def __init__(self, rule): super(VictorOpsAlerter, self).__init__(rule) self.victorops_api_key = self.rule['victorops_api_key'] self.victorops_routing_key = self.rule['victorops_routing_key'] self.victorops_message_type = self.rule['victorops_message_type'] self.victorops_entity_display_name = self.rule.get('victorops_entity_display_name', 'no entity display name') self.url = 'https://alert.victorops.com/integrations/generic/20131114/alert/%s/%s' % ( self.victorops_api_key, self.victorops_routing_key) self.victorops_proxy = self.rule.get('victorops_proxy', None) def alert(self, matches): body = self.create_alert_body(matches) # post to victorops headers = {'content-type': 'application/json'} # set https proxy, if it was provided proxies = {'https': self.victorops_proxy} if self.victorops_proxy else None payload = { "message_type": self.victorops_message_type, "entity_display_name": self.victorops_entity_display_name, "monitoring_tool": "ElastAlert", "state_message": body } try: response = requests.post(self.url, data=json.dumps(payload, cls=DateTimeEncoder), headers=headers, proxies=proxies) response.raise_for_status() except RequestException as e: raise 
EAException("Error posting to VictorOps: %s" % e) elastalert_logger.info("Trigger sent to VictorOps") def get_info(self): return {'type': 'victorops', 'victorops_routing_key': self.victorops_routing_key} class TelegramAlerter(Alerter): """ Send a Telegram message via bot api for each alert """ required_options = frozenset(['telegram_bot_token', 'telegram_room_id']) def __init__(self, rule): super(TelegramAlerter, self).__init__(rule) self.telegram_bot_token = self.rule['telegram_bot_token'] self.telegram_room_id = self.rule['telegram_room_id'] self.telegram_api_url = self.rule.get('telegram_api_url', 'api.telegram.org') self.url = 'https://%s/bot%s/%s' % (self.telegram_api_url, self.telegram_bot_token, "sendMessage") self.telegram_proxy = self.rule.get('telegram_proxy', None) def alert(self, matches): body = u'⚠ *%s* ⚠ ```\n' % (self.create_title(matches)) for match in matches: body += unicode(BasicMatchString(self.rule, match)) # Separate text of aggregated alerts with dashes if len(matches) > 1: body += '\n----------------------------------------\n' body += u' ```' headers = {'content-type': 'application/json'} # set https proxy, if it was provided proxies = {'https': self.telegram_proxy} if self.telegram_proxy else None payload = { 'chat_id': self.telegram_room_id, 'text': body, 'parse_mode': 'markdown', 'disable_web_page_preview': True } try: response = requests.post(self.url, data=json.dumps(payload, cls=DateTimeEncoder), headers=headers, proxies=proxies) warnings.resetwarnings() response.raise_for_status() except RequestException as e: raise EAException("Error posting to Telegram: %s" % e) elastalert_logger.info( "Alert sent to Telegram room %s" % self.telegram_room_id) def get_info(self): return {'type': 'telegram', 'telegram_room_id': self.telegram_room_id} class GitterAlerter(Alerter): """ Creates a Gitter activity message for each alert """ required_options = frozenset(['gitter_webhook_url']) def __init__(self, rule): super(GitterAlerter, self).__init__(rule) self.gitter_webhook_url = self.rule['gitter_webhook_url'] self.gitter_proxy = self.rule.get('gitter_proxy', None) self.gitter_msg_level = self.rule.get('gitter_msg_level', 'error') def alert(self, matches): body = self.create_alert_body(matches) # post to Gitter headers = {'content-type': 'application/json'} # set https proxy, if it was provided proxies = {'https': self.gitter_proxy} if self.gitter_proxy else None payload = { 'message': body, 'level': self.gitter_msg_level } try: response = requests.post(self.gitter_webhook_url, json.dumps(payload, cls=DateTimeEncoder), headers=headers, proxies=proxies) response.raise_for_status() except RequestException as e: raise EAException("Error posting to Gitter: %s" % e) elastalert_logger.info("Alert sent to Gitter") def get_info(self): return {'type': 'gitter', 'gitter_webhook_url': self.gitter_webhook_url} class ServiceNowAlerter(Alerter): """ Creates a ServiceNow alert """ required_options = set([ 'username', 'password', 'servicenow_rest_url', 'short_description', 'comments', 'assignment_group', 'category', 'subcategory', 'cmdb_ci', 'caller_id' ]) def __init__(self, rule): super(ServiceNowAlerter, self).__init__(rule) self.servicenow_rest_url = self.rule['servicenow_rest_url'] self.servicenow_proxy = self.rule.get('servicenow_proxy', None) def alert(self, matches): for match in matches: # Parse everything into description. 
description = str(BasicMatchString(self.rule, match)) # Set proper headers headers = { "Content-Type": "application/json", "Accept": "application/json;charset=utf-8" } proxies = {'https': self.servicenow_proxy} if self.servicenow_proxy else None payload = { "description": description, "short_description": self.rule['short_description'], "comments": self.rule['comments'], "assignment_group": self.rule['assignment_group'], "category": self.rule['category'], "subcategory": self.rule['subcategory'], "cmdb_ci": self.rule['cmdb_ci'], "caller_id": self.rule["caller_id"] } try: response = requests.post( self.servicenow_rest_url, auth=(self.rule['username'], self.rule['password']), headers=headers, data=json.dumps(payload, cls=DateTimeEncoder), proxies=proxies ) response.raise_for_status() except RequestException as e: raise EAException("Error posting to ServiceNow: %s" % e) elastalert_logger.info("Alert sent to ServiceNow") def get_info(self): return {'type': 'ServiceNow', 'self.servicenow_rest_url': self.servicenow_rest_url} class HTTPPostAlerter(Alerter): """ Requested elasticsearch indices are sent by HTTP POST. Encoded with JSON. """ def __init__(self, rule): super(HTTPPostAlerter, self).__init__(rule) post_url = self.rule.get('http_post_url') if isinstance(post_url, basestring): post_url = [post_url] self.post_url = post_url self.post_proxy = self.rule.get('http_post_proxy') self.post_payload = self.rule.get('http_post_payload', {}) self.post_static_payload = self.rule.get('http_post_static_payload', {}) self.post_all_values = self.rule.get('http_post_all_values', not self.post_payload) def alert(self, matches): """ Each match will trigger a POST to the specified endpoint(s). """ for match in matches: payload = match if self.post_all_values else {} payload.update(self.post_static_payload) for post_key, es_key in self.post_payload.items(): payload[post_key] = lookup_es_key(match, es_key) headers = { "Content-Type": "application/json", "Accept": "application/json;charset=utf-8" } proxies = {'https': self.post_proxy} if self.post_proxy else None for url in self.post_url: try: response = requests.post(url, data=json.dumps(payload, cls=DateTimeEncoder), headers=headers, proxies=proxies) response.raise_for_status() except RequestException as e: raise EAException("Error posting HTTP Post alert: %s" % e) elastalert_logger.info("HTTP Post alert sent.") def get_info(self): return {'type': 'http_post', 'http_post_webhook_url': self.post_url}
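# Illustrative sketch, not part of the alerts module above: every alerter in it
# follows the same contract -- required_options names the mandatory rule keys,
# __init__ pulls its settings out of self.rule, alert(matches) sends the
# notification and raises EAException on failure, and get_info() returns a small
# dict used for logging. A minimal custom alerter under that contract, assuming
# the usual elastalert module layout (elastalert.alerts.Alerter,
# elastalert.util.EAException); the class name and the 'output_file' rule option
# are hypothetical, for illustration only.
from elastalert.alerts import Alerter
from elastalert.util import EAException, elastalert_logger


class FileWriterAlerter(Alerter):
    """Append each alert body to a local file (illustration only)."""
    required_options = frozenset(['output_file'])

    def alert(self, matches):
        body = self.create_alert_body(matches)
        try:
            with open(self.rule['output_file'], 'a') as handle:
                handle.write(body + '\n')
        except EnvironmentError as e:
            raise EAException("Error writing alert to %s: %s" % (self.rule['output_file'], e))
        elastalert_logger.info("Alert appended to %s" % self.rule['output_file'])

    def get_info(self):
        return {'type': 'file_writer', 'output_file': self.rule['output_file']}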
1.570313
2
core/domain/role_services_test.py
Mohitbalwani26/oppia
0
1528
# coding: utf-8 # # Copyright 2017 The Oppia Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS-IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Test functions relating to roles and actions.""" from __future__ import absolute_import # pylint: disable=import-only-modules from __future__ import unicode_literals # pylint: disable=import-only-modules from core.domain import role_services from core.tests import test_utils import feconf import python_utils class RolesAndActionsServicesUnitTests(test_utils.GenericTestBase): """Tests for roles and actions.""" def test_get_role_actions_return_value_in_correct_schema(self): role_actions = role_services.get_role_actions() self.assertTrue(isinstance(role_actions, dict)) for role_name, allotted_actions in role_actions.items(): self.assertTrue(isinstance(role_name, python_utils.UNICODE)) self.assertTrue(isinstance(allotted_actions, list)) self.assertEqual(len(set(allotted_actions)), len(allotted_actions)) for action_name in allotted_actions: self.assertTrue( isinstance(action_name, python_utils.UNICODE)) def test_get_all_actions(self): with self.assertRaisesRegexp( Exception, 'Role TEST_ROLE does not exist.'): role_services.get_all_actions('TEST_ROLE') self.assertEqual( role_services.get_all_actions(feconf.ROLE_ID_GUEST), [role_services.ACTION_PLAY_ANY_PUBLIC_ACTIVITY])
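# Illustrative sketch, separate from the Oppia test above: the schema that test
# enforces is {role_name: [unique action names]}, with every role and action a
# string. A standalone checker for that shape (the sample roles/actions below
# are made up, not taken from feconf or role_services):
def is_valid_role_actions_dict(role_actions):
    """Return True if role_actions maps string roles to unique string actions."""
    if not isinstance(role_actions, dict):
        return False
    for role_name, allotted_actions in role_actions.items():
        if not isinstance(role_name, str) or not isinstance(allotted_actions, list):
            return False
        # Duplicate actions for a single role are not allowed.
        if len(set(allotted_actions)) != len(allotted_actions):
            return False
        if not all(isinstance(action, str) for action in allotted_actions):
            return False
    return True


assert is_valid_role_actions_dict({'GUEST': ['PLAY_ANY_PUBLIC_ACTIVITY']})
assert not is_valid_role_actions_dict({'GUEST': ['EDIT', 'EDIT']})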
1.460938
1
tests/prep_post/test.py
Aslic/rmats_turbo_4.1.0
0
1536
import glob import os.path import subprocess import sys import unittest import tests.bam import tests.base_test import tests.gtf import tests.output_parser as output_parser import tests.test_config import tests.util class Test(tests.base_test.BaseTest): def setUp(self): super().setUp() self._test_base_dir = tests.test_config.TEST_BASE_DIR self._test_dir = os.path.join(self._test_base_dir, 'prep_post') self._generated_input_dir = os.path.join(self._test_dir, 'generated_input') self._out_dir = os.path.join(self._test_dir, 'out') self._prep_1_tmp_dir = os.path.join(self._test_dir, 'tmp_prep_1') self._prep_2_tmp_dir = os.path.join(self._test_dir, 'tmp_prep_2') self._post_tmp_dir = os.path.join(self._test_dir, 'tmp_post') self._dup_input_bam_tmp_dir = os.path.join(self._test_dir, 'tmp_dup_input_bam') self._dup_prep_bam_tmp_dir = os.path.join(self._test_dir, 'tmp_dup_prep_bam') self._miss_input_bam_tmp_dir = os.path.join(self._test_dir, 'tmp_miss_input_bam') self._miss_prep_bam_tmp_dir = os.path.join(self._test_dir, 'tmp_miss_prep_bam') tests.util.recreate_dirs([ self._generated_input_dir, self._out_dir, self._prep_1_tmp_dir, self._prep_2_tmp_dir, self._post_tmp_dir, self._dup_input_bam_tmp_dir, self._dup_prep_bam_tmp_dir, self._miss_input_bam_tmp_dir, self._miss_prep_bam_tmp_dir, self._command_output_dir() ]) self._read_type = 'paired' self._read_length = 50 self._sample_1_bams_path = os.path.join(self._generated_input_dir, 'b1.txt') self._sample_2_bams_path = os.path.join(self._generated_input_dir, 'b2.txt') sample_1_bam_replicate_template = os.path.join( self._generated_input_dir, 'sample_1_rep_{}.bam') sample_2_bam_replicate_template = os.path.join( self._generated_input_dir, 'sample_2_rep_{}.bam') self._sample_1_bams = self._create_sample_1_bams( self._sample_1_bams_path, sample_1_bam_replicate_template) self._sample_2_bams = self._create_sample_2_bams( self._sample_2_bams_path, sample_2_bam_replicate_template) self._gtf_path = os.path.join(self._generated_input_dir, 'test.gtf') self._gtf = self._create_gtf(self._gtf_path) self._sub_steps = [ 'prep_1', 'inte_1_fail', 'inte_1_pass', 'prep_2', 'inte_2_fail', 'inte_2_pass', 'post', 'duplicate_input_bam', 'duplicate_prep_bam', 'missing_input_bam', 'missing_prep_bam', ] self._sub_step = None def test(self): for sub_step in self._sub_steps: self._sub_step = sub_step self._setup_sub_step() self._run_test() def _command_output_dir(self): return os.path.join(self._test_dir, 'command_output') def _rmats_arguments(self): arguments = [ '--gtf', self._gtf_path, '--od', self._out_dir, '-t', self._read_type, '--readLength', str(self._read_length), ] if self._sub_step == 'prep_1': arguments.extend([ '--tmp', self._prep_1_tmp_dir, '--b1', self._sample_1_bams_path, '--task', 'prep', ]) elif self._sub_step == 'inte_1_fail': arguments.extend([ '--tmp', self._post_tmp_dir, '--b1', self._sample_1_bams_path, '--b2', self._sample_2_bams_path, '--task', 'inte', ]) elif self._sub_step == 'inte_1_pass': arguments.extend([ '--tmp', self._post_tmp_dir, '--b1', self._sample_1_bams_path, '--task', 'inte', '--statoff', ]) elif self._sub_step == 'prep_2': arguments.extend([ '--tmp', self._prep_2_tmp_dir, '--b1', self._sample_2_bams_path, '--task', 'prep', ]) elif self._sub_step == 'inte_2_fail': arguments.extend([ '--tmp', self._post_tmp_dir, '--b1', self._sample_2_bams_path, '--task', 'inte', '--statoff', ]) elif self._sub_step == 'inte_2_pass': arguments.extend([ '--tmp', self._post_tmp_dir, '--b1', self._sample_1_bams_path, '--b2', self._sample_2_bams_path, '--task', 
'inte', ]) elif self._sub_step == 'post': arguments.extend([ '--tmp', self._post_tmp_dir, '--b1', self._sample_1_bams_path, '--b2', self._sample_2_bams_path, '--task', 'post', ]) elif self._sub_step == 'duplicate_input_bam': arguments.extend([ '--tmp', self._dup_input_bam_tmp_dir, '--b1', self._dup_input_bam_path, '--task', 'post', '--statoff', ]) elif self._sub_step == 'duplicate_prep_bam': arguments.extend([ '--tmp', self._dup_prep_bam_tmp_dir, '--b1', self._dup_prep_bam_path, '--task', 'post', '--statoff', ]) elif self._sub_step == 'missing_input_bam': arguments.extend([ '--tmp', self._miss_input_bam_tmp_dir, '--b1', self._miss_input_bam_path, '--task', 'post', '--statoff', ]) elif self._sub_step == 'missing_prep_bam': arguments.extend([ '--tmp', self._miss_prep_bam_tmp_dir, '--b1', self._miss_prep_bam_path, '--task', 'post', '--statoff', ]) return arguments def _setup_sub_step(self): if self._sub_step == 'duplicate_input_bam': self._setup_dup_input_bam() elif self._sub_step == 'duplicate_prep_bam': self._setup_dup_prep_bam() elif self._sub_step == 'missing_input_bam': self._setup_miss_input_bam() elif self._sub_step == 'missing_prep_bam': self._setup_miss_prep_bam() def _setup_dup_input_bam(self): self._dup_input_bam_path = os.path.join(self._generated_input_dir, 'dup_input.txt') bams = self._sample_1_bams + [self._sample_1_bams[0]] self._write_bams(bams, self._dup_input_bam_path) self._cp_with_prefix('prep_1', self._prep_1_tmp_dir, self._dup_input_bam_tmp_dir) def _setup_dup_prep_bam(self): self._dup_prep_bam_path = os.path.join(self._generated_input_dir, 'dup_prep.txt') bams = self._sample_1_bams self._write_bams(bams, self._dup_prep_bam_path) self._cp_with_prefix('prep_1', self._prep_1_tmp_dir, self._dup_prep_bam_tmp_dir) self._cp_with_prefix('prep_1_again', self._prep_1_tmp_dir, self._dup_prep_bam_tmp_dir) def _setup_miss_input_bam(self): self._miss_input_bam_path = os.path.join(self._generated_input_dir, 'miss_input.txt') bams = [self._sample_1_bams[0]] self._write_bams(bams, self._miss_input_bam_path) self._cp_with_prefix('prep_1', self._prep_1_tmp_dir, self._miss_input_bam_tmp_dir) def _setup_miss_prep_bam(self): self._miss_prep_bam_path = os.path.join(self._generated_input_dir, 'miss_prep.txt') bams = self._sample_1_bams + self._sample_2_bams self._write_bams(bams, self._miss_prep_bam_path) self._cp_with_prefix('prep_1', self._prep_1_tmp_dir, self._miss_prep_bam_tmp_dir) def _create_gtf(self, gtf_path): gtf = tests.gtf.GTF() gtf.path = gtf_path transcript_1 = tests.gtf.Transcript() transcript_1.chromosome = '1' transcript_1.strand = '+' transcript_1.gene_id = tests.util.gene_id_str(1) transcript_1.gene_name = tests.util.gene_name_str(1) transcript_1.transcript_id = tests.util.transcript_id_str(1) transcript_1.exons = [(1, 100), (201, 300), (401, 500)] gtf.transcripts = [transcript_1] error = gtf.write() self.assertFalse(error) return gtf def _create_sample_1_bams(self, sample_1_bams_path, sample_1_replicate_template): rep_1_bam = tests.bam.BAM() rep_1_bam.path = sample_1_replicate_template.format(1) rep_2_bam = tests.bam.BAM() rep_2_bam.path = sample_1_replicate_template.format(2) sample_1_bams = [rep_1_bam, rep_2_bam] rep_1_read_1 = tests.bam.Read() rep_1_read_1.ref_seq_name = '1' # chromosome rep_1_read_1.ref_seq_len = 1000 # chromosome length rep_1_read_1.template_name = tests.util.template_name_str([1, 1]) rep_1_read_2 = tests.bam.Read() error = tests.bam.set_read_pair_from_intervals(rep_1_read_1, rep_1_read_2, [[76, 100], [201, 300]], [[401, 475]], self._read_length) 
self.assertFalse(error) rep_1_bam.reads = [rep_1_read_1, rep_1_read_2] rep_2_read_1 = tests.bam.Read() rep_2_read_1.ref_seq_name = '1' # chromosome rep_2_read_1.ref_seq_len = 1000 # chromosome length rep_2_read_1.template_name = tests.util.template_name_str([1, 2]) rep_2_read_2 = tests.bam.Read() error = tests.bam.set_read_pair_from_intervals( rep_2_read_1, rep_2_read_2, [[26, 100]], [[201, 300], [401, 425]], self._read_length) self.assertFalse(error) rep_2_bam.reads = [rep_2_read_1, rep_2_read_2] self._write_bams(sample_1_bams, sample_1_bams_path) return sample_1_bams def _create_sample_2_bams(self, sample_2_bams_path, sample_2_replicate_template): rep_1_bam = tests.bam.BAM() rep_1_bam.path = sample_2_replicate_template.format(1) rep_2_bam = tests.bam.BAM() rep_2_bam.path = sample_2_replicate_template.format(2) sample_2_bams = [rep_1_bam, rep_2_bam] rep_1_read_1 = tests.bam.Read() rep_1_read_1.ref_seq_name = '1' # chromosome rep_1_read_1.ref_seq_len = 1000 # chromosome length rep_1_read_1.template_name = tests.util.template_name_str([2, 1]) rep_1_read_2 = tests.bam.Read() error = tests.bam.set_read_pair_from_intervals(rep_1_read_1, rep_1_read_2, [[76, 100], [401, 500]], [[401, 475]], self._read_length) self.assertFalse(error) rep_1_bam.reads = [rep_1_read_1, rep_1_read_2] rep_2_read_1 = tests.bam.Read() rep_2_read_1.ref_seq_name = '1' # chromosome rep_2_read_1.ref_seq_len = 1000 # chromosome length rep_2_read_1.template_name = tests.util.template_name_str([2, 2]) rep_2_read_2 = tests.bam.Read() error = tests.bam.set_read_pair_from_intervals(rep_2_read_1, rep_2_read_2, [[26, 100]], [[1, 100], [401, 425]], self._read_length) self.assertFalse(error) rep_2_bam.reads = [rep_2_read_1, rep_2_read_2] self._write_bams(sample_2_bams, sample_2_bams_path) return sample_2_bams def _cp_with_prefix(self, prefix, source_dir, dest_dir): source_paths = self._get_dot_rmats_paths(source_dir) command = [ sys.executable, tests.test_config.CP_WITH_PREFIX, prefix, dest_dir ] command.extend(source_paths) subprocess.run(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE, check=True) def _check_results(self): if self._sub_step == 'prep_1': self._check_results_prep_1() elif self._sub_step == 'inte_1_fail': self._check_results_inte_1_fail() elif self._sub_step == 'inte_1_pass': self._check_results_inte_1_pass() elif self._sub_step == 'prep_2': self._check_results_prep_2() elif self._sub_step == 'inte_2_fail': self._check_results_inte_2_fail() elif self._sub_step == 'inte_2_pass': self._check_results_inte_2_pass() elif self._sub_step == 'post': self._check_results_post() elif self._sub_step == 'duplicate_input_bam': self._check_results_dup_input_bam() elif self._sub_step == 'duplicate_prep_bam': self._check_results_dup_prep_bam() elif self._sub_step == 'missing_input_bam': self._check_results_miss_input_bam() elif self._sub_step == 'missing_prep_bam': self._check_results_miss_prep_bam() else: self.fail('unexpected sub_step: {}'.format(self._sub_step)) def _get_dot_rmats_paths(self, tmp_dir): dot_rmats_file_paths = glob.glob(os.path.join(tmp_dir, '*.rmats')) # filenames begin with a timestamp used for alphanumeric sort return sorted(dot_rmats_file_paths) def _check_results_prep_1(self): self._check_no_error_results() command_stdout_file_name = self._get_stdout_file_name() with open(command_stdout_file_name, 'rt') as out_f_h: out_lines = out_f_h.readlines() tests.util.assert_no_line_has(self, out_lines, 'Processing count files') test_gene_id = tests.util.gene_id_str(1) quoted_test_gene_id = 
tests.util.double_quote(test_gene_id) dot_rmats_paths = self._get_dot_rmats_paths(self._prep_1_tmp_dir) self.assertEqual(len(dot_rmats_paths), 2) for dot_rmats_i in range(2): dot_rmats_contents, error = output_parser.parse_dot_rmats( dot_rmats_paths[dot_rmats_i]) self.assertFalse(error) self.assertEqual(dot_rmats_contents['bams'], [self._sample_1_bams[dot_rmats_i].path]) self.assertEqual(dot_rmats_contents['read_length'], self._read_length) novel_juncs = dot_rmats_contents['novel_juncs'] self.assertEqual(novel_juncs, [dict()]) exons = dot_rmats_contents['exons'] if dot_rmats_i == 0: self.assertEqual(exons, [{ quoted_test_gene_id: [{ 'start_box': [401, 499], 'end_box': [401, 499], 'counts': [1, 0] }] }]) else: self.assertEqual(exons, [{ quoted_test_gene_id: [{ 'start_box': [1, 99], 'end_box': [1, 99], 'counts': [1, 0] }] }]) multis = dot_rmats_contents['multis'] if dot_rmats_i == 0: self.assertEqual(multis, [{ quoted_test_gene_id: [{ 'junction_pairs': [[1, 1], [100, 200], [299, 299]], 'count': 1 }] }]) else: self.assertEqual(multis, [{ quoted_test_gene_id: [{ 'junction_pairs': [[201, 201], [300, 400], [499, 499]], 'count': 1 }] }]) self._cp_with_prefix('prep_1_', self._prep_1_tmp_dir, self._post_tmp_dir) def _check_results_prep_2(self): self._check_no_error_results() command_stdout_file_name = self._get_stdout_file_name() with open(command_stdout_file_name, 'rt') as out_f_h: out_lines = out_f_h.readlines() tests.util.assert_no_line_has(self, out_lines, 'Processing count files') test_gene_id = tests.util.gene_id_str(1) quoted_test_gene_id = tests.util.double_quote(test_gene_id) dot_rmats_paths = self._get_dot_rmats_paths(self._prep_2_tmp_dir) self.assertEqual(len(dot_rmats_paths), 2) for dot_rmats_i in range(2): dot_rmats_contents, error = output_parser.parse_dot_rmats( dot_rmats_paths[dot_rmats_i]) self.assertFalse(error) self.assertEqual(dot_rmats_contents['bams'], [self._sample_2_bams[dot_rmats_i].path]) self.assertEqual(dot_rmats_contents['read_length'], self._read_length) novel_juncs = dot_rmats_contents['novel_juncs'] self.assertEqual(novel_juncs, [{quoted_test_gene_id: [[0, 0, 2]]}]) exons = dot_rmats_contents['exons'] if dot_rmats_i == 0: self.assertEqual(exons, [{ quoted_test_gene_id: [{ 'start_box': [401, 499], 'end_box': [401, 499], 'counts': [1, 0] }] }]) else: self.assertEqual(exons, [{ quoted_test_gene_id: [{ 'start_box': [1, 99], 'end_box': [1, 99], 'counts': [1, 0] }] }]) multis = dot_rmats_contents['multis'] if dot_rmats_i == 0: self.assertEqual(multis, [{ quoted_test_gene_id: [{ 'junction_pairs': [[1, 1], [100, 400], [499, 499]], 'count': 1 }] }]) else: self.assertEqual(multis, [{ quoted_test_gene_id: [{ 'junction_pairs': [[1, 1], [100, 400], [499, 499]], 'count': 1 }] }]) self._cp_with_prefix('prep_2_', self._prep_2_tmp_dir, self._post_tmp_dir) def _check_results_inte_1_fail(self): self.assertNotEqual(self._rmats_return_code, 0) command_stderr_file_name = self._get_stderr_file_name() with open(command_stderr_file_name, 'rt') as err_f_h: err_lines = err_f_h.readlines() tests.util.assert_some_line_has( self, err_lines, 'input bam files with no associated prep output') def _check_results_inte_1_pass(self): self._check_no_error_results() def _check_results_inte_2_fail(self): self.assertNotEqual(self._rmats_return_code, 0) command_stderr_file_name = self._get_stderr_file_name() with open(command_stderr_file_name, 'rt') as err_f_h: err_lines = err_f_h.readlines() tests.util.assert_some_line_has( self, err_lines, 'bam files not in input but associated with prep output') def 
_check_results_inte_2_pass(self): self._check_no_error_results() def _check_results_post(self): self._check_no_error_results() command_stdout_file_name = self._get_stdout_file_name() with open(command_stdout_file_name, 'rt') as out_f_h: out_lines = out_f_h.readlines() tests.util.assert_some_line_has(self, out_lines, 'Processing count files') from_gtf_se_path = os.path.join(self._out_dir, 'fromGTF.SE.txt') from_gtf_se_header, from_gtf_se_rows, error = output_parser.parse_from_gtf( from_gtf_se_path) self.assertFalse(error) self.assertEqual(len(from_gtf_se_rows), 1) from_gtf_se_row = from_gtf_se_rows[0] self.assertEqual(from_gtf_se_row['GeneID'], tests.util.double_quote(tests.util.gene_id_str(1))) self.assertEqual(from_gtf_se_row['exonStart_0base'], '200') self.assertEqual(from_gtf_se_row['exonEnd'], '300') jc_raw_se_path = os.path.join(self._out_dir, 'JC.raw.input.SE.txt') jc_raw_se_header, jc_raw_se_rows, error = output_parser.parse_jc_raw( jc_raw_se_path) self.assertFalse(error) self.assertEqual(len(jc_raw_se_rows), 1) jc_raw_se_row = jc_raw_se_rows[0] self.assertEqual(jc_raw_se_row['ID'], from_gtf_se_row['ID']) self.assertEqual(jc_raw_se_row['IJC_SAMPLE_1'], '1,1') self.assertEqual(jc_raw_se_row['SJC_SAMPLE_1'], '0,0') self.assertEqual(jc_raw_se_row['IJC_SAMPLE_2'], '0,0') self.assertEqual(jc_raw_se_row['SJC_SAMPLE_2'], '1,1') se_mats_jc_path = os.path.join(self._out_dir, 'SE.MATS.JC.txt') se_mats_jc_header, se_mats_jc_rows, error = output_parser.parse_mats_jc( se_mats_jc_path) self.assertFalse(error) self._check_se_mats_jc_header(se_mats_jc_header) self.assertEqual(len(se_mats_jc_rows), 1) se_mats_jc_row = se_mats_jc_rows[0] pvalue = float(se_mats_jc_row['PValue']) tests.util.assert_within_bounds(self, pvalue, 0, 1) fdr = float(se_mats_jc_row['FDR']) tests.util.assert_within_bounds(self, fdr, 0, 1) inc_level_1_splits = se_mats_jc_row['IncLevel1'].split(',') self.assertEqual(len(inc_level_1_splits), 2) self.assertAlmostEqual(float(inc_level_1_splits[0]), 1) self.assertAlmostEqual(float(inc_level_1_splits[1]), 1) inc_level_2_splits = se_mats_jc_row['IncLevel2'].split(',') self.assertEqual(len(inc_level_2_splits), 2) self.assertAlmostEqual(float(inc_level_2_splits[0]), 0) self.assertAlmostEqual(float(inc_level_2_splits[1]), 0) self.assertAlmostEqual(float(se_mats_jc_row['IncLevelDifference']), 1) def _check_results_dup_input_bam(self): self.assertNotEqual(self._rmats_return_code, 0) command_stderr_file_name = self._get_stderr_file_name() with open(command_stderr_file_name, 'rt') as err_f_h: err_lines = err_f_h.readlines() dup_bam_path = self._sample_1_bams[0].path expected_error = '{} given 2 times'.format(dup_bam_path) tests.util.assert_some_line_has(self, err_lines, expected_error) def _check_results_dup_prep_bam(self): self.assertNotEqual(self._rmats_return_code, 0) command_stderr_file_name = self._get_stderr_file_name() with open(command_stderr_file_name, 'rt') as err_f_h: err_lines = err_f_h.readlines() for bam in self._sample_1_bams: dup_bam_path = bam.path expected_error = '{} found 2 times in .rmats'.format(dup_bam_path) tests.util.assert_some_line_has(self, err_lines, expected_error) def _check_results_miss_input_bam(self): self._check_no_error_results() def _check_results_miss_prep_bam(self): self.assertNotEqual(self._rmats_return_code, 0) command_stderr_file_name = self._get_stderr_file_name() with open(command_stderr_file_name, 'rt') as err_f_h: err_lines = err_f_h.readlines() for bam in self._sample_2_bams: miss_bam_path = bam.path expected_error = '{} not found in 
.rmats'.format(miss_bam_path) tests.util.assert_some_line_has(self, err_lines, expected_error) if __name__ == '__main__': unittest.main(verbosity=2)
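# Illustrative sketch, separate from the test above: its sub-steps exercise
# rMATS' split workflow, where each BAM list is first run with --task prep into
# its own --tmp directory, the generated .rmats files are copied into a shared
# tmp directory, and a single --task post run combines them. A condensed
# end-to-end flow using the same flags the test passes (the file paths and the
# 'rmats.py' entry point name are placeholders, not taken from the test config):
import subprocess


def run_rmats(extra_args):
    base = ['python', 'rmats.py', '--gtf', 'test.gtf', '--od', 'out',
            '-t', 'paired', '--readLength', '50']
    subprocess.run(base + extra_args, check=True)


# Prep each sample separately.
run_rmats(['--b1', 'b1.txt', '--task', 'prep', '--tmp', 'tmp_prep_1'])
run_rmats(['--b1', 'b2.txt', '--task', 'prep', '--tmp', 'tmp_prep_2'])
# Copy the *.rmats outputs from both prep tmp dirs into tmp_post (the test does
# this with its _cp_with_prefix helper), then combine everything with post.
run_rmats(['--b1', 'b1.txt', '--b2', 'b2.txt', '--task', 'post',
           '--tmp', 'tmp_post'])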
1.210938
1
skbio/draw/tests/test_distributions.py
johnchase/scikit-bio
0
1544
# ---------------------------------------------------------------------------- # Copyright (c) 2013--, scikit-bio development team. # # Distributed under the terms of the Modified BSD License. # # The full license is in the file COPYING.txt, distributed with this software. # ---------------------------------------------------------------------------- from __future__ import absolute_import, division, print_function from unittest import TestCase, main import numpy as np import numpy.testing as npt import matplotlib.pyplot as plt from skbio.draw import boxplots, grouped_distributions from skbio.draw._distributions import ( _calc_data_point_locations, _calc_data_point_ticks, _color_box_plot, _create_legend, _get_distribution_markers, _is_single_matplotlib_color, _plot_bar_data, _plot_box_data, _plot_scatter_data, _set_axes_options, _set_figure_size, _validate_input, _validate_x_values) class DistributionsTests(TestCase): def setUp(self): # Test null data list. self.Null = None # Test empty data list. self.Empty = [] # Test nested empty data list. self.EmptyNested = [[]] # Test nested empty data list (for bar/scatter plots). self.EmptyDeeplyNested = [[[]]] # Test invalid number of samples in data list (for bar/scatter plots). self.InvalidNumSamples = [[[1, 2, 3, 4, 5]], [[4, 5, 6, 7, 8], [2, 3, 2]], [[4, 7, 10, 33, 32, 6, 7, 8]]] # Test valid data with three samples and four data points # (for bar/scatter plots). self.ValidTypicalData = [[[1.0, 2, 3.5, 5], [2, 3, 5, 6], [2, 3, 8]], [[4, 7, 8], [8, 9, 10, 11], [9.0, 4, 1, 1]], [[4, 33, 32, 6, 8], [5, 4, 8, 13], [1, 1, 2]], [[2, 2, 2, 2], [3, 9, 8], [2, 1, 6, 7, 4, 5]]] # Test valid data with one sample (for bar/scatter plots). self.ValidSingleSampleData = [[[1, 2, 3, 4, 5]], [[4, 5, 6, 7, 8]], [[4, 7, 10, 33, 32, 6, 7, 8]]] # Test typical data to be plotted by the boxplot function. self.ValidTypicalBoxData = [[3.4, 10, 11.67, 12.0, 2, 2, 99.99], [2.3, 4, 5, 88, 9, 10, 11, 1, 0, 3, -8], [2, 9, 7, 5, 6]] def tearDown(self): # We get a warning from mpl if we don't clean up our figures. 
plt.close('all') def test_validate_input_null(self): with npt.assert_raises(ValueError): _validate_input(self.Null, None, None, None) def test_validate_input_empty(self): with npt.assert_raises(ValueError): _validate_input(self.Empty, None, None, None) def test_validate_input_empty_nested(self): with npt.assert_raises(ValueError): _validate_input(self.EmptyNested, None, None, None) def test_validate_input_empty_deeply_nested(self): num_points, num_samples = _validate_input(self.EmptyDeeplyNested, None, None, None) self.assertEqual(num_points, 1) self.assertEqual(num_samples, 1) def test_validate_input_empty_point(self): with npt.assert_raises(ValueError): _validate_input([[[1, 2, 3], [4, 5]], []], None, None, None) def test_validate_input_invalid_num_samples(self): with npt.assert_raises(ValueError): _validate_input(self.InvalidNumSamples, None, None, None) def test_validate_input_invalid_data_point_names(self): with npt.assert_raises(ValueError): _validate_input(self.ValidSingleSampleData, None, ["T0", "T1"], None) def test_validate_input_invalid_sample_names(self): with npt.assert_raises(ValueError): _validate_input(self.ValidSingleSampleData, None, None, ["Men", "Women"]) def test_validate_input_all_valid_input(self): self.assertEqual(_validate_input(self.ValidTypicalData, [1, 3, 4, 8], ["T0", "T1", "T2", "T3"], ["Infants", "Children", "Teens"]), (4, 3)) def test_validate_x_values_invalid_x_values(self): with npt.assert_raises(ValueError): _validate_x_values([1, 2, 3, 4], ["T0", "T1", "T2"], len(self.ValidSingleSampleData)) def test_validate_x_values_invalid_x_tick_labels(self): with npt.assert_raises(ValueError): _validate_x_values(None, ["T0"], len(self.ValidSingleSampleData)) def test_validate_x_values_nonnumber_x_values(self): with npt.assert_raises(ValueError): _validate_x_values(["foo", 2, 3], None, len(self.ValidSingleSampleData)) def test_validate_x_values_valid_x_values(self): _validate_x_values([1, 2.0, 3], None, 3) def test_get_distribution_markers_null_marker_list(self): self.assertEqual(_get_distribution_markers('colors', None, 5), ['b', 'g', 'r', 'c', 'm']) def test_get_distribution_markers_empty_marker_list(self): self.assertEqual(_get_distribution_markers('colors', None, 4), ['b', 'g', 'r', 'c']) def test_get_distribution_markers_insufficient_markers(self): self.assertEqual(npt.assert_warns(RuntimeWarning, _get_distribution_markers, 'colors', None, 10), ['b', 'g', 'r', 'c', 'm', 'y', 'w', 'b', 'g', 'r']) self.assertEqual(npt.assert_warns(RuntimeWarning, _get_distribution_markers, 'symbols', ['^', '>', '<'], 5), ['^', '>', '<', '^', '>']) def test_get_distribution_markers_bad_marker_type(self): with npt.assert_raises(ValueError): _get_distribution_markers('shapes', [], 3) def test_get_distribution_markers_zero_markers(self): self.assertEqual(_get_distribution_markers('symbols', None, 0), []) self.assertEqual(_get_distribution_markers('symbols', ['^'], 0), []) def test_get_distribution_markers_negative_num_markers(self): with npt.assert_raises(ValueError): _get_distribution_markers('symbols', [], -1) def test_plot_bar_data(self): fig, ax = plt.subplots() result = _plot_bar_data(ax, [1, 2, 3], 'red', 0.5, 3.75, 1.5, 'stdv') self.assertEqual(result[0].__class__.__name__, "Rectangle") self.assertEqual(len(result), 1) self.assertAlmostEqual(result[0].get_width(), 0.5) self.assertAlmostEqual(result[0].get_facecolor(), (1.0, 0.0, 0.0, 1.0)) self.assertAlmostEqual(result[0].get_height(), 2.0) fig, ax = plt.subplots() result = _plot_bar_data(ax, [1, 2, 3], 'red', 0.5, 3.75, 1.5, 
'sem') self.assertEqual(result[0].__class__.__name__, "Rectangle") self.assertEqual(len(result), 1) self.assertAlmostEqual(result[0].get_width(), 0.5) self.assertAlmostEqual(result[0].get_facecolor(), (1.0, 0.0, 0.0, 1.0)) self.assertAlmostEqual(result[0].get_height(), 2.0) def test_plot_bar_data_bad_error_bar_type(self): fig, ax = plt.subplots() with npt.assert_raises(ValueError): _plot_bar_data(ax, [1, 2, 3], 'red', 0.5, 3.75, 1.5, 'var') def test_plot_bar_data_empty(self): fig, ax = plt.subplots() result = _plot_bar_data(ax, [], 'red', 0.5, 3.75, 1.5, 'stdv') self.assertTrue(result is None) fig, ax = plt.subplots() result = _plot_bar_data(ax, [], 'red', 0.5, 3.75, 1.5, 'sem') self.assertTrue(result is None) def test_plot_scatter_data(self): fig, ax = plt.subplots() result = _plot_scatter_data(ax, [1, 2, 3], '^', 0.77, 1, 1.5, 'stdv') self.assertEqual(result.get_sizes(), 20) def test_plot_scatter_data_empty(self): fig, ax = plt.subplots() result = _plot_scatter_data(ax, [], '^', 0.77, 1, 1.5, 'stdv') self.assertTrue(result is None) def test_plot_box_data(self): fig, ax = plt.subplots() result = _plot_box_data(ax, [0, 0, 7, 8, -3, 44], 'blue', 0.33, 55, 1.5, 'stdv') self.assertEqual(result.__class__.__name__, "dict") self.assertEqual(len(result['boxes']), 1) self.assertEqual(len(result['medians']), 1) self.assertEqual(len(result['whiskers']), 2) # mpl < 1.4.0 creates two Line2D instances, mpl 1.4.0 creates one, # though the resulting plot looks identical between the two versions. # see: # https://github.com/pydata/pandas/issues/8382#issuecomment-56840974 # https://github.com/matplotlib/matplotlib/issues/3544 self.assertTrue(len(result['fliers']) == 1 or len(result['fliers']) == 2) self.assertEqual(len(result['caps']), 2) def test_plot_box_data_empty(self): fig, ax = plt.subplots() result = _plot_box_data(ax, [], 'blue', 0.33, 55, 1.5, 'stdv') self.assertTrue(result is None) def test_calc_data_point_locations_invalid_x_values(self): with npt.assert_raises(ValueError): _calc_data_point_locations(3, [1, 10.5]) def test_calc_data_point_locations_default_spacing(self): locs = _calc_data_point_locations(4) np.testing.assert_allclose(locs, [1, 2, 3, 4]) def test_calc_data_point_locations_custom_spacing(self): # Scaling down from 3..12 to 1..4. locs = _calc_data_point_locations(4, [3, 4, 10, 12]) np.testing.assert_allclose(locs, np.array([1, 1.33333333, 3.33333333, 4])) # Sorted order shouldn't affect scaling. locs = _calc_data_point_locations(4, [4, 3, 12, 10]) np.testing.assert_allclose(locs, np.array([1.33333333, 1, 4, 3.33333333])) # Scaling up from 0.001..0.87 to 1..3. 
locs = _calc_data_point_locations(3, [0.001, 0.2543, 0.87]) np.testing.assert_allclose(locs, np.array([1, 1.58296893, 3])) def test_calc_data_point_ticks(self): ticks = _calc_data_point_ticks(np.array([1, 5, 9, 11]), 1, 0.5, False) np.testing.assert_allclose(ticks, [1.25, 5.25, 9.25, 11.25]) ticks = _calc_data_point_ticks(np.array([0]), 3, 0.5, False) np.testing.assert_allclose(ticks, [0.75]) def test_set_axes_options(self): fig, ax = plt.subplots() _set_axes_options(ax, "Plot Title", "x-axis label", "y-axis label", x_tick_labels=["T0", "T1"]) self.assertEqual(ax.get_title(), "Plot Title") self.assertEqual(ax.get_ylabel(), "y-axis label") self.assertEqual(ax.get_xticklabels()[0].get_text(), "T0") self.assertEqual(ax.get_xticklabels()[1].get_text(), "T1") def test_set_axes_options_ylim(self): fig, ax = plt.subplots() _set_axes_options(ax, "Plot Title", "x-axis label", "y-axis label", x_tick_labels=["T0", "T1", "T2"], y_min=0, y_max=1) self.assertEqual(ax.get_title(), "Plot Title") self.assertEqual(ax.get_ylabel(), "y-axis label") self.assertEqual(ax.get_xticklabels()[0].get_text(), "T0") self.assertEqual(ax.get_xticklabels()[1].get_text(), "T1") self.assertEqual(ax.get_ylim(), (0.0, 1.0)) def test_set_axes_options_x_values_as_tick_labels(self): fig, ax = plt.subplots() _set_axes_options(ax, "Plot Title", "x-axis label", "y-axis label", x_values=[42, 45, 800]) self.assertEqual(ax.get_title(), "Plot Title") self.assertEqual(ax.get_ylabel(), "y-axis label") self.assertEqual(ax.get_xticklabels()[0].get_text(), '42') self.assertEqual(ax.get_xticklabels()[1].get_text(), '45') self.assertEqual(ax.get_xticklabels()[2].get_text(), '800') def test_set_axes_options_bad_ylim(self): fig, ax = plt.subplots() with npt.assert_raises(ValueError): _set_axes_options(ax, "Plot Title", "x-axis label", "y-axis label", x_tick_labels=["T0", "T1", "T2"], y_min='car', y_max=30) def test_set_axes_options_invalid_x_tick_labels_orientation(self): fig, ax = plt.subplots() with npt.assert_raises(ValueError): _set_axes_options(ax, "Plot Title", "x-axis label", "y-axis label", x_tick_labels=["T0", "T1"], x_tick_labels_orientation='brofist') def test_create_legend(self): fig, ax = plt.subplots() _create_legend(ax, ['b', 'r'], ['dist1', 'dist2'], 'colors') self.assertEqual(len(ax.get_legend().get_texts()), 2) fig, ax = plt.subplots() _create_legend(ax, ['^', '<', '>'], ['dist1', 'dist2', 'dist3'], 'symbols') self.assertEqual(len(ax.get_legend().get_texts()), 3) def test_create_legend_invalid_input(self): fig, ax = plt.subplots() with npt.assert_raises(ValueError): _create_legend(ax, ['^', '<', '>'], ['dist1', 'dist2'], 'symbols') with npt.assert_raises(ValueError): _create_legend(ax, ['^', '<', '>'], ['dist1', 'dist2', 'dist3'], 'foo') def test_grouped_distributions_bar(self): fig = grouped_distributions('bar', self.ValidTypicalData, [1, 4, 10, 11], ["T0", "T1", "T2", "T3"], ["Infants", "Children", "Teens"], ['b', 'r', 'g'], "x-axis label", "y-axis label", "Test") ax = fig.get_axes()[0] self.assertEqual(ax.get_title(), "Test") self.assertEqual(ax.get_xlabel(), "x-axis label") self.assertEqual(ax.get_ylabel(), "y-axis label") self.assertEqual(len(ax.get_xticklabels()), 4) np.testing.assert_allclose(ax.get_xticks(), [1.1125, 2.0125, 3.8125, 4.1125]) def test_grouped_distributions_insufficient_colors(self): args = ('bar', self.ValidTypicalData, [1, 4, 10, 11], ["T0", "T1", "T2", "T3"], ["Infants", "Children", "Teens"], ['b', 'r'], "x-axis label", "y-axis label", "Test") npt.assert_warns(RuntimeWarning, grouped_distributions, 
*args) def test_grouped_distributions_scatter(self): fig = grouped_distributions('scatter', self.ValidTypicalData, [1, 4, 10, 11], ["T0", "T1", "T2", "T3"], ["Infants", "Children", "Teens"], ['^', '>', '<'], "x-axis label", "y-axis label", "Test") ax = fig.get_axes()[0] self.assertEqual(ax.get_title(), "Test") self.assertEqual(ax.get_xlabel(), "x-axis label") self.assertEqual(ax.get_ylabel(), "y-axis label") self.assertEqual(len(ax.get_xticklabels()), 4) np.testing.assert_allclose(ax.get_xticks(), [1.075, 1.975, 3.775, 4.075]) def test_grouped_distributions_insufficient_symbols(self): args = ('scatter', self.ValidTypicalData, [1, 4, 10, 11], ["T0", "T1", "T2", "T3"], ["Infants", "Children", "Teens"], ['^'], "x-axis label", "y-axis label", "Test") npt.assert_warns(RuntimeWarning, grouped_distributions, *args) def test_grouped_distributions_empty_marker_list(self): grouped_distributions('scatter', self.ValidTypicalData, [1, 4, 10, 11], ["T0", "T1", "T2", "T3"], ["Infants", "Children", "Teens"], [], "x-axis label", "y-axis label", "Test") def test_grouped_distributions_box(self): fig = grouped_distributions('box', self.ValidTypicalData, [1, 4, 10, 11], ["T0", "T1", "T2", "T3"], ["Infants", "Children", "Teens"], ['b', 'g', 'y'], "x-axis label", "y-axis label", "Test") ax = fig.get_axes()[0] self.assertEqual(ax.get_title(), "Test") self.assertEqual(ax.get_xlabel(), "x-axis label") self.assertEqual(ax.get_ylabel(), "y-axis label") self.assertEqual(len(ax.get_xticklabels()), 4) np.testing.assert_allclose(ax.get_xticks(), [1.075, 1.975, 3.775, 4.075]) def test_grouped_distributions_error(self): with npt.assert_raises(ValueError): grouped_distributions('pie', self.ValidTypicalData, [1, 4, 10, 11], ["T0", "T1", "T2", "T3"], ["Infants", "Children", "Teens"], ['b', 'g', 'y'], "x-axis label", "y-axis label", "Test") def test_grouped_distributions_negative_distribution_width(self): args = ('box', self.ValidTypicalData, [1, 4, 10, 11], ["T0", "T1", "T2", "T3"], ["Infants", "Children", "Teens"], ['b', 'g', 'y'], "x-axis label", "y-axis label", "Test") with self.assertRaises(ValueError): grouped_distributions(*args, distribution_width=0) with self.assertRaises(ValueError): grouped_distributions(*args, distribution_width=-42) def test_boxplots(self): fig = boxplots(self.ValidTypicalBoxData, [1, 4, 10], ["Data 1", "Data 2", "Data 3"], "Test", "x-axis label", "y-axis label", legend=(('blue', 'red'), ('foo', 'bar'))) ax = fig.get_axes()[0] self.assertEqual(ax.get_title(), "Test") self.assertEqual(ax.get_xlabel(), "x-axis label") self.assertEqual(ax.get_ylabel(), "y-axis label") self.assertEqual(len(ax.get_xticklabels()), 3) self.assertTrue(np.array_equal(ax.get_xticks(), [1, 4, 10])) def test_boxplots_empty_distributions(self): fig = boxplots([[1, 2, 3], [], [4, 5, 6]], [1, 4, 10], ["Data 1", "Data 2", "Data 3"], "Test", "x-axis label", "y-axis label") ax = fig.get_axes()[0] self.assertEqual(ax.get_title(), "Test") self.assertEqual(ax.get_xlabel(), "x-axis label") self.assertEqual(ax.get_ylabel(), "y-axis label") self.assertEqual(len(ax.get_xticklabels()), 3) self.assertTrue(np.array_equal(ax.get_xticks(), [1, 4, 10])) # second distribution (empty) should have nans since it is hidden. # boxplots in mpl < 1.4.0 have 8 lines per boxplot, while mpl 1.4.0 has # 7. 
in either case, the line at index 8 should have a nan for its y # value lines = ax.get_lines() self.assertTrue(np.isnan(lines[8].get_xydata()[0][1])) # line in first distribution should *not* have nan for its y value self.assertFalse(np.isnan(lines[0].get_xydata()[0][1])) # All distributions are empty. fig = boxplots([[], [], []], [1, 4, 10], ["Data 1", "Data 2", "Data 3"], "Test", "x-axis label", "y-axis label") ax = fig.get_axes()[0] self.assertEqual(ax.get_title(), "Test") self.assertEqual(ax.get_xlabel(), "x-axis label") self.assertEqual(ax.get_ylabel(), "y-axis label") self.assertEqual(len(ax.get_xticklabels()), 3) self.assertTrue(np.array_equal(ax.get_xticks(), [1, 4, 10])) lines = ax.get_lines() self.assertTrue(np.isnan(lines[0].get_xydata()[0][1])) self.assertTrue(np.isnan(lines[8].get_xydata()[0][1])) self.assertTrue(np.isnan(lines[16].get_xydata()[0][1])) def test_boxplots_box_colors(self): # Coloring works with all empty distributions. fig = boxplots([[], [], []], box_colors=['blue', 'red', 'yellow']) ax = fig.get_axes()[0] self.assertEqual(len(ax.get_xticklabels()), 3) # patch colors should match what we specified self.assertEqual(ax.patches[0].get_facecolor(), (0.0, 0.0, 1.0, 1.0)) self.assertEqual(ax.patches[1].get_facecolor(), (1.0, 0.0, 0.0, 1.0)) self.assertEqual(ax.patches[2].get_facecolor(), (1.0, 1.0, 0.0, 1.0)) # patch location should include at least one nan since the distribution # is empty, and thus hidden for patch in ax.patches: self.assertTrue(np.isnan(patch.xy[0][1])) fig = boxplots([[], [], []], box_colors='pink') ax = fig.get_axes()[0] self.assertEqual(len(ax.get_xticklabels()), 3) for patch in ax.patches: npt.assert_almost_equal( patch.get_facecolor(), (1.0, 0.7529411764705882, 0.796078431372549, 1.0)) self.assertTrue(np.isnan(patch.xy[0][1])) # Coloring works with some empty distributions. fig = boxplots([[], [1, 2, 3.5], []], box_colors=['blue', 'red', 'yellow']) ax = fig.get_axes()[0] self.assertEqual(len(ax.get_xticklabels()), 3) self.assertEqual(ax.patches[0].get_facecolor(), (0.0, 0.0, 1.0, 1.0)) self.assertEqual(ax.patches[1].get_facecolor(), (1.0, 0.0, 0.0, 1.0)) self.assertEqual(ax.patches[2].get_facecolor(), (1.0, 1.0, 0.0, 1.0)) self.assertTrue(np.isnan(ax.patches[0].xy[0][1])) self.assertFalse(np.isnan(ax.patches[1].xy[0][1])) self.assertTrue(np.isnan(ax.patches[2].xy[0][1])) def test_boxplots_invalid_input(self): # Non-numeric entries in distribution. with npt.assert_raises(ValueError): boxplots([[1, 'foo', 3]]) # Number of colors doesn't match number of distributions. with npt.assert_raises(ValueError): boxplots([[1, 2, 3], [], [4, 5, 6]], box_colors=['blue', 'red']) # Invalid legend. with npt.assert_raises(ValueError): boxplots([[1, 2, 3]], legend=('foo', 'bar', 'baz')) def test_color_box_plot(self): fig, ax = plt.subplots() box_plot = plt.boxplot(self.ValidTypicalBoxData) _color_box_plot(ax, box_plot, ['blue', 'w', (1, 1, 0.9)]) # Some colors are None. fig, ax = plt.subplots() box_plot = plt.boxplot(self.ValidTypicalBoxData) _color_box_plot(ax, box_plot, ['blue', None, (1, 1, 0.9)]) # All colors are None. fig, ax = plt.subplots() box_plot = plt.boxplot(self.ValidTypicalBoxData) _color_box_plot(ax, box_plot, [None, None, None]) def test_color_box_plot_invalid_input(self): # Invalid color. fig, ax = plt.subplots() box_plot = plt.boxplot(self.ValidTypicalBoxData) with npt.assert_raises(ValueError): _color_box_plot(ax, box_plot, ['red', 'foobarbaz', 'blue']) # Wrong number of colors. 
fig, ax = plt.subplots() box_plot = plt.boxplot(self.ValidTypicalBoxData) with npt.assert_raises(ValueError): _color_box_plot(ax, box_plot, ['blue', (1, 1, 0.9)]) def test_is_single_matplotlib_color(self): self.assertTrue(_is_single_matplotlib_color('w')) self.assertTrue(_is_single_matplotlib_color('white')) self.assertTrue(_is_single_matplotlib_color([1, 1, 1])) self.assertTrue(_is_single_matplotlib_color([1, 1, 1, 1])) self.assertTrue(_is_single_matplotlib_color((1, 1, 1))) self.assertTrue(_is_single_matplotlib_color((1, 1, 1, 1))) self.assertTrue(_is_single_matplotlib_color((1.0, 1.0, 1.0, 1.0))) self.assertTrue(_is_single_matplotlib_color((1.0, 1, 1.0))) self.assertTrue(_is_single_matplotlib_color((2.0, 1, 1.0))) self.assertFalse(_is_single_matplotlib_color(['w', 'r'])) self.assertFalse(_is_single_matplotlib_color(['w'])) self.assertFalse(_is_single_matplotlib_color(('w',))) self.assertFalse(_is_single_matplotlib_color(((1.0, 1.0, 1),))) self.assertFalse(_is_single_matplotlib_color(((1.0, 1.0, 1), (0.9, 0.9)))) def test_set_figure_size(self): fig, ax = plt.subplots() _set_axes_options(ax, 'foo', 'x_foo', 'y_foo', x_tick_labels=['foofoofoo', 'barbarbar'], x_tick_labels_orientation='vertical') _set_figure_size(fig, 3, 4) self.assertTrue(np.array_equal(fig.get_size_inches(), (3, 4))) def test_set_figure_size_defaults(self): fig, ax = plt.subplots() _set_axes_options(ax, 'foo', 'x_foo', 'y_foo', x_tick_labels=['foofoofoo', 'barbarbar'], x_tick_labels_orientation='vertical') orig_fig_size = fig.get_size_inches() _set_figure_size(fig) self.assertTrue(np.array_equal(fig.get_size_inches(), orig_fig_size)) def test_set_figure_size_invalid(self): fig, ax = plt.subplots() _set_axes_options(ax, 'foo', 'x_foo', 'y_foo', x_tick_labels=['foofoofoo', 'barbarbar'], x_tick_labels_orientation='vertical') orig_fig_size = fig.get_size_inches() _set_figure_size(fig, -1, 0) self.assertTrue(np.array_equal(fig.get_size_inches(), orig_fig_size)) def test_set_figure_size_long_labels(self): fig, ax = plt.subplots() _set_axes_options(ax, 'foo', 'x_foo', 'y_foo', x_tick_labels=['foofoofooooooooooooooooooooooooo' 'oooooooooooooooooooooooooooooooo' 'oooooooooooooooooooooooooooooooo' 'oooo', 'barbarbar'], x_tick_labels_orientation='vertical') npt.assert_warns(RuntimeWarning, _set_figure_size, fig, 3, 3) npt.assert_array_equal(fig.get_size_inches(), (3, 3)) if __name__ == '__main__': main()
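The `_is_single_matplotlib_color` assertions above pin down the helper's expected behaviour without showing its body. A minimal sketch that is consistent with those assertions (the name and the logic are assumptions, not the library's actual implementation):

```python
def is_single_matplotlib_color(value):
    """Guess whether `value` denotes one matplotlib color rather than a list of colors."""
    if isinstance(value, str):
        # named colors and single-letter codes such as 'w' or 'white'
        return True
    if len(value) in (3, 4):
        # an RGB or RGBA sequence made of plain numbers
        return all(isinstance(channel, (int, float)) for channel in value)
    return False
```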
1.8125
2
python/test-nose-3.py
li-ma/homework
0
1560
# Module Level def setUp(): print 'test setup' def tearDown(): print 'test teardown' # Function Level def func_1_setup(): print 'test_func_1 setup' def func_1_teardown(): print 'test_func_1_teardown' # Target Func def test_func_1(): print 'test_func_1 run' assert True test_func_1.setUp = func_1_setup test_func_1.tearDown = func_1_teardown
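For comparison, nose also ships a decorator that wires up the same per-function fixtures without assigning attributes by hand. A small sketch, assuming nose is installed:

```python
from nose.tools import with_setup

def func_2_setup():
    print('test_func_2 setup')

def func_2_teardown():
    print('test_func_2 teardown')

@with_setup(func_2_setup, func_2_teardown)
def test_func_2():
    # nose runs func_2_setup before and func_2_teardown after this test
    print('test_func_2 run')
    assert True
```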
1.382813
1
day7/main5list.py
nikhilsamninan/python-files
0
1584
v=[a[-1] for a in a.split() if(len(a)%2==0)] print(v)
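The one-liner above never defines the source string and reuses the name `a` for both the string and the comprehension variable, so on its own it raises NameError. A self-contained rendering with an assumed sample string:

```python
a = "spam eggs bacon toast"   # assumed input; the original snippet never defines it
v = [word[-1] for word in a.split() if len(word) % 2 == 0]
print(v)                      # last letter of every even-length word -> ['m', 's']
```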
1.195313
1
Unsupervised/pix2pixHD/extract_frames.py
Kebniss/AutoDetect
1
1600
import os import cv2 import argparse from utils import * from tqdm import tqdm from glob import glob from pathlib import Path def _extract_frames(video_path, parent, start=0, sampling_f=1): vidcap = cv2.VideoCapture(video_path) success, image = success, image = vidcap.read() count = -1 saved = 0 print(f'Processing: {video_path}') while success: count += 1 if count % 300 == 0: print('Processing frame: ', count) if count % sampling_f == 0: # sampling cv2.imwrite(''.join([dest_folder, f"/{count + start}.jpg"]), image) saved += 1 success, image = vidcap.read() # read next print(f'Successfully saved {saved} frames to {dest_folder}') return count + start parser = argparse.ArgumentParser( description='build a "frame dataset" from a given video') parser.add_argument('-input', dest="input", required=True, help='''Path to a single video or a folder. If path to folder the algorithm will extract frames from all files with extension defined in --extension and save them under separate folders under dest_folder. The frames from each video will be saved under a folder with its name. ''') parser.add_argument('--dest-folder', dest="dest_folder", default='./dataset/', help='''Path where to store frames. NB all files in this folder will be removed before adding the new frames''') parser.add_argument('--same-folder', dest="same_folder", default=False, help='''Set it to True if you want to save the frames of all videos to the same folder in ascending order going from the first frame of the first video to the last frame of the last video. If True frames will be saved in dest_folder/frames.''') parser.add_argument('--sampling', help='how many fps', default='3') parser.add_argument('--run-type', help='train or test', default='train') parser.add_argument('--extension', help='avi, mp4, mov...', default='mp4') parser.add_argument('-width', help='output width', default=640, type=int) parser.add_argument('-height', help='output height', default=480, type=int) args = parser.parse_args() mkdir(args.dest_folder) if (args.width % 32 != 0) or (args.height % 32 != 0): raise Exception("Please use width and height that are divisible by 32") if os.path.isdir(args.input): inp = str(Path(args.input) / f'*.{args.extension}') videos = [v for v in glob(inp)] if not videos: raise Exception(f'No {args.extension} files in input directory {args.input}') elif os.path.isfile(args.input): _, ext = get_filename_extension(args.input) if ext != args.extension: raise ValueError(f'Correct inputs: folder or path to {args.extension} file only') videos = [args.input] else: raise ValueError(f'Correct inputs: folder or path to {args.extension} file only') if args.same_folder: start = 0 dest_folder = str(Path(args.dest_folder) / f'{args.run_type}_frames') mkdir(dest_folder) for v in tqdm(videos): if not args.same_folder: start = 0 name, _ = get_filename_extension(v) dest_folder = str(Path(args.dest_folder) / name) mkdir(dest_folder) start = _extract_frames(v, dest_folder, start, sampling_f=int(args.sampling))
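A tidied sketch of the helper above: same sampling logic, but without the redundant chained assignment (`success, image = success, image = vidcap.read()`) and with the destination folder taken as an explicit parameter instead of the unused `parent` argument plus the module-level `dest_folder`:

```python
import cv2  # opencv-python

def extract_frames(video_path, dest_folder, start=0, sampling_f=1):
    """Write every `sampling_f`-th frame of `video_path` into `dest_folder`."""
    vidcap = cv2.VideoCapture(video_path)
    success, image = vidcap.read()
    count, saved = -1, 0
    while success:
        count += 1
        if count % sampling_f == 0:
            cv2.imwrite(f"{dest_folder}/{count + start}.jpg", image)
            saved += 1
        success, image = vidcap.read()
    print(f"Saved {saved} frames from {video_path} to {dest_folder}")
    return count + start
```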
2.765625
3
model_building/svr_experiment_configuration.py
eubr-atmosphere/a-MLLibrary
3
1608
""" Copyright 2019 <NAME> Copyright 2019 <NAME> Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. """ import sklearn.svm as svm import model_building.experiment_configuration as ec class SVRExperimentConfiguration(ec.ExperimentConfiguration): """ Class representing a single experiment configuration for linear regression Attributes ---------- _linear_regression : LinearRegression The actual scikt object which performs the linear regression Methods ------- _train() Performs the actual building of the linear model compute_estimations() Compute the estimated values for a give set of data """ def __init__(self, campaign_configuration, hyperparameters, regression_inputs, prefix): """ campaign_configuration: dict of dict: The set of options specified by the user though command line and campaign configuration files hyperparameters: dictionary The set of hyperparameters of this experiment configuration regression_inputs: RegressionInputs The input of the regression problem to be solved """ super().__init__(campaign_configuration, hyperparameters, regression_inputs, prefix) self.technique = ec.Technique.SVR self._regressor = svm.SVR(C=self._hyperparameters['C'], epsilon=self._hyperparameters['epsilon'], gamma=self._hyperparameters['gamma'], kernel=self._hyperparameters['kernel'], degree=self._hyperparameters['degree']) def _compute_signature(self, prefix): """ Compute the signature associated with this experiment configuration """ signature = prefix.copy() signature.append("C_" + str(self._hyperparameters['C'])) signature.append("epsilon_" + str(self._hyperparameters['epsilon'])) signature.append("gamma_" + str(self._hyperparameters['gamma'])) signature.append("kernel_" + str(self._hyperparameters['kernel'])) signature.append("degree_" + str(self._hyperparameters['degree'])) return signature def _train(self): """ Build the model with the experiment configuration represented by this object """ self._logger.debug("Building model for %s", self._signature) assert self._regression_inputs xdata, ydata = self._regression_inputs.get_xy_data(self._regression_inputs.inputs_split["training"]) self._regressor.fit(xdata, ydata) self._logger.debug("Model built") # for idx, col_name in enumerate(self._regression_inputs.x_columns): # self._logger.debug("The coefficient for %s is %f", col_name, self._linear_regression.coef_[idx]) def compute_estimations(self, rows): """ Compute the estimations and the MAPE for runs in rows """ xdata, _ = self._regression_inputs.get_xy_data(rows) return self._regressor.predict(xdata)
2.171875
2
Day01-15/code/Day15/pdf2.py
bdfd/Python_Zero2Hero_DS
3
1616
""" 读取PDF文件 Version: 0.1 Author: BDFD Date: 2018-03-26 """ from PyPDF2 import PdfFileReader with open('./res/Python课程大纲.pdf', 'rb') as f: reader = PdfFileReader(f, strict=False) print(reader.numPages) if reader.isEncrypted: reader.decrypt('') current_page = reader.getPage(5) print(current_page) print(current_page.extractText())
1.679688
2
Training/train_baseHD.py
Wenyuan-Vincent-Li/SSL_Seg_GAN
1
1624
import torch.nn as nn import torch.optim as optim import torch.utils.data from Training import functions from Training.imresize import imresize import matplotlib.pyplot as plt from Models.pix2pixHD_base import GANLoss, VGGLoss from Models.pix2pixHD2 import mask2onehot class Losses(): def __init__(self, opt): self.criterionGAN = GANLoss(not opt.no_lsgan) self.criterionFeat = nn.L1Loss() if opt.contour: self.crossEntropy = nn.BCEWithLogitsLoss() else: self.crossEntropy = nn.CrossEntropyLoss() if not opt.no_vgg_loss: self.criterionVGG = VGGLoss() def train_single_scale(dataloader, netD, netG, netS, reals, Gs, Ss, in_s, in_s_S, NoiseAmp, NoiseAmpS, opt): ''' :param netD: currD :param netG: currG :param netS: currS :param reals: a list of image pyramid ## TODO: you can just pass image shape here :param Gs: list of prev netG :param Ss: list of prev netS :param in_s: 0-> all zero [1, 3, 26, 26] :param NoiseAmp: [] -> [1] :param opt: config :return: ''' loss = Losses(opt) real = reals[opt.scale_num] # find the current level image xn opt.nzx = real[0] opt.nzy = real[1] # z_opt = 0 ## dummy z_opt alpha = opt.alpha # setup optimizer optimizerD = optim.Adam(netD.parameters(), lr=opt.lr_d, betas=(opt.beta1, 0.999)) optimizerG = optim.Adam(netG.parameters(), lr=opt.lr_g, betas=(opt.beta1, 0.999)) optimizerS = optim.Adam(netS.parameters(), lr=opt.lr_s, betas=(opt.beta1, 0.999)) schedulerD = torch.optim.lr_scheduler.MultiStepLR(optimizer=optimizerD, milestones=[opt.niter * 0.8], gamma=opt.gamma) schedulerG = torch.optim.lr_scheduler.MultiStepLR(optimizer=optimizerG, milestones=[opt.niter * 0.8], gamma=opt.gamma) schedulerS = torch.optim.lr_scheduler.MultiStepLR(optimizer=optimizerS, milestones=[opt.niter * 0.8], gamma=opt.gamma) errD2plot = [] errG2plot = [] D_real2plot = [] D_fake2plot = [] for epoch in range(opt.niter): # niter = 2000 if Gs == [] and Ss == []: noise_ = functions.generate_noise([1, opt.nzx, opt.nzy], opt.batchSize) # [None, 1, 32, 32] noise_ = noise_.expand(opt.batchSize, 3, opt.nzx, opt.nzy) ## Noise_: for generated false samples through generator else: noise_ = functions.generate_noise([1, opt.nzx, opt.nzy], opt.batchSize) for j, data in enumerate(dataloader): data['image'] = data['image'].to(opt.device) data['label'] = data['label'].long().to(opt.device) ############################ # (1) Update D network: maximize D(x) + D(G(z)) ########################### # train with real netD.zero_grad() pred_real = netD(data['image'], data['label'][:,0:1,...]) loss_D_real = loss.criterionGAN(pred_real, True) D_x = loss_D_real.item() # train with fake if (j == 0) & (epoch == 0): # first iteration training in this level if Gs == [] and Ss == []: prev = torch.full([opt.batchSize, opt.nc_z, opt.nzx, opt.nzy], 0, device=opt.device) in_s = prev # full of 0 [None, 3, 32, 32] prev_S = torch.full([opt.batchSize, opt.label_nc, opt.nzx, opt.nzy], 0, device=opt.device) in_s_S = prev_S # full of 0 [None, 4, 32, 32] mask = data['label'][:,0:1,...] 
opt.noise_amp = opt.noise_amp_init opt.noise_amp_S = opt.noise_amp_init else: prev = draw_concat(Gs, data['down_scale_label'], reals, NoiseAmp, in_s, 'generator', opt) ## given a new noise, prev is a image generated by previous Generator with bilinear upsampling [1, 3, 33, 33] criterion = nn.MSELoss() RMSE = torch.sqrt(criterion(data['image'], prev)) opt.noise_amp = opt.noise_amp_init * RMSE prev_S = draw_concat(Ss, data['down_scale_image'], reals, NoiseAmpS, in_s_S, 'segment', opt) ## prob with [None, 4, 32, 32] onehot_label = mask2onehot(data['label'][:,0:1,...], opt.label_nc) RMSE_S = torch.sqrt(criterion(onehot_label, prev_S)) # RMSE_S = 0 opt.noise_amp_S = opt.noise_amp_init * RMSE_S mask = data['label'][:,0:1,...] else: prev = draw_concat(Gs, data['down_scale_label'], reals, NoiseAmp, in_s, 'generator', opt) prev_S = draw_concat(Ss, data['down_scale_image'], reals, NoiseAmpS, in_s_S, 'segment', opt) mask = data['label'][:,0:1,...] if Gs == []: noise = noise_ ## Gausiaan noise for generating image [None, 3, 42, 42] else: noise = opt.noise_amp * noise_ + prev ## [None, 3, 43, 43] new noise is equal to the prev generated image plus the gaussian noise. fake = netG(noise.detach(), prev, mask) # [None, 3, 32, 32] the same size with the input image # detach() make sure that the gradients don't go to the noise. # prev:[None, 3, 42, 42] -> [None, 3, 43, 43] first step prev = 0, second step prev = a image generated by previous Generator with bilinaer upsampling pred_fake = netD(fake.detach(), data['label'][:,0:1,...]) # output shape [1, 1, 16, 16] -> [1, 1, 23, 23] # print(len(pred_fake), len(pred_fake[0])) loss_D_fake = loss.criterionGAN(pred_fake, False) D_G_z = loss_D_fake.item() # segment_logit, segment_mask = netS(data['image'], mask2onehot(prev_S, opt.label_nc)) # print(data['image'].shape, onehot.shape) # print(epoch, j) segment_logit, segment_prob, segment_mask = netS(data['image'], prev_S.detach()) pred_fake_S = netD(data['image'], segment_prob.detach()) loss_D_fake_S = loss.criterionGAN(pred_fake_S, False) D_S_z = loss_D_fake_S.item() errD = (loss_D_real + 0.5 * loss_D_fake + 0.5 * loss_D_fake_S) ## Todo: figure out a proper coefficient errD.backward() optimizerD.step() errD2plot.append(errD.detach()) ## errD for each iteration ############################ # (2) Update G network: maximize D(G(z)) ########################### netG.zero_grad() pred_fake = netD(fake, data['label'][:,0:1,...]) loss_G_GAN = 0.5 * loss.criterionGAN(pred_fake, True) # GAN feature matching loss loss_G_GAN_Feat = 0 if not opt.no_ganFeat_loss: feat_weights = 4.0 / (opt.n_layers_D + 1) D_weights = 1.0 / opt.num_D for i in range(opt.num_D): for j in range(len(pred_fake[i]) - 1): loss_G_GAN_Feat += D_weights * feat_weights * \ loss.criterionFeat(pred_fake[i][j], pred_real[i][j].detach()) * opt.lambda_feat # VGG feature matching loss loss_G_VGG = 0 if not opt.no_vgg_loss: loss_G_VGG = loss.criterionVGG(fake, data['image']) * opt.lambda_feat ## reconstruction loss if alpha != 0: ## alpha = 10 calculate the reconstruction loss Recloss = nn.MSELoss() rec_loss = alpha * Recloss(fake, data['image']) else: rec_loss = 0 errG = loss_G_GAN + loss_G_GAN_Feat + loss_G_VGG + rec_loss errG.backward() optimizerG.step() ############################ # (3) Update S network: maximize D(S(z)) ########################### netS.zero_grad() pred_fake_S = netD(data['image'], segment_prob) loss_G_GAN_S = 0.03 * loss.criterionGAN(pred_fake_S, True) # Segmentation loss if opt.contour: loss_G_Seg = loss.crossEntropy(segment_logit, 
data['label'].float()) else: loss_G_Seg = loss.crossEntropy(segment_prob, torch.squeeze(data['label'][:,0:1,...], dim =1)) # GAN feature matching loss loss_G_GAN_Feat_S = 0 if not opt.no_ganFeat_loss: feat_weights = 4.0 / (opt.n_layers_D + 1) D_weights = 1.0 / opt.num_D for i in range(opt.num_D): for j in range(len(pred_fake_S[i]) - 1): loss_G_GAN_Feat_S += D_weights * feat_weights * \ loss.criterionFeat(pred_fake_S[i][j], pred_real[i][j].detach()) * opt.lambda_feat errS = loss_G_GAN_S + loss_G_GAN_Feat_S + loss_G_Seg errS.backward() optimizerS.step() ## for every epoch, do the following: errG2plot.append(errG.detach()) ## ErrG for each iteration D_real2plot.append(D_x) ## discriminator loss on real D_fake2plot.append(D_G_z + D_S_z) ## discriminator loss on fake if epoch % 25 == 0 or epoch == (opt.niter - 1): print('scale %d:[%d/%d]' % (opt.scale_num, epoch, opt.niter)) if epoch % 25 == 0 or epoch == (opt.niter - 1): plt.imsave('%s/fake_sample_%d.png' % (opt.outf, epoch), functions.convert_image_np(fake.detach()), vmin=0, vmax=1) plt.imsave('%s/fake_sample_real_%d.png' % (opt.outf, epoch), functions.convert_image_np(data['image']), vmin=0, vmax=1) plt.imsave('%s/fake_sample_mask_%d.png' % (opt.outf, epoch), functions.convert_mask_np(data['label'][:,0:1,...], num_classes= opt.label_nc)) plt.imsave('%s/segmentation_mask_%d.png' % (opt.outf, epoch), functions.convert_mask_np(segment_mask.detach(), num_classes=opt.label_nc)) schedulerD.step() schedulerG.step() schedulerS.step() functions.save_networks(netG, netD, netS, opt) ## save netG, netD, z_opt, opt is used to parser output path return in_s, in_s_S, netG, netS def draw_concat(Gs, masks, reals, NoiseAmp, in_s, mode, opt): ''' :param Gs: [G0] :param mask: [down scaled _mask] :param reals: [image pyramid] only used to represent the image shape :param NoiseAmp: [1] :param in_s: all zeros [1, 3, 26, 26] :param mode: 'rand' :param opt: :return: ''' G_z = in_s[:opt.batchSize, :, :, :] # [None, 3, 26, 26] all zeros, image input for the corest level if len(Gs) > 0: if mode == 'generator': count = 0 for G, mask, real_curr, real_next, noise_amp in zip(Gs, masks, reals, reals[1:], NoiseAmp): if count == 0: z = functions.generate_noise([1, real_curr[0], real_curr[1]], opt.batchSize) z = z.expand(opt.batchSize, G_z.shape[1], z.shape[2], z.shape[3]) else: z = functions.generate_noise( [opt.nc_z, real_curr[0], real_curr[1]], opt.batchSize) G_z = G_z[:, :, 0:real_curr[0], 0:real_curr[1]] ## G_z [None, 3, 32, 32] z_in = noise_amp * z + G_z G_z = G(z_in.detach(), G_z, mask) ## [1, 3, 26, 26] output of previous generator G_z = imresize(G_z, real_next[1] / real_curr[1], opt) G_z = G_z[:, :, 0:real_next[0], 0:real_next[1]] ## resize the image to be compatible with current G [1, 3, 33, 33] count += 1 elif mode == 'segment': count = 0 for G, mask, real_curr, real_next, noise_amp in zip(Gs, masks, reals, reals[1:], NoiseAmp): G_z = G_z[:, :, 0:real_curr[0], 0:real_curr[1]] ## G_z [None, 3, 32, 32] _, G_z, _ = G(mask, G_z) ## [1, 3, 26, 26] output of previous generator if opt.contour: G_z = torch.cat((G_z, 1-G_z), 1) G_z = imresize(G_z, real_next[1] / real_curr[1], opt) G_z = G_z[:, :, 0:real_next[0], 0:real_next[1]] ## resize the image to be compatible with current G [1, 3, 33, 33] count += 1 return G_z
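The `opt.noise_amp = opt.noise_amp_init * RMSE` lines above set each scale's injected-noise amplitude from how much detail the previous scale failed to reproduce. A minimal standalone sketch of that idea (tensor shapes and names are assumptions):

```python
import torch
import torch.nn.functional as F

def scale_noise_amp(real, prev_fake, noise_amp_init):
    """Base amplitude scaled by the RMSE between the real image and the
    previous scale's output, upsampled to the current resolution."""
    prev_up = F.interpolate(prev_fake, size=real.shape[-2:],
                            mode='bilinear', align_corners=False)
    rmse = torch.sqrt(F.mse_loss(real, prev_up))
    return noise_amp_init * rmse

real = torch.rand(1, 3, 32, 32)       # current-scale image
prev_fake = torch.rand(1, 3, 16, 16)  # previous (coarser) scale output
print(scale_noise_amp(real, prev_fake, noise_amp_init=0.1))
```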
1.976563
2
apps/accounts/views.py
tarvitz/icu
1
1640
# Create your views here. # -*- coding: utf-8 -*- from apps.core.helpers import render_to, ajax_response, get_object_or_None from apps.core.decorators import lock, login_required_json from apps.accounts.models import Invite from apps.accounts.decorators import check_invite from apps.accounts.forms import ( LoginForm, AccountRegisterForm, SendInviteForm, InviteRegisterForm ) from django.core.mail import send_mail from django.core.urlresolvers import reverse from django.contrib import auth from django.contrib.auth.decorators import login_required from django.conf import settings from django.db import transaction from django.utils.translation import ugettext_lazy as _ @render_to('accounts/login.html') def login(request): form = LoginForm(request.POST or None) if request.method == 'POST': if form.is_valid(): user = form.cleaned_data['user'] auth.login(request, user) return {'redirect': 'core:index'} return { 'form': form } @render_to('index.html') def logout(request): auth.logout(request) return {} @render_to('accounts/profile.html') def profile(request): return {} @login_required_json @ajax_response def generate_new_api_key(request): if request.method == 'POST': request.user.api_key.key = request.user.api_key.generate_key() request.user.api_key.save() key = request.user.api_key.key return {'success': True, 'key': key} return {'success': False} @lock("REGISTER_ALLOWED") @render_to('accounts/register.html') def register(request): form = AccountRegisterForm(request.POST or None) if request.method == "POST": if form.is_valid(): user = form.save(commit=False) user.set_password(form.cleaned_data['password']) user.save() return {'redirect': 'core:index'} return { 'form': form } @login_required @render_to('accounts/invite.html') def invite(request): form = SendInviteForm(request.POST or None, request=request) if request.method == 'POST': if form.is_valid(): form.save(commit=False) invite = form.instance email = form.cleaned_data['email'] msg = settings.INVITE_MESSAGE % { 'user': request.user.username, 'link': "http://b3ban.blacklibrary.ru%s" % reverse('accounts:invite-register', args=(invite.sid, )) } #no mail send, no money :) send_mail( subject=unicode(_('You have been invited to b3ban service')), message=unicode(msg), from_email=settings.EMAIL_FROM, recipient_list=[email] ) invite.save() return {'redirect': 'accounts:invite-success'} return { 'form': form } #@check for possibility to register @transaction.commit_on_success @check_invite(sid='sid') @render_to('accounts/invite_register.html') def invite_register(request, sid): invite = get_object_or_None(Invite, sid=sid) if not invite: return {'redirect': 'core:ufo'} form = InviteRegisterForm(request.POST or None) if request.method == 'POST': if form.is_valid(): invite.is_verified = True invite.save() user = form.save(commit=False) user.email = invite.email user.set_password(form.cleaned_data['password']) user.save() return {'redirect': 'accounts:invite-register-success'} return {'form': form, 'sid': sid}
1.171875
1
datatableview/tests/test_helpers.py
gregneagle/sal
2
1648
# -*- encoding: utf-8 -*- from datetime import datetime from functools import partial from django import get_version from datatableview import helpers import six from .testcase import DatatableViewTestCase from .test_app.models import ExampleModel, RelatedM2MModel if get_version().split('.') < ['1', '7']: test_data_fixture = 'test_data_legacy.json' else: test_data_fixture = 'test_data.json' class HelpersTests(DatatableViewTestCase): fixtures = [test_data_fixture] def test_link_to_model(self): """ Verifies that link_to_model works. """ helper = helpers.link_to_model # Verify that a model without get_absolute_url() raises a complaint related = RelatedM2MModel.objects.get(pk=1) with self.assertRaises(AttributeError) as cm: helper(related) self.assertEqual(str(cm.exception), "'RelatedM2MModel' object has no attribute 'get_absolute_url'") # Verify simple use instance = ExampleModel.objects.get(pk=1) output = helper(instance) self.assertEqual(output, '<a href="#1">ExampleModel 1</a>') # Verify text override output = helper(instance, text="Special text") self.assertEqual(output, '<a href="#1">Special text</a>') # Verify ``key`` access to transition an instance to a related field instance = ExampleModel.objects.get(pk=2) secondary_helper = helper(key=lambda o: o.related) output = secondary_helper(instance) self.assertEqual(output, '<a href="#1">RelatedModel object</a>') # Verify ``key`` access version of custom text output = secondary_helper(instance, text="Special text") self.assertEqual(output, '<a href="#1">Special text</a>') def test_make_boolean_checkmark(self): """ Verifies that make_boolean_checkmark works. """ helper = helpers.make_boolean_checkmark # Verify simple use output = helper("True-ish value") self.assertEqual(output, '&#10004;') output = helper("") self.assertEqual(output, '&#10008;') # Verify custom values output = helper("True-ish value", true_value="Yes", false_value="No") self.assertEqual(output, 'Yes') output = helper("", true_value="Yes", false_value="No") self.assertEqual(output, 'No') def test_format_date(self): """ Verifies that format_date works. """ helper = helpers.format_date # Verify simple use data = datetime.now() secondary_helper = helper("%m/%d/%Y") output = secondary_helper(data) self.assertEqual(output, data.strftime("%m/%d/%Y")) # Verify that None objects get swallowed without complaint. # This helps promise that the helper won't blow up for models.DateTimeField that are allowed # to be null. output = secondary_helper(None) self.assertEqual(output, "") def test_format(self): """ Verifies that format works. """ helper = helpers.format # Verify simple use data = 1234567890 secondary_helper = helper("{0:,}") output = secondary_helper(data) self.assertEqual(output, "{0:,}".format(data)) # Verify ``cast`` argument data = "1234.56789" secondary_helper = helper("{0:.2f}", cast=float) output = secondary_helper(data) self.assertEqual(output, "{0:.2f}".format(float(data))) def test_through_filter(self): """ Verifies that through_filter works. """ helper = helpers.through_filter target_function = lambda data, arg=None: (data, arg) # Verify simple use data = "Data string" secondary_helper = helper(target_function) output = secondary_helper(data) self.assertEqual(output, (data, None)) # Verify ``arg`` argument secondary_helper = helper(target_function, arg="Arg data") output = secondary_helper(data) self.assertEqual(output, (data, "Arg data")) def test_itemgetter(self): """ Verifies that itemgetter works. 
""" helper = helpers.itemgetter # Verify simple index access data = list(range(5)) secondary_helper = helper(-1) output = secondary_helper(data) self.assertEqual(output, data[-1]) # Verify slicing access secondary_helper = helper(slice(1, 3)) output = secondary_helper(data) self.assertEqual(output, data[1:3]) # Verify ellipsis works for strings data = str(range(10)) secondary_helper = helper(slice(0, 5), ellipsis=True) output = secondary_helper(data) self.assertEqual(output, data[:5] + "...") # Verify ellipsis can be customized secondary_helper = helper(slice(0, 5), ellipsis="custom") output = secondary_helper(data) self.assertEqual(output, data[:5] + "custom") # Verify ellipsis does nothing for non-string data types data = range(10) output = secondary_helper(data) self.assertEqual(output, data[:5]) def test_attrgetter(self): """ Verifies that attrgetter works. """ helper = helpers.attrgetter # Verify simple attr lookup data = ExampleModel.objects.get(pk=1) secondary_helper = helper('pk') output = secondary_helper(data) self.assertEqual(output, data.pk) # Verify bad attribrute lookup data = ExampleModel.objects.get(pk=1) secondary_helper = helper('bad field name') with self.assertRaises(AttributeError) as cm: output = secondary_helper(data) self.assertEqual(str(cm.exception), "'ExampleModel' object has no attribute 'bad field name'") def test_make_xeditable(self): """ Verifies that make_xeditable works. """ helper = helpers.make_xeditable # Items that the helper normally expects in a callback context internals = {'field_name': 'name'} # Verify chain calls don't trigger rendering secondary_helper = helper() tertiary_helper = secondary_helper() self.assertEqual(type(secondary_helper), partial) self.assertEqual(type(tertiary_helper), partial) # Verify chain ends with provision of a value data = ExampleModel.objects.get(pk=1) # This needs a "url" arg because we want to test successful use output = tertiary_helper(data, url="/", **internals) self.assertTrue(isinstance(output, six.string_types)) # Verify that no "view" kwarg means the url is required from the call with self.assertRaises(ValueError) as cm: tertiary_helper(data, **internals) self.assertEqual(str(cm.exception), "'make_xeditable' cannot determine a value for 'url'.") # Verify kwargs accumulate kwargs1 = { 'type': 'textarea' } kwargs2 = { 'other_arg': True } secondary_helper = helper(**kwargs1) expected_kwargs = dict(kwargs1, extra_attrs=[]) self.assertEqual(secondary_helper.keywords, expected_kwargs) tertiary_helper = secondary_helper(**kwargs2) expected_kwargs = dict(kwargs1, **dict(kwargs2, extra_attrs=[])) self.assertEqual(tertiary_helper.keywords, expected_kwargs) # Verify default kwarg names end up as attributes data = ExampleModel.objects.get(pk=1) kwargs = { 'pk': "PK DATA", 'type': "TYPE DATA", 'url': "URL DATA", 'source': "SOURCE DATA", 'title': "TITLE DATA", 'placeholder': "PLACEHOLDER DATA", # Extra stuff not in anticipated to appear in rendered string 'special': "SPECIAL DATA", 'data_custom': "DATA-CUSTOM DATA", } secondary_helper = helper(**kwargs) output = secondary_helper(data, **internals) expected_output = """ <a href="#" data-name="name" data-pk="PK DATA" data-placeholder="PLACEHOLDER DATA" data-source="SOURCE DATA" data-title="TITLE DATA" data-type="TYPE DATA" data-url="URL DATA" data-value="1" data-xeditable="xeditable"> ExampleModel 1 </a> """ self.assertHTMLEqual(output, expected_output) # Verify that explicit additions via ``extra_attrs`` allows kwargs to appear in HTML as # "data-*" attributes. 
secondary_helper = helper(extra_attrs=['special', 'data_custom', 'fake'], **kwargs) output = secondary_helper(data, **internals) expected_output = """ <a href="#" data-name="name" data-pk="PK DATA" data-placeholder="PLACEHOLDER DATA" data-source="SOURCE DATA" data-title="TITLE DATA" data-type="TYPE DATA" data-url="URL DATA" data-value="1" data-special="SPECIAL DATA" data-custom="DATA-CUSTOM DATA" data-xeditable="xeditable"> ExampleModel 1 </a> """ self.assertHTMLEqual(output, expected_output)
1.765625
2
plugins/action/normalize_gitlab_cfg.py
sma-de/ansible-collections-gitlab
0
1672
from __future__ import (absolute_import, division, print_function) __metaclass__ = type from ansible.errors import AnsibleOptionsError from ansible.module_utils.six import iteritems, string_types from ansible_collections.smabot.base.plugins.module_utils.plugins.config_normalizing.base import ConfigNormalizerBaseMerger, NormalizerBase, NormalizerNamed, DefaultSetterConstant, DefaultSetterOtherKey from ansible_collections.smabot.base.plugins.module_utils.utils.dicting import setdefault_none, SUBDICT_METAKEY_ANY, get_subdict from ansible_collections.smabot.base.plugins.module_utils.utils.utils import ansible_assert def user_role_to_cfg(username, urole, cfg): tmp = ['roles', 'subroles'] \ + urole['path'].replace('/', '/subroles/').split('/') tmp = get_subdict(cfg, tmp, default_empty=True) setdefault_none(setdefault_none(tmp, 'members', {}), urole['level'], [] ).append(username) class ConfigRootNormalizer(NormalizerBase): def __init__(self, pluginref, *args, **kwargs): self._add_defaultsetter(kwargs, 'random_pwlen', DefaultSetterConstant(80) ) subnorms = kwargs.setdefault('sub_normalizers', []) subnorms += [ ServersNormalizer(pluginref), ] super(ConfigRootNormalizer, self).__init__(pluginref, *args, **kwargs) class ServersNormalizer(NormalizerBase): def __init__(self, pluginref, *args, **kwargs): subnorms = kwargs.setdefault('sub_normalizers', []) subnorms += [ ServerInstancesNormalizer(pluginref), ] super(ServersNormalizer, self).__init__( pluginref, *args, **kwargs ) @property def config_path(self): return ['servers'] class ServerInstancesNormalizer(NormalizerBase): def __init__(self, pluginref, *args, **kwargs): subnorms = kwargs.setdefault('sub_normalizers', []) subnorms += [ SrvInstNormalizer(pluginref), ] super(ServerInstancesNormalizer, self).__init__( pluginref, *args, **kwargs ) @property def config_path(self): return ['instances'] class SrvInstNormalizer(NormalizerBase): def __init__(self, pluginref, *args, **kwargs): subnorms = kwargs.setdefault('sub_normalizers', []) subnorms += [ ServerUsersNormalizer(pluginref), SrvRolesNormalizer(pluginref), ] super(SrvInstNormalizer, self).__init__( pluginref, *args, **kwargs ) @property def config_path(self): return [SUBDICT_METAKEY_ANY] class SrvRolesBaseNormalizer(NormalizerBase): def __init__(self, pluginref, *args, **kwargs): subnorms = kwargs.setdefault('sub_normalizers', []) subnorms += [ SrvRolesMembersNormalizer(pluginref), ## note: for recursive structures, the sub normalizers can only ## be instantiated if the corresponding key actually exists ## to avoid indefinite recursions of death (SrvSubRolesNormalizer, True), ] super(SrvRolesBaseNormalizer, self).__init__( pluginref, *args, **kwargs ) def _handle_specifics_presub(self, cfg, my_subcfg, cfgpath_abs): # do config subkey c = setdefault_none(my_subcfg, 'config', defval={}) setdefault_none(c, 'name', defval=cfgpath_abs[-1]) # build role hierarchy path and parent if cfgpath_abs[-1] == 'roles': ## top level parent = [] else: ## subrole parent = get_subdict(cfg, cfgpath_abs[:-2]) parent = parent['role_abspath'] my_subcfg['role_abspath'] = parent + [c['name']] c['parent'] = '/'.join(parent) return my_subcfg class SrvRolesNormalizer(SrvRolesBaseNormalizer): def __init__(self, pluginref, *args, **kwargs): super(SrvRolesNormalizer, self).__init__( pluginref, *args, **kwargs ) @property def config_path(self): return ['roles'] class SrvSubRolesNormalizer(NormalizerBase): NORMER_CONFIG_PATH = ['subroles'] def __init__(self, pluginref, *args, **kwargs): subnorms = 
kwargs.setdefault('sub_normalizers', []) subnorms += [ SrvRoleInstNormalizer(pluginref), ] super(SrvSubRolesNormalizer, self).__init__( pluginref, *args, **kwargs ) @property def config_path(self): return type(self).NORMER_CONFIG_PATH class SrvRoleInstNormalizer(SrvRolesBaseNormalizer): def __init__(self, pluginref, *args, **kwargs): super(SrvRoleInstNormalizer, self).__init__( pluginref, *args, **kwargs ) @property def config_path(self): return [SUBDICT_METAKEY_ANY] class SrvRolesMembersNormalizer(NormalizerBase): def __init__(self, pluginref, *args, **kwargs): super(SrvRolesMembersNormalizer, self).__init__( pluginref, *args, **kwargs ) @property def config_path(self): return ['members'] def _handle_specifics_presub(self, cfg, my_subcfg, cfgpath_abs): if not my_subcfg: return my_subcfg ## if it exists, members should be a dict where the keys are ## valid gitlab access levels (like guest or developer) and ## the values should be a list of users exportcfg = [] my_group = self.get_parentcfg(cfg, cfgpath_abs) my_group = '/'.join(my_group['role_abspath']) for (k,ul) in iteritems(my_subcfg): for u in ul: exportcfg.append({ 'gitlab_group': my_group, 'gitlab_user': u, 'access_level': k }) my_subcfg['_exportcfg'] = exportcfg return my_subcfg class ServerUsersNormalizer(NormalizerBase): def __init__(self, pluginref, *args, **kwargs): subnorms = kwargs.setdefault('sub_normalizers', []) subnorms += [ ServerBotsNormalizer(pluginref), ServerHumansNormalizer(pluginref), ] super(ServerUsersNormalizer, self).__init__( pluginref, *args, **kwargs ) @property def config_path(self): return ['users'] class ServerUsrBaseNormalizer(NormalizerBase): def __init__(self, pluginref, *args, **kwargs): subnorms = kwargs.setdefault('sub_normalizers', []) subnorms += [ SrvUsrNormalizer(pluginref), ] super(ServerUsrBaseNormalizer, self).__init__( pluginref, *args, **kwargs ) class ServerBotsNormalizer(ServerUsrBaseNormalizer): def __init__(self, pluginref, *args, **kwargs): super(ServerBotsNormalizer, self).__init__( pluginref, *args, **kwargs ) @property def config_path(self): return ['bots'] class ServerHumansNormalizer(ServerUsrBaseNormalizer): def __init__(self, pluginref, *args, **kwargs): super(ServerHumansNormalizer, self).__init__( pluginref, *args, **kwargs ) @property def config_path(self): return ['humans'] class SrvUsrNormalizer(NormalizerBase): def __init__(self, pluginref, *args, **kwargs): subnorms = kwargs.setdefault('sub_normalizers', []) subnorms += [ SrvUsrCfgNormalizer(pluginref), ] self._add_defaultsetter(kwargs, 'pw_access', DefaultSetterConstant(True) ) super(SrvUsrNormalizer, self).__init__( pluginref, *args, **kwargs ) @property def config_path(self): return [SUBDICT_METAKEY_ANY] def _handle_specifics_postsub(self, cfg, my_subcfg, cfgpath_abs): usr_roles = my_subcfg.get('roles', None) if usr_roles: for ur in usr_roles: user_role_to_cfg(my_subcfg['config']['username'], ur, self.get_parentcfg(cfg, cfgpath_abs, level=3) ) return my_subcfg class SrvUsrCfgNormalizer(NormalizerNamed): def __init__(self, pluginref, *args, **kwargs): super(SrvUsrCfgNormalizer, self).__init__( pluginref, *args, mapkey_lvl=-2, **kwargs ) self.default_setters['name'] = DefaultSetterOtherKey('username') @property def config_path(self): return ['config'] @property def name_key(self): return 'username' def _handle_specifics_presub(self, cfg, my_subcfg, cfgpath_abs): mail = my_subcfg.get('email', None) if not mail: # if not mail address is explicitly given, check if mail # template is specified for server, if so use this to # 
create address with username as param tmp = self.get_parentcfg( cfg, cfgpath_abs, level=3 ).get('mail_template', None) if tmp: my_subcfg['email'] = tmp.format( my_subcfg['username'].replace('_', '-') ) return my_subcfg class ActionModule(ConfigNormalizerBaseMerger): def __init__(self, *args, **kwargs): super(ActionModule, self).__init__(ConfigRootNormalizer(self), *args, default_merge_vars=['gitlab_cfg_defaults'], extra_merge_vars_ans=['extra_gitlab_config_maps'], **kwargs ) self._supports_check_mode = False self._supports_async = False @property def my_ansvar(self): return 'gitlab_cfg'
1.28125
1
core/views.py
Neelamegam2000/QRcode-for-license
0
1680
from django.shortcuts import render, redirect from django.conf import settings from django.core.files.storage import FileSystemStorage,default_storage from django.core.mail import send_mail, EmailMessage from core.models import Document from core.forms import DocumentForm from django.contrib import messages import os import pyqrcode import png import random import base64 import cv2 import numpy as np import pyzbar.pyzbar as pyzbar def home(request): documents= Document.objects.all() return render(request, 'home.html', { 'documents': documents }) """def simple_upload(request): if request.method == 'POST' and request.FILES['myfile']: myfile = request.FILES['myfile'] fs = FileSystemStorage() filename = fs.save(myfile.name, myfile) uploaded_file_url = fs.url(filename) BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__))) media_path = os.path.join(BASE_DIR,'media') full_path=os.path.join(media_path,myfile.name) qr=pyqrcode.create(uploaded_file_url) filename_before=filename.rsplit(".") filename1=filename_before[0]+".png" s=qr.png(filename1,scale=6) '''from fpdf import FPDF pdf=FPDF() pdf.add_page() pdf.image(filename1,x=50,y=None,w=60,h=60,type="",link=uploaded_file_url)''' return render(request, 'simple_upload.html', { 'uploaded_file_url': uploaded_file_url }) return render(request, 'simple_upload.html')""" def model_form_upload(request): id="" msg="" if request.method == 'POST': form = DocumentForm(request.POST, request.FILES,request.POST) if form.is_valid(): form.save() email=form.cleaned_data['Email'] document_count=Document.objects.values_list('document').count() document_last=Document.objects.values_list('document')[document_count-1] document_name=document_last[0] print(email) t=Document.objects.last() num_list=['0','1','2','3','4','5','6','7','8','9'] password1="" for i in range(0,8): password1=<PASSWORD>+random.<PASSWORD>(num_list) t.password=<PASSWORD> print(type(document_name)) document_name1=document_name.encode('ascii') document_encode=str(base64.b64encode(document_name1)) ax=document_encode[2:-1] t.file_url=ax print(ax) t.save() qr=pyqrcode.create(ax) filename=document_name.rsplit(".") filename1=filename[0].split("/") filename2=filename1[1]+".png" qr.png(filename2,scale=6) """mail=EmailMessage('QR',password1,'<EMAIL>',[email]) #mail.attach(filename2,filename2.content_type) mail.send()""" subject = 'QRcode scanner for license' message = password1 email_from = settings.EMAIL_HOST_USER recipient_list = [email, ] mail=EmailMessage( subject, message, email_from, recipient_list ) BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__))) mail.attach_file(os.path.join(BASE_DIR,filename2)) mail.send() msg="your successfully uploaded" return redirect('model_form_upload') else: form = DocumentForm() return render(request, 'model_form_upload.html', {'form': form,'msg':msg}) def mypass(request): m="" if(request.POST.get("pswd")==request.POST.get("pswd3")): user_data=Document.objects.filter(Email=request.POST.get("email"),password=request.POST.get("old_pswd")).update(password=request.POST.get("pswd")) user_data1=Document.objects.filter(Email=request.POST.get("email"),password=request.POST.get("pswd")) """if(len_user_data==1): userdata.password=request.POST.get("pswd") return render(request,'mypass.html',{u:"you have change the password successfully"}) else:""" c=0 if(user_data1): subject = 'QRcode scanner for license' message = "Password has succesfully changed"+" "+request.POST.get("pswd") email_from = settings.EMAIL_HOST_USER recipient_list = 
[request.POST.get("email"), ] mail=EmailMessage( subject, message, email_from, recipient_list ) mail.send() c=1 m="your password is changed succesfully" elif(len(Document.objects.filter(Email=request.POST.get("email"),password=request.POST.get("old_pswd")))==0 and request.method=="POST"): m="your email or password is incorrect" else: m="" print(m) return render(request,'mypass.html',{"m":m}) def user_req(request): if("scanner" in request.POST and request.method=="POST"): cap = cv2.VideoCapture(0+cv2.CAP_DSHOW) font = cv2.FONT_HERSHEY_PLAIN decodedObjects=[] while decodedObjects==[]: _, frame = cap.read() decodedObjects = pyzbar.decode(frame) for obj in decodedObjects: points = obj.polygon (x,y,w,h) = obj.rect pts = np.array(points, np.int32) pts = pts.reshape((-1, 1, 2)) cv2.polylines(frame, [pts], True, (0, 255, 0), 3) cv2.putText(frame, str(obj.data), (50, 50), font, 2, (255, 0, 0), 3) id =obj.data.decode("utf-8") cv2.imshow("QR Reader", frame) key = cv2.waitKey(10) & 0xFF if decodedObjects!=[] : cv2.destroyAllWindows() return render(request,"user_req.html",{"id":id}) if('proceed' in request.POST and request.method=="POST"): userdata=Document.objects.filter(file_url=request.POST.get("id1")).filter(password=request.POST.get("password1")) return render(request,"user_req.html",{"userdata":userdata}) return render(request,"user_req.html",) def user(request): return render(request,"user.html",) def forget_pass(request): msg="" if(request.method=="POST"): num_list=['0','1','2','3','4','5','6','7','8','9'] password1="" for i in range(0,8): password1=<PASSWORD>(num_list) user_data=Document.objects.filter(Email=request.POST.get("email")).update(password=<PASSWORD>) subject = 'QRcode scanner for license Forget password' message = "Password has succesfully changed"+" "+<PASSWORD> email_from = settings.EMAIL_HOST_USER recipient_list = [request.POST.get("email"), ] mail=EmailMessage( subject, message, email_from, recipient_list ) mail.send() if(user_data>0): msg="your password is changed succesfully and mail sent" elif(user_data==0): msg="your email is incorrect or not found" return render(request,"forget_pass.html",{"msg":msg}) def qrcode_miss(request): msg="" if(request.method=='POST' and Document.objects.filter(Email=request.POST.get('email'),password=request.POST.get('password1'))): user_data=Document.objects.values_list('document').filter(Email=request.POST.get('email'),password=request.POST.get('password1')) m=user_data[0][0] p=m.split('/') print(p) t=p[1] print(t) subject = 'QRcode scanner for license' message = "resend" email_from = settings.EMAIL_HOST_USER recipient_list = [request.POST.get('email'),] mail=EmailMessage( subject, message, email_from, recipient_list ) BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__))) k=os.path.join(BASE_DIR,t) print(k) mail.attach_file(k) mail.send() msg="your qrcode is sent to your email" elif(request.method=='POST'and Document.objects.values_list('document').filter(Email=request.POST.get('email'),password=request.POST.get('<PASSWORD>')).count()==0): msg="your email or password is incorrect" return render(request,'qrcode_miss.html',{"msg":msg})
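The QR payload in `model_form_upload` above is just the base64-encoded document path rendered to a PNG. A minimal standalone sketch of that step with pyqrcode (pypng is needed for the PNG writer); the path and filenames are assumed:

```python
import base64
import pyqrcode  # pip install pyqrcode pypng

document_name = 'documents/licence_42.jpg'  # assumed upload path
payload = base64.b64encode(document_name.encode('ascii')).decode('ascii')

qr = pyqrcode.create(payload)
qr.png('licence_42.png', scale=6)           # same scale as the view above
print('encoded payload:', payload)
```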
1.34375
1
appium/webdriver/common/multi_action.py
salabogdan/python-client
1
1688
#!/usr/bin/env python # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # The Selenium team implemented something like the Multi Action API in the form of # "action chains" (https://code.google.com/p/selenium/source/browse/py/selenium/webdriver/common/action_chains.py). # These do not quite work for this situation, and do not allow for ad hoc action # chaining as the spec requires. import copy from typing import TYPE_CHECKING, Dict, List, Optional, TypeVar, Union from appium.webdriver.mobilecommand import MobileCommand as Command if TYPE_CHECKING: from appium.webdriver.common.touch_action import TouchAction from appium.webdriver.webdriver import WebDriver from appium.webdriver.webelement import WebElement T = TypeVar('T', bound='MultiAction') class MultiAction: def __init__(self, driver: 'WebDriver', element: Optional['WebElement'] = None) -> None: self._driver = driver self._element = element self._touch_actions: List['TouchAction'] = [] def add(self, *touch_actions: 'TouchAction') -> None: """Add TouchAction objects to the MultiAction, to be performed later. Args: touch_actions: one or more TouchAction objects describing a chain of actions to be performed by one finger Usage: | a1 = TouchAction(driver) | a1.press(el1).move_to(el2).release() | a2 = TouchAction(driver) | a2.press(el2).move_to(el1).release() | MultiAction(driver).add(a1, a2) Returns: `MultiAction`: Self instance """ for touch_action in touch_actions: if self._touch_actions is None: self._touch_actions = [] self._touch_actions.append(copy.copy(touch_action)) def perform(self: T) -> T: """Perform the actions stored in the object. Usage: | a1 = TouchAction(driver) | a1.press(el1).move_to(el2).release() | a2 = TouchAction(driver) | a2.press(el2).move_to(el1).release() | MultiAction(driver).add(a1, a2).perform() Returns: `MultiAction`: Self instance """ self._driver.execute(Command.MULTI_ACTION, self.json_wire_gestures) # clean up and be ready for the next batch self._touch_actions = [] return self @property def json_wire_gestures(self) -> Dict[str, Union[List, str]]: actions = [] for action in self._touch_actions: actions.append(action.json_wire_gestures) if self._element is not None: return {'actions': actions, 'elementId': self._element.id} return {'actions': actions}
1.828125
2
docs/source/tutorial/code/read_csv.py
HanSooLim/DIL-Project
2
1712
import pandas datas = pandas.read_csv("../../Sample/example_dataset.csv", index_col=0) print(datas)
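A quick follow-on inspection of what was loaded, using only standard pandas calls (the columns are whatever the assumed sample CSV provides):

```python
import pandas

datas = pandas.read_csv("../../Sample/example_dataset.csv", index_col=0)
print(datas.head())    # first five rows
print(datas.dtypes)    # inferred column types
print(datas.shape)     # (rows, columns)
```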
1.132813
1
scripts/adam/cc100_baselines.py
TimDettmers/sched
1
1736
import numpy as np
import itertools
import gpuscheduler
import argparse
import os
import uuid
import hashlib
import glob
import math

from itertools import product
from torch.optim.lr_scheduler import OneCycleLR
from os.path import join

parser = argparse.ArgumentParser(description='Compute script.')
parser.add_argument('--dry', action='store_true')
parser.add_argument('--verbose', action='store_true')
parser.add_argument('--p', type=float, default=1.0, help='Probability with which to select a configuration.')
args = parser.parse_args()

gpus = 128
# Base fairseq-train command; per-configuration flags are appended below.
cmd = 'fairseq-train /private/home/namangoyal/dataset/data-bin/bookwiki_CC-NEWS_openwebtext_stories_cc100-mmap2-bin --distributed-world-size {0} --distributed-port 54187 --fp16 --memory-efficient-fp16 --num-workers 2 --criterion cross_entropy --task language_modeling --sample-break-mode none --log-interval 25 --tokens-per-sample 1024 --arch transformer_lm_big --share-decoder-input-output-embed --decoder-layers 28 --decoder-attention-heads 16 --dropout 0.0 --attention-dropout 0.0 --activation-dropout 0.0 --activation-fn relu --no-epoch-checkpoints --keep-best-checkpoints 0 --keep-interval-updates 0 --keep-last-epochs 0 --save-interval-updates 1000 --log-format simple --fp16-no-flatten-grads --ignore-unused-valid-subsets'.format(gpus)

args2 = {}

name = 'blockwise5'
constraint = 'volta32gb'

# 1024 tokens * 8 update_freq * 56250 steps = 0.4608e9 tokens -> optimal batch size 3460
# model sizes: 1.92bn, 2.43bn, 1.41bn

logfolder = 'adam/cc100/{0}'.format(name)
ckp_name = logfolder
#time_hours = 24*2
cores_per_job = 5
mem = 56*(8 if gpus > 8 else gpus)
num_seeds = 1
seed_offset = 5
time_hours = 72
time_minutes = 0

#partition = 'learnlab,learnfair,scavenge'
partition = 'learnfair,learnlab'
#partition = 'learnfair'
#partition = 'uninterruptible'
change_dir = 'fairseq_private'
repo = 'fairseq_private'
exclude = ''

s = gpuscheduler.HyakScheduler(verbose=args.verbose, account='', partition=partition, use_gres=False)

fp16 = True
args3 = {}

# Flags shared by every job go into args2; swept flags go into args3.
args2['lr-scheduler'] = 'polynomial_decay'
args2['warmup-updates'] = 2000
args2['max-update'] = 56250
args2['total-num-update'] = 56250
#args2['lr-scheduler'] = 'cosine'
#args2['warmup-updates'] = 3000
#args2['max-update'] = 56250*4
args2['fp16-scale-window'] = 250
args2['clip-norm'] = 0.4

#args3[('fused', 'adam-bits', 'adam8bits-method', 'adam8bits-qfreq')] = [(True, 32, 'quantile', 1), (False, 8, 'quantile', 1), (False, 8, 'dynamic_tree', 1), (False, 8, 'quantile', 25)]
#args3[('fused', 'adam-bits', 'adam8bits-method', 'adam8bits-qfreq')] = [(True, 32, 'quantile', 1)]#, (False, 8, 'quantile', 1), (False, 8, 'dynamic_tree', 1), (False, 8, 'quantile', 25)]
#args3[('fused', 'adam-bits', 'adam8bits-method', 'adam8bits-qfreq')] = [(True, 32, 'quantile', 1)]
#args3['adam8bits-offset'] = [1/512]
#args3['prob-quant'] = [False]
#args3['dist-scale'] = [1.0]
#args3[('percentile-clipping', 'clip-norm')] = [(100, 0.1)]
#args3['decoder-embed-dim'] = [2048+256]
#args3['decoder-ffn-embed-dim'] = [8192+2048]
#args3['max-tokens'] = [3072]
#args3['update-freq'] = [2]

key = ('max-tokens', 'decoder-embed-dim', 'decoder-ffn-embed-dim', 'update-freq', 'lr')
#key = ('max-tokens', 'decoder-embed-dim', 'decoder-ffn-embed-dim', 'update-freq')
args3[key] = []
#lrkey = ('lr', 'warmup-init-lr')
#args3[lrkey] = []

# 32-bit baseline
#args3['optimizer'] = ['adam']
#args3[('percentile-clipping', 'clip-norm')] = [(100, 0.1)]
#args3[('fused', 'adam-bits', 'adam8bits-method', 'adam8bits-qfreq')] = [(True, 32, 'quantile', 1)]
##args3[key].append((2048,2048,8192,8, 0.00075))
#args3[key].append((2048,2048,8192,2))
#
#lr = 0.003239 + (-0.0001395*math.log(1.41e9))
#args3[lrkey].append((lr, lr+1e-8, lr*0.1, lr*0.1 + 1e-8))

# adafactor
#args3[('percentile-clipping', 'clip-norm')] = [(100, 0.1)]
#args3[('fused', 'adam-bits', 'adam8bits-method', 'adam8bits-qfreq')] = [(False, 32, 'quantile', 1)]
#args2['optimizer'] = 'adafactor'
#args2['beta1'] = 0.9
#args2['decay-rate'] = 0.999
##args3[key].append((2048,2048,8192,8, 0.00075))
#args3[key].append((2048,2048+256,8192+2048,2))
##args3[key].append((2048,2688,10752,2))
#
#lr = 0.003239 + (-0.0001395*math.log(1.92e9))
#args3[lrkey].append((lr, lr+1e-8, lr*0.1, lr*0.1 + 1e-8))

# 8-bit
#args3[('percentile-clipping', 'clip-norm')] = [(100, 0.1)]
#args3[('percentile-clipping', 'clip-norm')] = [(100, 0.1)]
#args3[('percentile-clipping', 'clip-norm')] = [(5, 0.0)]
#args3[('fused', 'adam-bits', 'adam8bits-method', 'adam8bits-qfreq')] = [(False, 8, 'quantile', 1)]
#args3[('fused', 'adam-bits', 'adam8bits-method', 'adam8bits-qfreq')] = [(False, 8, 'dynamic_tree', 1)]
#args3[('fused', 'adam-bits', 'adam8bits-method', 'adam8bits-qfreq')] = [(False, 8, 'dynamic_tree', 1), (False, 8, 'quantile', 1)]
args3['optimizer'] = ['adam']
args3[('use-bnb', 'optim-bits')] = [(True, 8)]
args3[('stable-emb', 'no-scale-embedding')] = [(True, True)]
#args3[('use-bnb', 'stable-emb', 'no-scale-embedding')] = [(True, True, True), (False, False, False)]
#args3[('use-bnb', 'stable-emb', 'no-scale-embedding')] = [(False, False, False)]
#args3[('use-bnb', 'stable-emb', 'no-scale-embedding', 'optim-bits')] = [(True, True, True, True)]
args3[key].append((2048,2048,8192,8, 0.00075))
#args3[key].append((2048,2048,8192,8, 0.00045))
#args3[key].append((2048,2688,10752,2))
#args3['use-emb-norm'] = [True]
#lr = 0.003239 + (-0.0001395*math.log(2.43e9))
#args3[lrkey].append((lr, 0.0))

#args2['train-subset'] = 'train11'

args4 = []
args5 = {}
args6 = {}

rdm = np.random.RandomState(5345)

# Append the shared args2 flags to the base command.
for key, value in args2.items():
    cmd = cmd + ' --{0} {1}'.format(key, value)

# Expand args3 into lists of '--flag value' strings; tuple keys emit their flags
# together, True toggles a bare flag on, False drops it entirely.
args_prod = []
for key, values in args3.items():
    if isinstance(key, tuple):
        keyvalues = []
        for tups in values:
            arg = ''
            for i, v in enumerate(tups):
                if v is True: v = ''
                if v is False: continue
                if len(key[i]) == 0:
                    arg += '{0} '.format(v)
                else:
                    arg += '--{0} {1} '.format(key[i], v)
            keyvalues.append(arg)
    elif isinstance(key, str):
        keyvalues = []
        for v in values:
            if v is True: v = ''
            if v is False:
                keyvalues.append('')
            else:
                keyvalues.append(' --{0} {1}'.format(key, v))
    args_prod.append(keyvalues)

# Cartesian product over all swept argument groups.
if len(args_prod) >= 2:
    args_prod = list(product(*args_prod))
else:
    new_args = []
    if len(args_prod) > 0:
        for arg in args_prod[0]:
            new_args.append([arg])
        args_prod = new_args

# Build one fairseq-train command per (seed, configuration) pair.
jobs = []
if len(args4) == 0: args4.append('')
for seed in range(num_seeds):
    seed = seed + seed_offset
    for arg4 in args4:
        if len(args_prod) == 0: args_prod.append(('', ''))
        for i, values in enumerate(args_prod):
            job_cmd = cmd + arg4
            for val in values:
                job_cmd += ' {0}'.format(val)
            #job_cmd += ' --checkpoint /checkpoint/timdettmers/{1}/{0}/model.pt'.format(hashlib.md5(str(job_cmd).encode('utf-8')).hexdigest(), ckp_name)
            if not fp16: job_cmd = job_cmd.replace('--fp16 ', ' ')
            job_cmd = job_cmd + ' --seed {0}'.format(seed)
            # Hash the full command so every configuration gets its own save directory.
            checkpoint_dir = '/checkpoint/timdettmers/{1}/{0} '.format(hashlib.md5(str(job_cmd).encode('utf-8')).hexdigest(), ckp_name)
            save_dir = ' --save-dir {0}'.format(checkpoint_dir)
            job_cmd = job_cmd + save_dir
            cmds = [job_cmd]
            # Subsample configurations with probability --p.
            if rdm.rand(1) <= args.p:
                jobs.append(job_cmd)
                s.add_job(logfolder, repo, change_dir, cmds, time_hours, fp16, cores=cores_per_job, mem=mem, constraint=constraint, exclude=exclude, time_minutes=time_minutes, gpus=gpus)

if args.dry:
    for i, job in enumerate(jobs):
        print(i, job)
    print('')
    print('Total jobs', len(jobs))
    print('Time hours: {0}'.format(time_hours))
    print('GPUs: {0}'.format(gpus))
    print('Jobs will be written to: {0}'.format(join('/private/home/timdettmers/logs/', logfolder)))
    print('Jobs will be run on: {0}'.format(partition))
    print('Run in folder: {0}'.format(change_dir))

if not args.dry:
    s.run_jobs()
1.6875
2
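The core trick in cc100_baselines.py is the grid expansion: tuple keys in args3 emit several flags together, string keys sweep a single flag, and product() combines the groups into one command line per job. The snippet below is a minimal, self-contained sketch of that idea only; it is not part of the repository, and the flag names are reused from the grid above purely for illustration.

# Sketch: expand a small hyperparameter grid into command-line suffixes.
from itertools import product

grid = {
    ('use-bnb', 'optim-bits'): [(True, 8)],   # flags that must vary together
    'lr': [0.00075, 0.00045],                 # a flag swept on its own
}

groups = []
for key, values in grid.items():
    if isinstance(key, tuple):
        opts = []
        for tup in values:
            parts = []
            for name, v in zip(key, tup):
                if v is False:
                    continue  # False drops the flag entirely
                parts.append('--{0}'.format(name) if v is True else '--{0} {1}'.format(name, v))
            opts.append(' '.join(parts))
        groups.append(opts)
    else:
        groups.append(['--{0} {1}'.format(key, v) for v in values])

for combo in product(*groups):
    print('fairseq-train ...', ' '.join(combo))

Running this prints one command line per combination (here, two), which is essentially what the script above hands to gpuscheduler as individual jobs.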
tests/conftest.py
AlanRosenthal/virtual-dealer
1
1744
""" pytest fixtures """ import unittest.mock as mock import pytest import virtual_dealer.api @pytest.fixture(name="client") def fixture_client(): """ Client test fixture for testing flask APIs """ return virtual_dealer.api.app.test_client() @pytest.fixture(name="store") def fixture_store(): """ Mock for store::Store """ with mock.patch("virtual_dealer.api.store", autospec=True) as mock_store: yield mock_store @pytest.fixture(name="datastore") def fixture_datastore(): """ Client test fixture for testing Google's datastore APIs """ with mock.patch("virtual_dealer.store.datastore", autospec=True) as mock_datastore: yield mock_datastore @pytest.fixture(name="datastore_key") def fixture_datastore_key(): """ Datastore Key Mock """ return mock.MagicMock() @pytest.fixture(name="datastore_entity") def fixture_datastore_entity(): """ Datastore Entity Mock """ return mock.MagicMock()
1.5625
2
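For the conftest.py fixtures above, the key behavior is that the store fixture patches the module-level attribute for the duration of a test. The sketch below is not part of the virtual-dealer repository; it only demonstrates that, inside a test, virtual_dealer.api.store is the autospec'd mock yielded by the fixture, so route handlers exercised through the client fixture would hit the mock rather than a real datastore.

# Sketch: a test consuming the "store" fixture defined in conftest.py.
import virtual_dealer.api


def test_store_is_patched(store):
    # mock.patch replaced the module-level attribute, so the fixture argument and
    # virtual_dealer.api.store are the same MagicMock for the duration of the test;
    # the patch is undone automatically when the fixture's with-block exits.
    assert virtual_dealer.api.store is store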