Dataset columns (type and observed min/max):

    max_stars_repo_path    string    length 4 to 277
    max_stars_repo_name    string    length 4 to 130
    max_stars_count        int64     0 to 191k
    id                     string    length 1 to 8
    content                string    length 1 to 996k
    score                  float64   -1.25 to 4.06
    int_score              int64     0 to 4
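A minimal sketch of how records with this schema could be consumed, assuming the corpus is published as a Hugging Face dataset; the dataset identifier below is a placeholder, not the real name:

```python
# Illustrative only: stream the records and keep the higher-quality files,
# using the columns listed above. "org/python-code-scored" is hypothetical.
from datasets import load_dataset

ds = load_dataset("org/python-code-scored", split="train", streaming=True)

# int_score is a bucketed quality label in [0, 4]; keep the top buckets.
high_quality = ds.filter(lambda row: row["int_score"] >= 3)

for row in high_quality.take(5):
    print(row["max_stars_repo_name"], row["max_stars_repo_path"], row["score"])
```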
max_stars_repo_path: public_data/serializers.py
max_stars_repo_name: MTES-MCT/sparte
max_stars_count: 0
id: 0
content:

from rest_framework_gis import serializers
from rest_framework import serializers as s

from .models import (
    Artificialisee2015to2018,
    Artificielle2018,
    CommunesSybarval,
    CouvertureSol,
    EnveloppeUrbaine2018,
    Ocsge,
    Renaturee2018to2015,
    Sybarval,
    Voirie2018,
    ZonesBaties2018,
    UsageSol,
)


def get_label(code="", label=""):
    if code is None:
        code = "-"
    if label is None:
        label = "inconnu"
    return f"{code} {label[:30]}"


class Artificialisee2015to2018Serializer(serializers.GeoFeatureModelSerializer):
    usage_2015 = s.SerializerMethodField()
    usage_2018 = s.SerializerMethodField()
    couverture_2015 = s.SerializerMethodField()
    couverture_2018 = s.SerializerMethodField()

    def get_usage_2015(self, obj):
        return get_label(code=obj.us_2015, label=obj.us_2015_label)

    def get_usage_2018(self, obj):
        return get_label(code=obj.us_2018, label=obj.us_2018_label)

    def get_couverture_2015(self, obj):
        return get_label(code=obj.cs_2015, label=obj.cs_2015_label)

    def get_couverture_2018(self, obj):
        return get_label(code=obj.cs_2018, label=obj.cs_2018_label)

    class Meta:
        fields = (
            "id",
            "surface",
            "usage_2015",
            "usage_2018",
            "couverture_2015",
            "couverture_2018",
        )
        geo_field = "mpoly"
        model = Artificialisee2015to2018


class Artificielle2018Serializer(serializers.GeoFeatureModelSerializer):
    couverture = s.SerializerMethodField()

    def get_couverture(self, obj):
        return get_label(code=obj.couverture, label=obj.couverture_label)

    class Meta:
        fields = (
            "id",
            "surface",
            "couverture",
        )
        geo_field = "mpoly"
        model = Artificielle2018


class CommunesSybarvalSerializer(serializers.GeoFeatureModelSerializer):
    """Marker GeoJSON serializer."""

    class Meta:
        """Marker serializer meta class."""

        fields = (
            "nom",
            "code_insee",
            "surface",
        )
        geo_field = "mpoly"
        model = CommunesSybarval


class EnveloppeUrbaine2018Serializer(serializers.GeoFeatureModelSerializer):
    couverture = s.SerializerMethodField()

    def get_couverture(self, obj):
        return get_label(code=obj.couverture, label=obj.couverture_label)

    class Meta:
        fields = (
            "id",
            "couverture",
            "surface",
        )
        geo_field = "mpoly"
        model = EnveloppeUrbaine2018


class OcsgeSerializer(serializers.GeoFeatureModelSerializer):
    couverture = s.SerializerMethodField()
    usage = s.SerializerMethodField()

    def get_couverture(self, obj):
        return get_label(code=obj.couverture, label=obj.couverture_label)

    def get_usage(self, obj):
        return get_label(code=obj.usage, label=obj.usage_label)

    class Meta:
        fields = (
            "id",
            "couverture",
            "usage",
            "millesime",
            "map_color",
            "year",
        )
        geo_field = "mpoly"
        model = Ocsge


class Renaturee2018to2015Serializer(serializers.GeoFeatureModelSerializer):
    usage_2015 = s.SerializerMethodField()
    usage_2018 = s.SerializerMethodField()
    couverture_2015 = s.SerializerMethodField()
    couverture_2018 = s.SerializerMethodField()

    def get_usage_2015(self, obj):
        return get_label(code=obj.us_2015, label=obj.us_2015_label)

    def get_usage_2018(self, obj):
        return get_label(code=obj.us_2018, label=obj.us_2018_label)

    def get_couverture_2015(self, obj):
        return get_label(code=obj.cs_2015, label=obj.cs_2015_label)

    def get_couverture_2018(self, obj):
        return get_label(code=obj.cs_2018, label=obj.cs_2018_label)

    class Meta:
        fields = (
            "id",
            "surface",
            "usage_2015",
            "usage_2018",
            "couverture_2015",
            "couverture_2018",
        )
        geo_field = "mpoly"
        model = Renaturee2018to2015


class SybarvalSerializer(serializers.GeoFeatureModelSerializer):
    class Meta:
        fields = (
            "id",
            "surface",
        )
        geo_field = "mpoly"
        model = Sybarval


class Voirie2018Serializer(serializers.GeoFeatureModelSerializer):
    couverture = s.SerializerMethodField()
    usage = s.SerializerMethodField()

    def get_couverture(self, obj):
        return get_label(code=obj.couverture, label=obj.couverture_label)

    def get_usage(self, obj):
        return get_label(code=obj.usage, label=obj.usage_label)

    class Meta:
        fields = (
            "id",
            "surface",
            "couverture",
            "usage",
        )
        geo_field = "mpoly"
        model = Voirie2018


class ZonesBaties2018Serializer(serializers.GeoFeatureModelSerializer):
    couverture = s.SerializerMethodField()
    usage = s.SerializerMethodField()

    def get_couverture(self, obj):
        return get_label(code=obj.couverture, label=obj.couverture_label)

    def get_usage(self, obj):
        return get_label(code=obj.usage, label=obj.usage_label)

    class Meta:
        fields = (
            "id",
            "couverture",
            "usage",
            "surface",
        )
        geo_field = "mpoly"
        model = ZonesBaties2018


class CouvertureSolSerializer(serializers.ModelSerializer):
    class Meta:
        fields = (
            "id",
            "parent",
            "code",
            "label",
            "is_artificial",
        )
        model = CouvertureSol


class UsageSolSerializer(serializers.ModelSerializer):
    class Meta:
        fields = (
            "id",
            "parent",
            "code",
            "label",
        )
        model = UsageSol

score: 1.257813
int_score: 1
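The serializers above only define the GeoJSON representation. A minimal, hypothetical sketch of exposing one of them through a read-only DRF view follows; the view class, module paths, and URL wiring are assumptions for illustration, not code from the repository:

```python
# Illustrative wiring for OcsgeSerializer (names and paths are assumed).
from rest_framework import generics

from public_data.models import Ocsge
from public_data.serializers import OcsgeSerializer


class OcsgeListView(generics.ListAPIView):
    # GeoFeatureModelSerializer renders the queryset as a GeoJSON FeatureCollection.
    queryset = Ocsge.objects.all()
    serializer_class = OcsgeSerializer


# urls.py (assumed):
# urlpatterns = [path("ocsge/", OcsgeListView.as_view())]
```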
max_stars_repo_path: lib/variables/latent_variables/__init__.py
max_stars_repo_name: joelouismarino/variational_rl
max_stars_count: 15
id: 8
content:

from .fully_connected import FullyConnectedLatentVariable
from .convolutional import ConvolutionalLatentVariable

score: 0.408203
int_score: 0
max_stars_repo_path: app/views/web/homestack.py
max_stars_repo_name: geudrik/hautomation
max_stars_count: 0
id: 16
content:

#! /usr/bin/env python2.7
# -*- coding: latin-1 -*-

from flask import Blueprint
from flask import current_app
from flask import render_template

from flask_login import login_required

homestack = Blueprint("homestack", __name__, url_prefix="/homestack")


@homestack.route("/", methods=["GET"])
@login_required
def home():
    return render_template("homestack/home.html")

score: 1.242188
int_score: 1
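A minimal sketch of how such a blueprint is typically mounted on an application; the app factory below is an assumption rather than code from the repository, and @login_required additionally needs a configured LoginManager with a user loader:

```python
# Illustrative only: registering the homestack blueprint on a Flask app.
from flask import Flask
from flask_login import LoginManager

# Module path inferred from the record's file path; the package layout is assumed.
from app.views.web.homestack import homestack


def create_app():
    app = Flask(__name__)
    app.secret_key = "change-me"          # session support for flask_login

    login_manager = LoginManager()
    login_manager.init_app(app)           # required for @login_required to work

    app.register_blueprint(homestack)     # routes served under /homestack
    return app
```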
max_stars_repo_path: src/transformers/models/mmbt/modeling_mmbt.py
max_stars_repo_name: MaximovaIrina/transformers
max_stars_count: 1
id: 32
content:
# coding=utf-8 # Copyright (c) Facebook, Inc. and its affiliates. # Copyright (c) HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """PyTorch MMBT model. """ import torch from torch import nn from torch.nn import CrossEntropyLoss, MSELoss from ...file_utils import add_start_docstrings, add_start_docstrings_to_model_forward, replace_return_docstrings from ...modeling_outputs import BaseModelOutputWithPooling, SequenceClassifierOutput from ...modeling_utils import ModuleUtilsMixin from ...utils import logging logger = logging.get_logger(__name__) _CONFIG_FOR_DOC = "MMBTConfig" class ModalEmbeddings(nn.Module): """Generic Modal Embeddings which takes in an encoder, and a transformer embedding.""" def __init__(self, config, encoder, embeddings): super().__init__() self.config = config self.encoder = encoder self.proj_embeddings = nn.Linear(config.modal_hidden_size, config.hidden_size) self.position_embeddings = embeddings.position_embeddings self.token_type_embeddings = embeddings.token_type_embeddings self.word_embeddings = embeddings.word_embeddings self.LayerNorm = embeddings.LayerNorm self.dropout = nn.Dropout(p=config.hidden_dropout_prob) def forward(self, input_modal, start_token=None, end_token=None, position_ids=None, token_type_ids=None): token_embeddings = self.proj_embeddings(self.encoder(input_modal)) seq_length = token_embeddings.size(1) if start_token is not None: start_token_embeds = self.word_embeddings(start_token) seq_length += 1 token_embeddings = torch.cat([start_token_embeds.unsqueeze(1), token_embeddings], dim=1) if end_token is not None: end_token_embeds = self.word_embeddings(end_token) seq_length += 1 token_embeddings = torch.cat([token_embeddings, end_token_embeds.unsqueeze(1)], dim=1) if position_ids is None: position_ids = torch.arange(seq_length, dtype=torch.long, device=input_modal.device) position_ids = position_ids.unsqueeze(0).expand(input_modal.size(0), seq_length) if token_type_ids is None: token_type_ids = torch.zeros( (input_modal.size(0), seq_length), dtype=torch.long, device=input_modal.device ) position_embeddings = self.position_embeddings(position_ids) token_type_embeddings = self.token_type_embeddings(token_type_ids) embeddings = token_embeddings + position_embeddings + token_type_embeddings embeddings = self.LayerNorm(embeddings) embeddings = self.dropout(embeddings) return embeddings MMBT_START_DOCSTRING = r""" MMBT model was proposed in [Supervised Multimodal Bitransformers for Classifying Images and Text](https://github.com/facebookresearch/mmbt) by <NAME>, <NAME>, <NAME>, <NAME>. It's a supervised multimodal bitransformer model that fuses information from text and other image encoders, and obtain state-of-the-art performance on various multimodal classification benchmark tasks. This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.) 
This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior. Parameters: config ([`MMBTConfig`]): Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. transformer (:class: *~nn.Module*): A text transformer that is used by MMBT. It should have embeddings, encoder, and pooler attributes. encoder (:class: *~nn.Module*): Encoder for the second modality. It should take in a batch of modal inputs and return k, n dimension embeddings. """ MMBT_INPUTS_DOCSTRING = r""" Args: input_modal (`torch.FloatTensor` of shape `(batch_size, ***)`): The other modality data. It will be the shape that the encoder for that type expects. e.g. With an Image Encoder, the shape would be (batch_size, channels, height, width) input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`): Indices of input sequence tokens in the vocabulary. It does not expect [CLS] token to be added as it's appended to the end of other modality embeddings. Indices can be obtained using [`BertTokenizer`]. See [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details. [What are input IDs?](../glossary#input-ids) modal_start_tokens (`torch.LongTensor` of shape `(batch_size,)`, *optional*): Optional start token to be added to Other Modality Embedding. [CLS] Most commonly used for classification tasks. modal_end_tokens (`torch.LongTensor` of shape `(batch_size,)`, *optional*): Optional end token to be added to Other Modality Embedding. [SEP] Most commonly used. attention_mask (*optional*) `torch.FloatTensor` of shape `(batch_size, sequence_length)`: Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`: - 1 for tokens that are **not masked**, - 0 for tokens that are **masked**. [What are attention masks?](../glossary#attention-mask) token_type_ids (*optional*) `torch.LongTensor` of shape `(batch_size, sequence_length)`: Segment token indices to indicate first and second portions of the inputs. Indices are selected in `[0, 1]`: - 0 corresponds to a *sentence A* token, - 1 corresponds to a *sentence B* token. [What are token type IDs?](../glossary#token-type-ids) modal_token_type_ids (*optional*) `torch.LongTensor` of shape `(batch_size, modal_sequence_length)`: Segment token indices to indicate different portions of the non-text modality. The embeddings from these tokens will be summed with the respective token embeddings for the non-text modality. position_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0, config.max_position_embeddings - 1]`. [What are position IDs?](../glossary#position-ids) modal_position_ids (`torch.LongTensor` of shape `(batch_size, modal_sequence_length)`, *optional*): Indices of positions of each input sequence tokens in the position embeddings for the non-text modality. Selected in the range `[0, config.max_position_embeddings - 1]`. [What are position IDs?](../glossary#position-ids) head_mask (`torch.FloatTensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*): Mask to nullify selected heads of the self-attention modules. 
Mask values selected in `[0, 1]`: - 1 indicates the head is **not masked**, - 0 indicates the head is **masked**. inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, embedding_dim)`, *optional*): Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert `input_ids` indices into associated vectors than the model's internal embedding lookup matrix. encoder_hidden_states (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*): Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention if the model is configured as a decoder. encoder_attention_mask (`torch.FloatTensor` of shape `(batch_size, sequence_length)`, *optional*): Mask to avoid performing attention on the padding token indices of the encoder input. This mask is used in the cross-attention if the model is configured as a decoder. Mask values selected in `[0, 1]`: - 1 for tokens that are **not masked**, - 0 for tokens that are **masked**. output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more detail. output_hidden_states (`bool`, *optional*): Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for more detail. return_dict (`bool`, *optional*): Whether or not to return a [`~file_utils.ModelOutput`] instead of a plain tuple. """ @add_start_docstrings( "The bare MMBT Model outputting raw hidden-states without any specific head on top.", MMBT_START_DOCSTRING, ) class MMBTModel(nn.Module, ModuleUtilsMixin): def __init__(self, config, transformer, encoder): super().__init__() self.config = config self.transformer = transformer self.modal_encoder = ModalEmbeddings(config, encoder, transformer.embeddings) @add_start_docstrings_to_model_forward(MMBT_INPUTS_DOCSTRING) @replace_return_docstrings(output_type=BaseModelOutputWithPooling, config_class=_CONFIG_FOR_DOC) def forward( self, input_modal, input_ids=None, modal_start_tokens=None, modal_end_tokens=None, attention_mask=None, token_type_ids=None, modal_token_type_ids=None, position_ids=None, modal_position_ids=None, head_mask=None, inputs_embeds=None, encoder_hidden_states=None, encoder_attention_mask=None, output_attentions=None, output_hidden_states=None, return_dict=None, ): r""" Returns: Examples:: # For example purposes. Not runnable. 
transformer = BertModel.from_pretrained('bert-base-uncased') encoder = ImageEncoder(args) mmbt = MMBTModel(config, transformer, encoder) """ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) return_dict = return_dict if return_dict is not None else self.config.use_return_dict if input_ids is not None and inputs_embeds is not None: raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time") elif input_ids is not None: input_txt_shape = input_ids.size() elif inputs_embeds is not None: input_txt_shape = inputs_embeds.size()[:-1] else: raise ValueError("You have to specify either input_ids or inputs_embeds") device = input_ids.device if input_ids is not None else inputs_embeds.device modal_embeddings = self.modal_encoder( input_modal, start_token=modal_start_tokens, end_token=modal_end_tokens, position_ids=modal_position_ids, token_type_ids=modal_token_type_ids, ) input_modal_shape = modal_embeddings.size()[:-1] if token_type_ids is None: token_type_ids = torch.ones(input_txt_shape, dtype=torch.long, device=device) txt_embeddings = self.transformer.embeddings( input_ids=input_ids, position_ids=position_ids, token_type_ids=token_type_ids, inputs_embeds=inputs_embeds ) embedding_output = torch.cat([modal_embeddings, txt_embeddings], 1) input_shape = embedding_output.size()[:-1] if attention_mask is None: attention_mask = torch.ones(input_shape, device=device) else: attention_mask = torch.cat( [torch.ones(input_modal_shape, device=device, dtype=torch.long), attention_mask], dim=1 ) if encoder_attention_mask is None: encoder_attention_mask = torch.ones(input_shape, device=device) else: encoder_attention_mask = torch.cat( [torch.ones(input_modal_shape, device=device), encoder_attention_mask], dim=1 ) extended_attention_mask = self.get_extended_attention_mask(attention_mask, input_shape, self.device) encoder_extended_attention_mask = self.invert_attention_mask(encoder_attention_mask) head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers) encoder_outputs = self.transformer.encoder( embedding_output, attention_mask=extended_attention_mask, head_mask=head_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_extended_attention_mask, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) sequence_output = encoder_outputs[0] pooled_output = self.transformer.pooler(sequence_output) if not return_dict: return (sequence_output, pooled_output) + encoder_outputs[1:] return BaseModelOutputWithPooling( last_hidden_state=sequence_output, pooler_output=pooled_output, hidden_states=encoder_outputs.hidden_states, attentions=encoder_outputs.attentions, ) def get_input_embeddings(self): return self.embeddings.word_embeddings def set_input_embeddings(self, value): self.embeddings.word_embeddings = value @add_start_docstrings( """ MMBT Model with a sequence classification/regression head on top (a linear layer on top of the pooled output) """, MMBT_START_DOCSTRING, MMBT_INPUTS_DOCSTRING, ) class MMBTForClassification(nn.Module): r""" **labels**: (*optional*) `torch.LongTensor` of shape `(batch_size,)`: Labels for computing the sequence classification/regression loss. Indices should be in `[0, ..., config.num_labels - 1]`. 
If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If `config.num_labels > 1` a classification loss is computed (Cross-Entropy). Returns: *Tuple* comprising various elements depending on the configuration (config) and inputs: **loss**: (*optional*, returned when `labels` is provided) `torch.FloatTensor` of shape `(1,)`: Classification (or regression if config.num_labels==1) loss. **logits**: `torch.FloatTensor` of shape `(batch_size, config.num_labels)` Classification (or regression if config.num_labels==1) scores (before SoftMax). **hidden_states**: (*optional*, returned when `output_hidden_states=True`) list of `torch.FloatTensor` (one for the output of each layer + the output of the embeddings) of shape `(batch_size, sequence_length, hidden_size)`: Hidden-states of the model at the output of each layer plus the initial embedding outputs. **attentions**: (*optional*, returned when `output_attentions=True`) list of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, sequence_length)`: Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. Examples: ```python # For example purposes. Not runnable. transformer = BertModel.from_pretrained('bert-base-uncased') encoder = ImageEncoder(args) model = MMBTForClassification(config, transformer, encoder) outputs = model(input_modal, input_ids, labels=labels) loss, logits = outputs[:2] ```""" def __init__(self, config, transformer, encoder): super().__init__() self.num_labels = config.num_labels self.mmbt = MMBTModel(config, transformer, encoder) self.dropout = nn.Dropout(config.hidden_dropout_prob) self.classifier = nn.Linear(config.hidden_size, config.num_labels) def forward( self, input_modal, input_ids=None, modal_start_tokens=None, modal_end_tokens=None, attention_mask=None, token_type_ids=None, modal_token_type_ids=None, position_ids=None, modal_position_ids=None, head_mask=None, inputs_embeds=None, labels=None, return_dict=None, ): return_dict = return_dict if return_dict is not None else self.config.use_return_dict outputs = self.mmbt( input_modal=input_modal, input_ids=input_ids, modal_start_tokens=modal_start_tokens, modal_end_tokens=modal_end_tokens, attention_mask=attention_mask, token_type_ids=token_type_ids, modal_token_type_ids=modal_token_type_ids, position_ids=position_ids, modal_position_ids=modal_position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, return_dict=return_dict, ) pooled_output = outputs[1] pooled_output = self.dropout(pooled_output) logits = self.classifier(pooled_output) loss = None if labels is not None: if self.num_labels == 1: # We are doing regression loss_fct = MSELoss() loss = loss_fct(logits.view(-1), labels.view(-1)) else: loss_fct = CrossEntropyLoss() loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1)) if not return_dict: output = (logits,) + outputs[2:] return ((loss,) + output) if loss is not None else output return SequenceClassifierOutput( loss=loss, logits=logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions, )
score: 1.515625
int_score: 2
max_stars_repo_path: garaged/src/garage/tf/regressors/gaussian_mlp_regressor_model.py
max_stars_repo_name: artberryx/LSD
max_stars_count: 7
id: 48
content:
"""GaussianMLPRegressorModel.""" import numpy as np import tensorflow as tf import tensorflow_probability as tfp from garage.experiment import deterministic from garage.tf.models import GaussianMLPModel class GaussianMLPRegressorModel(GaussianMLPModel): """GaussianMLPRegressor based on garage.tf.models.Model class. This class can be used to perform regression by fitting a Gaussian distribution to the outputs. Args: input_shape (tuple[int]): Input shape of the training data. output_dim (int): Output dimension of the model. name (str): Model name, also the variable scope. hidden_sizes (list[int]): Output dimension of dense layer(s) for the MLP for mean. For example, (32, 32) means the MLP consists of two hidden layers, each with 32 hidden units. hidden_nonlinearity (callable): Activation function for intermediate dense layer(s). It should return a tf.Tensor. Set it to None to maintain a linear activation. hidden_w_init (callable): Initializer function for the weight of intermediate dense layer(s). The function should return a tf.Tensor. hidden_b_init (callable): Initializer function for the bias of intermediate dense layer(s). The function should return a tf.Tensor. output_nonlinearity (callable): Activation function for output dense layer. It should return a tf.Tensor. Set it to None to maintain a linear activation. output_w_init (callable): Initializer function for the weight of output dense layer(s). The function should return a tf.Tensor. output_b_init (callable): Initializer function for the bias of output dense layer(s). The function should return a tf.Tensor. learn_std (bool): Is std trainable. init_std (float): Initial value for std. adaptive_std (bool): Is std a neural network. If False, it will be a parameter. std_share_network (bool): Boolean for whether mean and std share the same network. std_hidden_sizes (list[int]): Output dimension of dense layer(s) for the MLP for std. For example, (32, 32) means the MLP consists of two hidden layers, each with 32 hidden units. min_std (float): If not None, the std is at least the value of min_std, to avoid numerical issues. max_std (float): If not None, the std is at most the value of max_std, to avoid numerical issues. std_hidden_nonlinearity (callable): Nonlinearity for each hidden layer in the std network. std_hidden_w_init (callable): Initializer function for the weight of intermediate dense layer(s) in the std network. std_hidden_b_init (callable): Initializer function for the bias of intermediate dense layer(s) in the std network. std_output_nonlinearity (callable): Activation function for output dense layer in the std network. It should return a tf.Tensor. Set it to None to maintain a linear activation. std_output_w_init (callable): Initializer function for the weight of output dense layer(s) in the std network. std_parameterization (str): How the std should be parametrized. There are two options: - exp: the logarithm of the std will be stored, and applied a exponential transformation - softplus: the std will be computed as log(1+exp(x)) layer_normalization (bool): Bool for using layer normalization or not. 
""" def __init__(self, input_shape, output_dim, name='GaussianMLPRegressorModel', hidden_sizes=(32, 32), hidden_nonlinearity=tf.nn.tanh, hidden_w_init=tf.initializers.glorot_uniform( seed=deterministic.get_tf_seed_stream()), hidden_b_init=tf.zeros_initializer(), output_nonlinearity=None, output_w_init=tf.initializers.glorot_uniform( seed=deterministic.get_tf_seed_stream()), output_b_init=tf.zeros_initializer(), learn_std=True, adaptive_std=False, std_share_network=False, init_std=1.0, min_std=1e-6, max_std=None, std_hidden_sizes=(32, 32), std_hidden_nonlinearity=tf.nn.tanh, std_hidden_w_init=tf.initializers.glorot_uniform( seed=deterministic.get_tf_seed_stream()), std_hidden_b_init=tf.zeros_initializer(), std_output_nonlinearity=None, std_output_w_init=tf.initializers.glorot_uniform( seed=deterministic.get_tf_seed_stream()), std_parameterization='exp', layer_normalization=False): super().__init__(output_dim=output_dim, name=name, hidden_sizes=hidden_sizes, hidden_nonlinearity=hidden_nonlinearity, hidden_w_init=hidden_w_init, hidden_b_init=hidden_b_init, output_nonlinearity=output_nonlinearity, output_w_init=output_w_init, output_b_init=output_b_init, learn_std=learn_std, adaptive_std=adaptive_std, std_share_network=std_share_network, init_std=init_std, min_std=min_std, max_std=max_std, std_hidden_sizes=std_hidden_sizes, std_hidden_nonlinearity=std_hidden_nonlinearity, std_output_nonlinearity=std_output_nonlinearity, std_parameterization=std_parameterization, layer_normalization=layer_normalization) self._input_shape = input_shape def network_output_spec(self): """Network output spec. Return: list[str]: List of key(str) for the network outputs. """ return [ 'normalized_dist', 'normalized_mean', 'normalized_log_std', 'dist', 'mean', 'log_std', 'x_mean', 'x_std', 'y_mean', 'y_std' ] def _build(self, state_input, name=None): """Build model given input placeholder(s). Args: state_input (tf.Tensor): Place holder for state input. name (str): Inner model name, also the variable scope of the inner model, if exist. One example is garage.tf.models.Sequential. Return: tfp.distributions.MultivariateNormalDiag: Normlizaed distribution. tf.Tensor: Normalized mean. tf.Tensor: Normalized log_std. tfp.distributions.MultivariateNormalDiag: Vanilla distribution. tf.Tensor: Vanilla mean. tf.Tensor: Vanilla log_std. tf.Tensor: Mean for data. tf.Tensor: log_std for data. tf.Tensor: Mean for label. tf.Tensor: log_std for label. 
""" with tf.compat.v1.variable_scope('normalized_vars'): x_mean_var = tf.compat.v1.get_variable( name='x_mean', shape=(1, ) + self._input_shape, dtype=np.float32, initializer=tf.zeros_initializer(), trainable=False) x_std_var = tf.compat.v1.get_variable( name='x_std_var', shape=(1, ) + self._input_shape, dtype=np.float32, initializer=tf.ones_initializer(), trainable=False) y_mean_var = tf.compat.v1.get_variable( name='y_mean_var', shape=(1, self._output_dim), dtype=np.float32, initializer=tf.zeros_initializer(), trainable=False) y_std_var = tf.compat.v1.get_variable( name='y_std_var', shape=(1, self._output_dim), dtype=np.float32, initializer=tf.ones_initializer(), trainable=False) normalized_xs_var = (state_input - x_mean_var) / x_std_var _, normalized_dist_mean, normalized_dist_log_std = super()._build( normalized_xs_var) # Since regressor expects [N, *dims], we need to squeeze the extra # dimension normalized_dist_log_std = tf.squeeze(normalized_dist_log_std, 1) with tf.name_scope('mean_network'): means_var = normalized_dist_mean * y_std_var + y_mean_var with tf.name_scope('std_network'): log_stds_var = normalized_dist_log_std + tf.math.log(y_std_var) normalized_dist = tfp.distributions.MultivariateNormalDiag( loc=normalized_dist_mean, scale_diag=tf.exp(normalized_dist_log_std)) vanilla_dist = tfp.distributions.MultivariateNormalDiag( loc=means_var, scale_diag=tf.exp(log_stds_var)) return (normalized_dist, normalized_dist_mean, normalized_dist_log_std, vanilla_dist, means_var, log_stds_var, x_mean_var, x_std_var, y_mean_var, y_std_var) def clone(self, name): """Return a clone of the model. It copies the configuration and parameters of the primitive. Args: name (str): Name of the newly created model. It has to be different from source model if cloned under the same computational graph. Returns: garage.tf.policies.GaussianMLPModel: Newly cloned model. """ new_regressor = self.__class__( name=name, input_shape=self._input_shape, output_dim=self._output_dim, hidden_sizes=self._hidden_sizes, hidden_nonlinearity=self._hidden_nonlinearity, hidden_w_init=self._hidden_w_init, hidden_b_init=self._hidden_b_init, output_nonlinearity=self._output_nonlinearity, output_w_init=self._output_w_init, output_b_init=self._output_b_init, learn_std=self._learn_std, adaptive_std=self._adaptive_std, std_share_network=self._std_share_network, init_std=self._init_std, min_std=self._min_std, max_std=self._max_std, std_hidden_sizes=self._std_hidden_sizes, std_hidden_nonlinearity=self._std_hidden_nonlinearity, std_hidden_w_init=self._std_hidden_w_init, std_hidden_b_init=self._std_hidden_b_init, std_output_nonlinearity=self._std_output_nonlinearity, std_output_w_init=self._std_output_w_init, std_parameterization=self._std_parameterization, layer_normalization=self._layer_normalization) new_regressor.parameters = self.parameters return new_regressor
score: 2.671875
int_score: 3
max_stars_repo_path: src/biotite/copyable.py
max_stars_repo_name: danijoo/biotite
max_stars_count: 208
id: 56
content:

# This source code is part of the Biotite package and is distributed
# under the 3-Clause BSD License. Please see 'LICENSE.rst' for further
# information.

__name__ = "biotite"
__author__ = "<NAME>"
__all__ = ["Copyable"]

import abc


class Copyable(metaclass=abc.ABCMeta):
    """
    Base class for all objects that should be copyable.

    The public method `copy()` first creates a fresh instance of the
    class of the instance that is copied via the `__copy_create__()`
    method. All variables that could not be set via the constructor
    are then copied via `__copy_fill__()`, starting with the method in
    the uppermost base class and ending with the class of the instance
    to be copied.

    This approach solves the problem of encapsulated variables in
    superclasses.
    """

    def copy(self):
        """
        Create a deep copy of this object.

        Returns
        -------
        copy
            A copy of this object.
        """
        clone = self.__copy_create__()
        self.__copy_fill__(clone)
        return clone

    def __copy_create__(self):
        """
        Instantiate a new object of this class.

        Only the constructor should be called in this method. All
        further attributes that need to be copied are handled in
        `__copy_fill__()`.

        Do not call the `super()` method here.

        This method must be overridden if the constructor takes
        parameters.

        Returns
        -------
        copy
            A freshly instantiated copy of *self*.
        """
        return type(self)()

    def __copy_fill__(self, clone):
        """
        Copy all necessary attributes to the new object.

        Always call the `super()` method as first statement.

        Parameters
        ----------
        clone
            The freshly instantiated copy of *self*.
        """
        pass

score: 3.078125
int_score: 3
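The docstrings describe a two-step copy protocol: construct via `__copy_create__()`, then transfer remaining state via `__copy_fill__()`. A minimal sketch of a subclass that follows it; the class and attributes are invented for illustration:

```python
# Hypothetical subclass showing the __copy_create__/__copy_fill__ contract.
class Annotation(Copyable):
    def __init__(self, name):
        self._name = name      # settable via the constructor
        self._tags = []        # accumulated later, not a constructor argument

    def add_tag(self, tag):
        self._tags.append(tag)

    def __copy_create__(self):
        # The constructor takes a parameter, so the default implementation is overridden.
        return Annotation(self._name)

    def __copy_fill__(self, clone):
        # Hand off to the superclass first, then copy the encapsulated state.
        super().__copy_fill__(clone)
        clone._tags = list(self._tags)


original = Annotation("exon")
original.add_tag("validated")
duplicate = original.copy()    # independent copy with its own _tags list
```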
max_stars_repo_path: uhd_restpy/testplatform/sessions/ixnetwork/quicktest/learnframes_58e01d83db5d99bcabff902f5cf6ec51.py
max_stars_repo_name: OpenIxia/ixnetwork_restpy
max_stars_count: 20
id: 64
content:
# MIT LICENSE # # Copyright 1997 - 2020 by IXIA Keysight # # Permission is hereby granted, free of charge, to any person obtaining a copy # of this software and associated documentation files (the "Software"), # to deal in the Software without restriction, including without limitation # the rights to use, copy, modify, merge, publish, distribute, sublicense, # and/or sell copies of the Software, and to permit persons to whom the # Software is furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice shall be included in # all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN # THE SOFTWARE. from uhd_restpy.base import Base from uhd_restpy.files import Files from typing import List, Any, Union class LearnFrames(Base): """The learning frames that IxNetwork sends during the test. The LearnFrames class encapsulates a required learnFrames resource which will be retrieved from the server every time the property is accessed. """ __slots__ = () _SDM_NAME = 'learnFrames' _SDM_ATT_MAP = { 'FastPathEnable': 'fastPathEnable', 'FastPathLearnFrameSize': 'fastPathLearnFrameSize', 'FastPathNumFrames': 'fastPathNumFrames', 'FastPathRate': 'fastPathRate', 'LearnFrameSize': 'learnFrameSize', 'LearnFrequency': 'learnFrequency', 'LearnNumFrames': 'learnNumFrames', 'LearnRate': 'learnRate', 'LearnSendMacOnly': 'learnSendMacOnly', 'LearnSendRouterSolicitation': 'learnSendRouterSolicitation', 'LearnWaitTime': 'learnWaitTime', 'LearnWaitTimeBeforeTransmit': 'learnWaitTimeBeforeTransmit', } _SDM_ENUM_MAP = { 'learnFrequency': ['never', 'onBinaryIteration', 'oncePerFramesize', 'oncePerTest', 'onTrial'], } def __init__(self, parent, list_op=False): super(LearnFrames, self).__init__(parent, list_op) @property def FastPathEnable(self): # type: () -> bool """ Returns ------- - bool: If true, enables fast path transmit. """ return self._get_attribute(self._SDM_ATT_MAP['FastPathEnable']) @FastPathEnable.setter def FastPathEnable(self, value): # type: (bool) -> None self._set_attribute(self._SDM_ATT_MAP['FastPathEnable'], value) @property def FastPathLearnFrameSize(self): # type: () -> int """ Returns ------- - number: Specifies the size of the learning frames in the fast path. """ return self._get_attribute(self._SDM_ATT_MAP['FastPathLearnFrameSize']) @FastPathLearnFrameSize.setter def FastPathLearnFrameSize(self, value): # type: (int) -> None self._set_attribute(self._SDM_ATT_MAP['FastPathLearnFrameSize'], value) @property def FastPathNumFrames(self): # type: () -> int """ Returns ------- - number: Specifies the number of learn frames that IxNetwork sends through fast path. """ return self._get_attribute(self._SDM_ATT_MAP['FastPathNumFrames']) @FastPathNumFrames.setter def FastPathNumFrames(self, value): # type: (int) -> None self._set_attribute(self._SDM_ATT_MAP['FastPathNumFrames'], value) @property def FastPathRate(self): # type: () -> int """ Returns ------- - number: Specifies the rate at which IxNetwork sends learn frames through fast path. 
""" return self._get_attribute(self._SDM_ATT_MAP['FastPathRate']) @FastPathRate.setter def FastPathRate(self, value): # type: (int) -> None self._set_attribute(self._SDM_ATT_MAP['FastPathRate'], value) @property def LearnFrameSize(self): # type: () -> int """ Returns ------- - number: Specifies the size of the learning frames. """ return self._get_attribute(self._SDM_ATT_MAP['LearnFrameSize']) @LearnFrameSize.setter def LearnFrameSize(self, value): # type: (int) -> None self._set_attribute(self._SDM_ATT_MAP['LearnFrameSize'], value) @property def LearnFrequency(self): # type: () -> str """ Returns ------- - str(never | onBinaryIteration | oncePerFramesize | oncePerTest | onTrial): Allows to choose how frequently IxNetwork sends learning frames during the test. """ return self._get_attribute(self._SDM_ATT_MAP['LearnFrequency']) @LearnFrequency.setter def LearnFrequency(self, value): # type: (str) -> None self._set_attribute(self._SDM_ATT_MAP['LearnFrequency'], value) @property def LearnNumFrames(self): # type: () -> int """ Returns ------- - number: Specifies the number of learning frames that IxNetwork sends for each address. """ return self._get_attribute(self._SDM_ATT_MAP['LearnNumFrames']) @LearnNumFrames.setter def LearnNumFrames(self, value): # type: (int) -> None self._set_attribute(self._SDM_ATT_MAP['LearnNumFrames'], value) @property def LearnRate(self): # type: () -> int """ Returns ------- - number: Specifies the rate at which IxNetwork sends learn frames to the DUT. """ return self._get_attribute(self._SDM_ATT_MAP['LearnRate']) @LearnRate.setter def LearnRate(self, value): # type: (int) -> None self._set_attribute(self._SDM_ATT_MAP['LearnRate'], value) @property def LearnSendMacOnly(self): # type: () -> bool """ Returns ------- - bool: Sends learning frames to MAC address only. """ return self._get_attribute(self._SDM_ATT_MAP['LearnSendMacOnly']) @LearnSendMacOnly.setter def LearnSendMacOnly(self, value): # type: (bool) -> None self._set_attribute(self._SDM_ATT_MAP['LearnSendMacOnly'], value) @property def LearnSendRouterSolicitation(self): # type: () -> bool """ Returns ------- - bool: Sends router solicitation messages. """ return self._get_attribute(self._SDM_ATT_MAP['LearnSendRouterSolicitation']) @LearnSendRouterSolicitation.setter def LearnSendRouterSolicitation(self, value): # type: (bool) -> None self._set_attribute(self._SDM_ATT_MAP['LearnSendRouterSolicitation'], value) @property def LearnWaitTime(self): # type: () -> int """ Returns ------- - number: Specifies the length of time in ms that IxNetwork pauses before sending all the learning frames from all the ports. 
""" return self._get_attribute(self._SDM_ATT_MAP['LearnWaitTime']) @LearnWaitTime.setter def LearnWaitTime(self, value): # type: (int) -> None self._set_attribute(self._SDM_ATT_MAP['LearnWaitTime'], value) @property def LearnWaitTimeBeforeTransmit(self): # type: () -> int """ Returns ------- - number: Specifies the length of time in ms that IxNetwork pauses before sending all the """ return self._get_attribute(self._SDM_ATT_MAP['LearnWaitTimeBeforeTransmit']) @LearnWaitTimeBeforeTransmit.setter def LearnWaitTimeBeforeTransmit(self, value): # type: (int) -> None self._set_attribute(self._SDM_ATT_MAP['LearnWaitTimeBeforeTransmit'], value) def update(self, FastPathEnable=None, FastPathLearnFrameSize=None, FastPathNumFrames=None, FastPathRate=None, LearnFrameSize=None, LearnFrequency=None, LearnNumFrames=None, LearnRate=None, LearnSendMacOnly=None, LearnSendRouterSolicitation=None, LearnWaitTime=None, LearnWaitTimeBeforeTransmit=None): # type: (bool, int, int, int, int, str, int, int, bool, bool, int, int) -> LearnFrames """Updates learnFrames resource on the server. Args ---- - FastPathEnable (bool): If true, enables fast path transmit. - FastPathLearnFrameSize (number): Specifies the size of the learning frames in the fast path. - FastPathNumFrames (number): Specifies the number of learn frames that IxNetwork sends through fast path. - FastPathRate (number): Specifies the rate at which IxNetwork sends learn frames through fast path. - LearnFrameSize (number): Specifies the size of the learning frames. - LearnFrequency (str(never | onBinaryIteration | oncePerFramesize | oncePerTest | onTrial)): Allows to choose how frequently IxNetwork sends learning frames during the test. - LearnNumFrames (number): Specifies the number of learning frames that IxNetwork sends for each address. - LearnRate (number): Specifies the rate at which IxNetwork sends learn frames to the DUT. - LearnSendMacOnly (bool): Sends learning frames to MAC address only. - LearnSendRouterSolicitation (bool): Sends router solicitation messages. - LearnWaitTime (number): Specifies the length of time in ms that IxNetwork pauses before sending all the learning frames from all the ports. - LearnWaitTimeBeforeTransmit (number): Specifies the length of time in ms that IxNetwork pauses before sending all the Raises ------ - ServerError: The server has encountered an uncategorized error condition """ return self._update(self._map_locals(self._SDM_ATT_MAP, locals())) def Apply(self, *args, **kwargs): # type: (*Any, **Any) -> None """Executes the apply operation on the server. Applies the specified Quick Test. apply(async_operation=bool) --------------------------- - async_operation (bool=False): True to execute the operation asynchronously. Any subsequent rest api calls made through the Connection class will block until the operation is complete. Raises ------ - NotFoundError: The requested resource does not exist on the server - ServerError: The server has encountered an uncategorized error condition """ payload = { "Arg1": self.href } for i in range(len(args)): payload['Arg%s' % (i + 2)] = args[i] for item in kwargs.items(): payload[item[0]] = item[1] return self._execute('apply', payload=payload, response_object=None) def ApplyAsync(self, *args, **kwargs): # type: (*Any, **Any) -> None """Executes the applyAsync operation on the server. applyAsync(async_operation=bool) -------------------------------- - async_operation (bool=False): True to execute the operation asynchronously. 
Any subsequent rest api calls made through the Connection class will block until the operation is complete. Raises ------ - NotFoundError: The requested resource does not exist on the server - ServerError: The server has encountered an uncategorized error condition """ payload = { "Arg1": self.href } for i in range(len(args)): payload['Arg%s' % (i + 2)] = args[i] for item in kwargs.items(): payload[item[0]] = item[1] return self._execute('applyAsync', payload=payload, response_object=None) def ApplyAsyncResult(self, *args, **kwargs): # type: (*Any, **Any) -> Union[bool, None] """Executes the applyAsyncResult operation on the server. applyAsyncResult(async_operation=bool)bool ------------------------------------------ - async_operation (bool=False): True to execute the operation asynchronously. Any subsequent rest api calls made through the Connection class will block until the operation is complete. - Returns bool: Raises ------ - NotFoundError: The requested resource does not exist on the server - ServerError: The server has encountered an uncategorized error condition """ payload = { "Arg1": self.href } for i in range(len(args)): payload['Arg%s' % (i + 2)] = args[i] for item in kwargs.items(): payload[item[0]] = item[1] return self._execute('applyAsyncResult', payload=payload, response_object=None) def ApplyITWizardConfiguration(self, *args, **kwargs): # type: (*Any, **Any) -> None """Executes the applyITWizardConfiguration operation on the server. Applies the specified Quick Test. applyITWizardConfiguration(async_operation=bool) ------------------------------------------------ - async_operation (bool=False): True to execute the operation asynchronously. Any subsequent rest api calls made through the Connection class will block until the operation is complete. Raises ------ - NotFoundError: The requested resource does not exist on the server - ServerError: The server has encountered an uncategorized error condition """ payload = { "Arg1": self.href } for i in range(len(args)): payload['Arg%s' % (i + 2)] = args[i] for item in kwargs.items(): payload[item[0]] = item[1] return self._execute('applyITWizardConfiguration', payload=payload, response_object=None) def GenerateReport(self, *args, **kwargs): # type: (*Any, **Any) -> Union[str, None] """Executes the generateReport operation on the server. Generate a PDF report for the last succesfull test run. generateReport(async_operation=bool)string ------------------------------------------ - async_operation (bool=False): True to execute the operation asynchronously. Any subsequent rest api calls made through the Connection class will block until the operation is complete. - Returns str: This method is asynchronous and has no return value. Raises ------ - NotFoundError: The requested resource does not exist on the server - ServerError: The server has encountered an uncategorized error condition """ payload = { "Arg1": self.href } for i in range(len(args)): payload['Arg%s' % (i + 2)] = args[i] for item in kwargs.items(): payload[item[0]] = item[1] return self._execute('generateReport', payload=payload, response_object=None) def Run(self, *args, **kwargs): # type: (*Any, **Any) -> Union[List[str], None] """Executes the run operation on the server. Starts the specified Quick Test and waits for its execution to finish. The IxNetwork model allows for multiple method Signatures with the same name while python does not. 
run(async_operation=bool)list ----------------------------- - async_operation (bool=False): True to execute the operation asynchronously. Any subsequent rest api calls made through the Connection class will block until the operation is complete. - Returns list(str): This method is synchronous and returns the result of the test. run(InputParameters=string, async_operation=bool)list ----------------------------------------------------- - InputParameters (str): The input arguments of the test. - async_operation (bool=False): True to execute the operation asynchronously. Any subsequent rest api calls made through the Connection class will block until the operation is complete. - Returns list(str): This method is synchronous and returns the result of the test. Raises ------ - NotFoundError: The requested resource does not exist on the server - ServerError: The server has encountered an uncategorized error condition """ payload = { "Arg1": self.href } for i in range(len(args)): payload['Arg%s' % (i + 2)] = args[i] for item in kwargs.items(): payload[item[0]] = item[1] return self._execute('run', payload=payload, response_object=None) def Start(self, *args, **kwargs): # type: (*Any, **Any) -> None """Executes the start operation on the server. Starts the specified Quick Test. The IxNetwork model allows for multiple method Signatures with the same name while python does not. start(async_operation=bool) --------------------------- - async_operation (bool=False): True to execute the operation asynchronously. Any subsequent rest api calls made through the Connection class will block until the operation is complete. start(InputParameters=string, async_operation=bool) --------------------------------------------------- - InputParameters (str): The input arguments of the test. - async_operation (bool=False): True to execute the operation asynchronously. Any subsequent rest api calls made through the Connection class will block until the operation is complete. Raises ------ - NotFoundError: The requested resource does not exist on the server - ServerError: The server has encountered an uncategorized error condition """ payload = { "Arg1": self.href } for i in range(len(args)): payload['Arg%s' % (i + 2)] = args[i] for item in kwargs.items(): payload[item[0]] = item[1] return self._execute('start', payload=payload, response_object=None) def Stop(self, *args, **kwargs): # type: (*Any, **Any) -> None """Executes the stop operation on the server. Stops the currently running Quick Test. stop(async_operation=bool) -------------------------- - async_operation (bool=False): True to execute the operation asynchronously. Any subsequent rest api calls made through the Connection class will block until the operation is complete. Raises ------ - NotFoundError: The requested resource does not exist on the server - ServerError: The server has encountered an uncategorized error condition """ payload = { "Arg1": self.href } for i in range(len(args)): payload['Arg%s' % (i + 2)] = args[i] for item in kwargs.items(): payload[item[0]] = item[1] return self._execute('stop', payload=payload, response_object=None) def WaitForTest(self, *args, **kwargs): # type: (*Any, **Any) -> Union[List[str], None] """Executes the waitForTest operation on the server. Waits for the execution of the specified Quick Test to be completed. waitForTest(async_operation=bool)list ------------------------------------- - async_operation (bool=False): True to execute the operation asynchronously. 
Any subsequent rest api calls made through the Connection class will block until the operation is complete. - Returns list(str): This method is synchronous and returns the result of the test. Raises ------ - NotFoundError: The requested resource does not exist on the server - ServerError: The server has encountered an uncategorized error condition """ payload = { "Arg1": self.href } for i in range(len(args)): payload['Arg%s' % (i + 2)] = args[i] for item in kwargs.items(): payload[item[0]] = item[1] return self._execute('waitForTest', payload=payload, response_object=None)
score: 1.296875
int_score: 1
max_stars_repo_path: factory_generator/management/commands/generate_factories.py
max_stars_repo_name: gamabounty/django-factory-generator
max_stars_count: 10
id: 88
content:

import os

from django.apps import apps
from django.core.management.base import BaseCommand

from factory_generator.generator import FactoryAppGenerator


class Command(BaseCommand):
    help = 'Create model factories for all installed apps'

    def handle(self, *args, **options):
        created_files = []
        for app in apps.get_app_configs():
            factory_app_generator = FactoryAppGenerator(app)
            created_files += factory_app_generator.create_files()
        self.stdout.write(self.style.SUCCESS('Successfully created factories:'))
        for created_file in created_files:
            self.stdout.write(self.style.SUCCESS('- ' + created_file))

score: 1.578125
int_score: 2
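Because this is a standard Django management command, it can also be invoked programmatically, not just as `python manage.py generate_factories`. A small sketch, assuming DJANGO_SETTINGS_MODULE is configured and the app providing the command is in INSTALLED_APPS:

```python
# Illustrative programmatic invocation of the command defined above.
import django
from django.core.management import call_command

django.setup()                       # assumes settings are already configured
call_command("generate_factories")   # same effect as the manage.py invocation
```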
max_stars_repo_path: bin/ticker.py
max_stars_repo_name: aleasoluciones/infrabbitmq
max_stars_count: 0
id: 96
content:

# -*- coding: utf-8 -*-
import time
import puka
import argparse
import logging

from infcommon import utils
from infrabbitmq import factory as infrabbitmq_factory
from infrabbitmq.rabbitmq import RabbitMQError
from infrabbitmq.events_names import (
    TICK_1_SECOND,
    TICK_1_MINUTE,
    TICK_2_MINUTES,
    TICK_5_MINUTES,
    TICK_60_MINUTES,
)


def publish_event(publisher, event, network, secs, mins):
    logging.info("publish event {} {}".format(event, secs))
    publisher.publish(event, network, data={'tick': secs, 'mins': mins})


def main(network):
    publisher = infrabbitmq_factory.event_publisher_json_serializer()
    secs = 0
    mins = 0
    rabbitmq_exceptions = (RabbitMQError, puka.AMQPError, KeyError,)
    while True:
        time.sleep(1)
        secs += 1
        utils.do_stuff_with_exponential_backoff(rabbitmq_exceptions, publish_event,
                                                publisher, TICK_1_SECOND, network, secs, mins)
        if secs % 60 == 0:
            mins += 1
            secs = 0
            utils.do_stuff_with_exponential_backoff(rabbitmq_exceptions, publish_event,
                                                    publisher, TICK_1_MINUTE, network, secs, mins)
            if mins % 2 == 0:
                utils.do_stuff_with_exponential_backoff(rabbitmq_exceptions, publish_event,
                                                        publisher, TICK_2_MINUTES, network, secs, mins)
            if mins % 5 == 0:
                utils.do_stuff_with_exponential_backoff(rabbitmq_exceptions, publish_event,
                                                        publisher, TICK_5_MINUTES, network, secs, mins)
            if mins % 60 == 0:
                utils.do_stuff_with_exponential_backoff(rabbitmq_exceptions, publish_event,
                                                        publisher, TICK_60_MINUTES, network, secs, mins)


if __name__ == '__main__':
    try:
        parser = argparse.ArgumentParser()
        parser.add_argument('-n', '--network', action='store', required=True,
                            help='Network name (ilo, c2k, ...)')
        args = parser.parse_args()
        network = args.network.split('-')[0]
        main(network)
    except Exception as exc:
        logging.critical("Ticker Fails: {}".format(exc))

score: 1.539063
int_score: 2
max_stars_repo_path: tests/test_process.py
max_stars_repo_name: confluentinc/utils-core
max_stars_count: 0
id: 112
content:

import pytest

from utils.process import run, silent_run, RunError
from utils.fs import in_temp_dir


def test_run(capsys):
    with in_temp_dir():
        assert run('echo hello > hello.txt; echo world >> hello.txt', shell=True)

        out = run('ls', return_output=True)
        assert out == 'hello.txt\n'

        out = run(['cat', 'hello.txt'], return_output=True)
        assert out == 'hello\nworld\n'

        with pytest.raises(RunError):
            run('blah')

        assert not run('blah', raises=False)

        assert silent_run('ls -l')
        out, _ = capsys.readouterr()
        assert out == ''

score: 1.039063
int_score: 1
max_stars_repo_path: tests/test_model/test_recognizer/test_shufflenetv1.py
max_stars_repo_name: YinAoXiong/ZCls
max_stars_count: 0
id: 128
content:

# -*- coding: utf-8 -*-
"""
@date: 2021/5/16 10:22 PM
@file: test_shufflenetv1.py
@author: zj
@description:
"""

import torch

from zcls.config import cfg
from zcls.config.key_word import KEY_OUTPUT
from zcls.model.recognizers.build import build_recognizer


def test_data(model):
    data = torch.randn(1, 3, 224, 224)
    outputs = model(data)[KEY_OUTPUT]
    print(outputs.shape)

    assert outputs.shape == (1, 1000)


def test_shufflenet():
    cfg.merge_from_file('configs/benchmarks/shufflenet/shufflenet_v1_3g2x_zcls_imagenet_224.yaml')
    print(cfg)
    model = build_recognizer(cfg, torch.device('cpu'))
    print(model)

    test_data(model)


if __name__ == '__main__':
    test_shufflenet()

score: 1.210938
int_score: 1
max_stars_repo_path: algo/vigenere.py
max_stars_repo_name: dkushche/Crypto
max_stars_count: 3
id: 144
content:

import crypto_tools
from itertools import cycle


def vigenere_little_doc():
    return "encrypt/decrypt using vigenere cypher"


def vigenere_full_doc():
    return """
    Advanced caesar we change dict on each char
    """


def vigenere_str_to_list(string, vigenere_dict):
    result = list()
    for char in string:
        try:
            result.append(vigenere_dict.index(char))
        except ValueError:
            err_msg = f"There is no {char} in alphabet"
            raise ValueError(err_msg)
    return result


def vigenere_processing(data, key, lang, encrypt):
    vigenere_dict = crypto_tools.get_param_json_data("alphabets.json", lang)
    num_data = vigenere_str_to_list(data, vigenere_dict)
    num_key = vigenere_str_to_list(key, vigenere_dict)
    dict_size = len(vigenere_dict)
    num_key = cycle(num_key)
    if (encrypt == "encrypt"):
        num_result = [(a + b) % dict_size for a, b in zip(num_data, num_key)]
    else:
        num_result = [
            (a + dict_size - b) % dict_size for a, b in zip(num_data, num_key)
        ]
    result_str = ""
    for val in num_result:
        result_str += vigenere_dict[val]
    return result_str


@crypto_tools.file_manipulation()
def vigenere(data):
    lang = crypto_tools.cterm('input', 'Data language: ', 'ans')
    key = crypto_tools.cterm('input', 'Enter key(str): ', 'ans')
    encrypt = crypto_tools.cterm('input',
                                 'You want encrypt or decrypt: ', 'ans')
    if encrypt != "encrypt" and encrypt != "decrypt":
        raise ValueError("Incorrect action")
    data = crypto_tools.utf_decoder(data)
    return vigenere_processing(data, key, lang, encrypt)


vigenere.little_doc = vigenere_little_doc
vigenere.full_doc = vigenere_full_doc

score: 2.1875
int_score: 2
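A self-contained sketch of the same modular arithmetic using a plain 26-letter alphabet; crypto_tools and its JSON alphabets are not used here, so this only illustrates the encrypt/decrypt formulas from vigenere_processing:

```python
from itertools import cycle

ALPHABET = "abcdefghijklmnopqrstuvwxyz"  # stand-in for an alphabets.json entry


def shift(text, key, encrypt=True):
    size = len(ALPHABET)
    nums = [ALPHABET.index(c) for c in text]
    keys = cycle(ALPHABET.index(c) for c in key)
    if encrypt:
        out = [(a + b) % size for a, b in zip(nums, keys)]      # c = (p + k) mod 26
    else:
        out = [(a + size - b) % size for a, b in zip(nums, keys)]  # p = (c - k) mod 26
    return "".join(ALPHABET[n] for n in out)


cipher = shift("attackatdawn", "lemon")         # -> "lxfopvefrnhr"
plain = shift(cipher, "lemon", encrypt=False)   # -> "attackatdawn"
```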
max_stars_repo_path: met/metadataparser/models/entity_type.py
max_stars_repo_name: z1digitalstudio/met
max_stars_count: 11
id: 184
content:

#################################################################
# MET v2 Metadate Explorer Tool
#
# This Software is Open Source. See License: https://github.com/TERENA/met/blob/master/LICENSE.md
# Copyright (c) 2012, TERENA All rights reserved.
#
# This Software is based on MET v1 developed for TERENA by Yaco Sistemas, http://www.yaco.es/
# MET v2 was developed for TERENA by <NAME>, DAASI International GmbH, http://www.daasi.de
# Current version of MET has been revised for performance improvements by <NAME>,
# Consortium GARR, http://www.garr.it
##########################################################################

from django.db import models
from django.utils.translation import ugettext_lazy as _


class EntityType(models.Model):
    """
    Model describing the type of an entity.
    """

    name = models.CharField(blank=False, max_length=20, unique=True,
                            verbose_name=_(u'Name'), db_index=True)
    xmlname = models.CharField(blank=False, max_length=20, unique=True,
                               verbose_name=_(u'Name in XML'), db_index=True)

    def __unicode__(self):
        return self.name

score: 1.273438
int_score: 1
max_stars_repo_path: deduplicate.py
max_stars_repo_name: Ghostofapacket/NewsGrabber-Deduplicate
max_stars_count: 0
id: 192
content:

import sys

sys.path.append('/usr/local/lib/python3.4/site-packages/')

from warc_dedup import deduplicate


def main():
    if len(sys.argv) == 1:
        raise Exception('Please provide the WARC file as argument.')
    deduplicate.Warc(*sys.argv[1:]).deduplicate()


if __name__ == '__main__':
    main()

score: 0.882813
int_score: 1
max_stars_repo_path: gcp-python-fn/main.py
max_stars_repo_name: FuriKuri/faas-playground
max_stars_count: 1
id: 216
content:

def hello_world(request):
    request_json = request.get_json()
    name = 'World'
    if request_json and 'name' in request_json:
        name = request_json['name']
    headers = {
        'Access-Control-Allow-Origin': 'https://furikuri.net',
        'Access-Control-Allow-Methods': 'GET, POST',
        'Access-Control-Allow-Headers': 'Content-Type'
    }
    return ('Hello ' + name + '! From GCP + Python', 200, headers)

score: 1.1875
int_score: 1
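The function only needs an object exposing get_json(), so it can be smoke-tested locally without deploying to Cloud Functions; the stub class below is an assumption for illustration, not part of the repository:

```python
# Hypothetical local smoke test for hello_world.
class StubRequest:
    def __init__(self, payload):
        self._payload = payload

    def get_json(self):
        return self._payload


body, status, headers = hello_world(StubRequest({"name": "Ada"}))
assert status == 200
assert body == "Hello Ada! From GCP + Python"
assert headers["Access-Control-Allow-Origin"] == "https://furikuri.net"
```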
max_stars_repo_path: setup.py
max_stars_repo_name: dantas/wifi
max_stars_count: 1
id: 272
content:

#!/usr/bin/env python
from setuptools import setup
import os

__doc__ = """
Command line tool and library wrappers around iwlist and
/etc/network/interfaces.
"""


def read(fname):
    return open(os.path.join(os.path.dirname(__file__), fname)).read()


install_requires = [
    'setuptools',
    'pbkdf2',
]
try:
    import argparse
except:
    install_requires.append('argparse')

version = '1.0.0'

setup(
    name='wifi',
    version=version,
    author='<NAME>, <NAME>',
    author_email='<EMAIL>',
    description=__doc__,
    long_description=read('README.rst'),
    packages=['wifi'],
    scripts=['bin/wifi'],
    test_suite='tests',
    platforms=["Debian"],
    license='BSD',
    install_requires=install_requires,
    classifiers=[
        "License :: OSI Approved :: BSD License",
        "Topic :: System :: Networking",
        "Operating System :: POSIX :: Linux",
        "Environment :: Console",
        "Programming Language :: Python",
        "Programming Language :: Python :: 2.6",
        "Programming Language :: Python :: 2.7",
        "Programming Language :: Python :: 3.3",
    ],
    data_files=[
        ('/etc/bash_completion.d/', ['extras/wifi-completion.bash']),
    ]
)

score: 1.117188
int_score: 1
Chapter07/library/check_user_py3.py
djouani/Learning-Ansible-2.X-Third-Edition
22
280
#!/usr/bin/env python

import pwd

from ansible.module_utils.basic import AnsibleModule


class User:
    def __init__(self, user):
        self.user = user

    # Check if user exists
    def check_if_user_exists(self):
        try:
            user = pwd.getpwnam(self.user)
            success = True
            ret_msg = 'User %s exists' % self.user
        except KeyError:
            success = False
            ret_msg = 'User %s does not exist' % self.user
        return success, ret_msg


def main():
    # Parsing argument file
    module = AnsibleModule(
        argument_spec=dict(
            user=dict(required=True)
        )
    )
    user = module.params.get('user')
    chkusr = User(user)
    success, ret_msg = chkusr.check_if_user_exists()
    # Error handling and JSON return
    if success:
        module.exit_json(msg=ret_msg)
    else:
        module.fail_json(msg=ret_msg)


if __name__ == "__main__":
    main()
1.71875
2
unit_13/26-Data_Structures/4_Merge_Sort_and_Linked_Lists/3_linked_list_merge_sort.py
duliodenis/python_master_degree
19
288
#
# Data Structures: Linked List Merge Sort: The Conquer Step
# Python Techdegree
#
# Created by <NAME> on 3/24/19.
# Copyright (c) 2019 ddApps. All rights reserved.
# ------------------------------------------------

from linked_list import Node, LinkedList


def merge_sort(linked_list):
    '''
    Sorts a linked list in ascending order.
    - Recursively divide the linked list into sublists containing a single node
    - Repeatedly merge the sublists to produce sorted sublists until one remains

    Returns a sorted linked list.
    Runs in O(kn log n) time.
    '''
    if linked_list.size() == 1:
        return linked_list
    elif linked_list.is_empty():
        return linked_list

    left_half, right_half = split(linked_list)

    left = merge_sort(left_half)
    right = merge_sort(right_half)

    return merge(left, right)


def split(linked_list):
    '''
    Divide the unsorted list at the midpoint into sublists.
    Takes O(k log n) quasilinear time.
    '''
    if linked_list == None or linked_list.head == None:
        left_half = linked_list
        right_half = None
        return left_half, right_half
    else:  # non-empty linked lists
        size = linked_list.size()
        midpoint = size // 2
        mid_node = linked_list.node_at_index(midpoint-1)

        left_half = linked_list
        right_half = LinkedList()
        # The right sublist must be a LinkedList, so attach the split-off
        # chain to its head (assigning the node directly would break the
        # recursive merge_sort calls, which expect size()/is_empty()/head).
        right_half.head = mid_node.next_node
        mid_node.next_node = None

        return left_half, right_half


def merge(left, right):
    '''
    Merges two linked lists, sorting by data in nodes.
    Returns a new, merged list.
    Runs in O(n) linear time.
    '''
    # Create a new linked list that contains nodes from
    # merging left and right
    merged = LinkedList()

    # Add a fake head that is discarded later to simplify code
    merged.add(0)

    # Set current to the head of the linked list
    current = merged.head

    # Obtain head nodes for left and right linked lists
    left_head = left.head
    right_head = right.head

    # Iterate over left and right until we reach the tail node of either
    while left_head or right_head:
        # If the head node of the left is None, we're past the tail
        # Add the node from right to merged linked list
        if left_head is None:
            current.next_node = right_head
            # Call next on right to set loop condition to False
            right_head = right_head.next_node
        # If the head node of right is None, we're past the tail
        # Add the tail node from left to merged linked list
        elif right_head is None:
            current.next_node = left_head
            # Call next on left to set loop condition to False
            left_head = left_head.next_node
        else:  # Not at either tail node
            # Obtain node data to perform comparison operations
            left_data = left_head.data
            right_data = right_head.data
            # If data on left is less than right, set current to left node
            if left_data < right_data:
                current.next_node = left_head
                # Move left head to next node
                left_head = left_head.next_node
            # If data on left is greater than right, set current to right node
            else:
                current.next_node = right_head
                # Move right head to next node
                right_head = right_head.next_node
        # Move current to next node
        current = current.next_node

    # Discard fake head and set first merged node as head
    head = merged.head.next_node
    merged.head = head

    return merged


l = LinkedList()
l.add(10)
l.add(2)
l.add(44)
l.add(15)
l.add(200)
print(l)

sorted_linked_list = merge_sort(l)
print(sorted_linked_list)
3.484375
3
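As a cross-check on the merge step above, here is a short editor-added sketch of the same two-pointer merge written against plain Python lists, since the linked_list module imported by that file is not included in this record; merge_lists is an illustrative name, not part of the original code.

def merge_lists(left, right):
    # Two-pointer merge: repeatedly take the smaller head element,
    # mirroring the node-by-node merge in the linked-list version.
    merged = []
    i = j = 0
    while i < len(left) or j < len(right):
        if i == len(left):
            merged.append(right[j])
            j += 1
        elif j == len(right):
            merged.append(left[i])
            i += 1
        elif left[i] < right[j]:
            merged.append(left[i])
            i += 1
        else:
            merged.append(right[j])
            j += 1
    return merged


assert merge_lists([2, 10, 44], [15, 200]) == [2, 10, 15, 44, 200]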
examples/calc.py
manatlan/htag
1
328
import os,sys; sys.path.insert(0,os.path.dirname(os.path.dirname(__file__))) from htag import Tag """ This example show you how to make a "Calc App" (with physical buttons + keyboard events) There is no work for rendering the layout ;-) Can't be simpler ! """ class Calc(Tag.div): statics=[Tag.H.style(""" .mycalc *,button {font-size:2em;font-family: monospace} """)] def init(self): self.txt="" self.aff = Tag.Div("&nbsp;",_style="border:1px solid black") self["class"]="mycalc" self <= self.aff self <= Tag.button("C", _onclick=self.bind( self.clean) ) self <= [Tag.button(i, _onclick=self.bind( self.press, i) ) for i in "0123456789+-x/."] self <= Tag.button("=", _onclick=self.bind( self.compute ) ) #-/-/-/-/-/-/-/-/-/-/-/-/-/-/-/-/-/-/-/-/-/-/-/-/-/-/-/-/-/-/-/ with real keyboard self["onkeyup"] = self.bind( self.presskey, b"event.key" ) def presskey(self,key): if key in "0123456789+-*/.": self.press(key) elif key=="Enter": self.compute() elif key in ["Delete","Backspace"]: self.clean() #-/-/-/-/-/-/-/-/-/-/-/-/-/-/-/-/-/-/-/-/-/-/-/-/-/-/-/-/-/-/-/ def press(self,val): self.txt += val self.aff.set( self.txt ) def compute(self): try: self.txt = str(eval(self.txt.replace("x","*"))) self.aff.set( self.txt ) except: self.txt = "" self.aff.set( "Error" ) def clean(self): self.txt="" self.aff.set("&nbsp;") if __name__=="__main__": # import logging # logging.basicConfig(format='[%(levelname)-5s] %(name)s: %(message)s',level=logging.DEBUG) # logging.getLogger("htag.tag").setLevel( logging.INFO ) # and execute it in a pywebview instance from htag.runners import * # here is another runner, in a simple browser (thru ajax calls) BrowserHTTP( Calc ).run() # PyWebWiew( Calc ).run()
1.625
2
libsaas/services/twilio/applications.py
MidtownFellowship/libsaas
155
344
from libsaas import http, parsers from libsaas.services import base from libsaas.services.twilio import resource class ApplicationsBase(resource.TwilioResource): path = 'Applications' class Application(ApplicationsBase): def create(self, *args, **kwargs): raise base.MethodNotSupported() class Applications(ApplicationsBase): @base.apimethod def get(self, FriendlyName=None, Page=None, PageSize=None, AfterSid=None): """ Fetch the Applications belonging to an account. :var FriendlyName: Only return the Account resources with friendly names that exactly match this name. :vartype FriendlyName: str :var Page: The current page number. Zero-indexed, so the first page is 0. :vartype Page: int :var PageSize: How many resources to return in each list page. The default is 50, and the maximum is 1000. :vartype PageSize: int :var AfterSid: The last Sid returned in the previous page, used to avoid listing duplicated resources if new ones are created while paging. :vartype AfterSid: str """ params = resource.get_params(None, locals()) request = http.Request('GET', self.get_url(), params) return request, parsers.parse_json def update(self, *args, **kwargs): raise base.MethodNotSupported() def delete(self, *args, **kwargs): raise base.MethodNotSupported() class ConnectAppsBase(resource.TwilioResource): path = 'ConnectApps' def create(self, *args, **kwargs): raise base.MethodNotSupported() def delete(self, *args, **kwargs): raise base.MethodNotSupported() class ConnectApp(ConnectAppsBase): pass class ConnectApps(ConnectAppsBase): @base.apimethod def get(self, Page=None, PageSize=None, AfterSid=None): """ Fetch the Connect Apps belonging to an account. :var Page: The current page number. Zero-indexed, so the first page is 0. :vartype Page: int :var PageSize: How many resources to return in each list page. The default is 50, and the maximum is 1000. :vartype PageSize: int :var AfterSid: The last Sid returned in the previous page, used to avoid listing duplicated resources if new ones are created while paging. :vartype AfterSid: str """ params = resource.get_params(None, locals()) request = http.Request('GET', self.get_url(), params) return request, parsers.parse_json def update(self, *args, **kwargs): raise base.MethodNotSupported() class AuthorizedConnectAppsBase(resource.TwilioResource): path = 'AuthorizedConnectApps' def create(self, *args, **kwargs): raise base.MethodNotSupported() def update(self, *args, **kwargs): raise base.MethodNotSupported() def delete(self, *args, **kwargs): raise base.MethodNotSupported() class AuthorizedConnectApp(AuthorizedConnectAppsBase): pass class AuthorizedConnectApps(AuthorizedConnectAppsBase): @base.apimethod def get(self, Page=None, PageSize=None, AfterSid=None): """ Fetch the Authorized Connect Apps belonging to an account. :var Page: The current page number. Zero-indexed, so the first page is 0. :vartype Page: int :var PageSize: How many resources to return in each list page. The default is 50, and the maximum is 1000. :vartype PageSize: int :var AfterSid: The last Sid returned in the previous page, used to avoid listing duplicated resources if new ones are created while paging. :vartype AfterSid: str """ params = resource.get_params(None, locals()) request = http.Request('GET', self.get_url(), params) return request, parsers.parse_json
1.71875
2
MarkReport/MarkReport.py
dedukun/MarkReport
0
360
#!/usr/bin/env python3 # Command line flags import os import glob import re import pyinotify import subprocess from sys import stdout, stderr from time import time, sleep from tempfile import gettempdir from distutils.dir_util import copy_tree from shutil import copyfile from weasyprint import HTML import argparse parser = argparse.ArgumentParser( description='Converts Markdown to elegant PDF reports') parser.add_argument('--basic', dest='basic', action='store_true', help='Do not enrich HTML with LaTeX and syntax highlighting (faster builds)') parser.add_argument('--watch', dest='watch', action='store_true', help='Watch the current folder for changes and rebuild automatically') parser.add_argument('--quiet', dest='quiet', action='store_true', help='Do not output any information') parser.add_argument("--timeout", type=int, default=2, help='Page generation timeout') parser.add_argument("--base-html", type=str, default="", help='The path to the base HTML file') parser.set_defaults(watch=False) args = parser.parse_args() # Check directory ok = False for file in os.listdir("."): if file.endswith(".md"): ok = True break if not ok: stderr.write("No markdown file found in the current folder") exit(1) if args.base_html != "": if not os.path.isfile(args.base_html): stderr.write("The given base HTML file doesn't exist") exit(1) script_path = os.path.dirname(os.path.realpath(__file__)) # Temp dir timestamp = str(int(time())) tmp_dir = gettempdir() + "/" + timestamp + "_md-report/" os.makedirs(tmp_dir, exist_ok=True) # Headless browser if not args.basic: from selenium import webdriver from selenium.webdriver.firefox.options import Options from selenium.webdriver.common.desired_capabilities import DesiredCapabilities options = Options() options.headless = True options.log.level = "trace" d = DesiredCapabilities.FIREFOX d['loggingPrefs'] = {'browser': 'ALL'} driver = webdriver.Firefox(options=options, capabilities=d) driver.set_page_load_timeout(args.timeout) prev_compile_time = 0 def recompile(notifier): if notifier is not None and (notifier.maskname != "IN_MODIFY" or notifier.pathname.endswith(".pdf")): return global prev_compile_time if time() - prev_compile_time < 1: return prev_compile_time = time() if not args.quiet: stdout.write("\rBuilding the PDF file...") stdout.flush() files = glob.glob(tmp_dir + '/*.md') for f in files: os.remove(f) if args.base_html == "": copyfile(script_path + "/base.html", tmp_dir + "/base.html") else: copyfile(args.base_html, tmp_dir + "/base.html") if not os.path.islink(tmp_dir + "/src"): os.symlink(script_path + "/src", tmp_dir + "/src") copy_tree(".", tmp_dir) # Markdown parsing subprocess.check_output(script_path + "/md-parsing " + tmp_dir, shell=True).decode('utf-8') html_file_name = tmp_dir + "output.html" # Interpret JS code if not args.basic: driver.get("file:///" + html_file_name) sleep(2) elem = driver.find_element_by_xpath("//*") interpreted_html = elem.get_attribute("outerHTML") with open(html_file_name, "w") as html_out_file: html_out_file.write(interpreted_html) # Create final PDF file pdf = HTML(html_file_name).write_pdf() f = open("output.pdf", 'wb') f.write(pdf) if not args.quiet: stdout.write("\rDone. ") stdout.flush() recompile(None) if not args.watch: if not args.basic: driver.quit() exit(0) watch_manager = pyinotify.WatchManager() event_notifier = pyinotify.Notifier(watch_manager, recompile) watch_manager.add_watch(os.path.abspath("."), pyinotify.ALL_EVENTS, rec=True) event_notifier.loop() if not args.basic: driver.quit()
1.65625
2
beansdbadmin/core/client.py
ariesdevil/beansdbadmin
11
400
#!/usr/bin/python # encoding: utf-8 '''a rich client 1. for one server (instead of multi like in libmc.Client) 2. encapsulate @, ?, gc ... use is instead of libmc.Client ''' import telnetlib import logging import libmc import string import urllib import itertools import warnings from collections import defaultdict from beansdbadmin.core.hint import parse_new_hint_body from beansdbadmin.core.data import parse_records from beansdbadmin.core.hash import get_khash64 def get_url_content(url): return urllib.urlopen(url).read() def check_bucket(bucket): assert 0 <= bucket < 16 def dir_to_dict(dir_str): d = dict() if dir_str: for line in [x for x in dir_str.split('\n') if x]: key_or_bucket, _hash, ver_or_count = line.split(' ') d[key_or_bucket] = int(_hash) & 0xffff, int(ver_or_count) return d def get_bucket_keys_count(store, bucket, depth=1): cmd = "@" sub = bucket if depth == 2: cmd = "@%x" % (bucket/16) sub = bucket % 16 result = store.get(cmd) if result: lines = result.split('\n') for line in lines: if len(line) == 0: continue d, _, c = line.split() if d.endswith('/'): bucket_ = int(d[0], 16) if bucket_ == sub: return int(c) raise Exception('get %s from %s, reply = [%s], bucket %x not found' % (cmd, store, result, bucket)) def get_buckets_keys_count(store): """ return dict: buckets -> count """ st = {} try: for line in (store.get('@') or '').split('\n'): if line: d, _, c = line.split(' ') if not d.endswith('/'): continue st[int(d[0], 16)] = int(c) return st except IOError: raise Exception("cannot get @ from %s" % (store)) def get_primary_buckets(store): """ return possible primary buckets, might be wrong on temporary nodes, result is list of buckets in integer """ ss = get_buckets_keys_count(store) bucket_list = ss.items() bucket_list = [x for x in bucket_list if x[1] > 0] if not bucket_list: return None bucket_list.sort(lambda a, b: cmp(a[1], b[1]), reverse=True) result = [bucket_list[0]] for i in bucket_list[1:]: if result[-1][1] / i[1] >= 2: break result.append(i) return [x[0] for x in result] def get_key_info_disk(store, key): '''return ver, vhash, flag, vsz, ts, fid, pos''' info = store.get('??' + key) if info: return [int(x) for x in info.split()] def is_gc_running(ip, port): s = get_gc_status(ip, port) if s and s.find('running') >= 0: return True return False def get_gc_status(ip, port): t = telnetlib.Telnet(ip, port) t.write('optimize_stat\r\n') out = t.read_until('\n') t.write('quit\r\n') t.close() return out.strip("\r\n") def connect(server, **kwargs): comp_threshold = kwargs.pop('comp_threshold', 0) prefix = kwargs.pop('prefix', None) if prefix is not None: warnings.warn('"prefix" is deprecated. 
' 'use douban.wrapper.Prefix instead.') c = libmc.Client([server], do_split=0, comp_threshold=comp_threshold, prefix=prefix) c.config(libmc.MC_CONNECT_TIMEOUT, 300) # 0.3s c.config(libmc.MC_POLL_TIMEOUT, 3000) # 3s c.config(libmc.MC_RETRY_TIMEOUT, 5) # 5s return c class MCStore(object): IGNORED_LIBMC_RET = frozenset([ libmc.MC_RETURN_OK, libmc.MC_RETURN_INVALID_KEY_ERR ]) def __init__(self, addr): self.addr = addr self.host, port = addr.split(":") self.port = int(port) self.mc = connect(addr) def __repr__(self): return '<MCStore(addr=%s)>' % repr(self.addr) def __str__(self): return self.addr def set(self, key, data, rev=0): return bool(self.mc.set(key, data, rev)) def set_raw(self, key, data, rev=0, flag=0): if rev < 0: raise Exception(str(rev)) return self.mc.set_raw(key, data, rev, flag) def set_multi(self, values, return_failure=False): return self.mc.set_multi(values, return_failure=return_failure) def _check_last_error(self): last_err = self.mc.get_last_error() if last_err not in self.IGNORED_LIBMC_RET: raise IOError(last_err, self.mc.get_last_strerror()) def get(self, key): try: r = self.mc.get(key) if r is None: self._check_last_error() return r except ValueError: self.mc.delete(key) def get_raw(self, key): r, flag = self.mc.get_raw(key) if r is None: self._check_last_error() return r, flag def get_multi(self, keys): r = self.mc.get_multi(keys) self._check_last_error() return r def delete(self, key): return bool(self.mc.delete(key)) def delete_multi(self, keys, return_failure=False): return self.mc.delete_multi(keys, return_failure=return_failure) def exists(self, key): return bool(self.mc.get('?' + key)) def incr(self, key, value): return self.mc.incr(key, int(value)) class DBClient(MCStore): def __init__(self, addr): MCStore.__init__(self, addr) self._is_old = None def stats(self): stats = self.mc.stats() return stats.values()[0] if stats else None def is_old(self): if self._is_old is None: ver = self.get_server_version() self._is_old = (ver.strip().split(".")[0] == "0") return self._is_old def get_collision_summary(self, bucket): check_bucket(bucket) raw = self.get("@collision_%x" % bucket) if raw is None: return None count, hcount, khash, data_size = raw.split() return (int(count), int(hcount), int(khash, 16), int(data_size)) def get_collision(self, bucket): check_bucket(bucket) collisions = defaultdict(dict) hint_data = self.get("@collision_all_%x" % bucket) if hint_data is None: return dict() for key, meta, _ in parse_new_hint_body(hint_data): khash_str, _, ver, vhash = meta collisions[khash_str][key] = (vhash, ver) return dict(collisions) def get_records_by_khash_raw(self, khash): if self.is_old(): return [] if not isinstance(khash, str): khash = "%016x" % khash return self.get("@@" + khash) def get_records_by_khash(self, khash_str): raw = self.get_records_by_khash_raw(khash_str) if raw: return parse_records(raw, False) else: return [] def start_gc(self, bucket='', start_fid=0, end_fid=None): """ bucket must be in 0 or 00 string """ if bucket: assert isinstance(bucket, basestring) and len(bucket) <= 2 t = telnetlib.Telnet(self.host, self.port) tree = '@%s' % bucket if end_fid is None: gc_cmd = 'gc {} {}\n'.format(tree, start_fid) else: gc_cmd = 'gc {} {} {}\n'.format(tree, start_fid, end_fid) t.write(gc_cmd) out = t.read_until('\n').strip('\r\n') assert out == 'OK' t.write('quit\n') t.close() def start_gc_all_buckets(self, db_depth): hex_digits = string.digits + 'abcdef' buckets_iter = itertools.product(*[hex_digits for _ in range(db_depth)]) buckets = [''.join(i) for i in 
buckets_iter] self.start_gc_buckets(buckets) def start_gc_buckets(self, buckets): for b in buckets: self.start_gc(bucket=b) while True: status = self.get_gc_status() if status.find('running') >= 0: continue elif status == 'success': print "bucket %s gc done" % b break elif status == 'fail': return self.fail("optimize_stat = fail") else: self.fail(status) def get_gc_status(self): return get_gc_status(self.host, self.port) def get_version(self, key): meta = self.get("?" + key) if meta: return int(meta.split()[0]) def item_count(self): s = self.stats() if s is None: return None return int(s['total_items']) def get_key_info_mem(self, key, khash64=None): ''' return (vhash, ver) or None''' if khash64 is None: khash64 = get_khash64(key) khash32_str = "@%08x" % (khash64 >> 32) _dir = self.get_dir(khash32_str) if self.is_old(): return _dir.get(key, None) else: return _dir.get("%016x" % khash64, None) def get_khash_info_mem(self, khash): ''' return [(key, (vhash, ver))], key is "" for v2.''' khash32 = "@%08x" % (khash >> 32) _dir = self.get_dir(khash32) ret = [] if self.is_old(): for k, (vhash, ver) in _dir.iteritems(): if get_khash64(k) == khash: ret.append((k, (vhash, ver))) else: for k, (vhash, ver) in _dir.iteritems(): if int(k, 16) == khash: return [("", (int(vhash), ver))] return ret def get_server_version(self): try: st = self.stats() if st: return st["version"] except IOError: logging.error("fail to get version %s", self) except KeyError: logging.error("fail to get version %s %s", self, st) def get_dir(self, path): ''' return dict case1: map dir(0-f) to (hash, count), like {'0/': (1471, 27784005), ... }, case2: map key(or khash) to (vhash, version), like {'3000000377e9c2ad': (22212, 1), ... }''' try: content = self.get(path) except IOError: content = '' return dir_to_dict(content) def list_dir(self, d): # FIXME: d should not need prefix @? '''list all KEY in the dir! not use it if dir is large!''' for path, (vhash, ver) in sorted(self.get_dir(d).items()): if path.endswith('/') and len(path) == 2: for v in self.list_dir(d + path[:-1]): yield v else: yield path, int(vhash), int(ver) def get_bucket_keys_count(self, bucket, depth=1): return get_bucket_keys_count(self, bucket, depth) def get_key_info_disk(self, key): '''return ver, vhash, flag, vsz, ts, fid, pos''' return get_key_info_disk(self, key) def prepare(self, data): return libmc.encode_value(data, self.mc.comp_threshold) def close(self): pass def test_new(addr, bucket): b = bucket c = DBClient(addr) print "stats:", c.stats() print 'version:', c.get_server_version() print "isold:", c.is_old() print "dir root:", c.get_dir("@") print "bucket key count:", c.get_bucket_keys_count(int(b)) print "item_count:", c.item_count() print "primary_buckets", get_primary_buckets(c) leaf = c.get_dir("@" + b + "000000") print "a dir leaf:", leaf khash_str = list(leaf)[0] print "a khash_str", khash_str r = c.get_records_by_khash(khash_str)[0] k = r[0] print "key, len(value), (flag, tstamp, ver):", k, r[1], r[3:] print "key info mem:", c.get_key_info_mem(k) print "key info disk(ver, vhash, flag, vsz, ts, fid, pos):", \ c.get_key_info_disk(k) print "key version:", c.get_version(k) print "collision_summary", c.get_collision_summary(int(b)) print "gc status:", c.get_gc_status() if __name__ == '__main__': test_new("rosa3a:7900", '3')
1.554688
2
TransactionBook/gui_kivy/generic/MultiSelectPopUp.py
LukHad/AccountBook
0
408
from kivy.uix.gridlayout import GridLayout from kivy.uix.label import Label from kivy.uix.textinput import TextInput from kivy.garden.matplotlib.backend_kivyagg import FigureCanvasKivyAgg from kivy.uix.anchorlayout import AnchorLayout from kivy.uix.boxlayout import BoxLayout from kivy.uix.button import Button import matplotlib.pyplot as plt import matplotlib import datetime from TransactionBook.model.Filter import Filter from datetime import datetime from kivy.uix.popup import Popup from kivy.properties import NumericProperty, ReferenceListProperty from kivy.uix.checkbox import CheckBox from kivy.core.window import Window class MultiSelectPopUp(Popup): pHint_x = NumericProperty(0.7) pHint_y = NumericProperty(0.7) pHint = ReferenceListProperty(pHint_x, pHint_y) def __init__(self, title, option_list, option_init=None, callback=None, multiselect=True, **kwargs): super().__init__(**kwargs) self.title = title self.callback = callback self.main_layout = AnchorLayout() if option_init is None: option_init = [True] * len(option_list) self.grid = GridLayout(cols=1) self.opt_boxes = [] self.labels = [] for i, opt in enumerate(option_list): box = BoxLayout(orientation='horizontal') check_box = CheckBox(active=option_init[i]) if not multiselect: check_box.group = "Single_Select_Only_Group" label = Label(text=str(opt)) self.opt_boxes.append(check_box) self.labels.append(label) box.add_widget(check_box) box.add_widget(label) self.grid.add_widget(box) cancel_button = Button(text="Cancel") cancel_button.bind(on_press=self.cancel_callback) ok_button = Button(text="Ok") ok_button.bind(on_press=self.ok_callback) box = BoxLayout(orientation='horizontal') box.add_widget(cancel_button) box.add_widget(ok_button) self.grid.add_widget(box) self.main_layout.add_widget(self.grid) self.content = self.main_layout self.size_hint = self.pHint Window.release_all_keyboards() self.open() def ok_callback(self, _): selection = [] for i, check_box in enumerate(self.opt_boxes): if check_box.active: selection.append(self.labels[i].text) self.callback(selection) self.dismiss() def cancel_callback(self, _): self.dismiss() if __name__ == "__main__": from kivy.base import runTouchApp def cb(list_of_selection): print(list_of_selection) c = MultiSelectPopUp(title="Test", option_list=["Item1", "Item2", "Item3"], callback=cb, option_init=[True, False, True]) runTouchApp(c)
1.8125
2
microbepy/plot/mutation_plot.py
ScienceStacks/MicrobEPy
1
416
"""Provides plots of mutations for Isolates and Lines.""" from microbepy.common import constants as cn from microbepy.common.dataframe_sorter import DataframeSorter from microbepy.common.isolate import Isolate from microbepy.common import util from microbepy.correlation import genome_correlation from microbepy.data.model_data_provider import ModelDataProvider from microbepy.data import util_data from microbepy.plot.mutation_cofraction import MutationCofraction from microbepy.plot.util_plot import PlotParms import matplotlib.pyplot as plt import numpy as np import pandas as pd import seaborn as sns COLORS = ['red', 'green', 'blue'] SPECIES = {cn.SPECIES_MIX_DVH: "DVH", cn.SPECIES_MIX_MMP: "MMP", None: "both"} FONTSIZE_TITLE = 16 FONTSIZE_LABEL = 8 MAX_LINES = 9 MIN_FRACTION = 0.25 THRESHOLD_FRAC = 0.2 MAX_SIGLVL = 0.01 COLORBAR_MIN = 1.0 COLORBAR_MAX = 4.0 class MutationLinePlot(object): """ Plot mutations by occurrences within Lines. """ def __init__(self, mutation_column=cn.GGENE_ID, species=None, is_plot=True): """ :param str mutation_column: :param bool is_plot: """ self._mutation_column = mutation_column self._is_plot = is_plot self._species = species self.cofraction = MutationCofraction(species=self._species, mutation_column=mutation_column) def plotTransfers(self, parms=PlotParms(is_initialize=False), is_unit_fraction = False, is_cluster_mutations=True): """ Does a stacked bar plot of mutation frequency for all transfers. :param bool is_unit_fraction: round fraction to 1 :param bool is_cluster_mutations: Group similar mutations together :return pd.DataFrame: row=mutation, col=line + transfer, value is fraction """ permitted_mutations = self.cofraction.ordered_mutations transfers = self.cofraction.transfers num_transfers = len(transfers) fig, axes = plt.subplots(nrows=num_transfers, ncols=1) dfs = [] for idx, transfer in enumerate(transfers): parms[cn.PLT_YTICKLABELS] = True if self._species is None: parms[cn.PLT_TITLE] = "%d" % transfer else: parms[cn.PLT_TITLE] = "%s, %d" % (self._species, transfer) if idx == 0: parms[cn.PLT_YLABEL] = True else: parms[cn.PLT_YLABEL] = False if idx < num_transfers - 1: parms[cn.PLT_LEGEND] = False parms[cn.PLT_XLABEL] = False parms[cn.PLT_XTICKLABELS] = False else: parms[cn.PLT_LEGEND] = True parms[cn.PLT_XLABEL] = True parms[cn.PLT_XTICKLABELS] = True df = self.plotLine(transfer, parms=parms, is_plot=False, ax=axes[idx], permitted_mutations=permitted_mutations, is_unit_fraction=is_unit_fraction) df[cn.TRANSFER] = transfer dfs.append(df) if self._is_plot: plt.show() return pd.concat(dfs) def plotLine(self, transfer, parms=PlotParms(is_initialize=False), is_unit_fraction=False, is_plot=None, ax=None, permitted_mutations=None): """ Does a stacked bar plot of mutation frequency by line with colors :params int transfer: :params PlotParms parms: :params Axis ax: axis to use in plot :param list-str permitted_mutations: to use and how they are ordered if None, then use alphabetical order :param bool is_unit_fraction: round non-zero fraction to 1 :return pd.DataFrame: row=mutation, col=line, value is fraction """ if is_plot is None: is_plot = self._is_plot parms.setTrueIfAbsent(cn.PLT_XLABEL) parms.setTrueIfAbsent(cn.PLT_XTICKLABELS) # df_plot = self.cofraction.makeLineDF( permitted_mutations=permitted_mutations, transfer=transfer) if is_unit_fraction: df_plot = df_plot.applymap( lambda v: 1 if v> MIN_FRACTION else v) # Do the plot if not cn.PLT_FIGSIZE in parms: parms[cn.PLT_FIGSIZE] = (12, 8) if ax is None: ax = df_plot.plot(kind='bar', stacked=True, 
figsize=parms[cn.PLT_FIGSIZE], legend=None) else: df_plot.plot(kind='bar', stacked=True, legend=None, ax=ax, figsize=parms[cn.PLT_FIGSIZE]) ax.set_xlabel("", fontsize=FONTSIZE_LABEL) # Eliminate implicit label if parms.isFalse(cn.PLT_XTICKLABELS): labels = ax.get_xticklabels() new_labels = np.repeat("", len(labels)) ax.set_xticklabels(new_labels) if parms.isFalse(cn.PLT_YTICKLABELS): labels = ax.get_yticklabels() new_labels = np.repeat("", len(labels)) ax.set_yticklabels(new_labels) if cn.PLT_TITLE in parms: title = parms[cn.PLT_TITLE] else: title = "%s Mutations" % SPECIES[self._species] xpos = int(len(df_plot)*0.5) ypos = MAX_LINES - 3 ax.text(xpos, ypos, title, fontsize=FONTSIZE_TITLE) ax.set_ylim([0, MAX_LINES]) if parms.isTrue(cn.PLT_YLABEL): if is_unit_fraction: label = "No. Lines" else: label = "Fraction" ax.set_ylabel(label , fontsize=FONTSIZE_LABEL) if parms.isTrue(cn.PLT_XLABEL): ax.set_xlabel(self._mutation_column, fontsize=FONTSIZE_LABEL) if parms.isTrue(cn.PLT_LEGEND): ax.legend(loc=(1,2)) #ax.legend() if is_plot: plt.show() return df_plot def _makeMutationSiglvlMatrix(self, transfer=cn.TRANSFER_DEFAULT, other_transfer=None, min_fraction=MIN_FRACTION): """ Creates a significance level matrix for mutations. :param int transfer: transfer time for row mutations :param int other_transfer: transfer time for column mutations :param float min_fraction: minimum fractional occurrence of a mutation within a line for it to be considered :return pd.DataFrame: row index and columns are mutations """ def makeDF(transfer): df_line = self.cofraction.makeLineDF(transfer=transfer) df_binary = df_line.applymap( lambda v: 0 if np.isnan(v) else v) df_binary = df_line.applymap( lambda v: 1.0 if v > min_fraction else 0) return df_binary.transpose() # if other_transfer is None: other_transfer = transfer # df_binary_rows = makeDF(transfer) df_binary_columns = makeDF(other_transfer) df_matrix = genome_correlation.makeSiglvlDF(df_binary_rows, df_other=df_binary_columns) return df_matrix def _plotSiglvlDF(self, transfer=cn.TRANSFER_DEFAULT, other_transfer=None, max_siglvl=MAX_SIGLVL): """ Constructs a the dataframe used for heatmap. :param int transfer: :param float max_siglvl: :return pd.DataFrame: mutations, mutations, values are -log10 significance level """ df_matrix = self._makeMutationSiglvlMatrix(transfer=transfer, other_transfer=other_transfer) sorter = DataframeSorter(df_matrix) df_sort = sorter.orderBoth() # df_transformed = df_sort.applymap(lambda v: np.log10(v)) df_transformed = df_transformed.applymap(lambda v: -v) ubound = -np.log10(max_siglvl) df_plot = df_transformed.applymap( lambda v: np.nan if v < ubound else v) sorter = DataframeSorter(df_plot) df_plot = sorter.deleteNanRowsAndColumns() return df_plot def plotCofractions(self, is_time_lag=False, threshold_frac=THRESHOLD_FRAC, is_difference_frac=False, is_differenced=False, is_compress=False, parms=PlotParms(), **kwargs): """ Does a subplots of the fraction of lines in which mutations co-occur. 
:param bool is_time_lag: construct time lag subplots :param bool is_differenced: Computes the difference in count fractions :param dict kwargs: non-transfer parameters passed to next level :return dict: key is pair of transfers, value is data_frame """ def funcDF(transfer, other_transfer): if is_differenced: df = self.cofraction.makeCofractionDifferencedDF( transfer=transfer, other_transfer=other_transfer, threshold_frac=threshold_frac) else: df = self.cofraction.makeCofractionDF(transfer=transfer, is_difference_frac=is_difference_frac, other_transfer=other_transfer) if is_compress: df.dropna(axis=0, how='all', inplace=True) df.dropna(axis=1, how='all', inplace=True) return df # return self._plotTransfers(funcDF, is_time_lag, parms=parms, heat_range=[0, 1.0], **kwargs) def plotSiglvls(self, is_time_lag=False, max_siglvl=MAX_SIGLVL, parms=PlotParms(), **kwargs): """ Does a subplots of mutation correlation significance levels. :param bool is_time_lag: construct time lag subplots :param dict kwargs: non-transfer parameters passed to next level :return dict: key is pair of transfers, value is data_frame """ def funcDF(transfer, other_transfer): return self._plotSiglvlDF(transfer=transfer, max_siglvl=max_siglvl, other_transfer=other_transfer) # return self._plotTransfers(funcDF, is_time_lag, parms=parms, heat_range = [COLORBAR_MIN, COLORBAR_MAX], **kwargs) def _plotTransfers(self, funcDF, is_time_lag, parms=PlotParms(), **kwargs): """ Does a subplots of mutation mutations over transfers. :param Function funcDF: has kwargs transfer, other_transfer; returns a dataframe of mutations as columns and index; values are used in the heatmap. :param bool is_time_lag: construct time lag subplots :param dict kwargs: non-transfer parameters passed to next level :return dict: key is pair of transfers, value is data_frame """ NCOLS = 3 plot_pos = {1:1, 2:3, 3:4, 4:6} NPLOTS = 6 transfers = self.cofraction.transfers if is_time_lag: pairs = [p for p in zip(transfers[:-1], transfers[1:])] else: pairs = [p for p in zip(transfers[:-1], transfers[:-1])] # # Calculate the column order df = funcDF(transfer=cn.TRANSFER_1000G, other_transfer=cn.TRANSFER_1000G) df = df.fillna(0) # Set up for plots nrows = 2 if (len(pairs) == 4) else 3 fig = plt.figure(figsize=parms[cn.PLT_FIGSIZE]) result = {} for idx, pair in enumerate(pairs): idx += 1 ax = fig.add_subplot(nrows, NCOLS, plot_pos[idx]) if idx < len(pairs): is_plot = False else: is_plot = True if idx in [1, 2, 5]: parms[cn.PLT_XAXISTICKTOP] = True else: parms[cn.PLT_XAXISTICKTOP] = False if idx == 4: parms[cn.PLT_COLORBAR] = True else: parms[cn.PLT_COLORBAR] = False transfer = pair[0] other_transfer = pair[1] df = funcDF(transfer=transfer, other_transfer=other_transfer) df = df.applymap(lambda v: np.nan if v == 0 else v) self._plotTransferCompare(df, transfer=transfer, other_transfer=other_transfer, ordered_columns=self.cofraction.ordered_mutations, is_center_colorbar=True, fig=fig, ax=ax, parms=parms, is_plot=is_plot, **kwargs) result[pair] = df return result def plotSiglvl(self, max_siglvl=MAX_SIGLVL, transfer=cn.TRANSFER_DEFAULT, other_transfer=None, is_center_colorbar = True, **kwargs): """ Constructs a heatmap of the mutation coocurrence significance levels. 
:param float max_siglvl: maximum significance level :return pd.DataFrame: columns, rows are mutations """ df_plot = self._plotSiglvlDF(transfer=transfer, other_transfer=other_transfer, max_siglvl=max_siglvl) self._plotTransferCompare(df_plot, heat_range = [COLORBAR_MIN, COLORBAR_MAX], ordered_mutations=self.cofraction.ordered_mutations, transfer=transfer, other_transfer=other_transfer, is_center_colorbar=is_center_colorbar, **kwargs) return df_plot def plotCofraction(self, threshold_frac=THRESHOLD_FRAC, transfer=cn.TRANSFER_DEFAULT, other_transfer=None, is_difference_frac=False, is_differenced=False, is_center_colorbar=True, is_compress=False, parms=PlotParms(), **kwargs): """ Constructs a heatmap of the mutation coocurrence fractions. :param int transfer: Transfer for which plot is done :param bool is_differenced: Computes the difference in count fractions :param bool is_compress: Eliminate rows/columns with 0 values :return pd.DataFrame: columns, rows are mutations """ if is_differenced: df = self.cofraction.makeCofractionDifferencedDF( threshold_frac=threshold_frac, transfer=transfer, other_transfer=other_transfer, **kwargs) df = df.applymap(lambda v: np.nan if np.abs(v) < threshold_frac else v) else: df = self.cofraction.makeCofractionDF(transfer=transfer, is_difference_frac=is_difference_frac, other_transfer=other_transfer, **kwargs) df = df.applymap(lambda v: np.nan if v < threshold_frac else v) if is_compress: df.dropna(axis=0, how='all', inplace=True) df.dropna(axis=1, how='all', inplace=True) is_include_missing_mutations = False else: is_include_missing_mutations = True ordered_columns = self.cofraction.ordered_mutations self._plotTransferCompare(df, heat_range=[0, 1.0], ordered_columns=ordered_columns, parms=parms, transfer=transfer, other_transfer=other_transfer, is_center_colorbar=is_center_colorbar, is_include_missing_mutations=is_include_missing_mutations, **kwargs) return df def _plotTransferCompare(self, df_plot, heat_range, ordered_columns=None, is_center_colorbar=True, transfer=cn.TRANSFER_DEFAULT, other_transfer=None, ax=None, fig=None, is_include_missing_mutations=True, parms=PlotParms(), is_plot=None): """ Constructs a heatmap comparing values for mutations from two transfers. 
:param pd.DataFrame df_plot: index and columns are mutations; values are plotted on the heatmap :param list-str ordered_columns: order in which columns appear :param bool is_center_colorbar: center the colorbar in the plot :param float, float: values on the heatmap range :param int transfer: :param int other_transfer: Allow comparisons across time :param Matplotlib.Axes ax: :param PlotParms parms: Parameters for the plot :param bool is_plot: Overrides constructor plotting directive :param bool is_include_missing_mutations: """ def makeLabel(transfer, column, is_include_column=False): if is_include_column: label = "%d-%s" % (transfer, column) else: label = "%d" % transfer return label def setValue(a_dict, key, default): if not key in a_dict.keys(): a_dict[key] = default # if is_plot is None: is_plot = self._is_plot elif not self._is_plot: is_plot = self._is_plot if ordered_columns is None: ordered_columns = list(set(df_plot.columns.tolist()).union( df_plot.index)) # Do the plot if not cn.PLT_COLORBAR in parms: parms[cn.PLT_COLORBAR] = True if other_transfer is None: other_transfer = transfer if ax is None: if fig is None: fig = plt.figure(figsize=parms[cn.PLT_FIGSIZE]) ax = fig.add_subplot(1, 1, 1) # Order the columns if is_include_missing_mutations: columns = df_plot.columns.tolist() missing_columns = set(ordered_columns).difference(columns) extended_ordered_columns = list(ordered_columns) extended_ordered_columns.extend( set(columns).difference(ordered_columns)) for col in missing_columns: df_plot[col] = np.nan df_plot.loc[col, :] = np.nan df_plot = df_plot.reindex(extended_ordered_columns) df_plot = df_plot[extended_ordered_columns] rows = df_plot.columns.tolist() columns = df_plot.columns.tolist() else: extended_ordered_columns = ordered_columns rows = df_plot.index.tolist() columns = df_plot.columns.tolist() mutations = df_plot.columns.tolist() # Set up plot information parms[cn.PLT_XLABEL] = "" setValue(parms, cn.PLT_COLORBAR, True) xpos = 1.05*len(columns) ypos = -0.05*len(rows) parms[cn.PLT_XLABEL] = "" xlabel = makeLabel(other_transfer, self._mutation_column) parms[cn.PLT_YLABEL] = makeLabel( transfer, self._mutation_column) ax.text(xpos, ypos, xlabel, fontsize=parms.fontsize_label) # # Construct the plot plot = ax.pcolor(df_plot, cmap='jet', vmin=heat_range[0], vmax=heat_range[1]) if parms.isTrue(cn.PLT_COLORBAR): if is_center_colorbar: # Colorbar positions: left, bottom, width, height cbaxes = fig.add_axes([.45, 0.2, 0.01, 0.5]) cb = fig.colorbar(plot, cax = cbaxes, cmap='jet') cb.ax.tick_params(labelsize=parms.fontsize_label) else: cb = fig.colorbar(plot, cmap='jet') cb.ax.tick_params(labelsize=parms.fontsize_label) row_labels = df_plot.columns.tolist() col_labels = df_plot.index.tolist() if parms.isTrue(cn.PLT_XAXISTICKTOP): ax.xaxis.tick_top() ax.set_xticks(np.arange(0.5, len(row_labels))) ax.set_xticklabels(row_labels, rotation=90, fontsize=parms.fontsize_label) ax.set_yticks(np.arange(0.5, len(col_labels))) ax.set_yticklabels(col_labels, fontsize=parms.fontsize_label) #parms[cn.PLT_YLABEL] = "" parms.do(is_plot=False) if is_plot: parms[cn.PLT_YLABEL] = "" parms.do(is_plot=False) ylabel = makeLabel(transfer, self._mutation_column) xpos = -3 ypos = 0.5*len(rows) ypos = -1 ax.set_ylabel(ylabel, fontsize=parms.fontsize_label, x=xpos, y=ypos) #plt.show() parms.do(is_plot=is_plot) else: parms.do(is_plot=is_plot)
2.34375
2
machine.py
yukti07/Dell_Hire_hack
0
448
import pandas as pd import matplotlib.pyplot as plt import seaborn as sns from sklearn.model_selection import train_test_split from sklearn.preprocessing import LabelEncoder from sklearn.ensemble import RandomForestClassifier from sklearn.metrics import accuracy_score from flask import flash import numpy as np def check(X, clf): # print("TTTTTTTTTTTTThis is XXXXXXXXXXXX") # print(X) X = np.array(X) labelencoder_X_1 = LabelEncoder() X[:, 1] = labelencoder_X_1.fit_transform(X[:, 1]) labelencoder_X_2 = LabelEncoder() X[:, 2] = labelencoder_X_2.fit_transform(X[:, 2]) labelencoder_X_5 = LabelEncoder() X[:, 5] = labelencoder_X_5.fit_transform(X[:, 5]) labelencoder_X_6 = LabelEncoder() X[:, 6] = labelencoder_X_6.fit_transform(X[:, 6]) labelencoder_X_7 = LabelEncoder() X[:, 7] = labelencoder_X_7.fit_transform(X[:, 7]) labelencoder_X_9 = LabelEncoder() X[:, 9] = labelencoder_X_9.fit_transform(X[:, 9]) labelencoder_X_12 = LabelEncoder() X[:, 12] = labelencoder_X_12.fit_transform(X[:, 12]) p = clf.predict(X) t = () for x in p: if x == 0: a = 'No' else: a = 'Yes' t = t+(a,) return t def analyze(df, clf): feature_importances = pd.DataFrame(clf.feature_importances_, index=['Age', 'BusinessTravel', 'Department', 'DistanceFromHome', 'Education', 'EducationField', 'Gender', 'JobRole', 'JobSatisfaction', 'MaritalStatus', 'MonthlyIncome', 'NumCompaniesWorked', 'OverTime', 'PercentSalaryHike', 'YearsInCurrentRole', 'YearsSinceLastPromotion'],columns=['importance']).sort_values('importance',ascending=False) feature_importances['x1'] = feature_importances.index ax = feature_importances.plot.bar(x='x1', y='importance', rot=90) plt.savefig('templates/graphs/raw/feature_importances.png', frameon=True) intervals = [x for x in range(0, 22000, 2000)] categories = ['<'+str(x) for x in range(2000, 22000, 2000)] df1 = df df1['Income_Categories'] = pd.cut(df.MonthlyIncome, intervals, labels=categories) ax = sns.countplot(x="Income_Categories", hue="Attrition", palette="Set1", data=df1) ax.set(title="Monthly Income vs Attrition", xlabel="Income group", ylabel="Total") plt.xticks(rotation=-30) plt.savefig('templates/graphs/raw/MIvsAttr.png') intervals = [x for x in range(18,63,3)] categories = ['<'+str(x) for x in range(21,63,3)] df1 = df df1['Age_Categories'] = pd.cut(df.Age, intervals, labels=categories) ax = sns.countplot(x="Age_Categories", hue="Attrition", palette="Set1", data=df1) ax.set(title="Age vs Attrition", xlabel="Age group", ylabel="Total") plt.xticks(rotation=-30) plt.savefig('templates/graphs/raw/AgevsAttr.png') intervals = [x for x in range(0,32,2)] categories = ['<'+str(x) for x in range(2,32,2)] df1 = df df1['Distance_from_home'] = pd.cut(df.DistanceFromHome, intervals, labels=categories) ax = sns.countplot(x="Distance_from_home", hue="Attrition", palette="Set1", data=df1) ax.set(title="Distance from home vs Attrition", xlabel="Distance", ylabel="Total") plt.xticks(rotation=-30) plt.savefig('templates/graphs/raw/DistanceFromHomevsAttr.png') ax = sns.countplot(x="PercentSalaryHike", hue="Attrition", palette="Set1", data=df1) ax.set(title="Salary Hike Percentage vs Attrition", xlabel="Salary Hike Percentage", ylabel="Total") plt.savefig('templates/graphs/raw/PercentSalaryHikevsAttr.png') ax = sns.countplot(x="NumCompaniesWorked", hue="Attrition", palette="Set1", data=df1) ax.set(title="Number Of Previously Worked Companies vs Attrition", xlabel="Number Of Previously Worked Companies", ylabel="Total") plt.savefig('templates/graphs/raw/NPWCvsAttr.png') intervals = [x for x in range(0,22,2)] categories = 
['<'+str(x) for x in range(2,22,2)] df1 = df df1['Current_Role'] = pd.cut(df.YearsInCurrentRole, intervals, labels=categories) ax = sns.countplot(x="Current_Role", hue="Attrition", palette="Set1", data=df1) ax.set(title="Number Of Years in Current Role vs Attrition", xlabel="Number Of Years in Current Role", ylabel="Total") plt.xticks(rotation=-30) plt.savefig('templates/graphs/raw/YICRvsAttr.png') ax = sns.countplot(x="OverTime", hue="Attrition", palette="Set1", data=df1) ax.set(title="Over Time vs Attrition", xlabel="Over Time", ylabel="Total") plt.savefig('templates/graphs/raw/OverTimevsAttr.png') ax = sns.countplot(x="JobRole", hue="Attrition", palette="Set1", data=df1) ax.set(title="Job Role vs Attrition", xlabel="Job Role", ylabel="Total") plt.xticks(rotation=70) plt.savefig('templates/graphs/raw/JobRolevsAttr.png') intervals = [x for x in range(0,18,2)] categories = ['<'+str(x) for x in range(2,18,2)] df1 = df df1['Promotion'] = pd.cut(df.YearsSinceLastPromotion, intervals, labels=categories) ax = sns.countplot(x="Promotion", hue="Attrition", palette="Set1", data=df1) ax.set(title="Number of Years since Promotion vs Attrition", xlabel="Number of Years since Promotion", ylabel="Total") plt.xticks(rotation=-30) plt.savefig('templates/graphs/raw/YSCPvsAttr.png') ax = sns.countplot(x="MaritalStatus", hue="Attrition", palette="Set1", data=df1) ax.set(title="Marital Status vs Attrition", xlabel="Marital Status", ylabel="Total") plt.savefig('templates/graphs/raw/MSvsAttr.png') def run(data): df = pd.read_csv('original_dataset.csv') skills = df['Skills'].tolist() # print("SKKKKKKKKKKKKKKKILLLLLLLLLLLLLLLS") # print(skills) df = df.drop(['DailyRate', 'EmployeeCount', 'YearsAtCompany', 'TotalWorkingYears', 'JobLevel', 'HourlyRate', 'MonthlyRate', 'Over18', 'StandardHours', 'EnvironmentSatisfaction', 'JobInvolvement', 'PerformanceRating', 'TrainingTimesLastYear', 'RelationshipSatisfaction', 'StockOptionLevel', 'WorkLifeBalance', 'YearsWithCurrManager'], axis=1) df = df[['Attrition', 'Age', 'BusinessTravel', 'Department', 'DistanceFromHome', 'Education', 'EducationField', 'Gender', 'JobRole', 'JobSatisfaction', 'MaritalStatus', 'MonthlyIncome', 'NumCompaniesWorked', 'OverTime', 'PercentSalaryHike', 'YearsInCurrentRole', 'YearsSinceLastPromotion']] #print("These re SKILSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSS") #print(skills) X = df.iloc[:, 1:].values y = df.iloc[:, 0].values labelencoder_X_1 = LabelEncoder() X[:, 1] = labelencoder_X_1.fit_transform(X[:, 1]) labelencoder_X_2 = LabelEncoder() X[:, 2] = labelencoder_X_2.fit_transform(X[:, 2]) labelencoder_X_5 = LabelEncoder() X[:, 5] = labelencoder_X_5.fit_transform(X[:, 5]) labelencoder_X_6 = LabelEncoder() X[:, 6] = labelencoder_X_6.fit_transform(X[:, 6]) labelencoder_X_7 = LabelEncoder() X[:, 7] = labelencoder_X_7.fit_transform(X[:, 7]) labelencoder_X_9 = LabelEncoder() X[:, 9] = labelencoder_X_9.fit_transform(X[:, 9]) labelencoder_X_12 = LabelEncoder() X[:, 12] = labelencoder_X_12.fit_transform(X[:, 12]) X = X.astype(float) labelencoder_y = LabelEncoder() y = labelencoder_y.fit_transform(y) X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.40,random_state=0) clf = RandomForestClassifier(n_estimators=200) clf.fit(X_train,y_train) p = clf.predict(X_test) acc = accuracy_score(y_test,p)*100 flash(acc) X = [list(elem) for elem in data] [r.pop(0) for r in X] #print("####### THIS IS XXXX##########") #print(X) att = check(X, clf) skills = skills[:(len(att)):] print("LLLLLLLLLLLLLLLENGHT" + str(len(att)) +" " + str(len(skills))) i = 0 
for row in att: X[i].insert(0, row) i = i+1 df1 = pd.DataFrame(X) df1.columns=['Attrition', 'Age', 'BusinessTravel', 'Department', 'DistanceFromHome', 'Education', 'EducationField', 'Gender', 'JobRole', 'JobSatisfaction', 'MaritalStatus', 'MonthlyIncome', 'NumCompaniesWorked', 'OverTime', 'PercentSalaryHike', 'YearsInCurrentRole', 'YearsSinceLastPromotion'] analyze(df, clf) df1.to_csv('dataset1.csv') return att, skills
2.0625
2
scripts/get_file_name_as_variable.py
amin-henteti/airflow-dags
0
472
import inspect


def foo():
    # Print the name of the currently executing function
    # (frame 0 of the stack, element 3 is the function name).
    print(inspect.stack()[0][3])


foo()
0.917969
1
examples/cmrc2018_example/main.trainer.py
fangd123/TextBrewer
1,121
480
import logging logging.basicConfig( format='%(asctime)s - %(levelname)s - %(name)s - %(message)s', datefmt='%Y/%m/%d %H:%M:%S', level=logging.INFO, ) logger = logging.getLogger("Main") import os,random import numpy as np import torch from processing import convert_examples_to_features, read_squad_examples from processing import ChineseFullTokenizer from pytorch_pretrained_bert.my_modeling import BertConfig from optimization import BERTAdam import config from utils import read_and_convert, divide_parameters from modeling import BertForQASimple, BertForQASimpleAdaptorTraining from textbrewer import DistillationConfig, TrainingConfig, BasicTrainer from torch.utils.data import TensorDataset, DataLoader, RandomSampler from functools import partial from train_eval import predict def args_check(args): if os.path.exists(args.output_dir) and os.listdir(args.output_dir): logger.warning("Output directory () already exists and is not empty.") if args.gradient_accumulation_steps < 1: raise ValueError("Invalid gradient_accumulation_steps parameter: {}, should be >= 1".format( args.gradient_accumulation_steps)) if not args.do_train and not args.do_predict: raise ValueError("At least one of `do_train` or `do_predict` must be True.") if args.local_rank == -1 or args.no_cuda: device = torch.device("cuda" if torch.cuda.is_available() and not args.no_cuda else "cpu") n_gpu = torch.cuda.device_count() if not args.no_cuda else 0 else: device = torch.device("cuda", args.local_rank) n_gpu = 1 torch.distributed.init_process_group(backend='nccl') logger.info("device %s n_gpu %d distributed training %r", device, n_gpu, bool(args.local_rank != -1)) args.n_gpu = n_gpu args.device = device return device, n_gpu def main(): #parse arguments config.parse() args = config.args for k,v in vars(args).items(): logger.info(f"{k}:{v}") #set seeds torch.manual_seed(args.random_seed) torch.cuda.manual_seed_all(args.random_seed) np.random.seed(args.random_seed) random.seed(args.random_seed) #arguments check device, n_gpu = args_check(args) os.makedirs(args.output_dir, exist_ok=True) forward_batch_size = int(args.train_batch_size / args.gradient_accumulation_steps) args.forward_batch_size = forward_batch_size #load bert config bert_config_S = BertConfig.from_json_file(args.bert_config_file_S) assert args.max_seq_length <= bert_config_S.max_position_embeddings #read data train_examples = None train_features = None eval_examples = None eval_features = None num_train_steps = None tokenizer = ChineseFullTokenizer(vocab_file=args.vocab_file, do_lower_case=args.do_lower_case) convert_fn = partial(convert_examples_to_features, tokenizer=tokenizer, max_seq_length=args.max_seq_length, doc_stride=args.doc_stride, max_query_length=args.max_query_length) if args.do_train: train_examples,train_features = read_and_convert(args.train_file,is_training=True, do_lower_case=args.do_lower_case, read_fn=read_squad_examples,convert_fn=convert_fn) if args.fake_file_1: fake_examples1,fake_features1 = read_and_convert(args.fake_file_1,is_training=True, do_lower_case=args.do_lower_case, read_fn=read_squad_examples,convert_fn=convert_fn) train_examples += fake_examples1 train_features += fake_features1 if args.fake_file_2: fake_examples2, fake_features2 = read_and_convert(args.fake_file_2,is_training=True, do_lower_case=args.do_lower_case, read_fn=read_squad_examples,convert_fn=convert_fn) train_examples += fake_examples2 train_features += fake_features2 num_train_steps = int(len(train_features)/args.train_batch_size) * args.num_train_epochs if 
args.do_predict: eval_examples,eval_features = read_and_convert(args.predict_file,is_training=False, do_lower_case=args.do_lower_case, read_fn=read_squad_examples,convert_fn=convert_fn) #Build Model and load checkpoint model_S = BertForQASimple(bert_config_S,args) #Load student if args.load_model_type=='bert': assert args.init_checkpoint_S is not None state_dict_S = torch.load(args.init_checkpoint_S, map_location='cpu') state_weight = {k[5:]:v for k,v in state_dict_S.items() if k.startswith('bert.')} missing_keys,_ = model_S.bert.load_state_dict(state_weight,strict=False) assert len(missing_keys)==0 elif args.load_model_type=='all': assert args.tuned_checkpoint_S is not None state_dict_S = torch.load(args.tuned_checkpoint_S,map_location='cpu') model_S.load_state_dict(state_dict_S) else: logger.info("Model is randomly initialized.") model_S.to(device) if args.local_rank != -1 or n_gpu > 1: if args.local_rank != -1: raise NotImplementedError elif n_gpu > 1: model_S = torch.nn.DataParallel(model_S) #,output_device=n_gpu-1) if args.do_train: #parameters params = list(model_S.named_parameters()) all_trainable_params = divide_parameters(params, lr=args.learning_rate) logger.info("Length of all_trainable_params: %d", len(all_trainable_params)) optimizer = BERTAdam(all_trainable_params,lr=args.learning_rate, warmup=args.warmup_proportion,t_total=num_train_steps,schedule=args.schedule, s_opt1=args.s_opt1, s_opt2=args.s_opt2, s_opt3=args.s_opt3) logger.info("***** Running training *****") logger.info(" Num orig examples = %d", len(train_examples)) logger.info(" Num split examples = %d", len(train_features)) logger.info(" Forward batch size = %d", forward_batch_size) logger.info(" Num backward steps = %d", num_train_steps) ########### DISTILLATION ########### train_config = TrainingConfig( gradient_accumulation_steps = args.gradient_accumulation_steps, ckpt_frequency = args.ckpt_frequency, log_dir = args.output_dir, output_dir = args.output_dir, device = args.device) distiller = BasicTrainer(train_config = train_config, model = model_S, adaptor = BertForQASimpleAdaptorTraining) all_input_ids = torch.tensor([f.input_ids for f in train_features], dtype=torch.long) all_input_mask = torch.tensor([f.input_mask for f in train_features], dtype=torch.long) all_doc_mask = torch.tensor([f.doc_mask for f in train_features], dtype=torch.float) all_segment_ids = torch.tensor([f.segment_ids for f in train_features], dtype=torch.long) all_start_positions = torch.tensor([f.start_position for f in train_features], dtype=torch.long) all_end_positions = torch.tensor([f.end_position for f in train_features], dtype=torch.long) train_dataset = TensorDataset(all_input_ids, all_segment_ids, all_input_mask, all_doc_mask, all_start_positions, all_end_positions) if args.local_rank == -1: train_sampler = RandomSampler(train_dataset) else: raise NotImplementedError train_dataloader = DataLoader(train_dataset, sampler=train_sampler, batch_size=args.forward_batch_size,drop_last=True) callback_func = partial(predict, eval_examples=eval_examples, eval_features=eval_features, args=args) with distiller: distiller.train(optimizer, scheduler=None, dataloader=train_dataloader, num_epochs=args.num_train_epochs, callback=callback_func) if not args.do_train and args.do_predict: res = predict(model_S,eval_examples,eval_features,step=0,args=args) print (res) if __name__ == "__main__": main()
1.765625
2
tests/test_models/test_components/test_discriminators/test_light_cnn.py
ChenShuwei1001/mmediting
0
488
import pytest import torch from mmedit.models.builder import build_component from mmedit.models.components.discriminators.light_cnn import MaxFeature def test_max_feature(): # cpu conv2d = MaxFeature(16, 16, filter_type='conv2d') x1 = torch.rand(3, 16, 16, 16) y1 = conv2d(x1) assert y1.shape == (3, 16, 16, 16) linear = MaxFeature(16, 16, filter_type='linear') x2 = torch.rand(3, 16) y2 = linear(x2) assert y2.shape == (3, 16) # gpu if torch.cuda.is_available(): x1 = x1.cuda() x2 = x2.cuda() conv2d = conv2d.cuda() linear = linear.cuda() y1 = conv2d(x1) assert y1.shape == (3, 16, 16, 16) y2 = linear(x2) assert y2.shape == (3, 16) # filter_type should be conv2d or linear with pytest.raises(ValueError): MaxFeature(12, 12, filter_type='conv1d') def test_light_cnn(): cfg = dict(type='LightCNN', in_channels=3) net = build_component(cfg) net.init_weights(pretrained=None) # cpu inputs = torch.rand((2, 3, 128, 128)) output = net(inputs) assert output.shape == (2, 1) # gpu if torch.cuda.is_available(): net.init_weights(pretrained=None) net = net.cuda() output = net(inputs.cuda()) assert output.shape == (2, 1) # pretrained should be str or None with pytest.raises(TypeError): net.init_weights(pretrained=[1])
1.492188
1
basic_stats.py/basic_stats.py
RahmB/basic_stats
0
520
# Import the matplotlib module here. No other modules should be used. # Import plotting library import matplotlib.pyplot as plt #import.... from os import * # Import Numpy import numpy as np def mean(my_list): # This is the defintion in the head. i = 0 my_sum = 0 for number in my_list: my_sum = my_sum + my_list[i] i+=1 mu = my_sum / i print('mean = ' + str(mu)) return mu def sd(my_list): j = 0 sigma = 0 my_sumsd = 0 mu = mean(my_list) for number in my_list: my_sumsd = my_sumsd + (my_list[j] - mu)**2 j +=1 sigma = (my_sumsd/j)**(.5) print('standard deviation = ' + str(sigma)) return sigma def norm(my_list): k = 0 l = 0 mu = mean(my_list) sigma = sd(my_list) for number in my_list: if abs(my_list[l] - mu) < sigma: k += 1 l += 1 else: l += 1 dist = k / l return dist def is_norm(my_list): dist = norm(my_list) if 0.66 < dist < 0.70: print('Data is normally distributed') return True else: print('Data is not normally distributed') return False def is_skew(my_list): m = 0 skew = 0 sumsk = 0 mu = mean(my_list) sigma = sd(my_list) for numbers in my_list: sumsk = (my_list[m] - mu)**3 + sumsk m +=1 skew = sumsk /(len(my_list)*sigma**3) print('skewness = ' + str(skew)) if skew == 0: print('skewness = 0, therefore sample is normally distributed') else: print('skewness =/= 0, therefore sample is not normally distributed') def graph(my_list): plt.hist(my_list,density=True, facecolor='b') sigma = sd(my_list) #stores standard deviation mu = mean(my_list) #stores mean plt.title('my_list Histogram') plt.xlabel('Number') plt.ylabel('Probability') plt.xlim(mu - 4*sigma, mu + 4*sigma) plt.grid(True) plt.show() def stats(my_list): mu = mean(my_list) std = sd(my_list) dist = norm(my_list) graph(my_list) is_norm(my_list) is_skew(my_list) return (mu, std, dist)
3.03125
3
starry/_core/ops/lib/include/oblate/tests/test_derivs.py
rodluger/starry
116
560
import oblate import numpy as np import pytest # TODO!
0.445313
0
process_script/stat.py
vitorebatista/AVEMH
2
616
import numpy as np import pandas as pd import sys markets = ["hangseng", "dax", "ftse", "sp", "nikkei"] market = markets[int(sys.argv[1])-1] # read GD data file dat = pd.read_csv("./num_res/{}.GD.csv".format(market)) # split into two experiments exp1_GD = dat[dat.columns[:5]] exp2_GD = dat[dat.columns[5:]] # calculate statistics stat1_GD = pd.DataFrame([exp1_GD.min(), exp1_GD.median(), exp1_GD.std()]) stat1_GD.index = ["Best", "Median", "Std."] stat2_GD = pd.DataFrame([exp2_GD.min(), exp2_GD.median(), exp2_GD.std()]) stat2_GD.index = ["Best", "Median", "Std."] # find best and second best algorithm meds1_GD = stat1_GD.loc["Median"].sort_values() best1_GD = list(meds1_GD.index[:2]) meds2_GD = stat2_GD.loc["Median"].sort_values() best2_GD = list(meds2_GD.index[:2]) print("{}.GD:".format(market), best1_GD[0], best1_GD[1]) # print("{}.GD:".format(market), best2_GD[0], best2_GD[1]) # TODO: check error # read Spacing data file dat = pd.read_csv("./num_res/{}.Spacing.csv".format(market)) # split into two experiments exp1_Spacing = dat[dat.columns[:5]] exp2_Spacing = dat[dat.columns[5:]] # calculate statistics stat1_Spacing = pd.DataFrame( [exp1_Spacing.min(), exp1_Spacing.median(), exp1_Spacing.std()]) stat1_Spacing.index = ["Best", "Median", "Std."] stat2_Spacing = pd.DataFrame( [exp2_Spacing.min(), exp2_Spacing.median(), exp2_Spacing.std()]) stat2_Spacing.index = ["Best", "Median", "Std."] # find best and second best algorithm meds1_Spacing = stat1_Spacing.loc["Median"].sort_values() best1_Spacing = list(meds1_Spacing.index[:2]) meds2_Spacing = stat2_Spacing.loc["Median"].sort_values() best2_Spacing = list(meds2_Spacing.index[:2]) print("{}.Spacing:".format(market), best1_Spacing[0], best1_Spacing[1]) # print("{}.Spacing:".format(market), best2_Spacing[0], best2_Spacing[1]) # TODO: check error # read MaxSpread data file dat = pd.read_csv("./num_res/{}.MaxSpread.csv".format(market)) # split into two experiments exp1_MaxSpread = dat[dat.columns[:5]] exp2_MaxSpread = dat[dat.columns[5:]] # calculate statistics stat1_MaxSpread = pd.DataFrame( [exp1_MaxSpread.max(), exp1_MaxSpread.median(), exp1_MaxSpread.std()]) stat1_MaxSpread.index = ["Best", "Median", "Std."] stat2_MaxSpread = pd.DataFrame( [exp2_MaxSpread.max(), exp2_MaxSpread.median(), exp2_MaxSpread.std()]) stat2_MaxSpread.index = ["Best", "Median", "Std."] # find best and second best algorithm meds1_MaxSpread = stat1_MaxSpread.loc["Median"].sort_values(ascending=False) best1_MaxSpread = list(meds1_MaxSpread.index[:2]) meds2_MaxSpread = stat2_MaxSpread.loc["Median"].sort_values(ascending=False) best2_MaxSpread = list(meds2_MaxSpread.index[:2]) print("{}.MaxSpread:".format(market), best1_MaxSpread[0], best1_MaxSpread[1]) # print("{}.MaxSpread:".format(market), best2_MaxSpread[0], best2_MaxSpread[1]) # TODO: check error # read Delta data file dat = pd.read_csv("./num_res/{}.Delta.csv".format(market)) # split into two experiments exp1_Delta = dat[dat.columns[:5]] exp2_Delta = dat[dat.columns[5:]] # calculate statistics stat1_Delta = pd.DataFrame( [exp1_Delta.min(), exp1_Delta.median(), exp1_Delta.std()]) stat1_Delta.index = ["Best", "Median", "Std."] stat2_Delta = pd.DataFrame( [exp2_Delta.min(), exp2_Delta.median(), exp2_Delta.std()]) stat2_Delta.index = ["Best", "Median", "Std."] # find best and second best algorithm meds1_Delta = stat1_Delta.loc["Median"].sort_values() best1_Delta = list(meds1_Delta.index[:2]) meds2_Delta = stat2_Delta.loc["Median"].sort_values() best2_Delta = list(meds2_Delta.index[:2]) print("{}.Delta:".format(market), 
best1_Delta[0], best1_Delta[1]) # print("{}.Delta:".format(market), best2_Delta[0], best2_Delta[1]) # TODO: check error # read IGD data file dat = pd.read_csv("./num_res/{}.IGD.csv".format(market)) # split into two experiments exp1_IGD = dat[dat.columns[:5]] exp2_IGD = dat[dat.columns[5:]] # calculate statistics stat1_IGD = pd.DataFrame([exp1_IGD.min(), exp1_IGD.median(), exp1_IGD.std()]) stat1_IGD.index = ["Best", "Median", "Std."] stat2_IGD = pd.DataFrame([exp2_IGD.min(), exp2_IGD.median(), exp2_IGD.std()]) stat2_IGD.index = ["Best", "Median", "Std."] # find best and second best algorithm meds1_IGD = stat1_IGD.loc["Median"].sort_values() best1_IGD = list(meds1_IGD.index[:2]) meds2_IGD = stat2_IGD.loc["Median"].sort_values() best2_IGD = list(meds2_IGD.index[:2]) print("{}.IGD:".format(market), best1_IGD[0], best1_IGD[1]) # print("{}.IGD:".format(market), best2_IGD[0], best2_IGD[1]) # TODO: check error # read Hypervolume data file dat = pd.read_csv("./num_res/{}.Hypervolume.csv".format(market)) # split into two experiments exp1_Hypervolume = dat[dat.columns[:5]] exp2_Hypervolume = dat[dat.columns[5:]] # calculate statistics stat1_Hypervolume = pd.DataFrame( [exp1_Hypervolume.max(), exp1_Hypervolume.median(), exp1_Hypervolume.std()]) stat1_Hypervolume.index = ["Best", "Median", "Std."] stat2_Hypervolume = pd.DataFrame( [exp2_Hypervolume.max(), exp2_Hypervolume.median(), exp2_Hypervolume.std()]) stat2_Hypervolume.index = ["Best", "Median", "Std."] # find best and second best algorithm meds1_Hypervolume = stat1_Hypervolume.loc["Median"].sort_values( ascending=False) best1_Hypervolume = list(meds1_Hypervolume.index[:2]) meds2_Hypervolume = stat2_Hypervolume.loc["Median"].sort_values( ascending=False) best2_Hypervolume = list(meds2_Hypervolume.index[:2]) print("{}.Hypervolume:".format(market), best1_Hypervolume[0], best1_Hypervolume[1]) # print("{}.Hypervolume:".format(market), # best2_Hypervolume[0], best2_Hypervolume[1]) # TODO: check error print("{}\n----------------------------------------------".format(market)) pd.options.display.float_format = '{:.2e}'.format stat1_overall = pd.concat( [stat1_GD, stat1_Spacing, stat1_MaxSpread, stat1_Delta, stat1_IGD, stat1_Hypervolume]) stat2_overall = pd.concat( [stat2_GD, stat2_Spacing, stat2_MaxSpread, stat2_Delta, stat2_IGD, stat2_Hypervolume]) arrays = [["GD", "GD", "GD", "Spacing", "Spacing", "Spacing", "MaxSpread", "MaxSpread", "MaxSpread", "Delta", "Delta", "Delta", "IGD", "IGD", "IGD", "Hypervolume", "Hypervolume", "Hypervolume"], stat1_overall.index ] index = pd.MultiIndex.from_arrays(arrays, names=["Metric", ""]) stat1_overall.index = index stat2_overall.index = index print(stat1_overall) print("----------------------------------------------") print(stat2_overall)
2.421875
2
app/rss_feeder_api/migrations/0003_auto_20200813_1623.py
RSaab/rss-scraper
0
632
# Generated by Django 3.1 on 2020-08-13 16:23 from django.db import migrations, models import django.utils.timezone class Migration(migrations.Migration): dependencies = [ ('rss_feeder_api', '0002_feed_subtitle'), ] operations = [ migrations.AlterModelOptions( name='entry', options={'ordering': ('-updated_at',), 'verbose_name_plural': 'entries'}, ), migrations.AlterModelOptions( name='feed', options={'ordering': ('-updated_at',), 'verbose_name': 'Feed', 'verbose_name_plural': 'Feeds'}, ), migrations.AddField( model_name='entry', name='created_at', field=models.DateTimeField(auto_now_add=True, default=django.utils.timezone.now), preserve_default=False, ), migrations.AddField( model_name='entry', name='updated_at', field=models.DateTimeField(auto_now=True), ), migrations.AlterUniqueTogether( name='entry', unique_together={('guid',)}, ), ]
1.015625
1
httprunner/compat.py
panyuan209/httprunner
0
656
""" This module handles compatibility issues between testcase format v2 and v3. 解决httprunner2 和 3 之间测试用例兼容性问题 """ import os import sys from typing import List, Dict, Text, Union, Any from loguru import logger from httprunner import exceptions from httprunner.loader import load_project_meta, convert_relative_project_root_dir from httprunner.parser import parse_data from httprunner.utils import sort_dict_by_custom_order def convert_variables( raw_variables: Union[Dict, List, Text], test_path: Text ) -> Dict[Text, Any]: if isinstance(raw_variables, Dict): return raw_variables if isinstance(raw_variables, List): # [{"var1": 1}, {"var2": 2}] variables: Dict[Text, Any] = {} for var_item in raw_variables: if not isinstance(var_item, Dict) or len(var_item) != 1: raise exceptions.TestCaseFormatError( f"Invalid variables format: {raw_variables}" ) variables.update(var_item) return variables elif isinstance(raw_variables, Text): # get variables by function, e.g. ${get_variables()} project_meta = load_project_meta(test_path) variables = parse_data(raw_variables, {}, project_meta.functions) return variables else: raise exceptions.TestCaseFormatError( f"Invalid variables format: {raw_variables}" ) def _convert_jmespath(raw: Text) -> Text: if not isinstance(raw, Text): raise exceptions.TestCaseFormatError(f"Invalid jmespath extractor: {raw}") # content.xx/json.xx => body.xx if raw.startswith("content"): raw = f"body{raw[len('content'):]}" elif raw.startswith("json"): raw = f"body{raw[len('json'):]}" raw_list = [] for item in raw.split("."): if "-" in item: # add quotes for field with separator # e.g. headers.Content-Type => headers."Content-Type" item = item.strip('"') raw_list.append(f'"{item}"') elif item.isdigit(): # convert lst.0.name to lst[0].name if len(raw_list) == 0: logger.error(f"Invalid jmespath: {raw}") sys.exit(1) last_item = raw_list.pop() item = f"{last_item}[{item}]" raw_list.append(item) else: raw_list.append(item) return ".".join(raw_list) def _convert_extractors(extractors: Union[List, Dict]) -> Dict: """ convert extract list(v2) to dict(v3) Args: extractors: [{"varA": "content.varA"}, {"varB": "json.varB"}] Returns: {"varA": "body.varA", "varB": "body.varB"} """ v3_extractors: Dict = {} if isinstance(extractors, List): # [{"varA": "content.varA"}, {"varB": "json.varB"}] for extractor in extractors: if not isinstance(extractor, Dict): logger.error(f"Invalid extractor: {extractors}") sys.exit(1) for k, v in extractor.items(): v3_extractors[k] = v elif isinstance(extractors, Dict): # {"varA": "body.varA", "varB": "body.varB"} v3_extractors = extractors else: logger.error(f"Invalid extractor: {extractors}") sys.exit(1) for k, v in v3_extractors.items(): v3_extractors[k] = _convert_jmespath(v) return v3_extractors def _convert_validators(validators: List) -> List: for v in validators: if "check" in v and "expect" in v: # format1: {"check": "content.abc", "assert": "eq", "expect": 201} v["check"] = _convert_jmespath(v["check"]) elif len(v) == 1: # format2: {'eq': ['status_code', 201]} comparator = list(v.keys())[0] v[comparator][0] = _convert_jmespath(v[comparator][0]) return validators def _sort_request_by_custom_order(request: Dict) -> Dict: custom_order = [ "method", "url", "params", "headers", "cookies", "data", "json", "files", "timeout", "allow_redirects", "proxies", "verify", "stream", "auth", "cert", ] return sort_dict_by_custom_order(request, custom_order) def _sort_step_by_custom_order(step: Dict) -> Dict: custom_order = [ "name", "variables", "request", "testcase", 
"setup_hooks", "teardown_hooks", "extract", "validate", "validate_script", ] return sort_dict_by_custom_order(step, custom_order) def _ensure_step_attachment(step: Dict) -> Dict: test_dict = { "name": step["name"], } if "variables" in step: test_dict["variables"] = step["variables"] if "setup_hooks" in step: test_dict["setup_hooks"] = step["setup_hooks"] if "teardown_hooks" in step: test_dict["teardown_hooks"] = step["teardown_hooks"] if "extract" in step: test_dict["extract"] = _convert_extractors(step["extract"]) if "export" in step: test_dict["export"] = step["export"] if "validate" in step: if not isinstance(step["validate"], List): raise exceptions.TestCaseFormatError( f'Invalid teststep validate: {step["validate"]}' ) test_dict["validate"] = _convert_validators(step["validate"]) if "validate_script" in step: test_dict["validate_script"] = step["validate_script"] return test_dict def ensure_testcase_v3_api(api_content: Dict) -> Dict: logger.info("convert api in v2 to testcase format v3") teststep = { "request": _sort_request_by_custom_order(api_content["request"]), } teststep.update(_ensure_step_attachment(api_content)) teststep = _sort_step_by_custom_order(teststep) config = {"name": api_content["name"]} extract_variable_names: List = list(teststep.get("extract", {}).keys()) if extract_variable_names: config["export"] = extract_variable_names return { "config": config, "teststeps": [teststep], } def ensure_testcase_v3(test_content: Dict) -> Dict: logger.info("ensure compatibility with testcase format v2") v3_content = {"config": test_content["config"], "teststeps": []} if "teststeps" not in test_content: logger.error(f"Miss teststeps: {test_content}") sys.exit(1) if not isinstance(test_content["teststeps"], list): logger.error( f'teststeps should be list type, got {type(test_content["teststeps"])}: {test_content["teststeps"]}' ) sys.exit(1) for step in test_content["teststeps"]: teststep = {} if "request" in step: teststep["request"] = _sort_request_by_custom_order(step.pop("request")) elif "api" in step: teststep["testcase"] = step.pop("api") elif "testcase" in step: teststep["testcase"] = step.pop("testcase") else: raise exceptions.TestCaseFormatError(f"Invalid teststep: {step}") teststep.update(_ensure_step_attachment(step)) teststep = _sort_step_by_custom_order(teststep) v3_content["teststeps"].append(teststep) return v3_content def ensure_cli_args(args: List) -> List: """ ensure compatibility with deprecated cli args in v2 """ # remove deprecated --failfast if "--failfast" in args: logger.warning(f"remove deprecated argument: --failfast") args.pop(args.index("--failfast")) # convert --report-file to --html if "--report-file" in args: logger.warning(f"replace deprecated argument --report-file with --html") index = args.index("--report-file") args[index] = "--html" args.append("--self-contained-html") # keep compatibility with --save-tests in v2 if "--save-tests" in args: logger.warning( f"generate conftest.py keep compatibility with --save-tests in v2" ) args.pop(args.index("--save-tests")) _generate_conftest_for_summary(args) return args def _generate_conftest_for_summary(args: List): for arg in args: if os.path.exists(arg): test_path = arg # FIXME: several test paths maybe specified break else: logger.error(f"No valid test path specified! \nargs: {args}") sys.exit(1) conftest_content = '''# NOTICE: Generated By HttpRunner. 
import json import os import time import pytest from loguru import logger from httprunner.utils import get_platform, ExtendJSONEncoder @pytest.fixture(scope="session", autouse=True) def session_fixture(request): """setup and teardown each task""" logger.info(f"start running testcases ...") start_at = time.time() yield logger.info(f"task finished, generate task summary for --save-tests") summary = { "success": True, "stat": { "testcases": {"total": 0, "success": 0, "fail": 0}, "teststeps": {"total": 0, "failures": 0, "successes": 0}, }, "time": {"start_at": start_at, "duration": time.time() - start_at}, "platform": get_platform(), "details": [], } for item in request.node.items: testcase_summary = item.instance.get_summary() summary["success"] &= testcase_summary.success summary["stat"]["testcases"]["total"] += 1 summary["stat"]["teststeps"]["total"] += len(testcase_summary.step_datas) if testcase_summary.success: summary["stat"]["testcases"]["success"] += 1 summary["stat"]["teststeps"]["successes"] += len( testcase_summary.step_datas ) else: summary["stat"]["testcases"]["fail"] += 1 summary["stat"]["teststeps"]["successes"] += ( len(testcase_summary.step_datas) - 1 ) summary["stat"]["teststeps"]["failures"] += 1 testcase_summary_json = testcase_summary.dict() testcase_summary_json["records"] = testcase_summary_json.pop("step_datas") summary["details"].append(testcase_summary_json) summary_path = r"{{SUMMARY_PATH_PLACEHOLDER}}" summary_dir = os.path.dirname(summary_path) os.makedirs(summary_dir, exist_ok=True) with open(summary_path, "w", encoding="utf-8") as f: json.dump(summary, f, indent=4, ensure_ascii=False, cls=ExtendJSONEncoder) logger.info(f"generated task summary: {summary_path}") ''' project_meta = load_project_meta(test_path) project_root_dir = project_meta.RootDir conftest_path = os.path.join(project_root_dir, "conftest.py") test_path = os.path.abspath(test_path) logs_dir_path = os.path.join(project_root_dir, "logs") test_path_relative_path = convert_relative_project_root_dir(test_path) if os.path.isdir(test_path): file_foder_path = os.path.join(logs_dir_path, test_path_relative_path) dump_file_name = "all.summary.json" else: file_relative_folder_path, test_file = os.path.split(test_path_relative_path) file_foder_path = os.path.join(logs_dir_path, file_relative_folder_path) test_file_name, _ = os.path.splitext(test_file) dump_file_name = f"{test_file_name}.summary.json" summary_path = os.path.join(file_foder_path, dump_file_name) conftest_content = conftest_content.replace( "{{SUMMARY_PATH_PLACEHOLDER}}", summary_path ) dir_path = os.path.dirname(conftest_path) if not os.path.exists(dir_path): os.makedirs(dir_path) with open(conftest_path, "w", encoding="utf-8") as f: f.write(conftest_content) logger.info("generated conftest.py to generate summary.json") def ensure_path_sep(path: Text) -> Text: """ ensure compatibility with different path separators of Linux and Windows """ if "/" in path: path = os.sep.join(path.split("/")) if "\\" in path: path = os.sep.join(path.split("\\")) return path
1.75
2
tests/test_mr_uplift.py
Ibotta/mr_uplift
48
680
import numpy as np import pytest from mr_uplift.dataset.data_simulation import get_no_noise_data, get_simple_uplift_data, get_observational_uplift_data_1 from mr_uplift.mr_uplift import MRUplift, get_t_data from mr_uplift.keras_model_functionality import prepare_data_optimized_loss import sys import pandas as pd class TestMRUplift(object): def test_get_t_data(self): num_obs_1 = 10 num_obs_2 = 3 test_1 = get_t_data(0, num_obs_1) test_2 = get_t_data(np.array([0, 1]), num_obs_2) test_1_values = np.zeros(num_obs_1).reshape(-1, 1) test_2_values = np.concatenate([np.zeros(num_obs_2).reshape(-1, 1), np.ones(num_obs_2).reshape(-1, 1)], axis=1) assert np.mean(test_1 == test_1_values) == 1 assert np.mean(test_2 == test_2_values) == 1 def test_model_mean_outputs(self): true_ATE = np.array([[0, 0], [1, .5]]) rmse_tolerance = .05 num_obs = 10000 y_no_noise, x_no_noise, tmt_no_noise = get_no_noise_data(num_obs) uplift_model = MRUplift() uplift_model.fit(x_no_noise, y_no_noise, tmt_no_noise.reshape(-1, 1), n_jobs=1) oos_ice = uplift_model.predict_ice(response_transformer = True) assert np.sqrt(np.mean((oos_ice.mean(axis=1) -true_ATE)**2)) < rmse_tolerance def test_model_pred_oos_shapes(self): num_obs = 1000 y, x, t = get_simple_uplift_data(num_obs) t = np.concatenate([t.reshape(-1, 1), np.random.binomial(1, .5, num_obs).reshape(-1, 1)], axis=1) param_grid = dict(num_nodes=[8], dropout=[.1], activation=[ 'relu'], num_layers=[1], epochs=[1], batch_size=[1000]) uplift_model = MRUplift() uplift_model.fit(x, y, t, param_grid = param_grid, n_jobs=1) x_1 = x.copy() x_1 = pd.DataFrame(x_1) x_1.columns = ['var_'+str(x) for x in range(x.shape[1])] y_1 = y.copy() y_1 = pd.DataFrame(y_1) y_1.columns = ['var_'+str(x) for x in range(y.shape[1])] uplift_model_named = MRUplift() uplift_model_named.fit(x_1, y_1, t, param_grid = param_grid, n_jobs=1) assert uplift_model.predict_ice().shape == ( np.unique(t, axis=0).shape[0], num_obs * .7, y.shape[1]) assert uplift_model.predict_ice(x=x).shape == (np.unique(t,axis=0).shape[0], num_obs, y.shape[1]) assert uplift_model.get_erupt_curves() assert uplift_model.get_erupt_curves(x = x, y = y, t = t) assert uplift_model_named.get_erupt_curves() def test_model_pred_oos_shapes_single_col_tmt(self): num_obs = 1000 y, x, t = get_simple_uplift_data(num_obs) t = t.reshape(-1, 1) param_grid = dict(num_nodes=[8], dropout=[.1], activation=[ 'relu'], num_layers=[1], epochs=[1], batch_size=[1000]) uplift_model = MRUplift() uplift_model.fit(x, y, t, param_grid = param_grid, n_jobs=1) assert uplift_model.predict_ice().shape == ( np.unique(t, axis=0).shape[0], num_obs * .7, y.shape[1]) assert uplift_model.predict_ice(x=x).shape == (np.unique(t,axis=0).shape[0], num_obs, y.shape[1]) assert uplift_model.get_erupt_curves() assert uplift_model.get_erupt_curves(x = x, y = y, t = t) def test_model_pred_oos_shapes_single_col_tmt_propensity(self): num_obs = 1000 y, x, t = get_simple_uplift_data(num_obs) t = t.reshape(-1, 1) param_grid = dict(num_nodes=[8], dropout=[.1], activation=['relu'], num_layers=[2], epochs=[1], batch_size=[100], alpha = [.5], copy_several_times = [1]) uplift_model = MRUplift() uplift_model.fit(x, y, t, param_grid = param_grid, n_jobs=1, optimized_loss = True, use_propensity = True) assert uplift_model.predict_ice().shape == ( np.unique(t, axis=0).shape[0], num_obs * .7, y.shape[1]) assert uplift_model.predict_ice(x=x).shape == (np.unique(t,axis=0).shape[0], num_obs, y.shape[1]) assert uplift_model.get_erupt_curves() assert uplift_model.get_erupt_curves(x = x, y = y, t = t) def 
test_prepare_data_optimized_loss_one_col_tmt(self): num_obs = 1000 y, x, t = get_simple_uplift_data(num_obs) t = t.reshape(len(t),1) unique_treatments = np.unique(t, axis = 0) masks = np.ones(num_obs).reshape(num_obs,1) x, utility_weights, missing_utility, missing_y_mat, masks, weights = prepare_data_optimized_loss(x,y,t, masks ,unique_treatments) assert(utility_weights.shape == (num_obs, y.shape[1])) assert(missing_y_mat.shape == (num_obs, unique_treatments.shape[0], y.shape[1])) for q in range(unique_treatments.shape[0]): assert( ((missing_utility[:,q]==0) == (missing_y_mat[:,q,0] == -999)).mean() ==1 ) def test_prepare_data_optimized_loss_two_col_tmt(self): num_obs = 1000 y, x, t = get_simple_uplift_data(num_obs) t = np.concatenate([t.reshape(-1, 1), np.random.binomial(1, .5, num_obs).reshape(-1, 1)], axis=1) unique_treatments = np.unique(t, axis = 0) masks = np.ones(num_obs*len(unique_treatments)).reshape(num_obs,len(unique_treatments)) x, utility_weights, missing_utility, missing_y_mat, masks, weights = prepare_data_optimized_loss(x,y,t,masks, unique_treatments) assert(utility_weights.shape == (num_obs, y.shape[1])) assert(missing_y_mat.shape == (num_obs, unique_treatments.shape[0], y.shape[1])) for q in range(unique_treatments.shape[0]): assert( ((missing_utility[:,q]==0) == (missing_y_mat[:,q,0] == -999)).mean() ==1 ) def test_model_optim_mean_outputs(self): true_ATE = np.array([[0, 0], [1, .5]]) rmse_tolerance = .05 num_obs = 10000 param_grid = dict(num_nodes=[8], dropout=[.1], activation=['relu'], num_layers=[2], epochs=[30], batch_size=[100]) y_no_noise, x_no_noise, tmt_no_noise = get_no_noise_data(num_obs) uplift_model = MRUplift() uplift_model.fit(x_no_noise, y_no_noise, tmt_no_noise.reshape(-1, 1), n_jobs=1, param_grid = param_grid, optimized_loss = False) oos_ice = uplift_model.predict_ice(response_transformer = True) assert np.sqrt(np.mean((oos_ice.mean(axis=1) - true_ATE)**2)) < rmse_tolerance def test_model_get_random_erupts(self): true_ATE = np.array([[0, 0], [1, .5]]) rmse_tolerance = .05 num_obs = 10000 param_grid = dict(num_nodes=[8], dropout=[.1], activation=['relu'], num_layers=[2], epochs=[30], batch_size=[100], alpha = [.5], copy_several_times = [2]) y_no_noise, x_no_noise, tmt_no_noise = get_no_noise_data(num_obs) uplift_model = MRUplift() uplift_model.fit(x_no_noise, y_no_noise, tmt_no_noise.reshape(-1, 1), n_jobs=1, param_grid = param_grid, optimized_loss = True) oos_re = uplift_model.get_random_erupts() uplift_model_propensity = MRUplift() uplift_model_propensity.fit(x_no_noise, y_no_noise, tmt_no_noise.reshape(-1, 1), n_jobs=1, param_grid = param_grid, optimized_loss = True, use_propensity = True) oos_re_propensity = uplift_model_propensity.get_random_erupts() assert oos_re['mean'].iloc[0] > 0 assert oos_re_propensity['mean'].iloc[0] > 0 def test_varimp(self): num_obs = 10000 param_grid = dict(num_nodes=[8], dropout=[.1], activation=['relu'], num_layers=[2], epochs=[30], batch_size=[100]) y, x, t = get_simple_uplift_data(num_obs) uplift_model = MRUplift() uplift_model.fit(x, y, t.reshape(-1, 1), n_jobs=1, param_grid = param_grid) varimp = uplift_model.permutation_varimp(objective_weights = np.array([.7,-.3,0]).reshape(1,-1)) param_grid = dict(num_nodes=[8], dropout=[.1], activation=['relu'], num_layers=[2], epochs=[30], batch_size=[100], alpha = [.5], copy_several_times = [2]) uplift_model_propensity = MRUplift() uplift_model_propensity.fit(x, y, t.reshape(-1, 1), n_jobs=1, param_grid = param_grid, optimized_loss = True, use_propensity = True) 
varimp_propensity = uplift_model_propensity.permutation_varimp(objective_weights = np.array([.7,-.3,0]).reshape(1,-1)) assert varimp['permutation_varimp_metric'].iloc[0]>varimp['permutation_varimp_metric'].iloc[1] assert varimp_propensity['permutation_varimp_metric'].iloc[0]>varimp_propensity['permutation_varimp_metric'].iloc[1] def test_model_propensity(self): num_obs = 10000 TOLERANCE = .98 y, x, t, rule_assignment = get_observational_uplift_data_1(num_obs) param_grid = dict(num_nodes=[8], dropout=[.1], activation=['relu'], num_layers=[1], epochs=[20], batch_size=[512], alpha = [.9999,.99], copy_several_times = [1]) uplift_model = MRUplift() uplift_model.fit(x, y[:,0].reshape(-1,1), t, param_grid = param_grid, n_jobs=1, optimized_loss = True, use_propensity = True, test_size = 0) uplift_model.best_params_net y_test, x_test, t_test, rule_assignment_test = get_observational_uplift_data_1(num_obs) experiment_groups = np.zeros(num_obs)+2 experiment_groups[np.where(x_test[:,-2]<.5)[0]] = 1 experiment_groups[np.where(x_test[:,-2]<.33)[0]] = 0 experiment_groups[np.where(x_test[:,-1]>.8)[0]] = 3 optim_treatments_no_cuttoff = uplift_model.predict_optimal_treatments(x = x_test, use_propensity_score_cutoff = False) optim_treatments_cuttoff = uplift_model.predict_optimal_treatments(x = x_test, use_propensity_score_cutoff = True) optim_treatments_cuttoff_cat = optim_treatments_cuttoff.argmax(axis = 1) optim_treatments_no_cuttoff_cat = optim_treatments_no_cuttoff.argmax(axis = 1) correct_tmts_1 = np.array([x in [0,1] for x in optim_treatments_cuttoff_cat[np.where(experiment_groups == 0)[0]] ]).mean() correct_tmts_2 = np.array([x in [1,2] for x in optim_treatments_cuttoff_cat[np.where(experiment_groups == 1)[0]] ]).mean() correct_tmts_3 = np.array([x in [0,2] for x in optim_treatments_cuttoff_cat[np.where(experiment_groups == 2)[0]] ]).mean() correct_tmts_4 = np.array([x in [0] for x in optim_treatments_cuttoff_cat[np.where(experiment_groups == 3)[0]] ]).mean() correct_tmts_experiment_groups_1 = ((optim_treatments_cuttoff_cat[np.where(experiment_groups == 1)[0]] == 1) == x_test[np.where(experiment_groups == 1)[0],0]).mean() correct_tmts_no_cutoff = np.mean((optim_treatments_no_cuttoff_cat==1 ) == x_test[:,0]) assert correct_tmts_1>TOLERANCE assert correct_tmts_2>TOLERANCE assert correct_tmts_3>TOLERANCE assert correct_tmts_4>TOLERANCE assert correct_tmts_experiment_groups_1>TOLERANCE assert np.array_equal(optim_treatments_cuttoff_cat,optim_treatments_no_cuttoff_cat) is False assert correct_tmts_no_cutoff>TOLERANCE
1.671875
2
examples/pylab_examples/image_masked.py
pierre-haessig/matplotlib
16
688
#!/usr/bin/env python '''imshow with masked array input and out-of-range colors. The second subplot illustrates the use of BoundaryNorm to get a filled contour effect. ''' from pylab import * from numpy import ma import matplotlib.colors as colors delta = 0.025 x = y = arange(-3.0, 3.0, delta) X, Y = meshgrid(x, y) Z1 = bivariate_normal(X, Y, 1.0, 1.0, 0.0, 0.0) Z2 = bivariate_normal(X, Y, 1.5, 0.5, 1, 1) Z = 10 * (Z2-Z1) # difference of Gaussians # Set up a colormap: palette = cm.gray palette.set_over('r', 1.0) palette.set_under('g', 1.0) palette.set_bad('b', 1.0) # Alternatively, we could use # palette.set_bad(alpha = 0.0) # to make the bad region transparent. This is the default. # If you comment out all the palette.set* lines, you will see # all the defaults; under and over will be colored with the # first and last colors in the palette, respectively. Zm = ma.masked_where(Z > 1.2, Z) # By setting vmin and vmax in the norm, we establish the # range to which the regular palette color scale is applied. # Anything above that range is colored based on palette.set_over, etc. subplot(1,2,1) im = imshow(Zm, interpolation='bilinear', cmap=palette, norm = colors.Normalize(vmin = -1.0, vmax = 1.0, clip = False), origin='lower', extent=[-3,3,-3,3]) title('Green=low, Red=high, Blue=bad') colorbar(im, extend='both', orientation='horizontal', shrink=0.8) subplot(1,2,2) im = imshow(Zm, interpolation='nearest', cmap=palette, norm = colors.BoundaryNorm([-1, -0.5, -0.2, 0, 0.2, 0.5, 1], ncolors=256, clip = False), origin='lower', extent=[-3,3,-3,3]) title('With BoundaryNorm') colorbar(im, extend='both', spacing='proportional', orientation='horizontal', shrink=0.8) show()
2.21875
2
Sec_10_expr_lambdas_fun_integradas/f_generators.py
PauloAlexSilva/Python
0
736
"""" Generator Expression Em aulas anteriores foi abordado: - List Comprehension; - Dictionary Comprehension; - Set Comprehension. Não foi abordado: - Tuple Comprehension ... porque elas se chamam Generators nomes = ['Carlos', 'Camila', 'Carla', 'Cristiana', 'Cristina', 'Vanessa'] print(any8[nomes[0] == 'C' for nome in nomes]) # Poderia ter sido feito usando os Generators nomes = ['Carlos', 'Camila', 'Carla', 'Cristiana', 'Cristina', 'Vanessa'] print(any(nome[0] == 'C' for nome in nomes)) # List Comprehension res = [nome[0] == 'C' for nome in nomes] print(type(res)) print(res) # [True, True, True, True, True, False] # Generator - mais efeciente res2 = (nome[0] == 'C' for nome in nomes) print(type(res2)) print(res2) # O que faz a função de getsizeof()? -> retorna a quantidade de bytes em memória do elemento # passado como parâmetro from sys import getsizeof # Mostra quantos bytes a string 'Paulo' está ocupando em memória. # Quanto maior a string mais espaço ocupa. print(getsizeof('Paulo')) print(getsizeof('Quanto maior a string mais espaço ocupa.')) print(getsizeof(9)) print(getsizeof(91)) print(getsizeof(12345667890)) print(getsizeof(True)) from sys import getsizeof # Gerando uma lista de números com List Comprehension list_comp = getsizeof([x * 10 for x in range(1000)]) # Gerando uma lista de números com Set Comprehension set_comp = getsizeof({x * 10 for x in range(1000)}) # Gerando uma lista de números com Dictionary Comprehension dic_comp = getsizeof({x: x * 10 for x in range(1000)}) # Gerando uma lista de números com Generator gen = getsizeof(x * 10 for x in range(1000)) print('Para fazer a mesma gastamos em memória: ') print(f'List Comprehension: {list_comp} bytes!') print(f'Set Comprehension: {set_comp} bytes!') print(f'Dictionary Comprehension: {dic_comp} bytes!') print(f'Generator Expression: {gen} bytes!') Para fazer a mesma gastamos em memória: List Comprehension: 8856 bytes! Set Comprehension: 32984 bytes! Dictionary Comprehension: 36960 bytes! Generator Expression: 112 bytes! """ # Posso iterar no Generator Expression? Sim gen = (x * 10 for x in range(1000)) print(gen) print(type(gen)) for num in gen: print(num)
2.65625
3
advanced/itertools_funcs.py
ariannasg/python3-essential-training
1
776
#!usr/bin/env python3 import itertools # itertools is a module that's not technically a set of built-in functions but # it is part of the standard library that comes with python. # it's useful for for creating and using iterators. def main(): print('some infinite iterators') # cycle iterator can be used to cycle over a collection over and over seq1 = ["Joe", "John", "Mike"] cycle1 = itertools.cycle(seq1) print(next(cycle1)) print(next(cycle1)) print(next(cycle1)) print(next(cycle1)) print(next(cycle1)) # use count to create a simple counter count1 = itertools.count(100, 3) print(next(count1)) print(next(count1)) print(next(count1)) print('some non-infinite iterators') values = [10, 5, 20, 30, 40, 50, 40, 30] # accumulate creates an iterator that accumulates/aggregates values print(list(itertools.accumulate(values))) # this defaults to addition print(list(itertools.accumulate(values, max))) print(list(itertools.accumulate(values, min))) # use chain to connect sequences together x = itertools.chain('ABCD', '1234') print(list(x)) # dropwhile and takewhile will return values until # a certain condition is met that stops them. they are similar to the # filter built-in function. # dropwhile will drop the values from the sequence as long as the # condition of the function is true and then returns the rest of values print(list(itertools.dropwhile(is_less_than_forty, values))) # takewhile will keep the values from the sequence as long as the # condition of the function is true and then stops giving data print(list(itertools.takewhile(is_less_than_forty, values))) def is_less_than_forty(x): return x < 40 if __name__ == "__main__": main() # CONSOLE OUTPUT: # some infinite iterators # Joe # John # Mike # Joe # John # 100 # 103 # 106 # some non-infinite iterators # [10, 15, 35, 65, 105, 155, 195, 225] # [10, 10, 20, 30, 40, 50, 50, 50] # [10, 5, 5, 5, 5, 5, 5, 5] # ['A', 'B', 'C', 'D', '1', '2', '3', '4'] # [40, 50, 40, 30] # [10, 5, 20, 30]
3.5
4