max_stars_repo_path: string
max_stars_repo_name: string
max_stars_count: int64
id: string
content: string
score: float64
int_score: int64
pypagai/models/model_lstm.py
gcouti/pypagAI
1
7
from keras import Model, Input
from keras.layers import Dense, concatenate, LSTM, Reshape, Permute, Embedding, Dropout, Convolution1D, Flatten
from keras.optimizers import Adam

from pypagai.models.base import KerasModel


class SimpleLSTM(KerasModel):
    """ Use a simple lstm neural network """

    @staticmethod
    def default_config():
        config = KerasModel.default_config()
        config['hidden'] = 32
        return config

    def __init__(self, cfg):
        super().__init__(cfg)
        self._cfg_ = cfg

    def _create_network_(self):
        hidden = self._cfg_['hidden']

        story = Input((self._story_maxlen, ), name='story')
        question = Input((self._query_maxlen, ), name='question')

        conc = concatenate([story, question],)
        conc = Reshape((1, int(conc.shape[1])))(conc)
        conc = Permute((2, 1))(conc)

        response = LSTM(hidden, dropout=0.2, recurrent_dropout=0.2)(conc)
        response = Dense(self._vocab_size, activation='softmax')(response)

        self._model = Model(inputs=[story, question], outputs=response)
        self._model.compile(optimizer=Adam(lr=2e-4), loss='sparse_categorical_crossentropy', metrics=['accuracy'])


class EmbedLSTM(KerasModel):
    """ Use a simple lstm neural network """

    @staticmethod
    def default_config():
        config = KerasModel.default_config()
        config['hidden'] = 32
        return config

    def __init__(self, cfg):
        super().__init__(cfg)
        self._cfg_ = cfg

    def _create_network_(self):
        hidden = self._cfg_['hidden']

        story = Input((self._story_maxlen, ), name='story')
        question = Input((self._query_maxlen, ), name='question')

        eb_story = Embedding(self._vocab_size, 64)(story)
        eb_story = Dropout(0.3)(eb_story)

        eb_question = Embedding(self._vocab_size, 64)(question)
        eb_question = Dropout(0.3)(eb_question)

        conc = concatenate([eb_story, eb_question], axis=1)

        response = LSTM(hidden, dropout=0.2, recurrent_dropout=0.2)(conc)
        response = Dense(self._vocab_size, activation='softmax')(response)

        self._model = Model(inputs=[story, question], outputs=response)
        self._model.compile(optimizer=Adam(lr=2e-4), loss='sparse_categorical_crossentropy', metrics=['accuracy'])


class ConvLSTM(KerasModel):
    """ Use a simple lstm neural network """

    @staticmethod
    def default_config():
        config = KerasModel.default_config()
        config['hidden'] = 32
        return config

    def __init__(self, model_cfg):
        super().__init__(model_cfg)
        self._cfg = model_cfg

    def _create_network_(self):
        hidden = self._cfg['hidden']

        story = Input((self._story_maxlen, ), name='story')
        question = Input((self._query_maxlen, ), name='question')

        eb_story = Embedding(self._vocab_size, 64)(story)
        eb_story = Convolution1D(64, 3, padding='same')(eb_story)
        eb_story = Convolution1D(32, 3, padding='same')(eb_story)
        eb_story = Convolution1D(16, 3, padding='same')(eb_story)
        # eb_story = Flatten()(eb_story)

        eb_question = Embedding(self._vocab_size, 64)(question)
        eb_question = Convolution1D(64, 3, padding='same')(eb_question)
        eb_question = Convolution1D(32, 3, padding='same')(eb_question)
        eb_question = Convolution1D(16, 3, padding='same')(eb_question)
        # eb_question = Flatten()(eb_question)

        conc = concatenate([eb_story, eb_question], axis=1)

        response = LSTM(hidden, dropout=0.2, recurrent_dropout=0.2)(conc)
        response = Dense(self._vocab_size, activation='softmax')(response)

        self._model = Model(inputs=[story, question], outputs=response)
        self._model.compile(optimizer=Adam(lr=2e-4), loss='sparse_categorical_crossentropy', metrics=['accuracy'])
2.46875
2
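Editor's note (not part of the dataset row above): SimpleLSTM feeds the concatenated story/question vector through Reshape and Permute so the LSTM sees a sequence of length story_maxlen + query_maxlen with one feature per step. A minimal NumPy sketch of that shape manipulation, with made-up lengths, for readers who want to check the tensor shapes without installing Keras:

import numpy as np

# Hypothetical lengths standing in for self._story_maxlen and self._query_maxlen.
story_maxlen, query_maxlen, batch = 5, 3, 2

story = np.zeros((batch, story_maxlen))
question = np.zeros((batch, query_maxlen))

# concatenate([story, question]) -> (batch, story_maxlen + query_maxlen)
conc = np.concatenate([story, question], axis=1)

# Reshape((1, L)) -> (batch, 1, L); Permute((2, 1)) -> (batch, L, 1),
# i.e. a length-L sequence of 1-dimensional features for the LSTM.
conc = conc.reshape(batch, 1, conc.shape[1])
conc = np.transpose(conc, (0, 2, 1))
print(conc.shape)  # (2, 8, 1)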
Object_detection_image.py
hiperus0988/pyao
1
23
######## Image Object Detection Using Tensorflow-trained Classifier ######### # # Author: <NAME> # Date: 1/15/18 # Description: # This program uses a TensorFlow-trained classifier to perform object detection. # It loads the classifier uses it to perform object detection on an image. # It draws boxes and scores around the objects of interest in the image. ## Some of the code is copied from Google's example at ## https://github.com/tensorflow/models/blob/master/research/object_detection/object_detection_tutorial.ipynb ## and some is copied from Dat Tran's example at ## https://github.com/datitran/object_detector_app/blob/master/object_detection_app.py ## but I changed it to make it more understandable to me. # Import packages import os import cv2 import numpy as np import tensorflow as tf import sys # This is needed since the notebook is stored in the object_detection folder. sys.path.append("..") # Import utilites from utils import label_map_util from utils import visualization_utils as vis_util # Name of the directory containing the object detection module we're using MODEL_NAME = 'inference_graph' IMAGE_NAME = 'test1.jpg' # Grab path to current working directory CWD_PATH = os.getcwd() # Path to frozen detection graph .pb file, which contains the model that is used # for object detection. PATH_TO_CKPT = os.path.join(CWD_PATH,MODEL_NAME,'frozen_inference_graph.pb') # Path to label map file PATH_TO_LABELS = os.path.join(CWD_PATH,'training','labelmap.pbtxt') # Path to image PATH_TO_IMAGE = os.path.join(CWD_PATH,IMAGE_NAME) # Number of classes the object detector can identify NUM_CLASSES = 6 # Load the label map. # Label maps map indices to category names, so that when our convolution # network predicts `5`, we know that this corresponds to `king`. # Here we use internal utility functions, but anything that returns a # dictionary mapping integers to appropriate string labels would be fine label_map = label_map_util.load_labelmap(PATH_TO_LABELS) categories = label_map_util.convert_label_map_to_categories(label_map, max_num_classes=NUM_CLASSES, use_display_name=True) category_index = label_map_util.create_category_index(categories) # Load the Tensorflow model into memory. detection_graph = tf.Graph() with detection_graph.as_default(): od_graph_def = tf.GraphDef() with tf.gfile.GFile(PATH_TO_CKPT, 'rb') as fid: serialized_graph = fid.read() od_graph_def.ParseFromString(serialized_graph) tf.import_graph_def(od_graph_def, name='') sess = tf.Session(graph=detection_graph) # Define input and output tensors (i.e. data) for the object detection classifier # Input tensor is the image image_tensor = detection_graph.get_tensor_by_name('image_tensor:0') # Output tensors are the detection boxes, scores, and classes # Each box represents a part of the image where a particular object was detected detection_boxes = detection_graph.get_tensor_by_name('detection_boxes:0') # Each score represents level of confidence for each of the objects. # The score is shown on the result image, together with the class label. detection_scores = detection_graph.get_tensor_by_name('detection_scores:0') detection_classes = detection_graph.get_tensor_by_name('detection_classes:0') # Number of objects detected num_detections = detection_graph.get_tensor_by_name('num_detections:0') # Load image using OpenCV and # expand image dimensions to have shape: [1, None, None, 3] # i.e. 
a single-column array, where each item in the column has the pixel RGB value image = cv2.imread(PATH_TO_IMAGE) image_expanded = np.expand_dims(image, axis=0) # Perform the actual detection by running the model with the image as input (boxes, scores, classes, num) = sess.run( [detection_boxes, detection_scores, detection_classes, num_detections], feed_dict={image_tensor: image_expanded}) # Draw the results of the detection (aka 'visulaize the results') vis_util.visualize_boxes_and_labels_on_image_array( image, np.squeeze(boxes), np.squeeze(classes).astype(np.int32), np.squeeze(scores), category_index, use_normalized_coordinates=True, line_thickness=8, min_score_thresh=0.60) # All the results have been drawn on image. Now display the image. cv2.imshow('Object detector', image) # Press any key to close the image cv2.waitKey(0) # Clean up cv2.destroyAllWindows()
2.90625
3
setup.py
giggslam/python-messengerbot-sdk
23
31
#!/usr/bin/env python
# -*- coding: utf-8 -*-

# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import re
import sys

from setuptools import setup
from setuptools.command.test import test as TestCommand

__version__ = ''

with open('facebookbot/__about__.py', 'r') as fd:
    reg = re.compile(r'__version__ = [\'"]([^\'"]*)[\'"]')
    for line in fd:
        m = reg.match(line)
        if m:
            __version__ = m.group(1)
            break


def _requirements():
    with open('requirements.txt', 'r') as fd:
        return [name.strip() for name in fd.readlines()]


with open('README.rst', 'r') as fd:
    long_description = fd.read()

setup(
    name="fbsdk",
    version=__version__,
    author="<NAME>",
    author_email="<EMAIL>",
    maintainer="<NAME>",
    maintainer_email="<EMAIL>",
    url="https://github.com/boompieman/fbsdk",
    description="Facebook Messaging API SDK for Python",
    long_description=long_description,
    license='Apache License 2.0',
    packages=[
        "facebookbot",
        "facebookbot.models"
    ],
    install_requires=_requirements(),
    classifiers=[
        "Development Status :: 5 - Production/Stable",
        "License :: OSI Approved :: Apache Software License",
        "Intended Audience :: Developers",
        "Programming Language :: Python :: 3",
        "Topic :: Software Development"
    ]
)
1.34375
1
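Editor's note: the setup.py above scrapes __version__ out of facebookbot/__about__.py with a regular expression instead of importing the package. A small self-contained sketch of the same pattern (the file contents below are hypothetical):

import re

about = '__version__ = "1.2.3"\n__author__ = "someone"\n'  # stand-in for __about__.py
reg = re.compile(r'__version__ = [\'"]([^\'"]*)[\'"]')

version = ''
for line in about.splitlines():
    m = reg.match(line)
    if m:
        version = m.group(1)  # -> "1.2.3"
        break
print(version)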
examples/mouse.py
ginkage/trackball-python
22
47
#!/usr/bin/env python
import time
import os
import math
from trackball import TrackBall

print("""Trackball: Mouse

Use the trackball as a mouse in Raspbian, with right-click
when the switch is pressed.

Press Ctrl+C to exit!
""")

trackball = TrackBall(interrupt_pin=4)
trackball.set_rgbw(0, 0, 0, 0)

# Check for xte (used to control mouse)
use_xte = os.system('which xte') == 0

if not use_xte:
    raise RuntimeError("xte not found. Did you sudo apt install xautomation?")

while True:
    up, down, left, right, switch, state = trackball.read()

    # Send movements and clicks to xte
    if switch:
        cmd = 'xte "mouseclick 1"'
        os.system(cmd)
    elif right or up or left or down:
        x = right - left
        x = math.copysign(x**2, x)
        y = down - up
        y = math.copysign(y**2, y)
        cmd = 'xte "mousermove {} {}"'.format(int(x), int(y))
        os.system(cmd)

    time.sleep(0.0001)
2.53125
3
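Editor's note: the script above shells out to xte through os.system for every event. If xautomation is installed, the same action strings can be issued with subprocess, which avoids spawning a shell each time; a minimal sketch (action strings copied from the script above):

import subprocess

def xte(action):
    # xte accepts one action per argument, e.g. "mouseclick 1" or "mousermove 5 5".
    subprocess.run(["xte", action], check=False)

xte("mousermove 10 0")  # move the pointer 10 px to the right
xte("mouseclick 1")     # button-1 click, as sent when the switch is pressed above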
python/ray/ml/tests/test_torch_trainer.py
mgelbart/ray
22
63
import pytest import torch import ray from ray.ml.predictors.integrations.torch import TorchPredictor from ray.ml.train.integrations.torch import TorchTrainer from ray import train from ray.ml.examples.pytorch.torch_linear_example import train_func as linear_train_func @pytest.fixture def ray_start_4_cpus(): address_info = ray.init(num_cpus=4) yield address_info # The code after the yield will run as teardown code. ray.shutdown() @pytest.mark.parametrize("num_workers", [1, 2]) def test_torch_linear(ray_start_4_cpus, num_workers): def train_func(config): result = linear_train_func(config) assert len(result) == epochs assert result[-1]["loss"] < result[0]["loss"] num_workers = num_workers epochs = 3 scaling_config = {"num_workers": num_workers} config = {"lr": 1e-2, "hidden_size": 1, "batch_size": 4, "epochs": epochs} trainer = TorchTrainer( train_loop_per_worker=train_func, train_loop_config=config, scaling_config=scaling_config, ) trainer.fit() def test_torch_e2e(ray_start_4_cpus): def train_func(): model = torch.nn.Linear(1, 1) train.save_checkpoint(model=model) scaling_config = {"num_workers": 2} trainer = TorchTrainer( train_loop_per_worker=train_func, scaling_config=scaling_config ) result = trainer.fit() predict_dataset = ray.data.range(3) class TorchScorer: def __init__(self): self.pred = TorchPredictor.from_checkpoint(result.checkpoint) def __call__(self, x): return self.pred.predict(x, dtype=torch.float) predictions = predict_dataset.map_batches( TorchScorer, batch_format="pandas", compute="actors" ) assert predictions.count() == 3 def test_torch_e2e_state_dict(ray_start_4_cpus): def train_func(): model = torch.nn.Linear(1, 1).state_dict() train.save_checkpoint(model=model) scaling_config = {"num_workers": 2} trainer = TorchTrainer( train_loop_per_worker=train_func, scaling_config=scaling_config ) result = trainer.fit() # If loading from a state dict, a model definition must be passed in. with pytest.raises(ValueError): TorchPredictor.from_checkpoint(result.checkpoint) class TorchScorer: def __init__(self): self.pred = TorchPredictor.from_checkpoint( result.checkpoint, model=torch.nn.Linear(1, 1) ) def __call__(self, x): return self.pred.predict(x, dtype=torch.float) predict_dataset = ray.data.range(3) predictions = predict_dataset.map_batches( TorchScorer, batch_format="pandas", compute="actors" ) assert predictions.count() == 3 if __name__ == "__main__": import pytest import sys sys.exit(pytest.main(["-v", "-x", __file__]))
1.976563
2
eoxserver/services/ows/wps/v10/encoders/parameters.py
constantinius/eoxserver_combined
1
79
#------------------------------------------------------------------------------- # # WPS 1.0 parameters' XML encoders # # Project: EOxServer <http://eoxserver.org> # Authors: <NAME> <<EMAIL>> # <NAME> <<EMAIL>> # #------------------------------------------------------------------------------- # Copyright (C) 2013 EOX IT Services GmbH # # Permission is hereby granted, free of charge, to any person obtaining a copy # of this software and associated documentation files (the "Software"), to deal # in the Software without restriction, including without limitation the rights # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell # copies of the Software, and to permit persons to whom the Software is # furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice shall be included in all # copies of this Software or works derived from this Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN # THE SOFTWARE. #------------------------------------------------------------------------------- from eoxserver.services.ows.wps.parameters import ( LiteralData, ComplexData, BoundingBoxData, AllowedAny, AllowedEnum, AllowedRange, AllowedRangeCollection, AllowedByReference, ) from eoxserver.services.ows.wps.v10.util import ( OWS, WPS, NIL, ns_ows, ) #------------------------------------------------------------------------------- def encode_input_descr(prm): """ Encode process description input.""" elem = NIL("Input", *_encode_param_common(prm)) elem.attrib["minOccurs"] = ("1", "0")[bool(prm.is_optional)] elem.attrib["maxOccurs"] = "1" if isinstance(prm, LiteralData): elem.append(_encode_literal(prm, True)) elif isinstance(prm, ComplexData): elem.append(_encode_complex(prm, True)) elif isinstance(prm, BoundingBoxData): elem.append(_encode_bbox(prm, True)) return elem def encode_output_descr(prm): """ Encode process description output.""" elem = NIL("Output", *_encode_param_common(prm)) if isinstance(prm, LiteralData): elem.append(_encode_literal(prm, False)) elif isinstance(prm, ComplexData): elem.append(_encode_complex(prm, False)) elif isinstance(prm, BoundingBoxData): elem.append(_encode_bbox(prm, False)) return elem def encode_input_exec(prm): """ Encode common part of the execure response data input.""" return WPS("Input", *_encode_param_common(prm, False)) def encode_output_exec(prm): """ Encode common part of the execure response data output.""" return WPS("Output", *_encode_param_common(prm)) def encode_output_def(outdef): """ Encode the execure response output definition.""" attrib = {} if outdef.uom is not None: attrib['uom'] = outdef.uom if outdef.crs is not None: attrib['crs'] = outdef.crs if outdef.mime_type is not None: attrib['mimeType'] = outdef.mime_type if outdef.encoding is not None: attrib['encoding'] = outdef.encoding if outdef.schema is not None: attrib['schema'] = outdef.schema if outdef.as_reference is not None: attrib['asReference'] = 'true' if outdef.as_reference else 'false' return WPS("Output", *_encode_param_common(outdef, False), **attrib) def _encode_param_common(prm, title_required=True): """ 
Encode common sub-elements of all XML parameters.""" elist = [OWS("Identifier", prm.identifier)] if prm.title or title_required: elist.append(OWS("Title", prm.title or prm.identifier)) if prm.abstract: elist.append(OWS("Abstract", prm.abstract)) return elist #------------------------------------------------------------------------------- def _encode_literal(prm, is_input): dtype = prm.dtype elem = NIL("LiteralData" if is_input else "LiteralOutput") elem.append(OWS("DataType", dtype.name, **{ ns_ows("reference"): "http://www.w3.org/TR/xmlschema-2/#%s"%dtype.name, })) if prm.uoms: elem.append(NIL("UOMs", NIL("Default", OWS("UOM", prm.uoms[0])), NIL("Supported", *[OWS("UOM", u) for u in prm.uoms]) )) if is_input: elem.append(_encode_allowed_value(prm.allowed_values)) if prm.default is not None: elem.append(NIL("DefaultValue", str(prm.default))) return elem def _encode_allowed_value(avobj): enum, ranges, elist = None, [], [] if isinstance(avobj, AllowedAny): return OWS("AnyValue") elif isinstance(avobj, AllowedByReference): return WPS("ValuesReference", **{ ns_ows("reference"): avobj.url, "valuesForm": avobj.url, }) elif isinstance(avobj, AllowedEnum): enum = avobj elif isinstance(avobj, AllowedRange): ranges = [avobj] elif isinstance(avobj, AllowedRangeCollection): enum, ranges = avobj.enum, avobj.ranges else: raise TypeError("Invalid allowed value object! OBJ=%r"%avobj) dtype = avobj.dtype ddtype = dtype.get_diff_dtype() if enum is not None: elist.extend(OWS("Value", dtype.encode(v)) for v in enum.values) for range_ in ranges: attr, elms = {}, [] if range_.closure != 'closed': attr = {ns_ows("rangeClosure"): range_.closure} if range_.minval is not None: elms.append(OWS("MinimumValue", dtype.encode(range_.minval))) if range_.maxval is not None: elms.append(OWS("MaximumValue", dtype.encode(range_.maxval))) if range_.spacing is not None: elms.append(OWS("Spacing", ddtype.encode(range_.spacing))) elist.append(OWS("Range", *elms, **attr)) return OWS("AllowedValues", *elist) #------------------------------------------------------------------------------- def _encode_complex(prm, is_input): return NIL("ComplexData" if is_input else "ComplexOutput", NIL("Default", _encode_format(prm.default_format)), NIL("Supported", *[_encode_format(f) for f in prm.formats.itervalues()]) ) def _encode_format(frmt): elem = NIL("Format", NIL("MimeType", frmt.mime_type)) if frmt.encoding is not None: elem.append(NIL("Encoding", frmt.encoding)) if frmt.schema is not None: elem.append(NIL("Schema", frmt.schema)) return elem #------------------------------------------------------------------------------- def _encode_bbox(prm, is_input): return NIL("BoundingBoxData" if is_input else "BoundingBoxOutput", NIL("Default", NIL("CRS", prm.encode_crs(prm.default_crs))), NIL("Supported", *[NIL("CRS", prm.encode_crs(crs)) for crs in prm.crss]) )
1.226563
1
Complab assignment.py
peteboi/Python-Scripts
0
87
# -*- coding: utf-8 -*-

import numpy as np
import matplotlib.pyplot as plt


def orbit(u):
    x, y, v_x, v_y = u
    r = np.hypot(x, y)
    #r = 1.521e+06
    #M, G = 1.989e+30, 6.7e-11
    M, G = 20, 110
    f = G*M/r**3
    return np.array([v_x, v_y, -f*x, -f*y])


def RK4(f, u, dt):
    k1 = f(u)*dt
    k2 = f(u+0.5*k1)*dt
    k3 = f(u+0.5*k2)*dt
    k4 = f(u+k3)*dt
    return u + (k1+2*k2+2*k3+k4)/6


def RK4_int(f, y0, tspan):
    y = np.zeros([len(tspan), len(y0)])
    y[0, :] = y0
    for k in range(1, len(tspan)):
        y[k, :] = RK4(f, y[k-1], tspan[k]-tspan[k-1])
    return y


dt = 0.1
t = np.arange(0, 10, dt)
y0 = np.array([10, 0.0, 10, 10])

sol_rk4 = RK4_int(orbit, y0, t)
x, y, v_x, v_y = sol_rk4.T

plt.grid()
plt.plot(x, y)
plt.show()
2.296875
2
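Editor's note: the fixed-step RK4 integrator above can be sanity-checked on an ODE with a known solution; for a 4th-order method the global error should shrink by roughly 16x when the step is halved. A short self-contained check reusing the same RK4 step formula:

import numpy as np

def rk4_step(f, u, dt):
    k1 = f(u) * dt
    k2 = f(u + 0.5 * k1) * dt
    k3 = f(u + 0.5 * k2) * dt
    k4 = f(u + k3) * dt
    return u + (k1 + 2*k2 + 2*k3 + k4) / 6

def integrate(f, y0, t_end, dt):
    y, t = np.array(y0, dtype=float), 0.0
    while t < t_end - 1e-12:
        y = rk4_step(f, y, dt)
        t += dt
    return y

decay = lambda y: -y             # y' = -y, exact solution y(t) = exp(-t)
exact = np.exp(-1.0)
err_h = abs(integrate(decay, [1.0], 1.0, 0.1)[0] - exact)
err_h2 = abs(integrate(decay, [1.0], 1.0, 0.05)[0] - exact)
print(err_h / err_h2)            # close to 2**4 = 16 for a 4th-order method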
python_utilities/plotting/util.py
sdaxen/python_utilities
2
111
"""Utility functions for plotting. Author: <NAME> E-mail: <EMAIL>""" from collections import deque import numpy as np def rgb_to_hsv(rgb): """Convert RGB colors to HSV colors.""" r, g, b = tuple(map(float, rgb)) if any([r > 1, g > 1, b > 1]): r /= 255. g /= 255. b /= 255. mmax = max(r, g, b) mmin = min(r, g, b) c = mmax - mmin if (c == 0.): hp = 0. elif (mmax == r): hp = ((g - b) / c) % 6 elif (mmax == g): hp = ((b - r) / c) + 2 elif (mmax == b): hp = ((r - g) / c) + 4 h = 60 * hp v = mmax if (c == 0): s = 0 else: s = c / v return (h, s, v) def hsv_to_rgb(hsv): """Convert HSV colors to RGB colors.""" h, s, v = tuple(map(float, hsv)) c = v * s m = v - c hp = h / 60. x = c * (1. - abs((hp % 2) - 1.)) hp = int(hp) rgb = deque((c + m, x + m, m)) if (hp % 2): rgb.reverse() rgb.rotate((hp - 3) / 2) else: rgb.rotate(hp / 2) return tuple(rgb) def rgb_to_yuv(rgb): """Convert RGB colors to Y'UV colors, useful for comparison.""" rgbv = np.array(rgb).reshape(3, 1) if np.any(rgbv > 1.): rgbv = rgbv / 255. yuv = np.dot(np.array([[ .299, .587, .114], [-.14713, -.28886, .436], [ .615, -.51499, -.10001]], dtype=np.double), rgbv) return list(yuv) def yuv_to_rgb(yuv): """Convert Y'UV colors to RGB colors.""" yuvv = np.array(yuv).reshape(3, 1) rgb = np.dot(np.array([[1., 0., 1.13983], [1., -.39465, -.58060], [1., 2.03211, 0.]], dtype=np.double), yuvv) return list(rgb) def compute_yuv_dist(rgb1, rgb2): """Compute Euclidean Y'UV distance between RGB colors.""" yuv1 = rgb_to_yuv(rgb1) yuv2 = rgb_to_yuv(rgb2) return float(sum((np.array(yuv1) - np.array(yuv2))**2)**.5) def lighten_rgb(rgb, p=0.): """Lighten RGB colors by percentage p of total.""" h, s, v = rgb_to_hsv(rgb) hsv = (h, s, min(1, v + p)) return hsv_to_rgb(hsv)
2.625
3
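Editor's note: a short usage sketch for the helpers above (the import path is assumed from the repo layout python_utilities/plotting/util.py); an RGB triple survives a round trip through HSV, and lighten_rgb raises only the value channel:

from python_utilities.plotting.util import rgb_to_hsv, hsv_to_rgb, lighten_rgb

rgb = (0.2, 0.4, 0.6)
h, s, v = rgb_to_hsv(rgb)           # hue in degrees, saturation/value in [0, 1]
print(round(h), round(s, 3), v)     # 210 0.667 0.6
print(hsv_to_rgb((h, s, v)))        # (0.2, 0.4, 0.6) again, up to float rounding
print(lighten_rgb(rgb, p=0.2))      # same hue/saturation, value raised to 0.8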
mypy/server/aststrip.py
mmaryada27/mypy
0
135
"""Strip/reset AST in-place to match state after semantic analysis pass 1. Fine-grained incremental mode reruns semantic analysis (passes 2 and 3) and type checking for *existing* AST nodes (targets) when changes are propagated using fine-grained dependencies. AST nodes attributes are often changed during semantic analysis passes 2 and 3, and running semantic analysis again on those nodes would produce incorrect results, since these passes aren't idempotent. This pass resets AST nodes to reflect the state after semantic analysis pass 1, so that we can rerun semantic analysis. (The above is in contrast to behavior with modules that have source code changes, for which we reparse the entire module and reconstruct a fresh AST. No stripping is required in this case. Both modes of operation should have the same outcome.) Notes: * This is currently pretty fragile, as we must carefully undo whatever changes can be made in semantic analysis passes 2 and 3, including changes to symbol tables. * We reuse existing AST nodes because it makes it relatively straightforward to reprocess only a single target within a module efficiently. If there was a way to parse a single target within a file, in time proportional to the size of the target, we'd rather create fresh AST nodes than strip them. Alas, no such facility exists and building it is non-trivial. * Currently we don't actually reset all changes, but only those known to affect non-idempotent semantic analysis behavior. TODO: It would be more principled and less fragile to reset everything changed in semantic analysis pass 2 and later. * Reprocessing may recreate AST nodes (such as Var nodes, and TypeInfo nodes created with assignment statements) that will get different identities from the original AST. Thus running an AST merge is necessary after stripping, even though some identities are preserved. """ import contextlib from typing import Union, Iterator, Optional from mypy.nodes import ( Node, FuncDef, NameExpr, MemberExpr, RefExpr, MypyFile, FuncItem, ClassDef, AssignmentStmt, ImportFrom, Import, TypeInfo, SymbolTable, Var, CallExpr, Decorator, OverloadedFuncDef, SuperExpr, UNBOUND_IMPORTED, GDEF, MDEF, IndexExpr ) from mypy.traverser import TraverserVisitor def strip_target(node: Union[MypyFile, FuncItem, OverloadedFuncDef]) -> None: """Reset a fine-grained incremental target to state after semantic analysis pass 1. NOTE: Currently we opportunistically only reset changes that are known to otherwise cause trouble. """ visitor = NodeStripVisitor() if isinstance(node, MypyFile): visitor.strip_file_top_level(node) else: node.accept(visitor) class NodeStripVisitor(TraverserVisitor): def __init__(self) -> None: self.type = None # type: Optional[TypeInfo] self.names = None # type: Optional[SymbolTable] self.is_class_body = False # By default, process function definitions. If False, don't -- this is used for # processing module top levels. 
self.recurse_into_functions = True def strip_file_top_level(self, file_node: MypyFile) -> None: """Strip a module top-level (don't recursive into functions).""" self.names = file_node.names self.recurse_into_functions = False file_node.accept(self) def visit_class_def(self, node: ClassDef) -> None: """Strip class body and type info, but don't strip methods.""" node.info.type_vars = [] node.info.bases = [] node.info.abstract_attributes = [] node.info.mro = [] node.info.add_type_vars() node.info.tuple_type = None node.info.typeddict_type = None node.info._cache = set() node.info._cache_proper = set() node.base_type_exprs.extend(node.removed_base_type_exprs) node.removed_base_type_exprs = [] with self.enter_class(node.info): super().visit_class_def(node) def visit_func_def(self, node: FuncDef) -> None: if not self.recurse_into_functions: return node.expanded = [] node.type = node.unanalyzed_type with self.enter_method(node.info) if node.info else nothing(): super().visit_func_def(node) def visit_decorator(self, node: Decorator) -> None: node.var.type = None for expr in node.decorators: expr.accept(self) if self.recurse_into_functions: node.func.accept(self) def visit_overloaded_func_def(self, node: OverloadedFuncDef) -> None: if not self.recurse_into_functions: return if node.impl: # Revert change made during semantic analysis pass 2. assert node.items[-1] is not node.impl node.items.append(node.impl) super().visit_overloaded_func_def(node) @contextlib.contextmanager def enter_class(self, info: TypeInfo) -> Iterator[None]: # TODO: Update and restore self.names old_type = self.type old_is_class_body = self.is_class_body self.type = info self.is_class_body = True yield self.type = old_type self.is_class_body = old_is_class_body @contextlib.contextmanager def enter_method(self, info: TypeInfo) -> Iterator[None]: # TODO: Update and restore self.names old_type = self.type old_is_class_body = self.is_class_body self.type = info self.is_class_body = False yield self.type = old_type self.is_class_body = old_is_class_body def visit_assignment_stmt(self, node: AssignmentStmt) -> None: node.type = node.unanalyzed_type if self.type and not self.is_class_body: # TODO: Handle multiple assignment if len(node.lvalues) == 1: lvalue = node.lvalues[0] if isinstance(lvalue, MemberExpr) and lvalue.is_new_def: # Remove defined attribute from the class symbol table. If is_new_def is # true for a MemberExpr, we know that it must be an assignment through # self, since only those can define new attributes. del self.type.names[lvalue.name] super().visit_assignment_stmt(node) def visit_import_from(self, node: ImportFrom) -> None: if node.assignments: node.assignments = [] else: if self.names: # Reset entries in the symbol table. This is necessary since # otherwise the semantic analyzer will think that the import # assigns to an existing name instead of defining a new one. for name, as_name in node.names: imported_name = as_name or name symnode = self.names[imported_name] symnode.kind = UNBOUND_IMPORTED symnode.node = None def visit_import(self, node: Import) -> None: if node.assignments: node.assignments = [] else: if self.names: # Reset entries in the symbol table. This is necessary since # otherwise the semantic analyzer will think that the import # assigns to an existing name instead of defining a new one. 
for name, as_name in node.ids: imported_name = as_name or name initial = imported_name.split('.')[0] symnode = self.names[initial] symnode.kind = UNBOUND_IMPORTED symnode.node = None def visit_name_expr(self, node: NameExpr) -> None: # Global assignments are processed in semantic analysis pass 1, and we # only want to strip changes made in passes 2 or later. if not (node.kind == GDEF and node.is_new_def): # Remove defined attributes so that they can recreated during semantic analysis. if node.kind == MDEF and node.is_new_def: self.strip_class_attr(node.name) self.strip_ref_expr(node) def visit_member_expr(self, node: MemberExpr) -> None: self.strip_ref_expr(node) # These need to cleared for member expressions but not for other RefExprs since # these can change based on changed in a base class. node.is_new_def = False node.is_inferred_def = False if self.is_duplicate_attribute_def(node): # This is marked as an instance variable definition but a base class # defines an attribute with the same name, and we can't have # multiple definitions for an attribute. Defer to the base class # definition. self.strip_class_attr(node.name) node.def_var = None super().visit_member_expr(node) def visit_index_expr(self, node: IndexExpr) -> None: node.analyzed = None # was a type alias super().visit_index_expr(node) def strip_class_attr(self, name: str) -> None: if self.type is not None: del self.type.names[name] def is_duplicate_attribute_def(self, node: MemberExpr) -> bool: if not node.is_inferred_def: return False assert self.type is not None, "Internal error: Member defined outside class" if node.name not in self.type.names: return False return any(info.get(node.name) is not None for info in self.type.mro[1:]) def strip_ref_expr(self, node: RefExpr) -> None: node.kind = None node.node = None node.fullname = None node.is_new_def = False node.is_inferred_def = False def visit_call_expr(self, node: CallExpr) -> None: node.analyzed = None super().visit_call_expr(node) def visit_super_expr(self, node: SuperExpr) -> None: node.info = None super().visit_super_expr(node) # TODO: handle more node types def is_self_member_ref(memberexpr: MemberExpr) -> bool: """Does memberexpr refer to an attribute of self?""" # TODO: Merge with is_self_member_ref in semanal.py. if not isinstance(memberexpr.expr, NameExpr): return False node = memberexpr.expr.node return isinstance(node, Var) and node.is_self @contextlib.contextmanager def nothing() -> Iterator[None]: yield
1.726563
2
src/main/python/taf/foundation/api/ui/aut.py
WesleyPeng/uiXautomation
6
143
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from taf.foundation.utils import ConnectionCache


class AUT(object):
    cache = None
    current = None

    def __init__(
            self,
            name=None,
            identifier=None,
            **kwargs
    ):
        if not AUT.cache:
            AUT.cache = ConnectionCache(identifier)

        self.id = self.cache.register(
            self._create_instance(name, **kwargs),
            identifier
        )

        AUT.current = self

    @staticmethod
    def launch(app_location, **kwargs):
        raise NotImplementedError(
            'Launch application'
        )

    def activate(self):
        if self.id != self.cache.current_key:
            self.cache.current_key = self.id
            AUT.current = self

    def take_screenshot(self):
        self.activate()
        return self.get_screenshot_data()

    def close(self):
        self.cache.close(self.id)

        if not self.cache.current:
            AUT.cache = None
            AUT.current = None

    def get_screenshot_data(self):
        raise NotImplementedError(
            'Get screenshot data from AUT'
        )

    def _create_instance(self, name, **kwargs):
        raise NotImplementedError(
            'Create instance of AUT'
        )
1.234375
1
superneurons/tools/img_val/main.py
Phaeton-lang/baselines
0
159
# Created by ay27 at 17/4/9
import os
import matplotlib.pyplot as plt
import struct
import numpy as np


def trans(row):
    return list(map(lambda x: np.uint8(x), row))


def read_image(filename):
    with open(filename, mode='rb') as file:
        n = file.read(8)
        n = struct.unpack("<Q", n)[0]
        c = file.read(8)
        c = struct.unpack("<Q", c)[0]
        h = file.read(8)
        h = struct.unpack("<Q", h)[0]
        w = file.read(8)
        w = struct.unpack("<Q", w)[0]
        print(n, c, h, w)

        for ii in range(n):
            r = trans(file.read(h*w))
            g = trans(file.read(h*w))
            b = trans(file.read(h*w))
            if ii == 100:
                break

        print(file.tell() == os.fstat(file.fileno()).st_size)

    img = np.array([r, g, b]).transpose(1, 0).reshape(h, w, c)
    print(img.shape)
    plt.imshow(img)
    plt.show()


def read_label(path, ground_truth=None):
    with open(path, 'rb') as file:
        n = file.read(8)
        n = struct.unpack("<Q", n)[0]
        c = file.read(8)
        c = struct.unpack("<Q", c)[0]
        h = file.read(8)
        h = struct.unpack("<Q", h)[0]
        w = file.read(8)
        w = struct.unpack("<Q", w)[0]
        print(n, c, h, w)

        label = []
        sets = set()
        while not (file.tell() == os.fstat(file.fileno()).st_size):
            ch = file.read(4)
            num = struct.unpack("<l", ch)[0]
            label.append(num)
            sets.add(num)

        # print(file.tell() == os.fstat(file.fileno()).st_size)
        print(label)
        print(len(label))
        # print(label[900],label[901], label[902], label[903], label[904])
        return label

        # if ground_truth:
        #     g = []
        #     with open(ground_truth) as file:
        #         for line in file:
        #             g.append(int(line.split(' ')[1]))
        #     np.testing.assert_array_equal(g, label)


if __name__ == '__main__':
    # read_image('../../data/ilsvrc2012/img.bin')
    # read_label('../../data/ilsvrc2012/label.bin', '../../data/ilsvrc2012/val.txt')
    # read_image('../../build/cifar100_train_image.bin')
    # read_label('../../build/cifar100_train_label.bin')
    read_image('../../build/val_data_8.bin')
    for i in range(10):
        read_label('../../build/val_label_%d.bin' % i)

    # labels = []
    # for i in range(10):
    #     labels.append(read_label('../../build/val_label_%d.bin' % i))
    #
    # ground = []
    # with open('../../build/shuffled_list') as file:
    #     ground.append()
2.109375
2
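Editor's note: read_image and read_label above expect a header of four little-endian uint64 fields (n, c, h, w) followed by raw payload bytes. A minimal sketch of writing a tiny file in that layout (file name and sizes are made up), useful for exercising the readers:

import struct
import numpy as np

n, c, h, w = 1, 3, 4, 4                        # one 4x4 RGB image
img = np.random.randint(0, 256, size=(c, h, w), dtype=np.uint8)

with open('toy_image.bin', 'wb') as f:
    f.write(struct.pack('<QQQQ', n, c, h, w))  # header: four little-endian uint64
    f.write(img.tobytes())                     # then planar R, G, B byte planes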
napari/_qt/dialogs/qt_plugin_dialog.py
kne42/napari
0
167
import os import sys from pathlib import Path from typing import Sequence from napari_plugin_engine.dist import standard_metadata from napari_plugin_engine.exceptions import PluginError from qtpy.QtCore import QEvent, QProcess, QProcessEnvironment, QSize, Qt, Slot from qtpy.QtGui import QFont, QMovie from qtpy.QtWidgets import ( QCheckBox, QDialog, QFrame, QHBoxLayout, QLabel, QLineEdit, QListWidget, QListWidgetItem, QPushButton, QSizePolicy, QSplitter, QTextEdit, QVBoxLayout, QWidget, ) import napari.resources from ...plugins import plugin_manager from ...plugins.pypi import ( ProjectInfo, iter_napari_plugin_info, normalized_name, ) from ...utils._appdirs import user_plugin_dir, user_site_packages from ...utils.misc import parse_version, running_as_bundled_app from ...utils.translations import trans from ..qthreading import create_worker from ..widgets.qt_eliding_label import ElidingLabel from ..widgets.qt_plugin_sorter import QtPluginSorter from .qt_plugin_report import QtPluginErrReporter # TODO: add error icon and handle pip install errors # TODO: add queue to handle clicks when already processing class Installer: def __init__(self, output_widget: QTextEdit = None): from ...plugins import plugin_manager # create install process self._output_widget = None self.process = QProcess() self.process.setProgram(sys.executable) self.process.setProcessChannelMode(QProcess.MergedChannels) self.process.readyReadStandardOutput.connect(self._on_stdout_ready) # setup process path env = QProcessEnvironment() combined_paths = os.pathsep.join( [user_site_packages(), env.systemEnvironment().value("PYTHONPATH")] ) env.insert("PYTHONPATH", combined_paths) # use path of parent process env.insert( "PATH", QProcessEnvironment.systemEnvironment().value("PATH") ) self.process.setProcessEnvironment(env) self.process.finished.connect(lambda: plugin_manager.discover()) self.process.finished.connect(lambda: plugin_manager.prune()) self.set_output_widget(output_widget) def set_output_widget(self, output_widget: QTextEdit): if output_widget: self._output_widget = output_widget self.process.setParent(output_widget) def _on_stdout_ready(self): if self._output_widget: text = self.process.readAllStandardOutput().data().decode() self._output_widget.append(text) def install(self, pkg_list: Sequence[str]): cmd = ['-m', 'pip', 'install', '--upgrade'] if running_as_bundled_app() and sys.platform.startswith('linux'): cmd += [ '--no-warn-script-location', '--prefix', user_plugin_dir(), ] self.process.setArguments(cmd + list(pkg_list)) if self._output_widget: self._output_widget.clear() self.process.start() def uninstall(self, pkg_list: Sequence[str]): args = ['-m', 'pip', 'uninstall', '-y'] self.process.setArguments(args + list(pkg_list)) if self._output_widget: self._output_widget.clear() self.process.start() for pkg in pkg_list: plugin_manager.unregister(pkg) class PluginListItem(QFrame): def __init__( self, package_name: str, version: str = '', url: str = '', summary: str = '', author: str = '', license: str = "UNKNOWN", *, plugin_name: str = None, parent: QWidget = None, enabled: bool = True, ): super().__init__(parent) self.setup_ui(enabled) if plugin_name: self.plugin_name.setText(plugin_name) self.package_name.setText(f"{package_name} {version}") self.summary.setText(summary) self.package_author.setText(author) self.action_button.setText(trans._("uninstall")) self.action_button.setObjectName("remove_button") self.enabled_checkbox.setChecked(enabled) if PluginError.get(plugin_name=plugin_name): def _show_error(): rep = 
QtPluginErrReporter( parent=self._get_dialog(), initial_plugin=plugin_name ) rep.setWindowFlags(Qt.Sheet) close = QPushButton(trans._("close"), rep) rep.layout.addWidget(close) rep.plugin_combo.hide() close.clicked.connect(rep.close) rep.open() self.error_indicator.clicked.connect(_show_error) self.error_indicator.show() self.summary.setIndent(18) else: self.summary.setIndent(38) else: self.plugin_name.setText(package_name) self.package_name.setText(version) self.summary.setText(summary) self.package_author.setText(author) self.action_button.setText(trans._("install")) self.enabled_checkbox.hide() def _get_dialog(self) -> QDialog: p = self.parent() while not isinstance(p, QDialog) and p.parent(): p = p.parent() return p def setup_ui(self, enabled=True): self.v_lay = QVBoxLayout(self) self.v_lay.setContentsMargins(-1, 6, -1, 6) self.v_lay.setSpacing(0) self.row1 = QHBoxLayout() self.row1.setSpacing(6) self.enabled_checkbox = QCheckBox(self) self.enabled_checkbox.setChecked(enabled) self.enabled_checkbox.stateChanged.connect(self._on_enabled_checkbox) self.enabled_checkbox.setToolTip(trans._("enable/disable")) sizePolicy = QSizePolicy(QSizePolicy.Fixed, QSizePolicy.Fixed) sizePolicy.setHorizontalStretch(0) sizePolicy.setVerticalStretch(0) sizePolicy.setHeightForWidth( self.enabled_checkbox.sizePolicy().hasHeightForWidth() ) self.enabled_checkbox.setSizePolicy(sizePolicy) self.enabled_checkbox.setMinimumSize(QSize(20, 0)) self.enabled_checkbox.setText("") self.row1.addWidget(self.enabled_checkbox) self.plugin_name = QLabel(self) sizePolicy = QSizePolicy(QSizePolicy.Preferred, QSizePolicy.Minimum) sizePolicy.setHorizontalStretch(0) sizePolicy.setVerticalStretch(0) sizePolicy.setHeightForWidth( self.plugin_name.sizePolicy().hasHeightForWidth() ) self.plugin_name.setSizePolicy(sizePolicy) font15 = QFont() font15.setPointSize(15) self.plugin_name.setFont(font15) self.row1.addWidget(self.plugin_name) self.package_name = QLabel(self) self.package_name.setAlignment( Qt.AlignRight | Qt.AlignTrailing | Qt.AlignVCenter ) self.row1.addWidget(self.package_name) self.action_button = QPushButton(self) sizePolicy = QSizePolicy(QSizePolicy.Fixed, QSizePolicy.Fixed) sizePolicy.setHorizontalStretch(0) sizePolicy.setVerticalStretch(0) sizePolicy.setHeightForWidth( self.action_button.sizePolicy().hasHeightForWidth() ) self.action_button.setSizePolicy(sizePolicy) self.row1.addWidget(self.action_button) self.v_lay.addLayout(self.row1) self.row2 = QHBoxLayout() self.error_indicator = QPushButton() self.error_indicator.setObjectName("warning_icon") self.error_indicator.setCursor(Qt.PointingHandCursor) self.error_indicator.hide() self.row2.addWidget(self.error_indicator) self.row2.setContentsMargins(-1, 4, 0, -1) self.summary = ElidingLabel(parent=self) sizePolicy = QSizePolicy( QSizePolicy.MinimumExpanding, QSizePolicy.Preferred ) sizePolicy.setHorizontalStretch(0) sizePolicy.setVerticalStretch(0) sizePolicy.setHeightForWidth( self.summary.sizePolicy().hasHeightForWidth() ) self.summary.setSizePolicy(sizePolicy) self.summary.setObjectName("small_text") self.row2.addWidget(self.summary) self.package_author = QLabel(self) sizePolicy = QSizePolicy(QSizePolicy.Preferred, QSizePolicy.Preferred) sizePolicy.setHorizontalStretch(0) sizePolicy.setVerticalStretch(0) sizePolicy.setHeightForWidth( self.package_author.sizePolicy().hasHeightForWidth() ) self.package_author.setSizePolicy(sizePolicy) self.package_author.setObjectName("small_text") self.row2.addWidget(self.package_author) self.v_lay.addLayout(self.row2) def 
_on_enabled_checkbox(self, state: int): """Called with `state` when checkbox is clicked.""" plugin_manager.set_blocked(self.plugin_name.text(), not state) class QPluginList(QListWidget): def __init__(self, parent: QWidget, installer: Installer): super().__init__(parent) self.installer = installer self.setSortingEnabled(True) @Slot(ProjectInfo) def addItem( self, project_info: ProjectInfo, plugin_name=None, enabled=True ): # don't add duplicates if ( self.findItems(project_info.name, Qt.MatchFixedString) and not plugin_name ): return # including summary here for sake of filtering below. searchable_text = project_info.name + " " + project_info.summary item = QListWidgetItem(searchable_text, parent=self) item.version = project_info.version super().addItem(item) widg = PluginListItem( *project_info, parent=self, plugin_name=plugin_name, enabled=enabled, ) method = getattr( self.installer, 'uninstall' if plugin_name else 'install' ) widg.action_button.clicked.connect(lambda: method([project_info.name])) item.setSizeHint(widg.sizeHint()) self.setItemWidget(item, widg) @Slot(ProjectInfo) def tag_outdated(self, project_info: ProjectInfo): for item in self.findItems(project_info.name, Qt.MatchFixedString): current = item.version latest = project_info.version if parse_version(current) >= parse_version(latest): continue if hasattr(item, 'outdated'): # already tagged it continue item.outdated = True widg = self.itemWidget(item) update_btn = QPushButton( trans._("update (v{latest})", latest=latest), widg ) update_btn.setSizePolicy(QSizePolicy.Fixed, QSizePolicy.Fixed) update_btn.clicked.connect( lambda: self.installer.install([item.text()]) ) widg.row1.insertWidget(3, update_btn) def filter(self, text: str): """Filter items to those containing `text`.""" shown = self.findItems(text, Qt.MatchContains) for i in range(self.count()): item = self.item(i) item.setHidden(item not in shown) class QtPluginDialog(QDialog): def __init__(self, parent=None): super().__init__(parent) self.installer = Installer() self.setup_ui() self.installer.set_output_widget(self.stdout_text) self.installer.process.started.connect(self._on_installer_start) self.installer.process.finished.connect(self._on_installer_done) self.refresh() def _on_installer_start(self): self.show_status_btn.setChecked(True) self.working_indicator.show() self.process_error_indicator.hide() def _on_installer_done(self, exit_code, exit_status): self.working_indicator.hide() if exit_code: self.process_error_indicator.show() else: self.show_status_btn.setChecked(False) self.refresh() self.plugin_sorter.refresh() def refresh(self): self.installed_list.clear() self.available_list.clear() # fetch installed from ...plugins import plugin_manager plugin_manager.discover() # since they might not be loaded yet already_installed = set() for plugin_name, mod_name, distname in plugin_manager.iter_available(): # not showing these in the plugin dialog if plugin_name in ('napari_plugin_engine',): continue if distname: already_installed.add(distname) meta = standard_metadata(distname) else: meta = {} self.installed_list.addItem( ProjectInfo( normalized_name(distname or ''), meta.get('version', ''), meta.get('url', ''), meta.get('summary', ''), meta.get('author', ''), meta.get('license', ''), ), plugin_name=plugin_name, enabled=plugin_name in plugin_manager.plugins, ) # self.v_splitter.setSizes([70 * self.installed_list.count(), 10, 10]) # fetch available plugins self.worker = create_worker(iter_napari_plugin_info) def _handle_yield(project_info): if project_info.name in 
already_installed: self.installed_list.tag_outdated(project_info) else: self.available_list.addItem(project_info) self.worker.yielded.connect(_handle_yield) self.worker.finished.connect(self.working_indicator.hide) self.worker.finished.connect(self._update_count_in_label) self.worker.start() def setup_ui(self): self.resize(1080, 640) vlay_1 = QVBoxLayout(self) self.h_splitter = QSplitter(self) vlay_1.addWidget(self.h_splitter) self.h_splitter.setOrientation(Qt.Horizontal) self.v_splitter = QSplitter(self.h_splitter) self.v_splitter.setOrientation(Qt.Vertical) self.v_splitter.setMinimumWidth(500) self.plugin_sorter = QtPluginSorter(parent=self.h_splitter) self.plugin_sorter.layout().setContentsMargins(2, 0, 0, 0) self.plugin_sorter.hide() installed = QWidget(self.v_splitter) lay = QVBoxLayout(installed) lay.setContentsMargins(0, 2, 0, 2) self.installed_label = QLabel(trans._("Installed Plugins")) self.installed_filter = QLineEdit() self.installed_filter.setPlaceholderText("search...") self.installed_filter.setMaximumWidth(350) self.installed_filter.setClearButtonEnabled(True) mid_layout = QHBoxLayout() mid_layout.addWidget(self.installed_label) mid_layout.addWidget(self.installed_filter) mid_layout.addStretch() lay.addLayout(mid_layout) self.installed_list = QPluginList(installed, self.installer) self.installed_filter.textChanged.connect(self.installed_list.filter) lay.addWidget(self.installed_list) uninstalled = QWidget(self.v_splitter) lay = QVBoxLayout(uninstalled) lay.setContentsMargins(0, 2, 0, 2) self.avail_label = QLabel(trans._("Available Plugins")) self.avail_filter = QLineEdit() self.avail_filter.setPlaceholderText("search...") self.avail_filter.setMaximumWidth(350) self.avail_filter.setClearButtonEnabled(True) mid_layout = QHBoxLayout() mid_layout.addWidget(self.avail_label) mid_layout.addWidget(self.avail_filter) mid_layout.addStretch() lay.addLayout(mid_layout) self.available_list = QPluginList(uninstalled, self.installer) self.avail_filter.textChanged.connect(self.available_list.filter) lay.addWidget(self.available_list) self.stdout_text = QTextEdit(self.v_splitter) self.stdout_text.setReadOnly(True) self.stdout_text.setObjectName("pip_install_status") self.stdout_text.hide() buttonBox = QHBoxLayout() self.working_indicator = QLabel(trans._("loading ..."), self) sp = self.working_indicator.sizePolicy() sp.setRetainSizeWhenHidden(True) self.working_indicator.setSizePolicy(sp) self.process_error_indicator = QLabel(self) self.process_error_indicator.setObjectName("error_label") self.process_error_indicator.hide() load_gif = str(Path(napari.resources.__file__).parent / "loading.gif") mov = QMovie(load_gif) mov.setScaledSize(QSize(18, 18)) self.working_indicator.setMovie(mov) mov.start() self.direct_entry_edit = QLineEdit(self) self.direct_entry_edit.installEventFilter(self) self.direct_entry_edit.setPlaceholderText( trans._('install by name/url, or drop file...') ) self.direct_entry_btn = QPushButton(trans._("Install"), self) self.direct_entry_btn.clicked.connect(self._install_packages) self.show_status_btn = QPushButton(trans._("Show Status"), self) self.show_status_btn.setFixedWidth(100) self.show_sorter_btn = QPushButton(trans._("<< Show Sorter"), self) self.close_btn = QPushButton(trans._("Close"), self) self.close_btn.clicked.connect(self.accept) buttonBox.addWidget(self.show_status_btn) buttonBox.addWidget(self.working_indicator) buttonBox.addWidget(self.direct_entry_edit) buttonBox.addWidget(self.direct_entry_btn) buttonBox.addWidget(self.process_error_indicator) 
buttonBox.addSpacing(60) buttonBox.addWidget(self.show_sorter_btn) buttonBox.addWidget(self.close_btn) buttonBox.setContentsMargins(0, 0, 4, 0) vlay_1.addLayout(buttonBox) self.show_status_btn.setCheckable(True) self.show_status_btn.setChecked(False) self.show_status_btn.toggled.connect(self._toggle_status) self.show_sorter_btn.setCheckable(True) self.show_sorter_btn.setChecked(False) self.show_sorter_btn.toggled.connect(self._toggle_sorter) self.v_splitter.setStretchFactor(1, 2) self.h_splitter.setStretchFactor(0, 2) self.avail_filter.setFocus() def _update_count_in_label(self): count = self.available_list.count() self.avail_label.setText( trans._("Available Plugins ({count})", count=count) ) def eventFilter(self, watched, event): if event.type() == QEvent.DragEnter: # we need to accept this event explicitly to be able # to receive QDropEvents! event.accept() if event.type() == QEvent.Drop: md = event.mimeData() if md.hasUrls(): files = [url.toLocalFile() for url in md.urls()] self.direct_entry_edit.setText(files[0]) return True return super().eventFilter(watched, event) def _toggle_sorter(self, show): if show: self.show_sorter_btn.setText(trans._(">> Hide Sorter")) self.plugin_sorter.show() else: self.show_sorter_btn.setText(trans._("<< Show Sorter")) self.plugin_sorter.hide() def _toggle_status(self, show): if show: self.show_status_btn.setText(trans._("Hide Status")) self.stdout_text.show() else: self.show_status_btn.setText(trans._("Show Status")) self.stdout_text.hide() def _install_packages(self, packages: Sequence[str] = ()): if not packages: _packages = self.direct_entry_edit.text() if os.path.exists(_packages): packages = [_packages] else: packages = _packages.split() self.direct_entry_edit.clear() if packages: self.installer.install(packages) if __name__ == "__main__": from qtpy.QtWidgets import QApplication app = QApplication([]) w = QtPluginDialog() w.show() app.exec_()
1.578125
2
main.py
vkumarma/Complete-Interpreter
0
191
import re
import sys


class Lexer:
    def __init__(self, inp_str):
        self.index = 0
        self.s = inp_str

    def get_char(self):
        if self.index < len(self.s):
            var = self.s[self.index]
            self.index += 1
            return var


input_file = open(str(sys.argv[1]), 'r')  # Open file for reading
line = input_file.read()
# "if z then while x * 4 - 2 do skip endwhile else x := 7 endif; y := 1"
input_string = line.strip("\n")
lexer = Lexer(input_string)
hashtable = {}
tokens_list = []


def token_check(input):
    if re.fullmatch("if|then|else|endif|while|do|endwhile|skip", input):
        hashtable[input] = "KEYWORD"
        tokens_list.append(input)
    elif re.search("([a-z]|[A-Z])([a-z]|[A-Z]|[0-9])*", input):
        hashtable[input] = "IDENTIFIER"
        tokens_list.append(input)
    elif re.search("[0-9]+", input):
        hashtable[input] = "NUMBER"
        tokens_list.append(input)
    elif re.fullmatch("\+|\-|\*|/|\(|\)|:=|;", input):
        hashtable[input] = "SYMBOL"
        tokens_list.append(input)
    else:
        hashtable[input] = "ERROR READING"


def digit(curr_char, lexer):
    sub = ""
    while (curr_char.isdigit()):
        sub += curr_char
        curr_char = lexer.get_char()
        if curr_char == None:
            break
    new.append(curr_char)
    return sub


def longest_sub_string(curr_char, lexer):
    sub = ""
    while (curr_char.isalpha() or curr_char.isdigit()):
        sub += curr_char
        curr_char = lexer.get_char()
        if curr_char == None:
            break
    new.append(curr_char)
    return sub


def symbol(curr_char, lexer):
    # print(curr_char)
    sym = curr_char
    curr_char = lexer.get_char()
    new.append(curr_char)
    return sym


def assignment(curr_char, lexer):
    sub = curr_char
    next_char = lexer.get_char()
    if next_char == "=":
        sub += next_char
        new.append(next_char)
        return sub
    new.append(lexer.get_char())
    return sub


new = []  # keeping track of current char.

curr_char = lexer.get_char()
while (curr_char != None):
    while (curr_char == ' ' or curr_char == ''):
        curr_char = lexer.get_char()

    if (curr_char.isdigit()):
        token_check(digit(curr_char, lexer))
        curr_char = new.pop()
    elif (curr_char.isalpha()):
        token_check(longest_sub_string(curr_char, lexer))
        curr_char = new.pop()
    elif curr_char in "+-/*();":
        token_check(symbol(curr_char, lexer))
        curr_char = new.pop()
    elif curr_char == ":":
        token_check(assignment(curr_char, lexer))
        curr_char = new.pop()
        if curr_char == "=":
            curr_char = lexer.get_char()
    else:
        token_check(curr_char)
        curr_char = lexer.get_char()


def tokens():
    return hashtable

# print(tokens_list)
# print(tokens())
2.71875
3
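Editor's note: the lexer above tokenizes sys.argv[1] at import time, so the simplest way to exercise it is with a small source file; a hedged sketch (file names are illustrative):

# Write a tiny program in the toy language and run the lexer over it.
with open('sample.prog', 'w') as f:
    f.write("if z then x := 7 else y := x + 2 endif")

# Then, from a shell:
#   python main.py sample.prog
# After the module has run, hashtable maps each lexeme to its class, e.g.
#   'if' -> 'KEYWORD', 'x' -> 'IDENTIFIER', '7' -> 'NUMBER', ':=' -> 'SYMBOL'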
aws-regions.py
groorj/cloud-regions
0
199
import json import logging import os import inspect import urllib import urllib.request from urllib.error import HTTPError # logger logger = logging.getLogger() logger_level = logging.getLevelName(os.environ['LOGGER_LEVEL']) logger.setLevel(logger_level) # validate access def validate_access(event, context): logger.debug("Inside function: [%s]", inspect.currentframe().f_code.co_name) logger.debug("RESTRICTED_ACCESS_ENABLED: [%s]", os.environ['RESTRICTED_ACCESS_ENABLED']) error_message = "You are not allowed, get out!" if os.environ['RESTRICTED_ACCESS_ENABLED'] == 'true': logger.info("Restricted access is enabled") logger.info("Value for header [%s] is: [%s]", os.environ['RESTRICTED_ACCESS_HTTP_HEADER'], event["headers"][os.environ['RESTRICTED_ACCESS_HTTP_HEADER']]) if event["headers"][os.environ['RESTRICTED_ACCESS_HTTP_HEADER']] != os.environ['RESTRICTED_ACCESS_SECRET']: logger.info("Key provided is not valid") logger.debug("Error: [%s]", error_message) http_code = 403 raise ValueError(http_code, error_message) else: logger.info("Key provided is valid") else: logger.info("Restricted access is NOT enabled") # create response def create_response_new(status_code, message_body): logger.debug("Inside function: [%s]", inspect.currentframe().f_code.co_name) return { 'statusCode': str(status_code), 'body': json.dumps(message_body), 'headers': { 'Content-Type': 'application/json', 'Access-Control-Allow-Origin': '*' }, } # download json file def get_json(): logger.debug("Inside function: [%s]", inspect.currentframe().f_code.co_name) try: response = urllib.request.urlopen(os.environ['AWS_REGIONS_JSON_URL']) except HTTPError as err: # catch HTTP error logger.debug("HTTP error: [%s]", err) raise json_data = json.loads(response.read()) return json_data # entry point -> return region info def get_region_info(event, context): logger.debug("Inside function: [%s]", inspect.currentframe().f_code.co_name) return_info_final = {} # validate the access to this resource try: validate_access(event, context) except ValueError as err: return_info_final['request'] = { "request_status": "Fail", "error_message": err.args[1], "http_error_code": err.args[0] } return create_response_new(err.args[0], return_info_final) # get region info region_code = event['pathParameters']['region_code'] logger.debug("region_code: [%s]", region_code) try: json_data = get_json() except HTTPError as err: # http_code = err.code http_code = 500 return_info_final['request'] = { "request_status": "Fail", "error_message": "Error getting Regions information.", "http_error_code": err.code } return create_response_new(http_code, return_info_final) # logger.debug("json_data: [%s]", json_data) # logger.debug("type(json_data): [%s]", type(json_data)) for element in json_data['data']: # logger.debug("code: [%s] && region_code: [%s]", element['code'], region_code) if element['code'] == region_code: logger.info("region_code found") http_code = 200 return_info_final['request'] = { "request_status": "Success" } return_info_final['info'] = json_data['info'] return_info_final['data'] = element break else: logger.info("region_code NOT found") return_info = "Region code NOT found." 
http_code = 404 return_info_final['request'] = { "request_status": "Fail", "error_message": "Region code NOT found.", "http_error_code": http_code } return create_response_new(http_code, return_info_final) # entry point -> return region info def get_all_regions_info(event, context): logger.debug("Inside function: [%s]", inspect.currentframe().f_code.co_name) return_info_final = {} # validate the access to this resource try: validate_access(event, context) except ValueError as err: return_info_final['request'] = { "request_status": "Fail", "error_message": err.args[1], "http_error_code": err.args[0] } return create_response_new(err.args[0], return_info_final) # get regions info try: json_data = get_json() except HTTPError as err: # http_code = err.code http_code = 500 return_info_final['request'] = { "request_status": "Fail", "error_message": "Error getting Regions information.", "http_error_code": err.code } return create_response_new(http_code, return_info_final) logger.debug("json_data: [%s]", json_data) http_code = 200 return_info_final['request'] = { "request_status": "Success" } return_info_final['info'] = json_data['info'] return_info_final['data'] = json_data['data'] return create_response_new(http_code, return_info_final) # End;
1.796875
2
apps/core/migrations/0001_initial.py
Visualway/Vitary
4
263
# Generated by Django 4.0.2 on 2022-03-02 03:29 from django.conf import settings from django.db import migrations, models import django.db.models.deletion class Migration(migrations.Migration): initial = True dependencies = [ migrations.swappable_dependency(settings.AUTH_USER_MODEL), ('vit', '0001_initial'), ] operations = [ migrations.CreateModel( name='Badge', fields=[ ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('name', models.CharField(max_length=50)), ('description', models.TextField()), ('color', models.CharField(choices=[('success', 'Green'), ('info', 'Blue'), ('link', 'Purple'), ('primary', 'Turquoise'), ('warning', 'Yellow'), ('danger', 'Red'), ('dark', 'Black'), ('white', 'White')], max_length=50)), ('special', models.BooleanField(default=False)), ], options={ 'ordering': ['name'], }, ), migrations.CreateModel( name='Requirments', fields=[ ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('name', models.CharField(max_length=50)), ('description', models.TextField()), ('badge', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='core.badge')), ], options={ 'ordering': ['name'], }, ), migrations.CreateModel( name='Abuse', fields=[ ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('abuse_type', models.CharField(choices=[('ABUSE', 'Abuse'), ('INAPPROPRIATE', 'Inappropriate'), ('SPAM', 'Spam'), ('BULLYING', 'Bullying'), ('SEXUAL_CONTENT', 'Sexual Content'), ('OTHER', 'Other')], max_length=50)), ('description', models.TextField()), ('date', models.DateTimeField(auto_now_add=True)), ('to_vit', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='vit.vit')), ('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)), ], options={ 'verbose_name_plural': 'Abuses', 'ordering': ['-date'], }, ), ]
0.964844
1
rpython/annotator/annrpython.py
microvm/pypy-mu
0
295
from __future__ import absolute_import import types from collections import defaultdict from rpython.tool.ansi_print import AnsiLogger from rpython.tool.pairtype import pair from rpython.tool.error import (format_blocked_annotation_error, gather_error, source_lines) from rpython.flowspace.model import Variable, Constant, checkgraph from rpython.translator import simplify, transform from rpython.annotator import model as annmodel, signature from rpython.annotator.model import ( typeof, s_ImpossibleValue, SomeInstance, intersection, difference) from rpython.annotator.bookkeeper import Bookkeeper from rpython.rtyper.normalizecalls import perform_normalizations log = AnsiLogger("annrpython") class RPythonAnnotator(object): """Block annotator for RPython. See description in doc/translation.txt.""" def __init__(self, translator=None, policy=None, bookkeeper=None): import rpython.rtyper.extfuncregistry # has side effects if translator is None: # interface for tests from rpython.translator.translator import TranslationContext translator = TranslationContext() translator.annotator = self self.translator = translator self.pendingblocks = {} # map {block: graph-containing-it} self.annotated = {} # set of blocks already seen self.added_blocks = None # see processblock() below self.links_followed = {} # set of links that have ever been followed self.notify = {} # {block: {positions-to-reflow-from-when-done}} self.fixed_graphs = {} # set of graphs not to annotate again self.blocked_blocks = {} # set of {blocked_block: (graph, index)} # --- the following information is recorded for debugging --- self.blocked_graphs = {} # set of graphs that have blocked blocks # --- end of debugging information --- self.frozen = False if policy is None: from rpython.annotator.policy import AnnotatorPolicy self.policy = AnnotatorPolicy() else: self.policy = policy if bookkeeper is None: bookkeeper = Bookkeeper(self) self.bookkeeper = bookkeeper def __getstate__(self): attrs = """translator pendingblocks annotated links_followed notify bookkeeper frozen policy added_blocks""".split() ret = self.__dict__.copy() for key, value in ret.items(): if key not in attrs: assert type(value) is dict, ( "%r is not dict. please update %s.__getstate__" % (key, self.__class__.__name__)) ret[key] = {} return ret #___ convenience high-level interface __________________ def build_types(self, function, input_arg_types, complete_now=True, main_entry_point=False): """Recursively build annotations about the specific entry point.""" assert isinstance(function, types.FunctionType), "fix that!" 
from rpython.annotator.policy import AnnotatorPolicy policy = AnnotatorPolicy() # make input arguments and set their type args_s = [self.typeannotation(t) for t in input_arg_types] # XXX hack annmodel.TLS.check_str_without_nul = ( self.translator.config.translation.check_str_without_nul) flowgraph, inputs_s = self.get_call_parameters(function, args_s, policy) if main_entry_point: self.translator.entry_point_graph = flowgraph return self.build_graph_types(flowgraph, inputs_s, complete_now=complete_now) def get_call_parameters(self, function, args_s, policy): desc = self.bookkeeper.getdesc(function) prevpolicy = self.policy self.policy = policy self.bookkeeper.enter(None) try: return desc.get_call_parameters(args_s) finally: self.bookkeeper.leave() self.policy = prevpolicy def annotate_helper(self, function, args_s, policy=None): if policy is None: from rpython.annotator.policy import AnnotatorPolicy policy = AnnotatorPolicy() # XXX hack annmodel.TLS.check_str_without_nul = ( self.translator.config.translation.check_str_without_nul) graph, inputcells = self.get_call_parameters(function, args_s, policy) self.build_graph_types(graph, inputcells, complete_now=False) self.complete_helpers(policy) return graph def complete_helpers(self, policy): saved = self.policy, self.added_blocks self.policy = policy try: self.added_blocks = {} self.complete() # invoke annotation simplifications for the new blocks self.simplify(block_subset=self.added_blocks) finally: self.policy, self.added_blocks = saved def build_graph_types(self, flowgraph, inputcells, complete_now=True): checkgraph(flowgraph) nbarg = len(flowgraph.getargs()) assert len(inputcells) == nbarg # wrong number of args # register the entry point self.addpendinggraph(flowgraph, inputcells) # recursively proceed until no more pending block is left if complete_now: self.complete() return self.annotation(flowgraph.getreturnvar()) def gettype(self, variable): """Return the known type of a control flow graph variable, defaulting to 'object'.""" if isinstance(variable, Constant): return type(variable.value) elif isinstance(variable, Variable): s_variable = variable.annotation if s_variable: return s_variable.knowntype else: return object else: raise TypeError("Variable or Constant instance expected, " "got %r" % (variable,)) def getuserclassdefinitions(self): """Return a list of ClassDefs.""" return self.bookkeeper.classdefs #___ medium-level interface ____________________________ def addpendinggraph(self, flowgraph, inputcells): self.addpendingblock(flowgraph, flowgraph.startblock, inputcells) def addpendingblock(self, graph, block, cells): """Register an entry point into block with the given input cells.""" if graph in self.fixed_graphs: # special case for annotating/rtyping in several phases: calling # a graph that has already been rtyped. Safety-check the new # annotations that are passed in, and don't annotate the old # graph -- it's already low-level operations! 
for a, s_newarg in zip(block.inputargs, cells): s_oldarg = self.binding(a) assert annmodel.unionof(s_oldarg, s_newarg) == s_oldarg else: assert not self.frozen if block not in self.annotated: self.bindinputargs(graph, block, cells) else: self.mergeinputargs(graph, block, cells) if not self.annotated[block]: self.pendingblocks[block] = graph def complete_pending_blocks(self): while self.pendingblocks: block, graph = self.pendingblocks.popitem() self.processblock(graph, block) def complete(self): """Process pending blocks until none is left.""" while True: self.complete_pending_blocks() self.policy.no_more_blocks_to_annotate(self) if not self.pendingblocks: break # finished # make sure that the return variables of all graphs is annotated if self.added_blocks is not None: newgraphs = [self.annotated[block] for block in self.added_blocks] newgraphs = dict.fromkeys(newgraphs) got_blocked_blocks = False in newgraphs else: newgraphs = self.translator.graphs #all of them got_blocked_blocks = False in self.annotated.values() if got_blocked_blocks: for graph in self.blocked_graphs.values(): self.blocked_graphs[graph] = True blocked_blocks = [block for block, done in self.annotated.items() if done is False] assert len(blocked_blocks) == len(self.blocked_blocks) text = format_blocked_annotation_error(self, self.blocked_blocks) #raise SystemExit() raise annmodel.AnnotatorError(text) for graph in newgraphs: v = graph.getreturnvar() if v.annotation is None: self.setbinding(v, s_ImpossibleValue) def validate(self): """Check that the annotation results are valid""" self.bookkeeper.check_no_flags_on_instances() def annotation(self, arg): "Gives the SomeValue corresponding to the given Variable or Constant." if isinstance(arg, Variable): return arg.annotation elif isinstance(arg, Constant): return self.bookkeeper.immutablevalue(arg.value) else: raise TypeError('Variable or Constant expected, got %r' % (arg,)) def binding(self, arg): "Gives the SomeValue corresponding to the given Variable or Constant." s_arg = self.annotation(arg) if s_arg is None: raise KeyError return s_arg def typeannotation(self, t): return signature.annotation(t, self.bookkeeper) def setbinding(self, arg, s_value): s_old = arg.annotation if s_old is not None: if not s_value.contains(s_old): log.WARNING("%s does not contain %s" % (s_value, s_old)) log.WARNING("%s" % annmodel.unionof(s_value, s_old)) assert False arg.annotation = s_value def warning(self, msg, pos=None): if pos is None: try: pos = self.bookkeeper.position_key except AttributeError: pos = '?' if pos != '?': pos = self.whereami(pos) log.WARNING("%s/ %s" % (pos, msg)) #___ interface for annotator.bookkeeper _______ def recursivecall(self, graph, whence, inputcells): if isinstance(whence, tuple): parent_graph, parent_block, parent_index = whence tag = parent_block, parent_index self.translator.update_call_graph(parent_graph, graph, tag) # self.notify[graph.returnblock] is a dictionary of call # points to this func which triggers a reflow whenever the # return block of this graph has been analysed. callpositions = self.notify.setdefault(graph.returnblock, {}) if whence is not None: if callable(whence): def callback(): whence(self, graph) else: callback = whence callpositions[callback] = True # generalize the function's input arguments self.addpendingblock(graph, graph.startblock, inputcells) # get the (current) return value v = graph.getreturnvar() try: return self.binding(v) except KeyError: # the function didn't reach any return statement so far. 
# (some functions actually never do, they always raise exceptions) return s_ImpossibleValue def reflowfromposition(self, position_key): graph, block, index = position_key self.reflowpendingblock(graph, block) def call_sites(self): newblocks = self.added_blocks if newblocks is None: newblocks = self.annotated # all of them for block in newblocks: for op in block.operations: if op.opname in ('simple_call', 'call_args'): yield op # some blocks are partially annotated if op.result.annotation is None: break # ignore the unannotated part #___ simplification (should be moved elsewhere?) _______ def simplify(self, block_subset=None, extra_passes=None): # Generic simplifications transform.transform_graph(self, block_subset=block_subset, extra_passes=extra_passes) if block_subset is None: graphs = self.translator.graphs else: graphs = {} for block in block_subset: graph = self.annotated.get(block) if graph: graphs[graph] = True for graph in graphs: simplify.eliminate_empty_blocks(graph) self.bookkeeper.compute_at_fixpoint() if block_subset is None: perform_normalizations(self) #___ flowing annotations in blocks _____________________ def processblock(self, graph, block): # Important: this is not called recursively. # self.flowin() can only issue calls to self.addpendingblock(). # The analysis of a block can be in three states: # * block not in self.annotated: # never seen the block. # * self.annotated[block] == False: # the input variables of the block have bindings but we # still have to consider all the operations in the block. # * self.annotated[block] == graph-containing-block: # analysis done (at least until we find we must generalize the # input variables). #print '* processblock', block, cells self.annotated[block] = graph if block in self.blocked_blocks: del self.blocked_blocks[block] try: self.flowin(graph, block) except BlockedInference as e: self.annotated[block] = False # failed, hopefully temporarily self.blocked_blocks[block] = (graph, e.opindex) except Exception as e: # hack for debug tools only if not hasattr(e, '__annotator_block'): setattr(e, '__annotator_block', block) raise # The dict 'added_blocks' is used by rpython.annlowlevel to # detect which are the new blocks that annotating an additional # small helper creates. if self.added_blocks is not None: self.added_blocks[block] = True def reflowpendingblock(self, graph, block): assert not self.frozen assert graph not in self.fixed_graphs self.pendingblocks[block] = graph assert block in self.annotated self.annotated[block] = False # must re-flow self.blocked_blocks[block] = (graph, None) def bindinputargs(self, graph, block, inputcells): # Create the initial bindings for the input args of a block. assert len(block.inputargs) == len(inputcells) for a, cell in zip(block.inputargs, inputcells): self.setbinding(a, cell) self.annotated[block] = False # must flowin. self.blocked_blocks[block] = (graph, None) def mergeinputargs(self, graph, block, inputcells): # Merge the new 'cells' with each of the block's existing input # variables. 
oldcells = [self.binding(a) for a in block.inputargs] try: unions = [annmodel.unionof(c1,c2) for c1, c2 in zip(oldcells,inputcells)] except annmodel.UnionError as e: # Add source code to the UnionError e.source = '\n'.join(source_lines(graph, block, None, long=True)) raise # if the merged cells changed, we must redo the analysis if unions != oldcells: self.bindinputargs(graph, block, unions) def apply_renaming(self, s_out, renaming): if hasattr(s_out, 'is_type_of'): renamed_is_type_of = [] for v in s_out.is_type_of: renamed_is_type_of += renaming[v] assert s_out.knowntype is type newcell = typeof(renamed_is_type_of) if s_out.is_constant(): newcell.const = s_out.const s_out = newcell if hasattr(s_out, 'knowntypedata'): renamed_knowntypedata = {} for value, constraints in s_out.knowntypedata.items(): renamed_knowntypedata[value] = {} for v, s in constraints.items(): new_vs = renaming.get(v, []) for new_v in new_vs: renamed_knowntypedata[value][new_v] = s assert isinstance(s_out, annmodel.SomeBool) newcell = annmodel.SomeBool() if s_out.is_constant(): newcell.const = s_out.const s_out = newcell s_out.set_knowntypedata(renamed_knowntypedata) return s_out def whereami(self, position_key): graph, block, i = position_key blk = "" if block: at = block.at() if at: blk = " block"+at opid="" if i is not None: opid = " op=%d" % i return repr(graph) + blk + opid def flowin(self, graph, block): try: i = 0 while i < len(block.operations): op = block.operations[i] with self.bookkeeper.at_position((graph, block, i)): new_ops = op.transform(self) if new_ops is not None: block.operations[i:i+1] = new_ops if not new_ops: continue new_ops[-1].result = op.result op = new_ops[0] self.consider_op(op) i += 1 except BlockedInference as e: if e.op is block.raising_op: # this is the case where the last operation of the block will # always raise an exception which is immediately caught by # an exception handler. We then only follow the exceptional # branches. exits = [link for link in block.exits if link.exitcase is not None] elif e.op.opname in ('simple_call', 'call_args', 'next'): # XXX warning, keep the name of the call operations in sync # with the flow object space. These are the operations for # which it is fine to always raise an exception. We then # swallow the BlockedInference and that's it. # About 'next': see test_annotate_iter_empty_container(). return else: # other cases are problematic (but will hopefully be solved # later by reflowing). Throw the BlockedInference up to # processblock(). 
e.opindex = i raise except annmodel.HarmlesslyBlocked: return except annmodel.AnnotatorError as e: # note that UnionError is a subclass e.source = gather_error(self, graph, block, i) raise else: # dead code removal: don't follow all exits if the exitswitch # is known exits = block.exits if isinstance(block.exitswitch, Variable): s_exitswitch = self.binding(block.exitswitch) if s_exitswitch.is_constant(): exits = [link for link in exits if link.exitcase == s_exitswitch.const] if block.canraise: op = block.raising_op s_exception = self.get_exception(op) for link in exits: case = link.exitcase if case is None: self.follow_link(graph, link, {}) continue if s_exception == s_ImpossibleValue: break s_case = SomeInstance(self.bookkeeper.getuniqueclassdef(case)) s_matching_exc = intersection(s_exception, s_case) if s_matching_exc != s_ImpossibleValue: self.follow_raise_link(graph, link, s_matching_exc) s_exception = difference(s_exception, s_case) else: if isinstance(block.exitswitch, Variable): knowntypedata = getattr( block.exitswitch.annotation, "knowntypedata", {}) else: knowntypedata = {} for link in exits: constraints = knowntypedata.get(link.exitcase, {}) self.follow_link(graph, link, constraints) if block in self.notify: # reflow from certain positions when this block is done for callback in self.notify[block]: if isinstance(callback, tuple): self.reflowfromposition(callback) # callback is a position else: callback() def follow_link(self, graph, link, constraints): assert not (isinstance(link.exitcase, (types.ClassType, type)) and issubclass(link.exitcase, BaseException)) ignore_link = False inputs_s = [] renaming = defaultdict(list) for v_out, v_input in zip(link.args, link.target.inputargs): renaming[v_out].append(v_input) for v_out in link.args: s_out = self.annotation(v_out) if v_out in constraints: s_constraint = constraints[v_out] s_out = pair(s_out, s_constraint).improve() # ignore links that try to pass impossible values if s_out == s_ImpossibleValue: ignore_link = True s_out = self.apply_renaming(s_out, renaming) inputs_s.append(s_out) if ignore_link: return self.links_followed[link] = True self.addpendingblock(graph, link.target, inputs_s) def follow_raise_link(self, graph, link, s_last_exc_value): v_last_exc_type = link.last_exception v_last_exc_value = link.last_exc_value assert (isinstance(link.exitcase, (types.ClassType, type)) and issubclass(link.exitcase, BaseException)) assert v_last_exc_type and v_last_exc_value if isinstance(v_last_exc_value, Variable): self.setbinding(v_last_exc_value, s_last_exc_value) if isinstance(v_last_exc_type, Variable): self.setbinding(v_last_exc_type, typeof([v_last_exc_value])) inputs_s = [] renaming = defaultdict(list) for v_out, v_input in zip(link.args, link.target.inputargs): renaming[v_out].append(v_input) for v_out, v_input in zip(link.args, link.target.inputargs): if v_out == v_last_exc_type: s_out = typeof(renaming[v_last_exc_value]) if isinstance(v_last_exc_type, Constant): s_out.const = v_last_exc_type.value elif v_last_exc_type.annotation.is_constant(): s_out.const = v_last_exc_type.annotation.const inputs_s.append(s_out) else: s_out = self.annotation(v_out) s_out = self.apply_renaming(s_out, renaming) inputs_s.append(s_out) self.links_followed[link] = True self.addpendingblock(graph, link.target, inputs_s) #___ creating the annotations based on operations ______ def consider_op(self, op): # let's be careful about avoiding propagated SomeImpossibleValues # to enter an op; the latter can result in violations of the # more general 
results invariant: e.g. if SomeImpossibleValue enters is_ # is_(SomeImpossibleValue, None) -> SomeBool # is_(SomeInstance(not None), None) -> SomeBool(const=False) ... # boom -- in the assert of setbinding() for arg in op.args: if isinstance(self.annotation(arg), annmodel.SomeImpossibleValue): raise BlockedInference(self, op, -1) resultcell = op.consider(self) if resultcell is None: resultcell = s_ImpossibleValue elif resultcell == s_ImpossibleValue: raise BlockedInference(self, op, -1) # the operation cannot succeed assert isinstance(resultcell, annmodel.SomeObject) assert isinstance(op.result, Variable) self.setbinding(op.result, resultcell) # bind resultcell to op.result def get_exception(self, operation): """ Return the annotation for all exceptions that `operation` may raise. """ can_only_throw = operation.get_can_only_throw(self) if can_only_throw is None: return SomeInstance(self.bookkeeper.getuniqueclassdef(Exception)) else: return self.bookkeeper.new_exception(can_only_throw) class BlockedInference(Exception): """This exception signals the type inference engine that the situation is currently blocked, and that it should try to progress elsewhere.""" def __init__(self, annotator, op, opindex): self.annotator = annotator try: self.break_at = annotator.bookkeeper.position_key except AttributeError: self.break_at = None self.op = op self.opindex = opindex def __repr__(self): if not self.break_at: break_at = "?" else: break_at = self.annotator.whereami(self.break_at) return "<BlockedInference break_at %s [%s]>" %(break_at, self.op) __str__ = __repr__
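# --- Hedged usage sketch (editor addition, not part of the original module) ---
# The usual entry point is build_types(): annotate a plain function for concrete
# argument types and read back the inferred annotation of its result. This mirrors
# the pattern used by the annotator's own tests and only runs inside an RPython
# checkout where this module is importable.
if __name__ == '__main__':
    def f(a, b):
        return a + b

    ann = RPythonAnnotator()
    s_result = ann.build_types(f, [int, int])
    print(s_result)   # expected: a SomeInteger annotation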
1.828125
2
app/logic/httpcommon/Page.py
imvu/bluesteel
10
311
""" Page object file """ class Page(): """ Page object, it contains information about the pare we are refering, index, items per page, etc. """ page_index = 0 items_per_page = 0 def __init__(self, items_per_page, page_index): """ Creates the page """ self.page_index = int(page_index) self.items_per_page = int(items_per_page)
1.757813
2
app/api/v1/views/auth_views.py
emdeechege/Questionaire-API
0
319
from flask import jsonify, Blueprint, request, json, make_response
from werkzeug.security import generate_password_hash, check_password_hash
from datetime import datetime

from ..utils.validators import Validation
from ..models.auth_models import Users

v1_auth_blueprint = Blueprint('auth', __name__, url_prefix='/api/v1')

USER = Users()
VALIDATOR = Validation()


@v1_auth_blueprint.route('/signup', methods=['POST'])
def signup():
    """View that controls creation of new users"""
    try:
        data = request.get_json()
    except:
        return jsonify({
            "status": 400,
            "message": "Invalid input"
        }), 400

    firstname = data.get('firstname')
    lastname = data.get('lastname')
    othername = data.get('othername')
    email = data.get('email')
    phone_number = data.get('phone_number')
    username = data.get('username')
    is_admin = data.get('is_admin')
    password = data.get('password')

    if not firstname or not firstname.split():
        return make_response(jsonify({
            "status": 400,
            "message": "Firstname is required"
        })), 400
    if not lastname or not lastname.split():
        return make_response(jsonify({
            "status": 400,
            "message": "Lastname is required"
        })), 400
    if not email or not email.split():
        return make_response(jsonify({
            "status": 400,
            "message": "Email is required"
        })), 400
    if not phone_number:
        return make_response(jsonify({
            "status": 400,
            "message": "Phone number is required"
        })), 400
    if not username or not username.split():
        return make_response(jsonify({
            "status": 400,
            "message": "Username is required"
        })), 400
    if not password or not password.split():
        return make_response(jsonify({
            "status": 400,
            "message": "Password is required"
        })), 400
    if not VALIDATOR.validate_phone_number(phone_number):
        return jsonify({
            "status": 400,
            "message": "Please input valid phone number"
        }), 400
    if VALIDATOR.validate_password(password):
        return jsonify({
            "status": 400,
            "message": "Password not valid"
        }), 400
    if not VALIDATOR.validate_email(email):
        return jsonify({
            "status": 400,
            "message": "Invalid email"
        }), 400
    if VALIDATOR.username_exists(username):
        return jsonify({
            "status": 400,
            "message": "Username exists"
        }), 400
    if VALIDATOR.email_exists(email):
        return jsonify({
            "status": 400,
            "message": "Email exists"
        }), 400

    password = generate_password_hash(
        password, method='pbkdf2:sha256', salt_length=8)

    res = USER.signup(
        firstname, lastname, othername, email, phone_number, username, is_admin, password)

    return jsonify({
        "status": 201,
        "data": [{
            "firstname": firstname,
            "lastname": lastname,
            "othername": othername,
            "email": email,
            "phone_number": phone_number,
            "username": username,
            "is_admin": is_admin
        }]
    }), 201


@v1_auth_blueprint.route('/login', methods=['POST'])
def login():
    """ A view to control users login """
    try:
        data = request.get_json()
    except:
        return make_response(jsonify({
            "status": 400,
            "message": "Wrong input"
        })), 400

    username = data.get('username')
    password = data.get('password')

    if not username:
        return make_response(jsonify({
            "status": 400,
            "message": "Username is required"
        })), 400
    if not password:
        return make_response(jsonify({
            "status": 400,
            "message": "Password is required"
        })), 400
    if not VALIDATOR.username_exists(username):
        return jsonify({
            "status": 404,
            "message": "User does not exist"
        }), 404

    # the module-level Users() instance handles token generation
    auth_token = USER.generate_auth_token(username)
    return make_response(jsonify({
        "status": 200,
        "message": 'Logged in successfully',
        "token": auth_token
    })), 200
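# --- Hedged usage sketch (editor addition, not part of the original module) ---
# Minimal wiring for exercising the signup endpoint with Flask's test client.
# The app created here is an assumption; the real project registers this blueprint
# in its own application setup, and Users()/Validation() need their backing
# storage to be available. All field values are placeholders.
if __name__ == '__main__':
    from flask import Flask

    app = Flask(__name__)
    app.register_blueprint(v1_auth_blueprint)
    client = app.test_client()
    resp = client.post('/api/v1/signup', json={
        "firstname": "Ada", "lastname": "Lovelace", "othername": "",
        "email": "ada@example.com", "phone_number": "0712345678",
        "username": "ada", "is_admin": False, "password": "S3curePass!",
    })
    print(resp.status_code, resp.get_json())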
1.859375
2
iap/validate_jwt.py
spitfire55/python-docs-samples
4
327
# Copyright 2016 Google Inc. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Sample showing how to validate the Identity-Aware Proxy (IAP) JWT. This code should be used by applications in Google Compute Engine-based environments (such as Google App Engine flexible environment, Google Compute Engine, or Google Container Engine) to provide an extra layer of assurance that a request was authorized by IAP. For applications running in the App Engine standard environment, use App Engine's Users API instead. """ # [START iap_validate_jwt] import jwt import requests def validate_iap_jwt_from_app_engine(iap_jwt, cloud_project_number, cloud_project_id): """Validate a JWT passed to your App Engine app by Identity-Aware Proxy. Args: iap_jwt: The contents of the X-Goog-IAP-JWT-Assertion header. cloud_project_number: The project *number* for your Google Cloud project. This is returned by 'gcloud projects describe $PROJECT_ID', or in the Project Info card in Cloud Console. cloud_project_id: The project *ID* for your Google Cloud project. Returns: (user_id, user_email, error_str). """ expected_audience = '/projects/{}/apps/{}'.format( cloud_project_number, cloud_project_id) return _validate_iap_jwt(iap_jwt, expected_audience) def validate_iap_jwt_from_compute_engine(iap_jwt, cloud_project_number, backend_service_id): """Validate an IAP JWT for your (Compute|Container) Engine service. Args: iap_jwt: The contents of the X-Goog-IAP-JWT-Assertion header. cloud_project_number: The project *number* for your Google Cloud project. This is returned by 'gcloud projects describe $PROJECT_ID', or in the Project Info card in Cloud Console. backend_service_id: The ID of the backend service used to access the application. See https://cloud.google.com/iap/docs/signed-headers-howto for details on how to get this value. Returns: (user_id, user_email, error_str). """ expected_audience = '/projects/{}/global/backendServices/{}'.format( cloud_project_number, backend_service_id) return _validate_iap_jwt(iap_jwt, expected_audience) def _validate_iap_jwt(iap_jwt, expected_audience): try: key_id = jwt.get_unverified_header(iap_jwt).get('kid') if not key_id: return (None, None, '**ERROR: no key ID**') key = get_iap_key(key_id) decoded_jwt = jwt.decode( iap_jwt, key, algorithms=['ES256'], audience=expected_audience) return (decoded_jwt['sub'], decoded_jwt['email'], '') except (jwt.exceptions.InvalidTokenError, requests.exceptions.RequestException) as e: return (None, None, '**ERROR: JWT validation error {}**'.format(e)) def get_iap_key(key_id): """Retrieves a public key from the list published by Identity-Aware Proxy, re-fetching the key file if necessary. """ key_cache = get_iap_key.key_cache key = key_cache.get(key_id) if not key: # Re-fetch the key file. 
resp = requests.get( 'https://www.gstatic.com/iap/verify/public_key') if resp.status_code != 200: raise Exception( 'Unable to fetch IAP keys: {} / {} / {}'.format( resp.status_code, resp.headers, resp.text)) key_cache = resp.json() get_iap_key.key_cache = key_cache key = key_cache.get(key_id) if not key: raise Exception('Key {!r} not found'.format(key_id)) return key # Used to cache the Identity-Aware Proxy public keys. This code only # refetches the file when a JWT is signed with a key not present in # this cache. get_iap_key.key_cache = {} # [END iap_validate_jwt]
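# --- Hedged usage sketch (editor addition, not part of the original sample) ---
# How a request handler might consume the helpers above. The header name is the
# documented IAP assertion header; the project number and ID values are placeholders.
PROJECT_NUMBER = '123456789012'   # placeholder
PROJECT_ID = 'my-project-id'      # placeholder


def check_iap_request(headers):
    """Return (user_id, user_email) or raise if the IAP assertion is invalid."""
    iap_jwt = headers.get('X-Goog-IAP-JWT-Assertion', '')
    user_id, user_email, error = validate_iap_jwt_from_app_engine(
        iap_jwt, PROJECT_NUMBER, PROJECT_ID)
    if error:
        raise PermissionError(error)
    return user_id, user_email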
1.765625
2
projects/eyetracking/gen_adhd_sin.py
nirdslab/streaminghub
0
335
#!/usr/bin/env python3

import glob
import os

import pandas as pd

import dfs

SRC_DIR = f"{dfs.get_data_dir()}/adhd_sin_orig"
OUT_DIR = f"{dfs.get_data_dir()}/adhd_sin"

if __name__ == '__main__':
    files = glob.glob(f"{SRC_DIR}/*.csv")
    file_names = list(map(os.path.basename, files))
    for file_name in file_names:
        df: pd.DataFrame = pd.read_csv(f'{SRC_DIR}/{file_name}').set_index('EyeTrackerTimestamp').sort_index()[
            ['GazePointX (ADCSpx)', 'GazePointY (ADCSpx)', 'PupilLeft', 'PupilRight']].reset_index()
        df.columns = ['t', 'x', 'y', 'dl', 'dr']
        # fill blanks (order=interpolate(inter)->bfill+ffill(edges))->zerofill
        df = df.apply(lambda x: x.interpolate().fillna(method="bfill").fillna(method="ffill")).fillna(0)
        df['x'] = df['x'] / 1920
        df['y'] = df['y'] / 1080
        df['d'] = (df['dl'] + df['dr']) / 2
        # start with t=0, and set unit to ms
        df['t'] = (df['t'] - df['t'].min()) / 1000
        df = df[['t', 'x', 'y', 'd']].round(6).set_index('t')
        df.to_csv(f'{OUT_DIR}/{file_name}')
        print(f'Processed: {file_name}')
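# --- Hedged illustration (editor addition, not part of the original script) ---
# What the interpolate -> bfill -> ffill -> fillna(0) chain above does to a column
# with leading, interior, and trailing gaps:
#   pd.Series([None, 1.0, None, 3.0, None])
#   -> interpolate():   [NaN, 1.0, 2.0, 3.0, 3.0]   (interior gap interpolated, trailing gap carried forward)
#   -> fillna('bfill'): [1.0, 1.0, 2.0, 3.0, 3.0]   (leading edge backfilled)
#   -> outer fillna(0): only affects columns that are entirely empty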
1.695313
2
Projects/DeepLearningTechniques/MobileNet_v2/tiny_imagenet/data_loader.py
Tim232/Python-Things
2
359
import os
import re

import numpy as np
import tensorflow as tf  # used throughout below; may also be re-exported by the constants star-import

from Projects.DeepLearningTechniques.MobileNet_v2.tiny_imagenet.constants import *


class DataLoader:
    # todo train/test/validation => (500/50/50 per class)

    def __init__(self):
        self.image_width = flags.FLAGS.image_width
        self.image_height = flags.FLAGS.image_height
        self.batch_size = flags.FLAGS.batch_size
        self.data_path = flags.FLAGS.data_path
        self.img_reg = re.compile('.*\\.jpeg', re.IGNORECASE)

        self.init_class()
        self.init_annotation()

    def init_class(self):
        self.cls = {}
        for idx, dir in enumerate(os.listdir(os.path.join(self.data_path, 'train'))):
            self.cls[dir] = idx

    def init_annotation(self):
        self.anno = {}
        for line in open(os.path.join(self.data_path, 'val', 'val_annotations.txt')):
            filename, label, *_ = line.split('\t')
            self.anno[filename] = label

    def init_train(self):
        train_x, train_y = [], []
        for (path, dirs, files) in os.walk(os.path.join(self.data_path, 'train')):
            for file in files:
                if self.img_reg.match(file):
                    train_x.append(os.path.join(path, file))
                    train_y.append(self.cls[re.match('(.+)\\_\d+\\.jpeg', file, re.IGNORECASE).group(1)])

        self.train_len = len(train_y)

        # todo shuffle the training data
        random_sort = np.random.permutation(self.train_len)
        train_x, train_y = np.asarray(train_x, dtype=np.string_)[random_sort], np.asarray(train_y, dtype=np.int64)[random_sort]

        # todo convert (Numpy / List) => Tensor
        with tf.variable_scope(name_or_scope='data_tensor'):
            self.train_x = tf.convert_to_tensor(value=train_x, dtype=tf.string, name='train_x')
            self.train_y = tf.convert_to_tensor(value=train_y, dtype=tf.int64, name='train_y')

    def init_validation(self):
        valid_x, valid_y = [], []
        for (path, dirs, files) in os.walk(os.path.join(self.data_path, 'val')):
            for file in files:
                if self.img_reg.match(file):
                    valid_x.append(os.path.join(path, file))
                    valid_y.append(self.cls[self.anno[file]])

        self.valid_len = len(valid_y)

        # todo shuffle the validation data
        random_sort = np.random.permutation(self.valid_len)
        valid_x, valid_y = np.asarray(valid_x, dtype=np.string_)[random_sort], np.asarray(valid_y, dtype=np.int64)[random_sort]

        # todo convert (Numpy / List) -> Tensor
        with tf.variable_scope(name_or_scope='data_tensor'):
            self.valid_x = tf.convert_to_tensor(value=valid_x, dtype=tf.string, name='valid_x')
            self.valid_y = tf.convert_to_tensor(value=valid_y, dtype=tf.int64, name='valid_y')

    def init_test(self):
        test_x = []
        for (path, dirs, files) in os.walk(os.path.join(self.data_path, 'test')):
            for file in files:
                test_x.append(os.path.join(path, file))

        self.test_len = len(test_x)

        # todo convert (Numpy / List) -> Tensor
        with tf.variable_scope(name_or_scope='data_tensor'):
            self.test_x = tf.convert_to_tensor(value=test_x, dtype=tf.string, name='test_x')

    def train_normal(self, x, y):
        with tf.variable_scope(name_or_scope='train_normal'):
            x = tf.read_file(filename=x)
            x = tf.image.decode_png(contents=x, channels=3, name='decode_png')
            x = tf.divide(tf.cast(x, tf.float32), 255.)
            x = tf.subtract(x, [0.4921, 0.4833, 0.4484])
            x = tf.divide(x, [0.2465, 0.2431, 0.2610])
        return x, y

    def train_random_crop(self, x, y):
        with tf.variable_scope(name_or_scope='train_random_crop'):
            x = tf.read_file(filename=x)
            x = tf.image.decode_png(contents=x, channels=3, name='decode_png')
            # pad height and width of the HWC image by 4 px on each side before cropping
            x = tf.pad(x, [[4, 4], [4, 4], [0, 0]], name='padding')
            # x = tf.image.resize_images(images=x, size=(self.image_height+8, self.image_width+8), method=tf.image.ResizeMethod.NEAREST_NEIGHBOR)
            x = tf.random_crop(value=x, size=(self.image_height, self.image_width, 3))
            x = tf.divide(tf.cast(x, tf.float32), 255.)
            x = tf.subtract(x, [0.4921, 0.4833, 0.4484])
            x = tf.divide(x, [0.2465, 0.2431, 0.2610])
        return x, y

    def valid_normal(self, x, y):
        with tf.variable_scope(name_or_scope='valid_normal'):
            x = tf.read_file(filename=x)
            x = tf.image.decode_png(contents=x, channels=3, name='decode_png')
            x = tf.divide(tf.cast(x, tf.float32), 255.)
            x = tf.subtract(x, [0.4921, 0.4833, 0.4484])
            x = tf.divide(x, [0.2465, 0.2431, 0.2610])
        return x, y

    def test_normal(self, x):
        with tf.variable_scope(name_or_scope='test_normal'):
            x = tf.read_file(filename=x)
            x = tf.image.decode_png(contents=x, channels=3, name='decode_png')
            x = tf.divide(tf.cast(x, tf.float32), 255.)
            x = tf.subtract(x, [0.4921, 0.4833, 0.4484])
            x = tf.divide(x, [0.2465, 0.2431, 0.2610])
        return x

    def dataset_batch_loader(self, dataset, ref_func, name):
        with tf.variable_scope(name_or_scope=name):
            dataset_map = dataset.map(ref_func).batch(self.batch_size)
            iterator = dataset_map.make_one_shot_iterator()
            batch_input = iterator.get_next()
        return batch_input

    def train_loader(self):
        with tf.variable_scope('train_loader'):
            '''
            repeat(): starts the dataset over from the beginning once its end is reached
            shuffle(): randomly reorders the dataset (if the buffer size passed in is larger
                       than the total number of examples, the whole dataset is shuffled)
            '''
            dataset = tf.data.Dataset.from_tensor_slices((self.train_x, self.train_y)).repeat()

            normal_batch = self.dataset_batch_loader(dataset, self.train_normal, name='normal_batch')
            random_crop_batch = self.dataset_batch_loader(dataset, self.train_random_crop, name='random_crop_batch')

        return normal_batch, random_crop_batch

    def valid_loader(self):
        with tf.variable_scope('valid_loader'):
            dataset = tf.data.Dataset.from_tensor_slices((self.valid_x, self.valid_y)).repeat()

            normal_batch = self.dataset_batch_loader(dataset, self.valid_normal, name='normal_batch')

        return normal_batch

    def test_loader(self):
        with tf.variable_scope('test_loader'):
            dataset = tf.data.Dataset.from_tensor_slices(self.test_x).repeat()

            normal_batch = self.dataset_batch_loader(dataset, self.test_normal, name='normal_batch')

        return normal_batch
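# --- Hedged usage sketch (editor addition, not part of the original module) ---
# Typical graph-mode wiring; the flag definitions live in the project's constants
# module, so concrete values are assumed to be set there. make_one_shot_iterator()
# and Session imply TF 1.x, matching the code above.
if __name__ == '__main__':
    loader = DataLoader()
    loader.init_train()
    normal_batch, random_crop_batch = loader.train_loader()
    with tf.Session() as sess:
        images, labels = sess.run(normal_batch)
        print(images.shape, labels.shape)   # (batch_size, H, W, 3) and (batch_size,)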
1.960938
2
src/telr/TELR_assembly.py
dominik-handler/TELR
22
391
import sys import os import subprocess import shutil import time import logging from Bio import SeqIO from multiprocessing import Pool import pysam from telr.TELR_utility import mkdir, check_exist, format_time def get_local_contigs( assembler, polisher, contig_dir, vcf_parsed, out, sample_name, bam, raw_reads, thread, presets, polish_iterations, ): """Perform local assembly using reads from parsed VCF file in parallel""" # Prepare reads used for local assembly and polishing sv_reads_dir = os.path.join(out, "sv_reads") try: prep_assembly_inputs( vcf_parsed, out, sample_name, bam, raw_reads, sv_reads_dir, read_type="sv" ) except Exception as e: print(e) print("Prepare local assembly input data failed, exiting...") sys.exit(1) mkdir(contig_dir) k = 0 asm_pa_list = [] with open(vcf_parsed, "r") as input: for line in input: entry = line.replace("\n", "").split("\t") contig_name = "_".join([entry[0], entry[1], entry[2]]) # rename variant reads sv_reads = sv_reads_dir + "/contig" + str(k) sv_reads_rename = sv_reads_dir + "/" + contig_name + ".reads.fa" os.rename(sv_reads, sv_reads_rename) thread_asm = 1 asm_pa = [ sv_reads_rename, contig_dir, contig_name, thread_asm, presets, assembler, polisher, polish_iterations, ] asm_pa_list.append(asm_pa) k = k + 1 # run assembly in parallel logging.info("Perform local assembly of non-reference TE loci...") start_time = time.time() try: pool = Pool(processes=thread) contig_list = pool.map(run_assembly_polishing, asm_pa_list) pool.close() pool.join() except Exception as e: print(e) print("Local assembly failed, exiting...") sys.exit(1) proc_time = time.time() - start_time # merge all contigs assembly_passed_loci = set() merged_contigs = os.path.join(out, sample_name + ".contigs.fa") with open(merged_contigs, "w") as merged_output_handle: for contig in contig_list: if check_exist(contig): contig_name = os.path.basename(contig).replace(".cns.fa", "") assembly_passed_loci.add(contig_name) parsed_contig = os.path.join(contig_dir, contig_name + ".cns.ctg1.fa") with open(contig, "r") as input: records = SeqIO.parse(input, "fasta") for record in records: if record.id == "ctg1" or record.id == "contig_1": record.id = contig_name record.description = "len=" + str(len(record.seq)) SeqIO.write(record, merged_output_handle, "fasta") with open(parsed_contig, "w") as parsed_output_handle: SeqIO.write(record, parsed_output_handle, "fasta") logging.info("Local assembly finished in " + format_time(proc_time)) return merged_contigs, assembly_passed_loci def run_assembly_polishing(args): reads = args[0] asm_dir = args[1] contig_name = args[2] thread = args[3] presets = args[4] assembler = args[5] polisher = args[6] polish_iterations = args[7] # run assembly if assembler == "wtdbg2": asm_cns = run_wtdbg2_assembly(reads, asm_dir, contig_name, thread, presets) else: asm_cns = run_flye_assembly(reads, asm_dir, contig_name, thread, presets) if not check_exist(asm_cns): print("assembly failed") return None # run polishing if polish_iterations > 0: if polisher == "wtdbg2": asm_cns = run_wtdbg2_polishing( asm_cns, reads, thread, polish_iterations, presets ) else: asm_cns = run_flye_polishing( asm_cns, reads, asm_dir, contig_name, thread, polish_iterations, presets ) if check_exist(asm_cns): return asm_cns else: return None def run_flye_polishing( asm_cns, reads, asm_dir, contig_name, thread, polish_iterations, presets ): """Run Flye polishing""" if presets == "pacbio": presets_flye = "--pacbio-raw" else: presets_flye = "--nano-raw" tmp_out_dir = os.path.join(asm_dir, contig_name) 
mkdir(tmp_out_dir) try: subprocess.call( [ "flye", "--polish-target", asm_cns, presets_flye, reads, "--out-dir", tmp_out_dir, "--thread", str(thread), "--iterations", str(polish_iterations), ] ) except Exception as e: print(e) print("Polishing failed, exiting...") return None # rename contig file polished_contig = os.path.join( tmp_out_dir, "polished_" + str(polish_iterations) + ".fasta" ) if check_exist(polished_contig): os.rename(polished_contig, asm_cns) shutil.rmtree(tmp_out_dir) return asm_cns else: return None def run_wtdbg2_polishing(asm_cns, reads, threads, polish_iterations, presets): """Run wtdbg2 polishing""" if presets == "pacbio": presets_minimap2 = "map-pb" else: presets_minimap2 = "map-ont" # polish consensus threads = str(min(threads, 4)) bam = asm_cns + ".bam" k = 0 while True: # align reads to contigs command = ( "minimap2 -t " + threads + " -ax " + presets_minimap2 + " -r2k " + asm_cns + " " + reads + " | samtools sort -@" + threads + " > " + bam ) try: subprocess.run( command, shell=True, timeout=300, stdout=subprocess.DEVNULL, stderr=subprocess.STDOUT, ) except subprocess.TimeoutExpired: print("fail to map reads to contig: " + asm_cns) return # run wtpoa-cns to get polished contig cns_tmp = asm_cns + ".tmp" command = ( "samtools view -F0x900 " + bam + " | wtpoa-cns -t " + threads + " -d " + asm_cns + " -i - -fo " + cns_tmp ) try: subprocess.run( command, shell=True, timeout=300, stdout=subprocess.DEVNULL, stderr=subprocess.STDOUT, ) except subprocess.TimeoutExpired: print("fail to polish contig: " + asm_cns) return if check_exist(cns_tmp): os.rename(cns_tmp, asm_cns) os.remove(bam) else: break k = k + 1 if k >= polish_iterations: break if check_exist(asm_cns): return asm_cns else: print("polishing failed for " + asm_cns + "\n") return None def run_flye_assembly(sv_reads, asm_dir, contig_name, thread, presets): """Run Flye assembly""" if presets == "pacbio": presets_flye = "--pacbio-raw" else: presets_flye = "--nano-raw" tmp_out_dir = os.path.join(asm_dir, contig_name) mkdir(tmp_out_dir) try: subprocess.call( [ "flye", presets_flye, sv_reads, "--out-dir", tmp_out_dir, "--thread", str(thread), "--iterations", "0", ] ) except Exception as e: print(e) print("Assembly failed, exiting...") return # rename contigs contig_path = os.path.join(tmp_out_dir, "assembly.fasta") contig_path_new = os.path.join(asm_dir, contig_name + ".cns.fa") if check_exist(contig_path): os.rename(contig_path, contig_path_new) # remove tmp files shutil.rmtree(tmp_out_dir) return contig_path_new else: print("assembly failed") return None def run_wtdbg2_assembly(sv_reads, asm_dir, contig_name, thread, presets): """Run wtdbg2 assembly""" if presets == "pacbio": presets_wtdbg2 = "rs" else: presets_wtdbg2 = "ont" prefix = sv_reads.replace(".reads.fa", "") try: subprocess.run( [ "wtdbg2", "-x", presets_wtdbg2, "-q", "-AS", "1", "-g", "30k", "-t", str(thread), "-i", sv_reads, "-fo", prefix, ], timeout=300, ) except subprocess.TimeoutExpired: print("fail to build contig layout for contig: " + contig_name) return except Exception as e: print(e) print("wtdbg2 failed, exiting...") return None # derive consensus contig_layout = prefix + ".ctg.lay.gz" if check_exist(contig_layout): cns_thread = str(min(thread, 4)) consensus = prefix + ".cns.fa" try: subprocess.run( [ "wtpoa-cns", "-q", "-t", cns_thread, "-i", contig_layout, "-fo", consensus, ], timeout=300, ) except subprocess.TimeoutExpired: print("fail to assemble contig: " + contig_name) return None if check_exist(consensus): consensus_rename = 
os.path.join(asm_dir, contig_name + ".cns.fa") os.rename(consensus, consensus_rename) return consensus_rename else: return None def prep_assembly_inputs( vcf_parsed, out, sample_name, bam, raw_reads, reads_dir, read_type="sv" ): """Prepare reads for local assembly""" # logging.info("Prepare reads for local assembly") if read_type == "sv": # TODO: figure out what this does # extract read IDs read_ids = os.path.join(out, sample_name + ".id") with open(vcf_parsed, "r") as input, open(read_ids, "w") as output: for line in input: entry = line.replace("\n", "").split("\t") read_list = entry[8].split(",") for read in read_list: output.write(read + "\n") else: # TODO: think about using this for assembly, filter for cigar reads window = 1000 samfile = pysam.AlignmentFile(bam, "rb") read_ids = os.path.join(out, sample_name + ".id") vcf_parsed_new = vcf_parsed + ".new" with open(vcf_parsed, "r") as input, open(read_ids, "w") as output, open( vcf_parsed_new, "w" ) as VCF: for line in input: entry = line.replace("\n", "").split("\t") # get sniffles read list read_list = entry[8].split(",") reads_sniffles = set(read_list) ins_chr = entry[0] ins_breakpoint = round((int(entry[1]) + int(entry[2])) / 2) start = ins_breakpoint - window end = ins_breakpoint + window reads = set() # coverage = 0 for read in samfile.fetch(ins_chr, start, end): reads.add(read.query_name) for read in reads: output.write(read + "\n") # write out_line = line.replace("\n", "") + "\t" + str(len(reads)) VCF.write(out_line + "\n") vcf_parsed = vcf_parsed_new # generate unique ID list read_ids_unique = read_ids + ".unique" command = "cat " + read_ids + " | sort | uniq" with open(read_ids_unique, "w") as output: subprocess.call(command, stdout=output, shell=True) # filter raw reads using read list subset_fa = os.path.join(out, sample_name + ".subset.fa") command = "seqtk subseq " + raw_reads + " " + read_ids_unique + " | seqtk seq -a" with open(subset_fa, "w") as output: subprocess.call(command, stdout=output, shell=True) # reorder reads subset_fa_reorder = out + "/" + sample_name + ".subset.reorder.fa" extract_reads(subset_fa, read_ids, subset_fa_reorder) # separate reads into multiple files, using csplit mkdir(reads_dir) csplit_prefix = reads_dir + "/contig" m = [] k = 1 with open(vcf_parsed, "r") as input: for line in input: entry = line.replace("\n", "").split("\t") if read_type == "sv": k = k + 2 * (len(entry[8].split(","))) else: k = k + 2 * int(entry[14]) m.append(k) if len(m) == 1: subprocess.call(["cp", subset_fa_reorder, reads_dir + "/contig0"]) elif len(m) == 0: print("No insertion detected, exiting...") else: m = m[:-1] index = " ".join(str(i) for i in m) command = ( "csplit -s -f " + csplit_prefix + " -n 1 " + subset_fa_reorder + " " + index ) subprocess.call(command, shell=True) # remove tmp files os.remove(read_ids) os.remove(read_ids_unique) os.remove(subset_fa) os.remove(subset_fa_reorder) def extract_reads(reads, list, out): """Extract reads from fasta using read ID list""" record_dict = SeqIO.index(reads, "fasta") with open(out, "wb") as output_handle, open(list, "r") as ID: for entry in ID: entry = entry.replace("\n", "") output_handle.write(record_dict.get_raw(entry))
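# --- Hedged usage sketch (editor addition, not part of the original module) ---
# extract_reads() copies the FASTA records whose IDs are listed, one per line, in a
# text file. The file names below are placeholders.
if __name__ == '__main__':
    extract_reads("sample.subset.fa",
                  "contig_1_1000_2000.ids",
                  "contig_1_1000_2000.reads.fa")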
1.554688
2
bcloud-snap/bcloud-3.9.1/bcloud/hasher.py
jiaxiaolei/my_snap_demo
0
399
# Copyright (C) 2014-2015 LiuLang <<EMAIL>> # Use of this source code is governed by GPLv3 license that can be found # in http://www.gnu.org/licenses/gpl-3.0.html import hashlib import os import zlib CHUNK = 2 ** 20 def crc(path): _crc = 0 fh = open(path, 'rb') while True: chunk = fh.read(CHUNK) if not chunk: break _crc = zlib.crc32(chunk, _crc) fh.close() return '%X' % (_crc & 0xFFFFFFFF) def md5(path, start=0, stop=-1): _md5 = hashlib.md5() fh = open(path, 'rb') if start > 0: fh.seek(start) if stop == -1: stop = os.path.getsize(path) pos = start while pos < stop: size = min(CHUNK, stop - pos) chunk = fh.read(size) if not chunk: break pos += len(chunk) _md5.update(chunk) fh.close() return _md5.hexdigest() def sha1(path): _sha1 = hashlib.sha1() fh = open(path, 'rb') while True: chunk = fh.read(CHUNK) if not chunk: break _sha1.update(chunk) fh.close() return _sha1.hexdigest() def sha224(path): _sha224 = hashlib.sha224() fh = open(path, 'rb') while True: chunk = fh.read(CHUNK) if not chunk: break _sha224.update(chunk) fh.close() return _sha224.hexdigest() def sha256(path): _sha256 = hashlib.sha256() fh = open(path, 'rb') while True: chunk = fh.read(CHUNK) if not chunk: break _sha256.update(chunk) fh.close() return _sha256.hexdigest() def sha384(path): _sha384 = hashlib.sha384() fh = open(path, 'rb') while True: chunk = fh.read(CHUNK) if not chunk: break _sha384.update(chunk) fh.close() return _sha384.hexdigest() def sha512(path): _sha512 = hashlib.sha512() fh = open(path, 'rb') while True: chunk = fh.read(CHUNK) if not chunk: break _sha512.update(chunk) fh.close() return _sha512.hexdigest()
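# --- Hedged usage sketch (editor addition, not part of the original module) ---
# The start/stop form of md5() is handy for verifying a single slice of a chunked
# upload. The path below is a placeholder.
if __name__ == '__main__':
    path = '/tmp/example.bin'          # placeholder file
    print('crc32      :', crc(path))
    print('md5 (full) :', md5(path))
    print('md5 (2nd MiB slice):', md5(path, start=CHUNK, stop=2 * CHUNK))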
1.84375
2
python/re_user.py
seckcoder/lang-learn
1
407
#!/usr/bin/env python #-*- coding=utf-8 -*- # # Copyright 2012 Jike Inc. All Rights Reserved. # Author: <EMAIL> import re from urlparse import urlparse def parse1(): p = re.compile(r"/(?P<uid>\d+)/(?P<mid>\w+)") o = urlparse("http://weibo.com/2827699110/yz62AlEjF") m = p.search(o.path) print m.group('uid') print m.group('mid') def parse2(): exc_type_str = "<type 'exceptions.IndexError'>" parse1()
1.375
1
MAIN/Screens/Settings/category_2/__init__.py
aragubas/fogoso
0
431
#!/usr/bin/python3.7 # Copyright 2020 Aragubas # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # # # -- Imports -- # from ENGINE import APPDATA as reg from ENGINE import UTILS as utils import ENGINE as tge from Fogoso.MAIN import ClassesUtils as gameObjs from Fogoso import MAIN as gameMain import pygame, sys import importlib import time from random import randint OptionsScreen_DebugModeEnabled = gameObjs.UpDownButton OptionsScreen_RandomWindowTitle = gameObjs.UpDownButton OptionsScreen_NumberFormatting = gameObjs.UpDownButton ElementsX = 0 ElementsY = 0 def Initialize(): global OptionsScreen_DebugModeEnabled global OptionsScreen_RandomWindowTitle global OptionsScreen_NumberFormatting OptionsScreen_DebugModeEnabled = gameObjs.UpDownButton(0,0,14) OptionsScreen_RandomWindowTitle = gameObjs.UpDownButton(0,0,14) OptionsScreen_NumberFormatting = gameObjs.UpDownButton(0,0,14) def Update(): global OptionsScreen_DebugModeEnabled global OptionsScreen_RandomWindowTitle global OptionsScreen_NumberFormatting global ElementsX global ElementsY if OptionsScreen_DebugModeEnabled .ButtonState == 2 or OptionsScreen_DebugModeEnabled.ButtonState == 1: current_val = gameMain.DefaultCnt.Get_RegKey("/OPTIONS/debug_enabled", bool) if current_val: gameMain.DefaultCnt.Write_RegKey("/OPTIONS/debug_enabled", "False") if not current_val: gameMain.DefaultCnt.Write_RegKey("/OPTIONS/debug_enabled", "True") if OptionsScreen_RandomWindowTitle .ButtonState == 2 or OptionsScreen_RandomWindowTitle.ButtonState == 1: current_val = gameMain.DefaultCnt.Get_RegKey("/OPTIONS/random_title", bool) if current_val: gameMain.DefaultCnt.Write_RegKey("/OPTIONS/random_title", "False") if not current_val: gameMain.DefaultCnt.Write_RegKey("/OPTIONS/random_title", "True") if OptionsScreen_NumberFormatting .ButtonState == 2 or OptionsScreen_NumberFormatting.ButtonState == 1: current_val = gameMain.DefaultCnt.Get_RegKey("/OPTIONS/format_numbers", bool) if current_val: gameMain.DefaultCnt.Write_RegKey("/OPTIONS/format_numbers", "False") if not current_val: gameMain.DefaultCnt.Write_RegKey("/OPTIONS/format_numbers", "True") OptionsScreen_DebugModeEnabled.Set_X(ElementsX + 20) OptionsScreen_RandomWindowTitle.Set_X(ElementsX + 20) OptionsScreen_NumberFormatting.Set_X(ElementsX + 20) OptionsScreen_DebugModeEnabled.Set_Y(ElementsY + 50) OptionsScreen_RandomWindowTitle.Set_Y(ElementsY + 75) OptionsScreen_NumberFormatting.Set_Y(ElementsY + 100) def Render(DISPLAY): global OptionsScreen_DebugModeEnabled global OptionsScreen_RandomWindowTitle global OptionsScreen_NumberFormatting OptionsScreen_DebugModeEnabled.Render(DISPLAY) OptionsScreen_RandomWindowTitle.Render(DISPLAY) OptionsScreen_NumberFormatting.Render(DISPLAY) # -- Debug Mode -- # gameMain.DefaultCnt.FontRender(DISPLAY, "/PressStart2P.ttf", 14, gameMain.DefaultCnt.Get_RegKey("/strings/settings/debug_mode") + str(gameMain.DefaultCnt.Get_RegKey("/OPTIONS/debug_enabled")), (240, 240, 240), ElementsX + 95, ElementsY + 52, gameMain.DefaultCnt.Get_RegKey("/OPTIONS/font_aa")) # -- Random Title -- # 
gameMain.DefaultCnt.FontRender(DISPLAY, "/PressStart2P.ttf", 14, gameMain.DefaultCnt.Get_RegKey("/strings/settings/random_title") + str(gameMain.DefaultCnt.Get_RegKey("/OPTIONS/random_title")), (240, 240, 240), ElementsX + 95, ElementsY + 77, gameMain.DefaultCnt.Get_RegKey("/OPTIONS/font_aa")) # -- Number Formatting -- # gameMain.DefaultCnt.FontRender(DISPLAY, "/PressStart2P.ttf", 14, gameMain.DefaultCnt.Get_RegKey("/strings/settings/number_formatting") + str(gameMain.DefaultCnt.Get_RegKey("/OPTIONS/format_numbers")), (240, 240, 240), ElementsX + 95, ElementsY + 102, gameMain.DefaultCnt.Get_RegKey("/OPTIONS/font_aa")) def EventUpdate(event): global OptionsScreen_DebugModeEnabled global OptionsScreen_RandomWindowTitle global OptionsScreen_NumberFormatting OptionsScreen_DebugModeEnabled.Update(event) OptionsScreen_RandomWindowTitle.Update(event) OptionsScreen_NumberFormatting.Update(event)
1.226563
1
tests/integration/test_cmk_describe.py
oglok/CPU-Manager-for-Kubernetes
0
439
# Copyright (c) 2017 Intel Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from .. import helpers from . import integration def test_cmk_describe_ok(): args = ["describe", "--conf-dir={}".format(helpers.conf_dir("ok"))] assert helpers.execute(integration.cmk(), args) == b"""{ "path": "/cmk/tests/data/config/ok", "pools": { "exclusive": { "cpuLists": { "4,12": { "cpus": "4,12", "tasks": [ 2000 ] }, "5,13": { "cpus": "5,13", "tasks": [ 2001 ] }, "6,14": { "cpus": "6,14", "tasks": [ 2002 ] }, "7,15": { "cpus": "7,15", "tasks": [ 2003 ] } }, "exclusive": true, "name": "exclusive" }, "infra": { "cpuLists": { "0-2,8-10": { "cpus": "0-2,8-10", "tasks": [ 3000, 3001, 3002 ] } }, "exclusive": false, "name": "infra" }, "shared": { "cpuLists": { "3,11": { "cpus": "3,11", "tasks": [ 1000, 1001, 1002, 1003 ] } }, "exclusive": false, "name": "shared" } } } """ def test_cmk_describe_minimal(): args = ["describe", "--conf-dir={}".format(helpers.conf_dir("minimal"))] assert helpers.execute(integration.cmk(), args) == b"""{ "path": "/cmk/tests/data/config/minimal", "pools": { "exclusive": { "cpuLists": { "0": { "cpus": "0", "tasks": [] } }, "exclusive": true, "name": "exclusive" }, "shared": { "cpuLists": { "0": { "cpus": "0", "tasks": [] } }, "exclusive": false, "name": "shared" } } } """
1.289063
1
vimfiles/bundle/vim-python/submodules/pylint/tests/functional/s/super/super_with_arguments.py
ciskoinch8/vimrc
463
447
class Foo:
    pass


class Bar(Foo):
    def __init__(self):
        super(Bar, self).__init__()  # [super-with-arguments]


class Baz(Foo):
    def __init__(self):
        super().__init__()


class Qux(Foo):
    def __init__(self):
        super(Bar, self).__init__()


class NotSuperCall(Foo):
    def __init__(self):
        super.test(Bar, self).__init__()


class InvalidSuperCall(Foo):
    def __init__(self):
        super(InvalidSuperCall.__class__, self).__init__()


def method_accepting_cls(cls, self):
    # Using plain `super()` is not valid here, since there's no `__class__` cell found
    # (Exact exception would be 'RuntimeError: super(): __class__ cell not found')
    # Instead, we expect to *not* see a warning about `super-with-arguments`.
    # Explicitly passing `cls`, and `self` to `super()` is what's required.
    super(cls, self).__init__()
1.539063
2
betterloader/standard_transforms.py
BinItAI/BetterLoader
39
455
import numpy as np
from torchvision import transforms

np.random.seed(1)


class TransformWhileSampling(object):
    def __init__(self, transform):
        self.transform = transform

    def __call__(self, sample):
        x1 = self.transform(sample)
        x2 = self.transform(sample)
        return x1, x2
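# --- Hedged usage sketch (editor addition, not part of the original module) ---
# TransformWhileSampling applies the same augmentation pipeline twice and returns
# two independently augmented views of one sample (the contrastive-learning
# pattern). The pipeline below is an assumption, not a library default.
if __name__ == '__main__':
    from PIL import Image

    base = transforms.Compose([
        transforms.RandomResizedCrop(224),
        transforms.RandomHorizontalFlip(),
        transforms.ToTensor(),
    ])
    twin = TransformWhileSampling(base)
    img = Image.new('RGB', (256, 256))        # dummy stand-in for a real image
    view1, view2 = twin(img)
    print(view1.shape, view2.shape)           # two tensors of shape [3, 224, 224]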
1.398438
1
examples/my_model_test.py
gzpyy/qlib
0
463
#encoding=utf-8 import qlib import pandas as pd import pickle import xgboost as xgb import numpy as np import re from qlib.constant import REG_US from qlib.utils import exists_qlib_data, init_instance_by_config from qlib.workflow import R from qlib.workflow.record_temp import SignalRecord, PortAnaRecord from qlib.utils import flatten_dict from qlib.data import LocalExpressionProvider from qlib.data.ops import Operators, OpsList from qlib.data.base import Feature from pyecharts import options as opts from pyecharts.charts import Kline, Line, Grid from my_data_handler import MyAlphaHandler # model_file = r'.\mlruns\1\d6536b056ba84a74be6b33971f443cf6\artifacts\trained_model' model_file = r'.\mlruns\1\148ef1cd7acd48deac3eadc339ad3008\artifacts\trained_model' with open(model_file, 'rb') as fi: model = pickle.load(fi) exprs, columns = MyAlphaHandler.get_custom_config() raw_data = pd.read_csv('../stock_data/TSLA.csv', parse_dates=['time']) raw_data['data_time'] = raw_data['time'].dt.strftime("%Y-%m-%d %H:%M:00") raw_data.set_index('time', inplace=True) raw_data["vwap"] = np.nan raw_data.sort_index(inplace=True) # print(raw_data) class MyFeature(Feature): def _load_internal(self, instrument, start_index, end_index, freq): print("load", self._name, instrument, start_index, end_index, freq) return raw_data.loc[start_index:end_index][self._name] Operators.register(OpsList + [MyFeature]) def my_parse_field(field): if not isinstance(field, str): field = str(field) for pattern, new in [(r"\$(\w+)", rf'MyFeature("\1")'), (r"(\w+\s*)\(", r"Operators.\1(")]: # Features # Operators field = re.sub(pattern, new, field) return field obj = dict() for field in exprs: expression = eval(my_parse_field(field)) series = expression.load('TSLA', "2022-01-02", "2022-02-28", "1min") series = series.astype(np.float32) obj[field] = series data = pd.DataFrame(obj) data.columns = columns view_time_start = '2022-02-11' view_time_end = '2022-02-12' pre_data = raw_data.loc[view_time_start:view_time_end].copy() pred=model.model.predict(xgb.DMatrix(data.loc[view_time_start:view_time_end])) pre_data['pred_score'] = pred records = pre_data.to_dict("records") cash = 50000 position = {} hold_thresh = 5 score_thresh = 0.001 x_axises, y_axises, mark_points, money = [], [], [], [] for record in records: x_axises.append(record['data_time']) y_axises.append([ record['open'], record['close'], record['low'], record['high'] ]) if 'hold_cnt' in position: position['hold_cnt'] += 1 if position and (record['open'] >= position['close'] * 1.01 or record['open'] < position['close'] * 0.995 or record['pred_score'] < -score_thresh or position['hold_cnt'] >= hold_thresh): cash += position['amount'] * record['open'] position = {} #print("sell") mark_points.append(opts.MarkPointItem( coord=[record['data_time'], record['high']], symbol='triangle', symbol_size=7, itemstyle_opts=opts.ItemStyleOpts(color="green") )) elif record['pred_score'] > score_thresh and not position: position = dict(record) position['amount'] = int(cash / position['open']) cash -= position['amount'] * position['open'] # buy #print("buy") position['hold_cnt'] = 0 mark_points.append(opts.MarkPointItem( coord=[record['data_time'], record['high']], symbol='arrow', symbol_size=7, itemstyle_opts=opts.ItemStyleOpts(color="yellow") )) cur_money = cash if position: cur_money += position['amount'] * record['close'] money.append(cur_money) if position: cash += position['amount'] * records[-1]['close'] print("cash:", cash) kline_graph = ( Kline() .add_xaxis(x_axises) .add_yaxis( "kline", 
y_axises, markpoint_opts=opts.MarkPointOpts( data=mark_points ), ) .set_global_opts( xaxis_opts=opts.AxisOpts(is_scale=True), yaxis_opts=opts.AxisOpts( is_scale=True, splitarea_opts=opts.SplitAreaOpts( is_show=True, areastyle_opts=opts.AreaStyleOpts(opacity=1) ), ), title_opts=opts.TitleOpts(title="%s_%s" % (view_time_start, view_time_end)), datazoom_opts=[opts.DataZoomOpts(type_="inside", xaxis_index=[0, 1],)], ) ) kline_line = ( Line() .add_xaxis(xaxis_data=x_axises) .add_yaxis( series_name="cur_money", y_axis=money, is_smooth=True, linestyle_opts=opts.LineStyleOpts(opacity=0.5), label_opts=opts.LabelOpts(is_show=False), markline_opts=opts.MarkLineOpts( data=[opts.MarkLineItem(y=50000)] ), ) .set_global_opts( xaxis_opts=opts.AxisOpts( type_="category", grid_index=2, axislabel_opts=opts.LabelOpts(is_show=False), ), yaxis_opts=opts.AxisOpts( min_='dataMin' ) ) ) grid_chart = Grid(init_opts=opts.InitOpts(width='2000px', height='900px')) grid_chart.add( kline_graph, grid_opts=opts.GridOpts(pos_left="3%", pos_right="10%", height="50%"), ) grid_chart.add( kline_line, grid_opts=opts.GridOpts( pos_left="3%", pos_right="10%", pos_top="60%", height="30%" ), ) grid_chart.render("kline_markline.html")
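A minimal sketch of what the two regex substitutions in my_parse_field above do, applied to a hypothetical qlib-style alpha expression (not necessarily one returned by MyAlphaHandler):

import re

def parse_field_sketch(field):
    # same two substitutions as my_parse_field above:
    # "$close" -> MyFeature("close"), then "Name(" -> "Operators.Name("
    for pattern, new in [(r"\$(\w+)", r'MyFeature("\1")'), (r"(\w+\s*)\(", r"Operators.\1(")]:
        field = re.sub(pattern, new, field)
    return field

print(parse_field_sketch("Ref($close, 5) / $close"))
# -> Operators.Ref(Operators.MyFeature("close"), 5) / Operators.MyFeature("close")
# (MyFeature is itself registered with Operators above, so the second rule
#  rewriting "MyFeature(" into "Operators.MyFeature(" still resolves after eval.)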
1.648438
2
heatsink.py
sww1235/heatsink-calc
1
471
"""Class representations of heatsinks.""" import math from scipy import constants as const from materials import Aluminium_6063 as aluminium class Heatsink: """ A Heatsink. Extended by form factor subclasses """ def __init__(self, material, configuration): """Init material and configuration variables.""" self.material = material self.configuration = configuration class CylindricalAnnularFin(Heatsink): """Extend base heatsink class with a cylindrical annular fin heatsink.""" def __init__(self, material, finSpacing, finRadius, finThickness, cylinderDiameter, numberOfFins, ambAirTemp, maxJunctionTemp, maxSurfaceTemp): """ Init remainder of class variables. NOTE: all models are based off of the finSpacing variable NOTE: using the simplified model for calculation efficiency. finSpacing : gap between adjacent fins finRadius : radius of fin minus central support cylinder (alternatively, fin depth) finThickness : thickness of individual fin cylinderDiameter: diameter of support cylinder heatsinkLength : overall axial length of heatsink overall diameter: outside diameter of heatsink including fins. """ self.finSpacing = finSpacing # in meters self.finRadius = finRadius # in meters self.finThickness = finThickness # in meters self.cylinderDiameter = cylinderDiameter # in meters self.numberOfFins = numberofFins self.heatsinkLength = ((self.finThickness * self.numberOfFins) + ((self.numberOfFins - 1) * self.finSpacing)) self.overallDiameter = self.cylinderDiameter + (2 * finRadius) self.ambAirTemp = ambAirTemp # degrees kelvin self.maxJunctionTemp = maxJunctionTemp self.maxSurfaceTemp = maxSurfaceTemp """ NOTE: in order to prevent ridiculously long variable names, all Nusselt Numbers are abbreviated as follows: nn = Nusselt Number nn0 = Nusselt Number 0 (Diffusive Limit) nnOut = Nusselt Number for outer surfaces nnIn = Nusselt Number for inner surfaces nnInT = Nusselt Number for the thin boundry layer of inner surface nnInFD = Nusselt Number for fully developed regime inner surface """ # thermal diffusivity of air at atmospheric pressure at 25C alpha = 22.39 * 10**(-6) # (meters^2) / seconds # Volumetric coefficient of thermal expansion beta = aluminium.expansionCoefficient # 1/kelvin heatsinkSurfaceTemp = # TODO kelvin # at atmospheric pressure at 25C kinematicViscosity = 15.52 * 10**(-6) # meter^2/second deltaT = heatsinkSurfaceTemp - ambAirTemp # kelvin hLoD = self.heatsinkLength / self.overallDiameter cDoD = self.cylinderDiameter / self.overallDiameter oneChannelArea = (math.pi * (((self.overallDiameter**2 - self.cylinderDiameter**2) / 2) + (self.cylinderDiameter * self.finSpacing))) # area of circumscribed cylinder areaCC = (math.pi * (((self.overallDiameter**2) / 2) + self.overallDiameter * self.heatsinkLength)) # meter^2 # inner surface area of heatsink areaIn = (self.numberOfFins - 1) * oneChannelArea # meter^2 # outer surface area of heatsink areaOut = (math.pi * (((self.overallDiameter**2) / 2) + (self.numberOfFins * self.overallDiameter * self.finThickness))) # meter^2 # overall area of heatsink areaHS = areaIn + areaOut # meter^2 RayleighNbrFinSpacing = ((const.g * beta * deltaT * self.finSpacing**4) / (kinematicViscosity * alpha * self.overallDiameter)) RayleighNbrOverallDiameter = ((const.g * beta * deltaT * self.overallDiameter**3) / (kinematicViscosity * alpha)) if 0.1 <= hLoD <= 8: self.nn0 = ((3.36 + (0.087 * hLoD)) * math.sqrt(areaCC) * (self.finSpacing / areaHS) ) if 0.1 <= (self.finThickness * self.numberOfFins / self.overallDiameter) <= 8: self.nnOut = ((0.499 - (0.026 * 
math.log(self.finThickness * self.numberOfFins / self.overallDiameter))) * math.pow(RayleighNbrFinSpacing, 0.25) * (areaOut/areaHS) ) if (0.1 <= cdoD <= 8) and (2.9 * 10**4 <= RayleighNbrOverallDiameter <= 2.3 * 10**5): nnInT = ((0.573-(0.184 * cdoD) + (0.0388 * cdoD**2)) * math.pow(RayleighNbrFinSpacing, 0.25)) nnInFD = (((0.0323 - (0.0517 * cdoD) + (0.11 * cdoD**2)) * math.pow(RayleighNbrFinSpacing, 0.25)) + (0.0516 + (0.0154 * cdoD) - (0.0433 * cdoD**2) + (0.0792 * cdoD**3)) * RayleighNbrFinSpacing) n = 1 self.nnIn = (math.pow(math.pow(nnInT, -n) + math.pow(nnInFD, -n), (-1/n) ) * (areaIn/areaHS) ) self.nn = (self.nnIn + self.nnOut + self.nn0) super(Child, self).__init__(material, self.__name__) """ Nusselt number = (Qconv * b) / (Ahs deltaT k) Qconv = heat flow rate by convection (Watts) b = finSpacing (meters) Ahs = Area of heatsink (meter^2) deltaT = temperature difference between surface temp of heatsink and ambient air temp. k = thermal conductivity of material (Watts / (meter kelvin)) """
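The closing note above relates the combined Nusselt number to the convective heat flow. A small sketch of that rearrangement, with purely illustrative numbers (none come from the class, and the conductivity of 0.026 W/(m·K) is an assumed value for air at roughly 25 °C):

def convective_heat_flow(nn, area_hs, delta_t, fin_spacing, k=0.026):
    """Q_conv = Nu * A_hs * deltaT * k / b, from the relation quoted above."""
    return nn * area_hs * delta_t * k / fin_spacing

# e.g. Nu = 8, 0.05 m^2 of heatsink area, 40 K rise, 3 mm fin gap:
print(convective_heat_flow(8.0, 0.05, 40.0, 0.003))  # ~139 W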
2.640625
3
tests/mqtt/test_subscribe.py
smurfix/hbmqtt
0
479
# Copyright (c) 2015 <NAME> # # See the file license.txt for copying permission. import anyio import unittest from hbmqtt.mqtt.subscribe import SubscribePacket, SubscribePayload from hbmqtt.mqtt.packet import PacketIdVariableHeader from hbmqtt.mqtt.constants import QOS_1, QOS_2 from hbmqtt.adapters import BufferAdapter class SubscribePacketTest(unittest.TestCase): def test_from_stream(self): data = b'\x80\x0e\x00\x0a\x00\x03a/b\x01\x00\x03c/d\x02' stream = BufferAdapter(data) message = anyio.run(SubscribePacket.from_stream, stream) (topic, qos) = message.payload.topics[0] self.assertEqual(topic, 'a/b') self.assertEqual(qos, QOS_1) (topic, qos) = message.payload.topics[1] self.assertEqual(topic, 'c/d') self.assertEqual(qos, QOS_2) def test_to_stream(self): variable_header = PacketIdVariableHeader(10) payload = SubscribePayload( [ ('a/b', QOS_1), ('c/d', QOS_2) ]) publish = SubscribePacket(variable_header=variable_header, payload=payload) out = publish.to_bytes() self.assertEqual(out, b'\x82\x0e\x00\x0a\x00\x03a/b\x01\x00\x03c/d\x02')
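For reference, here is how the expected byte string in test_to_stream decomposes under the MQTT 3.1.1 SUBSCRIBE layout (my annotation, not part of the original test):

packet = bytes([
    0x82,        # fixed header: packet type 8 (SUBSCRIBE) << 4, reserved flags 0b0010
    0x0e,        # remaining length: 14 bytes follow
    0x00, 0x0a,  # variable header: packet identifier = 10
    0x00, 0x03,  # payload: first topic filter length = 3
]) + b'a/b' + bytes([
    0x01,        # requested QoS 1
    0x00, 0x03,  # second topic filter length = 3
]) + b'c/d' + bytes([
    0x02,        # requested QoS 2
])
assert packet == b'\x82\x0e\x00\x0a\x00\x03a/b\x01\x00\x03c/d\x02'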
1.578125
2
test/test_aes.py
haruhi-dl/haruhi-dl
32
495
#!/usr/bin/env python from __future__ import unicode_literals # Allow direct execution import os import sys import unittest sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__)))) from haruhi_dl.aes import aes_decrypt, aes_encrypt, aes_cbc_decrypt, aes_cbc_encrypt, aes_decrypt_text from haruhi_dl.utils import bytes_to_intlist, intlist_to_bytes import base64 # the encrypted data can be generate with 'devscripts/generate_aes_testdata.py' class TestAES(unittest.TestCase): def setUp(self): self.key = self.iv = [0x20, 0x15] + 14 * [0] self.secret_msg = b'Secret message goes here' def test_encrypt(self): msg = b'message' key = list(range(16)) encrypted = aes_encrypt(bytes_to_intlist(msg), key) decrypted = intlist_to_bytes(aes_decrypt(encrypted, key)) self.assertEqual(decrypted, msg) def test_cbc_decrypt(self): data = bytes_to_intlist( b"\x97\x92+\xe5\x0b\xc3\x18\x91ky9m&\xb3\xb5@\xe6'\xc2\x96.\xc8u\x88\xab9-[\x9e|\xf1\xcd" ) decrypted = intlist_to_bytes(aes_cbc_decrypt(data, self.key, self.iv)) self.assertEqual(decrypted.rstrip(b'\x08'), self.secret_msg) def test_cbc_encrypt(self): data = bytes_to_intlist(self.secret_msg) encrypted = intlist_to_bytes(aes_cbc_encrypt(data, self.key, self.iv)) self.assertEqual( encrypted, b"\x97\x92+\xe5\x0b\xc3\x18\x91ky9m&\xb3\xb5@\xe6'\xc2\x96.\xc8u\x88\xab9-[\x9e|\xf1\xcd") def test_decrypt_text(self): password = intlist_to_bytes(self.key).decode('utf-8') encrypted = base64.b64encode( intlist_to_bytes(self.iv[:8]) + b'\x17\x15\x93\xab\x8d\x80V\xcdV\xe0\t\xcdo\xc2\xa5\xd8ksM\r\xe27N\xae' ).decode('utf-8') decrypted = (aes_decrypt_text(encrypted, password, 16)) self.assertEqual(decrypted, self.secret_msg) password = intlist_to_bytes(self.key).decode('utf-8') encrypted = base64.b64encode( intlist_to_bytes(self.iv[:8]) + b'\x0b\xe6\xa4\xd9z\x0e\xb8\xb9\xd0\xd4i_\x85\x1d\x99\x98_\xe5\x80\xe7.\xbf\xa5\x83' ).decode('utf-8') decrypted = (aes_decrypt_text(encrypted, password, 32)) self.assertEqual(decrypted, self.secret_msg) if __name__ == '__main__': unittest.main()
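bytes_to_intlist and intlist_to_bytes are thin conversions between byte strings and lists of ints; a rough standalone equivalent, shown only to make the fixtures easier to read (note the same 16-element list serves as both the AES-128 key and the IV in these tests):

def bytes_to_intlist_sketch(data):
    return list(bytearray(data))

def intlist_to_bytes_sketch(ints):
    return bytes(bytearray(ints))

msg = b'Secret message goes here'
assert intlist_to_bytes_sketch(bytes_to_intlist_sketch(msg)) == msg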
1.75
2
tests/bugs/core_6266_test.py
reevespaul/firebird-qa
0
503
#coding:utf-8 # # id: bugs.core_6266 # title: Deleting records from MON$ATTACHMENTS using ORDER BY clause doesn't close the corresponding attachments # decription: # Old title: Don't close attach while deleting record from MON$ATTACHMENTS using ORDER BY clause. # Confirmed bug on 3.0.6.33271. # Checked on 3.0.6.33272 (SS/CS) - works fine. # 22.04.2020. Checked separately on 4.0.0.1931 SS/CS: all OK. FB 4.0 can also be tested since this build. # # tracker_id: CORE-6266 # min_versions: ['3.0.0'] # versions: 3.0 # qmid: None import pytest from firebird.qa import db_factory, isql_act, Action # version: 3.0 # resources: None substitutions_1 = [] init_script_1 = """""" db_1 = db_factory(sql_dialect=3, init=init_script_1) # test_script_1 #--- # import os # import sys # import time # import fdb # # ATT_CNT=5 # ATT_DELAY=1 # # os.environ["ISC_USER"] = user_name # os.environ["ISC_PASSWORD"] = <PASSWORD> # # db_conn.close() # # con_list={} # for i in range(0, ATT_CNT): # if i > 0: # time.sleep( ATT_DELAY ) # # c = fdb.connect(dsn = dsn) # a = c.attachment_id # con_list[ i ] = (a, c) # # print('created attachment ', (a,c) ) # # con_admin = con_list[0][1] # # #print(con_admin.firebird_version) # # # this removes ALL connections --> should NOT be used for reproducing ticket issue: # #con_admin.execute_immediate('delete from mon$attachments where mon$attachment_id != current_connection order by mon$timestamp') # # # this removes ALL connections --> should NOT be used for reproducing ticket issue: # #con_admin.execute_immediate('delete from mon$attachments where mon$system_flag is distinct from 1 and mon$attachment_id != current_connection order by mon$timestamp') # # # This DOES NOT remove all attachments (only 'last' in order of timestamp), but # # DELETE statement must NOT contain phrase 'mon$attachment_id != current_connection': # con_admin.execute_immediate('delete from mon$attachments where mon$system_flag is distinct from 1 order by mon$timestamp') # # con_admin.commit() # # cur_admin = con_admin.cursor() # cur_admin.execute('select mon$attachment_id,mon$user from mon$attachments where mon$system_flag is distinct from 1 and mon$attachment_id != current_connection' ) # i=0 # for r in cur_admin: # print( '### ACHTUNG ### STILL ALIVE ATTACHMENT DETECTED: ', r[0], r[1].strip(), '###' ) # i += 1 # print('Number of attachments that remains alive: ',i) # # cur_admin.close() # # #print('Final cleanup before quit from Python.') # # for k,v in sorted( con_list.items() ): # #print('attempt to close attachment ', v[0] ) # try: # v[1].close() # #print('done.') # except Exception as e: # pass # #print('Got exception:', sys.exc_info()[0]) # #print(e[0]) # # #--- #act_1 = python_act('db_1', test_script_1, substitutions=substitutions_1) expected_stdout_1 = """ Number of attachments that remains alive: 0 """ @pytest.mark.version('>=3.0') @pytest.mark.xfail def test_1(db_1): pytest.fail("Test not IMPLEMENTED")
1.164063
1
Doc/conf.py
python-doc-tw/cpython-tw
0
519
# # Python documentation build configuration file # # This file is execfile()d with the current directory set to its containing dir. # # The contents of this file are pickled, so don't put values in the namespace # that aren't pickleable (module imports are okay, they're removed automatically). import sys, os, time sys.path.append(os.path.abspath('tools/extensions')) # General configuration # --------------------- extensions = ['sphinx.ext.coverage', 'sphinx.ext.doctest', 'pyspecific', 'c_annotations'] # General substitutions. project = 'Python' copyright = '2001-%s, Python Software Foundation' % time.strftime('%Y') # We look for the Include/patchlevel.h file in the current Python source tree # and replace the values accordingly. import patchlevel version, release = patchlevel.get_version_info() # There are two options for replacing |today|: either, you set today to some # non-false value, then it is used: today = '' # Else, today_fmt is used as the format for a strftime call. today_fmt = '%B %d, %Y' # By default, highlight as Python 3. highlight_language = 'python3' # Require Sphinx 1.2 for build. needs_sphinx = '1.2' # Ignore any .rst files in the venv/ directory. exclude_patterns = ['venv/*'] # Options for HTML output # ----------------------- # Use our custom theme. html_theme = 'pydoctheme' html_theme_path = ['tools'] html_theme_options = {'collapsiblesidebar': True} # Short title used e.g. for <title> HTML tags. html_short_title = '%s Documentation' % release # If not '', a 'Last updated on:' timestamp is inserted at every page bottom, # using the given strftime format. html_last_updated_fmt = '%b %d, %Y' # Path to find HTML templates. templates_path = ['tools/templates'] # Custom sidebar templates, filenames relative to this file. html_sidebars = { 'index': 'indexsidebar.html', } # Additional templates that should be rendered to pages. html_additional_pages = { 'download': 'download.html', 'index': 'indexcontent.html', } # Output an OpenSearch description file. html_use_opensearch = 'https://docs.python.org/' + version # Additional static files. html_static_path = ['tools/static'] # Output file base name for HTML help builder. htmlhelp_basename = 'python' + release.replace('.', '') # Split the index html_split_index = True # Options for LaTeX output # ------------------------ # The paper size ('letter' or 'a4'). latex_paper_size = 'a4' # The font size ('10pt', '11pt' or '12pt'). latex_font_size = '10pt' # Grouping the document tree into LaTeX files. List of tuples # (source start file, target name, title, author, document class [howto/manual]). 
_stdauthor = r'<NAME>\\and the Python development team' latex_documents = [ ('c-api/index', 'c-api.tex', 'The Python/C API', _stdauthor, 'manual'), ('distributing/index', 'distributing.tex', 'Distributing Python Modules', _stdauthor, 'manual'), ('extending/index', 'extending.tex', 'Extending and Embedding Python', _stdauthor, 'manual'), ('installing/index', 'installing.tex', 'Installing Python Modules', _stdauthor, 'manual'), ('library/index', 'library.tex', 'The Python Library Reference', _stdauthor, 'manual'), ('reference/index', 'reference.tex', 'The Python Language Reference', _stdauthor, 'manual'), ('tutorial/index', 'tutorial.tex', 'Python Tutorial', _stdauthor, 'manual'), ('using/index', 'using.tex', 'Python Setup and Usage', _stdauthor, 'manual'), ('faq/index', 'faq.tex', 'Python Frequently Asked Questions', _stdauthor, 'manual'), ('whatsnew/' + version, 'whatsnew.tex', 'What\'s New in Python', '<NAME>', 'howto'), ] # Collect all HOWTOs individually latex_documents.extend(('howto/' + fn[:-4], 'howto-' + fn[:-4] + '.tex', '', _stdauthor, 'howto') for fn in os.listdir('howto') if fn.endswith('.rst') and fn != 'index.rst') # Additional stuff for the LaTeX preamble. latex_preamble = r''' \authoraddress{ \strong{Python Software Foundation}\\ Email: \email{<EMAIL>} } \let\Verbatim=\OriginalVerbatim \let\endVerbatim=\endOriginalVerbatim ''' # Documents to append as an appendix to all manuals. latex_appendices = ['glossary', 'about', 'license', 'copyright'] # Get LaTeX to handle Unicode correctly latex_elements = {'inputenc': r'\usepackage[utf8x]{inputenc}', 'utf8extra': ''} # Options for Epub output # ----------------------- epub_author = 'Python Documentation Authors' epub_publisher = 'Python Software Foundation' # Options for the coverage checker # -------------------------------- # The coverage checker will ignore all modules/functions/classes whose names # match any of the following regexes (using re.match). coverage_ignore_modules = [ r'[T|t][k|K]', r'Tix', r'distutils.*', ] coverage_ignore_functions = [ 'test($|_)', ] coverage_ignore_classes = [ ] # Glob patterns for C source files for C API coverage, relative to this directory. coverage_c_path = [ '../Include/*.h', ] # Regexes to find C items in the source files. coverage_c_regexes = { 'cfunction': (r'^PyAPI_FUNC\(.*\)\s+([^_][\w_]+)'), 'data': (r'^PyAPI_DATA\(.*\)\s+([^_][\w_]+)'), 'macro': (r'^#define ([^_][\w_]+)\(.*\)[\s|\\]'), } # The coverage checker will ignore all C items whose names match these regexes # (using re.match) -- the keys must be the same as in coverage_c_regexes. coverage_ignore_c_items = { # 'cfunction': [...] } # Options for the link checker # ---------------------------- # Ignore certain URLs. linkcheck_ignore = [r'https://bugs.python.org/(issue)?\d+', # Ignore PEPs for now, they all have permanent redirects. r'http://www.python.org/dev/peps/pep-\d+'] # Options for extensions # ---------------------- # Relative filename of the reference count data file. refcount_file = 'data/refcounts.dat' # Translation # ----------- gettext_compact = False locale_dirs = ["locale"]
1.289063
1
FakeNewsClassifierWithLSTM.py
pratikasarkar/nlp
0
527
# -*- coding: utf-8 -*- """ Created on Thu Feb 11 13:42:45 2021 @author: ASUS """ import pandas as pd df = pd.read_csv(r'D:\nlp\fake-news-data\train.csv') df = df.dropna() X = df.drop('label',axis = 1) y = df['label'] import tensorflow as tf from tensorflow.keras.layers import Embedding, Dense, LSTM from tensorflow.keras.models import Sequential from tensorflow.keras.preprocessing.sequence import pad_sequences from tensorflow.keras.preprocessing.text import one_hot # Vocabulary size voc_size = 5000 # One Hot Representation messages = X.copy() messages.reset_index(inplace = True) import nltk import re from nltk.corpus import stopwords # Dataset Preprocessing from nltk.stem import PorterStemmer ps = PorterStemmer() corpus = [] for i in range(len(messages)): print(i) review = re.sub('[^a-zA-Z]',' ',messages['title'][i]) review = review.lower() review = review.split() review = [ps.stem(word) for word in review if word not in stopwords.words('english')] review = " ".join(review) corpus.append(review) onehot_repr = [one_hot(words,voc_size) for words in corpus] sent_len = 20 embedded_doc = pad_sequences(onehot_repr,maxlen = sent_len,padding = 'pre') # Creating the model embedding_vector_features = 40 model = Sequential() model.add(Embedding(voc_size,embedding_vector_features,input_length=sent_len)) model.add(LSTM(100)) model.add(Dense(1,activation='sigmoid')) model.compile(loss='binary_crossentropy',optimizer = 'adam',metrics = ['accuracy']) model.summary() import numpy as np X_final = np.array(embedded_doc) y_final = np.array(y) from sklearn.model_selection import train_test_split X_train, X_test, y_train, y_test = train_test_split(X_final,y_final,test_size = 0.33,random_state = 42) model.fit(X_train,y_train,validation_data=(X_test,y_test),epochs=10,batch_size=64) y_pred = model.predict_classes(X_test) from sklearn.metrics import confusion_matrix, accuracy_score cm = confusion_matrix(y_test,y_pred) acc = accuracy_score(y_test,y_pred)
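Note that Sequential.predict_classes was deprecated and later removed from tf.keras; on newer TensorFlow releases (an assumption about the reader's environment, not the version this script was written against) the equivalent for this single-sigmoid model is to threshold the predicted probabilities:

probs = model.predict(X_test)
y_pred = (probs > 0.5).astype("int32").ravel()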
2.046875
2
tests/test_bugs.py
mmibrah2/OpenQL
0
535
import os import filecmp import unittest import numpy as np from openql import openql as ql from utils import file_compare curdir = os.path.dirname(os.path.realpath(__file__)) output_dir = os.path.join(curdir, 'test_output') class Test_bugs(unittest.TestCase): @classmethod def setUp(self): ql.initialize() ql.set_option('output_dir', output_dir) ql.set_option('use_default_gates', 'yes') ql.set_option('log_level', 'LOG_WARNING') # @unittest.expectedFailure # @unittest.skip def test_typecast(self): sweep_points = [1,2] num_circuits = 1 num_qubits = 2 platf = ql.Platform("starmon", 'cc_light') p = ql.Program('test_bug', platf, num_qubits) p.set_sweep_points(sweep_points) k = ql.Kernel('kernel1', platf, num_qubits) qubit = 1 k.identity(np.int(qubit)) k.identity(np.int32(qubit)) k.identity(np.int64(qubit)) k.identity(np.uint(qubit)) k.identity(np.uint32(qubit)) k.identity(np.uint64(qubit)) # add the kernel to the program p.add_kernel(k) # relates to https://github.com/QE-Lab/OpenQL/issues/171 # various runs of compiles were generating different results or in the best # case strange errors. So multiple (NCOMPILES) runs of compile are executed # to make sure there is no error and output generated in all these runs is same # JvS: more likely, it also had to do with the classical register allocator # depending on stuff like Python's garbage collection to free a register. # The register numbers have to be hardcoded now for that reason. def test_stateful_behavior(self): ql.set_option('optimize', 'no') ql.set_option('scheduler', 'ALAP') platform = ql.Platform("myPlatform", 'cc_light') sweep_points = [1] nqubits = 3 nregs = 3 p = ql.Program("statelessProgram", platform, nqubits, nregs) p.set_sweep_points(sweep_points) k = ql.Kernel("aKernel", platform, nqubits, nregs) k.prepz(0) k.gate('rx180', [0]) k.measure(0) rd = ql.CReg(0) rs1 = ql.CReg(1) rs2 = ql.CReg(2) k.classical(rs1, ql.Operation(3)) k.classical(rs1, ql.Operation(4)) k.classical(rd, ql.Operation(rs1, '+', rs2)) p.add_kernel(k) NCOMPILES=50 QISA_fn = os.path.join(output_dir, p.name+'_last.qasm') for i in range(NCOMPILES): p.compile() self.setUpClass() QISA_fn_i = os.path.join(output_dir, p.name+'_'+str(i)+'_last.qasm') os.rename(QISA_fn,QISA_fn_i) for i in range(NCOMPILES-1): QISA_fn_1 = os.path.join(output_dir, p.name+'_'+str(i)+'_last.qasm') QISA_fn_2 = os.path.join(output_dir, p.name+'_'+str(i+1)+'_last.qasm') self.assertTrue( file_compare(QISA_fn_1, QISA_fn_2)) # Unclear how this test works. # When clear, enable it again. # Now it fails, not clear how to repair, so it is disabled. # def test_empty_infinite_loop(self): # name = 'empty_infinite_loop' # in_fn = 'test_' + name + '.cq' # out_fn = 'test_output/' + name + '_out.cq' # gold_fn = 'golden/' + name + '_out.cq' # ql.initialize() # #ql.set_option('log_level', 'LOG_DEBUG') # ql.compile(in_fn) # self.assertTrue(file_compare(out_fn, gold_fn)) if __name__ == '__main__': unittest.main()
1.65625
2
tests/functional/test_soft_round_inverse.py
tallamjr/NeuralCompression
233
543
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.

import torch

from neuralcompression.functional import soft_round, soft_round_inverse


def test_soft_round_inverse():
    x = torch.linspace(-2.0, 2.0, 50)

    torch.testing.assert_close(
        x,
        soft_round_inverse(x, alpha=1e-13),
    )

    x = torch.tensor([-1.25, -0.75, 0.75, 1.25])

    torch.testing.assert_close(
        x,
        soft_round_inverse(soft_round(x, alpha=2.0), alpha=2.0),
    )

    for offset in range(-5, 5):
        x = torch.linspace(offset + 0.001, offset + 0.999, 100)

        torch.testing.assert_close(
            torch.ceil(x) - 0.5,
            soft_round_inverse(x, alpha=5000.0),
            atol=0.001,
            rtol=0.002,
        )
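For context, the soft rounding being inverted here follows the parameterization described by Agustsson & Theis (2020); a small sketch (not neuralcompression's implementation) that matches the limits the test exercises, i.e. identity as alpha -> 0 and hard rounding as alpha grows:

import math
import torch

def soft_round_sketch(x, alpha):
    # s(x) = m + tanh(alpha * r) / (2 * tanh(alpha / 2)),  m = floor(x) + 0.5,  r = x - m
    m = torch.floor(x) + 0.5
    r = x - m
    return m + torch.tanh(alpha * r) / (2.0 * math.tanh(alpha / 2.0))

x = torch.tensor([-1.25, -0.75, 0.75, 1.25])
print(soft_round_sketch(x, 2.0))    # values pulled toward the nearest integer
print(soft_round_sketch(x, 1e-4))   # nearly the identity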
1.648438
2
src/main/python/rds_log_cat/parser/mysql57.py
Scout24/rds-log-cat
1
551
from rds_log_cat.parser.parser import Parser, LineParserException


class Mysql57(Parser):

    def __init__(self):
        Parser.__init__(self)

    def compose_timestamp(self, datetime, timezone):
        if len(datetime) != 27:
            raise LineParserException('wrong length of datetime - wrong date is: ' + datetime)
        if not timezone == 'UTC':
            raise LineParserException('Only able to parse times in UTC. You gave {}'.format(timezone))
        return datetime

    def parse(self, line):
        """
        parses the fields in line to generate json structure
        """
        expected_min_no_fields = 5
        if len(line) < expected_min_no_fields:
            raise LineParserException('line too short')

        pid = line[1]
        log_level = line[2].lstrip("[").rstrip("]")
        timezone = 'UTC'

        return {
            '@timestamp': self.compose_timestamp(line[0], timezone),
            'log_level': log_level,
            'process_id': int(pid),
            'message': ' '.join(map(str, line[3:]))
        }
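A usage sketch with a hypothetical MySQL 5.7 error-log entry, already split into whitespace-separated fields as the parser expects (the 27-character first field is the ISO timestamp including the trailing 'Z'):

fields = ['2021-03-01T12:34:56.123456Z', '4242', '[Warning]',
          'Aborted', 'connection', '12', 'to', 'db']
print(Mysql57().parse(fields))
# {'@timestamp': '2021-03-01T12:34:56.123456Z', 'log_level': 'Warning',
#  'process_id': 4242, 'message': 'Aborted connection 12 to db'}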
1.375
1
venv/lib/python3.6/site-packages/ansible_collections/community/azure/plugins/modules/azure_rm_availabilityset_info.py
usegalaxy-no/usegalaxy
1
575
#!/usr/bin/python # -*- coding: utf-8 -*- # Copyright: (c) 2016, <NAME> <<EMAIL>> # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) from __future__ import absolute_import, division, print_function __metaclass__ = type ANSIBLE_METADATA = {'metadata_version': '1.1', 'status': ['preview'], 'supported_by': 'community'} DOCUMENTATION = ''' --- module: azure_rm_availabilityset_info short_description: Get Azure Availability Set facts description: - Get facts for a specific availability set or all availability sets. options: name: description: - Limit results to a specific availability set. resource_group: description: - The resource group to search for the desired availability set. tags: description: - List of tags to be matched. extends_documentation_fragment: - azure.azcollection.azure author: - <NAME> (@julienstroheker) deprecated: removed_in: '2.0.0' why: The Ansible collection community.azure is deprecated. Use azure.azcollection instead. alternative: Use M(azure.azcollection.azure_rm_availabilityset_info) instead. ''' EXAMPLES = ''' - name: Get facts for one availability set community.azure.azure_rm_availabilityset_info: name: Testing resource_group: myResourceGroup - name: Get facts for all availability sets in a specific resource group community.azure.azure_rm_availabilityset_info: resource_group: myResourceGroup ''' RETURN = ''' azure_availabilityset: description: List of availability sets dicts. returned: always type: complex contains: location: description: - Location where the resource lives. type: str sample: eastus2 name: description: - Resource name. type: str sample: myAvailabilitySet properties: description: - The properties of the resource. type: dict contains: platformFaultDomainCount: description: - Fault Domain count. type: int sample: 3 platformUpdateDomainCount: description: - Update Domain count. type: int sample: 2 virtualMachines: description: - A list of references to all virtualmachines in the availability set. type: list sample: [] sku: description: - Location where the resource lives. type: str sample: Aligned type: description: - Resource type. type: str sample: "Microsoft.Compute/availabilitySets" tags: description: - Resource tags. 
type: dict sample: { env: sandbox } ''' from ansible_collections.azure.azcollection.plugins.module_utils.azure_rm_common import AzureRMModuleBase try: from msrestazure.azure_exceptions import CloudError except Exception: # handled in azure_rm_common pass AZURE_OBJECT_CLASS = 'AvailabilitySet' class AzureRMAvailabilitySetInfo(AzureRMModuleBase): """Utility class to get availability set facts""" def __init__(self): self.module_args = dict( name=dict(type='str'), resource_group=dict(type='str'), tags=dict(type='list') ) self.results = dict( changed=False, ansible_info=dict( azure_availabilitysets=[] ) ) self.name = None self.resource_group = None self.tags = None super(AzureRMAvailabilitySetInfo, self).__init__( derived_arg_spec=self.module_args, supports_tags=False, facts_module=True ) def exec_module(self, **kwargs): is_old_facts = self.module._name == 'azure_rm_availabilityset_facts' if is_old_facts: self.module.deprecate("The 'azure_rm_availabilityset_facts' module has been renamed to 'azure_rm_availabilityset_info'", version='3.0.0', collection_name='community.azure') # was 2.13 for key in self.module_args: setattr(self, key, kwargs[key]) if self.name and not self.resource_group: self.fail("Parameter error: resource group required when filtering by name.") if self.name: self.results['ansible_info']['azure_availabilitysets'] = self.get_item() else: self.results['ansible_info']['azure_availabilitysets'] = self.list_items() return self.results def get_item(self): """Get a single availability set""" self.log('Get properties for {0}'.format(self.name)) item = None result = [] try: item = self.compute_client.availability_sets.get(self.resource_group, self.name) except CloudError: pass if item and self.has_tags(item.tags, self.tags): avase = self.serialize_obj(item, AZURE_OBJECT_CLASS) avase['name'] = item.name avase['type'] = item.type avase['sku'] = item.sku.name result = [avase] return result def list_items(self): """Get all availability sets""" self.log('List all availability sets') try: response = self.compute_client.availability_sets.list(self.resource_group) except CloudError as exc: self.fail('Failed to list all items - {0}'.format(str(exc))) results = [] for item in response: if self.has_tags(item.tags, self.tags): avase = self.serialize_obj(item, AZURE_OBJECT_CLASS) avase['name'] = item.name avase['type'] = item.type avase['sku'] = item.sku.name results.append(avase) return results def main(): """Main module execution code path""" AzureRMAvailabilitySetInfo() if __name__ == '__main__': main()
1.28125
1
influxdb_service_sdk/model/container/resource_requirements_pb2.py
easyopsapis/easyops-api-python
5
583
# -*- coding: utf-8 -*- # Generated by the protocol buffer compiler. DO NOT EDIT! # source: resource_requirements.proto import sys _b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1')) from google.protobuf import descriptor as _descriptor from google.protobuf import message as _message from google.protobuf import reflection as _reflection from google.protobuf import symbol_database as _symbol_database # @@protoc_insertion_point(imports) _sym_db = _symbol_database.Default() from influxdb_service_sdk.model.container import resource_list_pb2 as influxdb__service__sdk_dot_model_dot_container_dot_resource__list__pb2 DESCRIPTOR = _descriptor.FileDescriptor( name='resource_requirements.proto', package='container', syntax='proto3', serialized_options=_b('ZCgo.easyops.local/contracts/protorepo-models/easyops/model/container'), serialized_pb=_b('\n\x1bresource_requirements.proto\x12\tcontainer\x1a\x38influxdb_service_sdk/model/container/resource_list.proto\"j\n\x14ResourceRequirements\x12\'\n\x06limits\x18\x01 \x01(\x0b\x32\x17.container.ResourceList\x12)\n\x08requests\x18\x02 \x01(\x0b\x32\x17.container.ResourceListBEZCgo.easyops.local/contracts/protorepo-models/easyops/model/containerb\x06proto3') , dependencies=[influxdb__service__sdk_dot_model_dot_container_dot_resource__list__pb2.DESCRIPTOR,]) _RESOURCEREQUIREMENTS = _descriptor.Descriptor( name='ResourceRequirements', full_name='container.ResourceRequirements', filename=None, file=DESCRIPTOR, containing_type=None, fields=[ _descriptor.FieldDescriptor( name='limits', full_name='container.ResourceRequirements.limits', index=0, number=1, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='requests', full_name='container.ResourceRequirements.requests', index=1, number=2, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR), ], extensions=[ ], nested_types=[], enum_types=[ ], serialized_options=None, is_extendable=False, syntax='proto3', extension_ranges=[], oneofs=[ ], serialized_start=100, serialized_end=206, ) _RESOURCEREQUIREMENTS.fields_by_name['limits'].message_type = influxdb__service__sdk_dot_model_dot_container_dot_resource__list__pb2._RESOURCELIST _RESOURCEREQUIREMENTS.fields_by_name['requests'].message_type = influxdb__service__sdk_dot_model_dot_container_dot_resource__list__pb2._RESOURCELIST DESCRIPTOR.message_types_by_name['ResourceRequirements'] = _RESOURCEREQUIREMENTS _sym_db.RegisterFileDescriptor(DESCRIPTOR) ResourceRequirements = _reflection.GeneratedProtocolMessageType('ResourceRequirements', (_message.Message,), { 'DESCRIPTOR' : _RESOURCEREQUIREMENTS, '__module__' : 'resource_requirements_pb2' # @@protoc_insertion_point(class_scope:container.ResourceRequirements) }) _sym_db.RegisterMessage(ResourceRequirements) DESCRIPTOR._options = None # @@protoc_insertion_point(module_scope)
0.902344
1
studies/mixture_feasibility/parsley_benchmark/alcohol_ester/run.py
openforcefield/nistdataselection
3
607
from evaluator import unit from evaluator.backends import QueueWorkerResources from evaluator.backends.dask import DaskLSFBackend from evaluator.client import ConnectionOptions, EvaluatorClient from evaluator.datasets import PhysicalPropertyDataSet from evaluator.forcefield import SmirnoffForceFieldSource from evaluator.server import EvaluatorServer from evaluator.utils import setup_timestamp_logging def main(): setup_timestamp_logging() # Load in the force field force_field_path = "openff-1.0.0.offxml" force_field_source = SmirnoffForceFieldSource.from_path(force_field_path) # Load in the test set. data_set = PhysicalPropertyDataSet.from_json("full_set.json") # Set up a server object to run the calculations using. working_directory = "working_directory" # Set up a backend to run the calculations on. This assume running # on a HPC resources with the LSF queue system installed. queue_resources = QueueWorkerResources( number_of_threads=1, number_of_gpus=1, preferred_gpu_toolkit=QueueWorkerResources.GPUToolkit.CUDA, per_thread_memory_limit=5 * unit.gigabyte, wallclock_time_limit="05:59", ) worker_script_commands = ["conda activate forcebalance", "module load cuda/10.1"] calculation_backend = DaskLSFBackend( minimum_number_of_workers=1, maximum_number_of_workers=50, resources_per_worker=queue_resources, queue_name="gpuqueue", setup_script_commands=worker_script_commands, adaptive_interval="1000ms", ) with calculation_backend: server = EvaluatorServer( calculation_backend=calculation_backend, working_directory=working_directory, port=8004, ) with server: # Request the estimates. client = EvaluatorClient(ConnectionOptions(server_port=8004)) request, _ = client.request_estimate( property_set=data_set, force_field_source=force_field_source, ) # Wait for the results. results, _ = request.results(True, 5) results.json(f"results.json") if __name__ == "__main__": main()
1.414063
1
src/collectors/heartbeat/heartbeat.py
art19/netuitive-diamond
2
615
# coding=utf-8 """ Send a value of 1 as a heartbeat every time this collector is invoked. #### Dependencies None #### Usage Add the collector config as : enabled = True path = netuitive Metrics are collected as : - metrics.heartbeat Netuitive Change History ======================== DVG 2016/11/14 Initial version. """ import diamond.collector from diamond.utils.config import load_config as load_server_config try: import netuitive except ImportError: netuitive = None class HeartbeatCollector(diamond.collector.Collector): def __init__(self, *args, **kwargs): super(HeartbeatCollector, self).__init__(*args, **kwargs) self.hostname = self.get_hostname() self.ttl = self.config['ttl'] self.connection_timeout = 5 if not netuitive: self.log.error('netuitive import failed. Heartbeat collector disabled') self.enabled = False return try: self.version = self._get_version() if 'netuitive_connection_timeout' in self.config: self.connection_timeout = int(self.config['netuitive_connection_timeout']) self.api = netuitive.Client(url=self.config['netuitive_url'], api_key=self.config['netuitive_api_key'], agent=self.version, connection_timeout=self.connection_timeout) except Exception as e: self.log.debug(e) def collect(self): check = netuitive.Check('heartbeat', self.hostname, self.ttl) self.api.post_check(check)
1.671875
2
scripts/bam-stats.py
varlociraptor/prosic-evaluation
2
631
#!/usr/bin/env python import sys import numpy as np import pandas as pd import pysam import matplotlib matplotlib.use("agg") import matplotlib.pyplot as plt import seaborn as sns from functools import partial tumor = pysam.AlignmentFile(snakemake.input[0], "rb") normal = pysam.AlignmentFile(snakemake.input[1], "rb") softclips = [] for i, rec in enumerate(normal): if rec.is_supplementary or rec.is_unmapped: continue is_first_read = rec.pos < rec.mpos get_clip = lambda c: c[1] if c[0] == 4 else None clip_left = get_clip(rec.cigartuples[0]) if clip_left is not None: softclips.append([clip_left, True, is_first_read]) clip_right = get_clip(rec.cigartuples[-1]) if clip_right is not None: softclips.append([clip_right, False, is_first_read]) if i == 10000000: break softclips = pd.DataFrame(softclips, columns=["len", "left", "first_in_pair"]) def plot(*args, **kwargs): softclips = args[0] plt.hist(softclips, normed=True) q95 = np.percentile(softclips, 99) plt.plot([q95, q95], [0, 1.0], "--k") m = max(softclips) plt.plot([m, m], [0, 1.0], ":k") plt.text(m, 1, "max={}".format(m), horizontalalignment="right", verticalalignment="top") g = sns.FacetGrid(softclips, col="left", row="first_in_pair") g = g.map(plot, "len") plt.savefig(snakemake.output[0])
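The get_clip lambda above relies on pysam's cigartuples convention: each element is an (operation, length) pair and operation code 4 is a soft clip, so a clip length is returned only when the first or last CIGAR element is a soft clip. A toy illustration for a read whose CIGAR string is 5S90M3S:

cigartuples = [(4, 5), (0, 90), (4, 3)]  # 5S 90M 3S
get_clip = lambda c: c[1] if c[0] == 4 else None
print(get_clip(cigartuples[0]), get_clip(cigartuples[-1]))  # 5 3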
2.078125
2
electrum/version.py
c4pt000/electrum-radiocoin
0
639
ELECTRUM_VERSION = '4.1.5-radc'   # version of the client package
APK_VERSION = '4.1.5.0'           # read by buildozer.spec
PROTOCOL_VERSION = '1.4'          # protocol version requested

# The hash of the mnemonic seed must begin with this
SEED_PREFIX        = '01'    # Standard wallet
SEED_PREFIX_SW     = '100'   # Segwit wallet
SEED_PREFIX_2FA    = '101'   # Two-factor authentication
SEED_PREFIX_2FA_SW = '102'   # Two-factor auth, using segwit


def seed_prefix(seed_type):
    if seed_type == 'standard':
        return SEED_PREFIX
    elif seed_type == 'segwit':
        return SEED_PREFIX_SW
    elif seed_type == '2fa':
        return SEED_PREFIX_2FA
    elif seed_type == '2fa_segwit':
        return SEED_PREFIX_2FA_SW
    raise Exception(f"unknown seed_type: {seed_type}")
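A usage sketch of seed_prefix: elsewhere in Electrum the normalized seed phrase is hashed and the hex digest's leading characters are compared against these prefixes. digest_hex below is a hypothetical digest, and the real check lives in Electrum's mnemonic handling rather than in this module:

def classify_seed(digest_hex: str) -> str:
    for name in ('standard', 'segwit', '2fa', '2fa_segwit'):
        if digest_hex.startswith(seed_prefix(name)):
            return name
    return 'unknown'

print(classify_seed('100ab3cd'))  # -> 'segwit'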
1.15625
1
tests/unit/controllers/v1/test_rbac_for_supported_st2api_endpoints.py
cognifloyd/st2-open-rbac
0
647
# Licensed to the StackStorm, Inc ('StackStorm') under one or more # contributor license agreements. See the NOTICE file distributed with # this work for additional information regarding copyright ownership. # The ASF licenses this file to You under the Apache License, Version 2.0 # (the "License"); you may not use this file except in compliance with # the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from collections import OrderedDict import six import mock from st2common.services import triggers as trigger_service with mock.patch.object(trigger_service, 'create_trigger_type_db', mock.MagicMock()): from st2api.controllers.v1.webhooks import HooksHolder from st2common.persistence.rbac import UserRoleAssignment from st2common.models.db.rbac import UserRoleAssignmentDB from st2common.service_setup import register_service_in_service_registry from st2common.services import coordination from st2tests import config as tests_config from st2tests.fixturesloader import FixturesLoader from open_rbac.tests import APIControllerWithRBACTestCase from tests.unit.controllers.v1.test_webhooks import DUMMY_TRIGGER_DICT http_client = six.moves.http_client __all__ = [ 'APIControllersRBACTestCase' ] FIXTURES_PACK = 'generic' TEST_FIXTURES = OrderedDict([ ('runners', ['testrunner1.yaml', 'run-local.yaml']), ('sensors', ['sensor1.yaml']), ('actions', ['action1.yaml', 'local.yaml']), ('aliases', ['alias1.yaml']), ('triggers', ['trigger1.yaml', 'cron1.yaml']), ('rules', ['rule1.yaml']), ('triggertypes', ['triggertype1.yaml']), ('executions', ['execution1.yaml']), ('liveactions', ['liveaction1.yaml', 'parentliveaction.yaml', 'childliveaction.yaml']), ('enforcements', ['enforcement1.yaml']), ('apikeys', ['apikey1.yaml']), ('traces', ['trace_for_test_enforce.yaml']) ]) MOCK_RUNNER_1 = { 'name': 'test-runner-1', 'description': 'test', 'enabled': False } MOCK_ACTION_1 = { 'name': 'ma.dummy.action', 'pack': 'examples', 'description': 'test description', 'enabled': True, 'entry_point': '/tmp/test/action2.py', 'runner_type': 'local-shell-script', 'parameters': { 'c': {'type': 'string', 'default': 'C1', 'position': 0}, 'd': {'type': 'string', 'default': 'D1', 'immutable': True} } } MOCK_ACTION_ALIAS_1 = { 'name': 'alias3', 'pack': 'aliases', 'description': 'test description', 'action_ref': 'core.local', 'formats': ['a', 'b'] } MOCK_RULE_1 = { 'enabled': True, 'name': 'st2.test.rule2', 'pack': 'yoyohoneysingh', 'trigger': { 'type': 'wolfpack.triggertype-1' }, 'criteria': { 'trigger.k1': { 'pattern': 't1_p_v', 'type': 'equals' } }, 'action': { 'ref': 'sixpack.st2.test.action', 'parameters': { 'ip2': '{{rule.k1}}', 'ip1': '{{trigger.t1_p}}' } }, 'description': '' } class APIControllersRBACTestCase(APIControllerWithRBACTestCase): """ Test class which hits all the API endpoints which are behind the RBAC wall with a user which has no permissions and makes sure API returns access denied. 
""" register_packs = True fixtures_loader = FixturesLoader() coordinator = None @classmethod def setUpClass(cls): tests_config.parse_args(coordinator_noop=True) super(APIControllersRBACTestCase, cls).setUpClass() cls.coordinator = coordination.get_coordinator(use_cache=False) # Register mock service in the service registry for testing purposes service = six.binary_type(six.text_type('mock_service').encode('ascii')) register_service_in_service_registry(service=service, capabilities={'key1': 'value1', 'name': 'mock_service'}, start_heart=True) @classmethod def tearDownClass(cls): super(APIControllersRBACTestCase, cls).tearDownClass() coordination.coordinator_teardown(cls.coordinator) def setUp(self): super(APIControllersRBACTestCase, self).setUp() # Register packs if self.register_packs: self._register_packs() # Insert mock objects - those objects are used to test get one, edit and delete operations self.models = self.fixtures_loader.save_fixtures_to_db(fixtures_pack=FIXTURES_PACK, fixtures_dict=TEST_FIXTURES) self.role_assignment_db_model = UserRoleAssignmentDB( user='user', role='role', source='assignments/user.yaml') UserRoleAssignment.add_or_update(self.role_assignment_db_model) @mock.patch.object(HooksHolder, 'get_triggers_for_hook', mock.MagicMock( return_value=[DUMMY_TRIGGER_DICT])) def test_api_endpoints_behind_rbac_wall(self): # alias_model = self.models['aliases']['alias1.yaml'] sensor_model = self.models['sensors']['sensor1.yaml'] rule_model = self.models['rules']['rule1.yaml'] enforcement_model = self.models['enforcements']['enforcement1.yaml'] execution_model = self.models['executions']['execution1.yaml'] trace_model = self.models['traces']['trace_for_test_enforce.yaml'] timer_model = self.models['triggers']['cron1.yaml'] supported_endpoints = [ # Runners { 'path': '/v1/runnertypes', 'method': 'GET', 'is_getall': True }, { 'path': '/v1/runnertypes/test-runner-1', 'method': 'GET' }, { 'path': '/v1/runnertypes/test-runner-1', 'method': 'PUT', 'payload': MOCK_RUNNER_1 }, # Packs { 'path': '/v1/packs', 'method': 'GET', 'is_getall': True }, { 'path': '/v1/packs/dummy_pack_1', 'method': 'GET' }, # Pack management { 'path': '/v1/packs/install', 'method': 'POST', 'payload': {'packs': 'libcloud'} }, { 'path': '/v1/packs/uninstall', 'method': 'POST', 'payload': {'packs': 'libcloud'} }, { 'path': '/v1/packs/register', 'method': 'POST', 'payload': {'types': ['actions']} }, { 'path': '/v1/packs/index/search', 'method': 'POST', 'payload': {'query': 'cloud'} }, { 'path': '/v1/packs/index/health', 'method': 'GET' }, # Pack views { 'path': '/v1/packs/views/files/dummy_pack_1', 'method': 'GET' }, # Pack config schemas { 'path': '/v1/config_schemas', 'method': 'GET', 'is_getall': True }, { 'path': '/v1/config_schemas/dummy_pack_1', 'method': 'GET' }, { 'path': '/v1/packs/views/file/dummy_pack_1/pack.yaml', 'method': 'GET' }, # Pack configs { 'path': '/v1/configs', 'method': 'GET', 'is_getall': True }, { 'path': '/v1/configs/dummy_pack_1', 'method': 'GET' }, { 'path': '/v1/configs/dummy_pack_1', 'method': 'PUT', 'payload': { 'foo': 'bar' } }, # Sensors { 'path': '/v1/sensortypes', 'method': 'GET', 'is_getall': True }, { 'path': '/v1/sensortypes/%s' % (sensor_model.ref), 'method': 'GET' }, { 'path': '/v1/sensortypes/%s' % (sensor_model.ref), 'method': 'PUT', 'payload': {'enabled': False} }, # Actions { 'path': '/v1/actions', 'method': 'GET', 'is_getall': True }, { 'path': '/v1/actions/wolfpack.action-1', 'method': 'GET' }, { 'path': '/v1/actions', 'method': 'POST', 'payload': MOCK_ACTION_1 }, { 
'path': '/v1/actions/wolfpack.action-1', 'method': 'PUT', 'payload': MOCK_ACTION_1 }, { 'path': '/v1/actions/wolfpack.action-1', 'method': 'DELETE' }, # Action aliases { 'path': '/v1/actionalias', 'method': 'GET', 'is_getall': True }, { 'path': '/v1/actionalias/aliases.alias1', 'method': 'GET' }, { 'path': '/v1/actionalias', 'method': 'POST', 'payload': MOCK_ACTION_ALIAS_1 }, { 'path': '/v1/actionalias/aliases.alias1', 'method': 'PUT', 'payload': MOCK_ACTION_ALIAS_1 }, { 'path': '/v1/actionalias/aliases.alias1', 'method': 'DELETE' }, { 'path': '/v1/actionalias/match', 'method': 'POST', 'payload': {'command': 'test command string'} }, # Rules { 'path': '/v1/rules', 'method': 'GET', 'is_getall': True }, { 'path': '/v1/rules/%s' % (rule_model.ref), 'method': 'GET' }, { 'path': '/v1/rules', 'method': 'POST', 'payload': MOCK_RULE_1 }, { 'path': '/v1/rules/%s' % (rule_model.ref), 'method': 'PUT', 'payload': MOCK_RULE_1 }, { 'path': '/v1/rules/%s' % (rule_model.ref), 'method': 'DELETE' }, # Rule enforcements { 'path': '/v1/ruleenforcements', 'method': 'GET', 'is_getall': True }, { 'path': '/v1/ruleenforcements/%s' % (enforcement_model.id), 'method': 'GET' }, # Action Executions { 'path': '/v1/executions', 'method': 'GET', 'is_getall': True }, { 'path': '/v1/executions/%s' % (execution_model.id), 'method': 'GET' }, { 'path': '/v1/executions/%s/output' % (execution_model.id), 'method': 'GET' }, { 'path': '/v1/executions', 'method': 'POST', 'payload': {'action': 'core.local'} # schedule execution / run action }, { 'path': '/v1/executions/%s' % (execution_model.id), 'method': 'DELETE' # stop execution }, { 'path': '/v1/executions/%s/re_run' % (execution_model.id), 'method': 'POST', # re-run execution 'payload': {'parameters': {}} }, # Action execution nested controllers { 'path': '/v1/executions/%s/attribute/trigger_instance' % (execution_model.id), 'method': 'GET' }, { 'path': '/v1/executions/%s/children' % (execution_model.id), 'method': 'GET' }, # Alias executions { 'path': '/v1/aliasexecution', 'method': 'POST', 'payload': {'name': 'alias1', 'format': 'foo bar ponies', 'command': 'foo bar ponies', 'user': 'channel', 'source_channel': 'bar'} }, # Webhook { 'path': '/v1/webhooks/st2', 'method': 'POST', 'payload': { 'trigger': 'some', 'payload': { 'some': 'thing' } } }, # Traces { 'path': '/v1/traces', 'method': 'GET', 'is_getall': True }, { 'path': '/v1/traces/%s' % (trace_model.id), 'method': 'GET' }, # Timers { 'path': '/v1/timers', 'method': 'GET' }, { 'path': '/v1/timers/%s' % (timer_model.id), 'method': 'GET' }, # Webhooks { 'path': '/v1/webhooks', 'method': 'GET' }, { 'path': '/v1/webhooks/git', 'method': 'GET' }, # RBAC - roles { 'path': '/v1/rbac/roles', 'method': 'GET', 'is_getall': True }, { 'path': '/v1/rbac/roles/admin', 'method': 'GET' }, # RBAC - user role assignments { 'path': '/v1/rbac/role_assignments', 'method': 'GET', 'is_getall': True }, { 'path': '/v1/rbac/role_assignments/%s' % (self.role_assignment_db_model['id']), 'method': 'GET' }, # RBAC - permission types { 'path': '/v1/rbac/permission_types', 'method': 'GET', 'is_getall': True }, { 'path': '/v1/rbac/permission_types/action', 'method': 'GET' }, # Action views { 'path': '/v1/actions/views/overview', 'method': 'GET', 'is_getall': True }, # Rule views { 'path': '/v1/rules/views', 'method': 'GET', 'is_getall': True }, # Service registry { 'path': '/v1/service_registry/groups', 'method': 'GET', 'is_getall': True }, { 'path': '/v1/service_registry/groups/mock_service/members', 'method': 'GET', 'is_getall': True } ] 
self.use_user(self.users['no_permissions']) for endpoint in supported_endpoints: response = self._perform_request_for_endpoint(endpoint=endpoint) msg = '%s "%s" didn\'t return 403 status code (body=%s)' % (endpoint['method'], endpoint['path'], response.body) self.assertEqual(response.status_code, http_client.FORBIDDEN, msg) # Also test ?limit=-1 - non-admin user self.use_user(self.users['observer']) for endpoint in supported_endpoints: if not endpoint.get('is_getall', False): continue response = self.app.get(endpoint['path'] + '?limit=-1', expect_errors=True) msg = '%s "%s" didn\'t return 403 status code (body=%s)' % (endpoint['method'], endpoint['path'], response.body) self.assertEqual(response.status_code, http_client.FORBIDDEN, msg) # Also test ?limit=-1 - admin user self.use_user(self.users['admin']) for endpoint in supported_endpoints: if not endpoint.get('is_getall', False): continue response = self.app.get(endpoint['path'] + '?limit=-1') self.assertEqual(response.status_code, http_client.OK) def test_icon_png_file_is_whitelisted(self): self.use_user(self.users['no_permissions']) # Test that access to icon.png file doesn't require any permissions response = self.app.get('/v1/packs/views/file/dummy_pack_2/icon.png') self.assertEqual(response.status_code, http_client.OK) # Other files should return forbidden response = self.app.get('/v1/packs/views/file/dummy_pack_2/pack.yaml', expect_errors=True) self.assertEqual(response.status_code, http_client.FORBIDDEN) def _perform_request_for_endpoint(self, endpoint): if endpoint['method'] == 'GET': response = self.app.get(endpoint['path'], expect_errors=True) elif endpoint['method'] == 'POST': return self.app.post_json(endpoint['path'], endpoint['payload'], expect_errors=True) elif endpoint['method'] == 'PUT': return self.app.put_json(endpoint['path'], endpoint['payload'], expect_errors=True) elif endpoint['method'] == 'DELETE': return self.app.delete(endpoint['path'], expect_errors=True) else: raise ValueError('Unsupported method: %s' % (endpoint['method'])) return response
1.4375
1
vize/150401052/sunucu.py
hasan-se/blm304
1
655
import os import sys import time from socket import * from os import system, name ip = '127.0.0.1' port = 42 s_soket = socket(AF_INET, SOCK_DGRAM) s_soket.bind((ip, port)) print("\nSunucu Hazir\n") kontrol, istemciAdres = s_soket.recvfrom(4096) s_soket.sendto(bytes("Sunucu hazir", encoding='utf-8'), istemciAdres) i, istemciAdres = s_soket.recvfrom(4096) if(i.decode("utf-8") == "listeleme yap"): dosyalar = "\n".join(os.listdir()) s_soket.sendto(bytes(dosyalar, encoding='utf-8'), istemciAdres) sys.exit() elif(i.decode("utf-8") == "put yap"): cevap = s_soket.recvfrom(4096) if(cevap[0].decode("utf-8") == "mevcut"): dosyaIsmi, istemciAdres = s_soket.recvfrom(4096) dosyaIcerigi = s_soket.recvfrom(4096) if(os.path.exists(dosyaIsmi.decode("utf-8")) == True): s_soket.sendto(bytes("aynisi mevcut", encoding='utf-8'), istemciAdres) karar = s_soket.recvfrom(4096) if(karar[0].decode("utf-8") == "1"): yeniAd = dosyaIsmi.decode("utf-8")[:-4] + " (kopya)" + ".txt" dosyaYeni = open(yeniAd, "wb") dosyaYeni.write(dosyaIcerigi[0]) dosyaYeni.close() print("\nPUT islemi basariyla gerceklesti..") else: dosyaYeni = open(dosyaIsmi, "wb") dosyaYeni.write(dosyaIcerigi[0]) dosyaYeni.close() s_soket.sendto(bytes("tamam", encoding='utf-8'), istemciAdres) print("\nPUT islemi basariyla gerceklesti..") else: print("\nGirilen adda bir dosya istemcide bulunamadi..") elif(i.decode("utf-8") == "get yap"): dosyaIsmi, istemciAdres = s_soket.recvfrom(4096) if (os.path.exists(dosyaIsmi.decode("utf-8")) == True): dosya = open(dosyaIsmi.decode("utf-8"), "rb") s_soket.sendto(bytes("dosya mevcut", encoding='utf-8'), istemciAdres) dosyaIcerik = dosya.read() dosya.close() s_soket.sendto(dosyaIcerik, istemciAdres) kontrol = s_soket.recvfrom(4096) print("\nGET islemi basariyla gerceklesti..") sys.exit() else: print("\n! Bu isimde bir dosya sunucuda mevcut değil") sys.exit() elif(i.decode("utf-8") == "bitir"): s_soket.close() print("\nSunucu kapandi") sys.exit()
1.242188
1
project/settings/production.py
chiehtu/kissaten
0
663
from .base import *


SECRET_KEY = get_env_var('SECRET_KEY')

CSRF_COOKIE_SECURE = True

SESSION_COOKIE_SECURE = True

TEMPLATE_LOADERS = (
    ('django.template.loaders.cached.Loader', (
        'django.template.loaders.filesystem.Loader',
        'django.template.loaders.app_directories.Loader',
    )),
)

EMAIL_BACKEND = 'django.core.mail.backends.smtp.EmailBackend'
EMAIL_HOST = 'smtp.gmail.com'
EMAIL_HOST_USER = get_env_var('EMAIL_HOST_USER')
EMAIL_HOST_PASSWORD = get_env_var('EMAIL_HOST_PASSWORD')
EMAIL_PORT = 587
EMAIL_USE_TLS = True
DEFAULT_FROM_EMAIL = ''

USERENA_USE_HTTPS = True
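get_env_var is pulled in by the star import from .base (not shown here); such a helper is commonly written along these lines. This is a sketch of the assumed behaviour, not the project's actual code:

import os
from django.core.exceptions import ImproperlyConfigured

def get_env_var(name):
    try:
        return os.environ[name]
    except KeyError:
        raise ImproperlyConfigured('Set the %s environment variable' % name)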
0.722656
1
pygments/lexers/trafficscript.py
blu-base/pygments
1
751
""" pygments.lexers.trafficscript ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Lexer for RiverBed's TrafficScript (RTS) language. :copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS. :license: BSD, see LICENSE for details. """ from pygments.lexer import RegexLexer from pygments.token import String, Number, Name, Keyword, Operator, Text, Comment __all__ = ['RtsLexer'] class RtsLexer(RegexLexer): """ For `Riverbed Stingray Traffic Manager <http://www.riverbed.com/stingray>`_ .. versionadded:: 2.1 """ name = 'TrafficScript' aliases = ['trafficscript', 'rts'] filenames = ['*.rts'] tokens = { 'root' : [ (r"'(\\\\|\\[^\\]|[^'\\])*'", String), (r'"', String, 'escapable-string'), (r'(0x[0-9a-fA-F]+|\d+)', Number), (r'\d+\.\d+', Number.Float), (r'\$[a-zA-Z](\w|_)*', Name.Variable), (r'(if|else|for(each)?|in|while|do|break|sub|return|import)', Keyword), (r'[a-zA-Z][\w.]*', Name.Function), (r'[-+*/%=,;(){}<>^.!~|&\[\]\?\:]', Operator), (r'(>=|<=|==|!=|' r'&&|\|\||' r'\+=|.=|-=|\*=|/=|%=|<<=|>>=|&=|\|=|\^=|' r'>>|<<|' r'\+\+|--|=>)', Operator), (r'[ \t\r]+', Text), (r'#[^\n]*', Comment), ], 'escapable-string' : [ (r'\\[tsn]', String.Escape), (r'[^"]', String), (r'"', String, '#pop'), ], }
1.625
2
week2/Assignment2Answer.py
RayshineRen/Introduction_to_Data_Science_in_Python
1
759
# -*- coding: utf-8 -*- """ Created on Fri Sep 18 21:56:15 2020 @author: Ray @email: <EMAIL> @wechat: RayTing0305 """ ''' Question 1 Write a function called proportion_of_education which returns the proportion of children in the dataset who had a mother with the education levels equal to less than high school (<12), high school (12), more than high school but not a college graduate (>12) and college degree. This function should return a dictionary in the form of (use the correct numbers, do not round numbers): {"less than high school":0.2, "high school":0.4, "more than high school but not college":0.2, "college":0.2} ''' import scipy.stats as stats import numpy as np import pandas as pd df = pd.read_csv("./assets/NISPUF17.csv") def proportion_of_education(): # your code goes here # YOUR CODE HERE df_edu = df.EDUC1 edu_list = [1, 2, 3, 4] zero_df = pd.DataFrame(np.zeros((df_edu.shape[0], len(edu_list))), columns=edu_list) for edu in edu_list: zero_df[edu][df_edu==edu]=1 #zero_df sum_ret = zero_df.sum(axis=0) name_l = ["less than high school", "high school", "more than high school but not college", "college"] rat = sum_ret.values/sum(sum_ret.values) dic = dict() for i in range(4): dic[name_l[i]] = rat[i] return dic raise NotImplementedError() assert type(proportion_of_education())==type({}), "You must return a dictionary." assert len(proportion_of_education()) == 4, "You have not returned a dictionary with four items in it." assert "less than high school" in proportion_of_education().keys(), "You have not returned a dictionary with the correct keys." assert "high school" in proportion_of_education().keys(), "You have not returned a dictionary with the correct keys." assert "more than high school but not college" in proportion_of_education().keys(), "You have not returned a dictionary with the correct keys." assert "college" in proportion_of_education().keys(), "You have not returned a dictionary with the correct" ''' Question 2 Let's explore the relationship between being fed breastmilk as a child and getting a seasonal influenza vaccine from a healthcare provider. Return a tuple of the average number of influenza vaccines for those children we know received breastmilk as a child and those who know did not. This function should return a tuple in the form (use the correct numbers: (2.5, 0.1) ''' def average_influenza_doses(): # YOUR CODE HERE #是否喂养母乳 fed_breastmilk = list(df.groupby(by='CBF_01')) be_fed_breastmilk = fed_breastmilk[0][1] not_fed_breastmilk = fed_breastmilk[1][1] #喂养母乳的influenza数目 be_fed_breastmilk_influenza = be_fed_breastmilk.P_NUMFLU num_be_fed_breastmilk_influenza = be_fed_breastmilk_influenza.dropna().mean() #未喂养母乳的influenza数目 not_be_fed_breastmilk_influenza = not_fed_breastmilk.P_NUMFLU num_not_be_fed_breastmilk_influenza = not_be_fed_breastmilk_influenza.dropna().mean() return num_be_fed_breastmilk_influenza, num_not_be_fed_breastmilk_influenza raise NotImplementedError() assert len(average_influenza_doses())==2, "Return two values in a tuple, the first for yes and the second for no." ''' Question 3 It would be interesting to see if there is any evidence of a link between vaccine effectiveness and sex of the child. Calculate the ratio of the number of children who contracted chickenpox but were vaccinated against it (at least one varicella dose) versus those who were vaccinated but did not contract chicken pox. Return results by sex. 
This function should return a dictionary in the form of (use the correct numbers): {"male":0.2, "female":0.4} Note: To aid in verification, the chickenpox_by_sex()['female'] value the autograder is looking for starts with the digits 0.0077. ''' def chickenpox_by_sex(): # YOUR CODE HERE #是否感染Varicella cpox = df.HAD_CPOX #cpox.value_counts() cpox_group = list(df.groupby(by='HAD_CPOX')) have_cpox = cpox_group[0][1] not_have_cpox = cpox_group[1][1] #男女分开 have_cpox_group = list(have_cpox.groupby(by='SEX')) not_have_cpox_group = list(not_have_cpox.groupby(by='SEX')) have_cpox_boy = have_cpox_group[0][1] have_cpox_girl = have_cpox_group[1][1] not_have_cpox_boy = not_have_cpox_group[0][1] not_have_cpox_girl = not_have_cpox_group[1][1] #接种感染 #have_cpox_boy_injected = have_cpox_boy[(have_cpox_boy['P_NUMMMR']>0) | (have_cpox_boy['P_NUMVRC']>0)] have_cpox_boy_injected = have_cpox_boy[(have_cpox_boy['P_NUMVRC']>0)] num_have_cpox_boy_injected = have_cpox_boy_injected.count()['SEQNUMC'] have_cpox_girl_injected = have_cpox_girl[(have_cpox_girl['P_NUMVRC']>0)] num_have_cpox_girl_injected = have_cpox_girl_injected.count()['SEQNUMC'] #接种未感染 not_have_cpox_boy_injected = not_have_cpox_boy[(not_have_cpox_boy['P_NUMVRC']>0)] num_not_have_cpox_boy_injected = not_have_cpox_boy_injected.count()['SEQNUMC'] not_have_cpox_girl_injected = not_have_cpox_girl[(not_have_cpox_girl['P_NUMVRC']>0)] num_not_have_cpox_girl_injected = not_have_cpox_girl_injected.count()['SEQNUMC'] #计算比例 ratio_boy = num_have_cpox_boy_injected / num_not_have_cpox_boy_injected ratio_girl = num_have_cpox_girl_injected / num_not_have_cpox_girl_injected dic = {} dic['male'] = ratio_boy dic['female'] = ratio_girl return dic raise NotImplementedError() assert len(chickenpox_by_sex())==2, "Return a dictionary with two items, the first for males and the second for females." ''' Question 4 A correlation is a statistical relationship between two variables. If we wanted to know if vaccines work, we might look at the correlation between the use of the vaccine and whether it results in prevention of the infection or disease [1]. In this question, you are to see if there is a correlation between having had the chicken pox and the number of chickenpox vaccine doses given (varicella). Some notes on interpreting the answer. The had_chickenpox_column is either 1 (for yes) or 2 (for no), and the num_chickenpox_vaccine_column is the number of doses a child has been given of the varicella vaccine. A positive correlation (e.g., corr > 0) means that an increase in had_chickenpox_column (which means more no’s) would also increase the values of num_chickenpox_vaccine_column (which means more doses of vaccine). If there is a negative correlation (e.g., corr < 0), it indicates that having had chickenpox is related to an increase in the number of vaccine doses. Also, pval is the probability that we observe a correlation between had_chickenpox_column and num_chickenpox_vaccine_column which is greater than or equal to a particular value occurred by chance. A small pval means that the observed correlation is highly unlikely to occur by chance. In this case, pval should be very small (will end in e-18 indicating a very small number). [1] This isn’t really the full picture, since we are not looking at when the dose was given. It’s possible that children had chickenpox and then their parents went to get them the vaccine. Does this dataset have the data we would need to investigate the timing of the dose? 
''' def corr_chickenpox(): cpox = df[(df.P_NUMVRC).notnull()] have_cpox = cpox[(cpox.HAD_CPOX==1) | (cpox.HAD_CPOX==2)] df1=pd.DataFrame({"had_chickenpox_column":have_cpox.HAD_CPOX, "num_chickenpox_vaccine_column":have_cpox.P_NUMVRC}) corr, pval=stats.pearsonr(df1["had_chickenpox_column"],df1["num_chickenpox_vaccine_column"]) return corr raise NotImplementedError()
2.703125
3
tests/unit_tests/test_nn/test_converters/test_tensorflow/test_Dropout.py
samysweb/dnnv
5
767
import numpy as np

from dnnv.nn.converters.tensorflow import *
from dnnv.nn.operations import *

TOL = 1e-6


def test_Dropout_consts():
    x = np.array([3, 4]).astype(np.float32)
    op = Dropout(x)
    tf_op = TensorflowConverter().visit(op)
    result_ = tf_op()
    assert isinstance(result_, tuple)
    assert len(result_) == 2
    result, none = result_
    assert none is None
    y = np.array([3, 4]).astype(np.float32)
    assert np.all(result >= (y - TOL))
    assert np.all(result <= (y + TOL))


def test_Dropout_x_is_op():
    x = np.array([3, 4]).astype(np.float32)
    input_op = Input((2,), np.dtype(np.float32))
    op = Dropout(input_op)
    tf_op = TensorflowConverter().visit(op)
    result_ = tf_op(x)
    assert isinstance(result_, tuple)
    assert len(result_) == 2
    result, none = result_
    assert none is None
    y = np.array([3, 4]).astype(np.float32)
    assert np.all(result >= (y - TOL))
    assert np.all(result <= (y + TOL))
1.835938
2
user_service/user_service/api.py
Ziang-Lu/Flask-Blog
0
775
# -*- coding: utf-8 -*-

"""
API definition module.
"""

from flask import Blueprint
from flask_restful import Api

from .resources.user import UserAuth, UserItem, UserList, UserFollow

# Create an API-related blueprint
api_bp = Blueprint(name='api', import_name=__name__)

api = Api(api_bp)
api.add_resource(UserList, '/users')
api.add_resource(UserItem, '/users/<int:id>')
api.add_resource(UserAuth, '/user-auth')
api.add_resource(
    UserFollow, '/user-follow/<int:follower_id>/<followed_username>'
)
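# Wiring sketch (assumed; this module itself does not create the app): the
# blueprint is typically registered on the Flask application elsewhere, for
# example in an app factory. The url_prefix used here is illustrative only.
if __name__ == '__main__':
    from flask import Flask

    app = Flask(__name__)
    app.register_blueprint(api_bp, url_prefix='/api')
    app.run(debug=True)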
1.375
1
db.py
RunnerPro/RunnerProApi
0
783
from sqlalchemy import create_engine
from sqlalchemy.orm import scoped_session
from sqlalchemy.orm import sessionmaker

from settings import DB_URI

Session = sessionmaker(autocommit=False, autoflush=False, bind=create_engine(DB_URI))
session = scoped_session(Session)
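# Usage sketch (the `User` model below is a hypothetical example, not defined in
# this project file): scoped_session gives each thread its own Session, proxied
# through the module-level `session` object.
#
#     users = session.query(User).all()
#     session.add(User(name="example"))
#     session.commit()
#     session.remove()  # release the thread-local session, e.g. at request teardown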
1.046875
1
u24_lymphocyte/third_party/treeano/sandbox/nodes/gradnet.py
ALSM-PhD/quip_classification
45
807
import theano import theano.tensor as T import treeano import treeano.nodes as tn fX = theano.config.floatX @treeano.register_node("grad_net_interpolation") class GradNetInterpolationNode(treeano.NodeImpl): """ interpolates outputs between 2 nodes """ hyperparameter_names = ("late_gate",) children_container = treeano.core.DictChildrenContainerSchema( early=treeano.core.ChildContainer, late=treeano.core.ChildContainer, ) input_keys = ("early", "late") def init_state(self, network): children = self.raw_children() early = children["early"] late = children["late"] network.forward_input_to(early.name) network.forward_input_to(late.name) network.take_output_from(early.name, to_key="early") network.take_output_from(late.name, to_key="late") def compute_output(self, network, early_vw, late_vw): late_gate = network.find_hyperparameter(["late_gate"], 1) out_var = (early_vw.variable * (1 - late_gate) + late_vw.variable * late_gate) out_shape = [] assert early_vw.ndim == late_vw.ndim for e, l in zip(early_vw.shape, late_vw.shape): if e is None and l is None: out_shape.append(None) elif e is None: out_shape.append(l) elif l is None: out_shape.append(e) else: assert e == l out_shape.append(e) network.create_vw( "default", variable=out_var, shape=tuple(out_shape), tags={"output"}, ) @treeano.register_node("grad_net_optimizer_interpolation") class _GradNetOptimizerInterpolationNode(treeano.Wrapper1NodeImpl): hyperparameter_names = ("late_gate", "gradnet_epsilon", "epsilon", "multiplicative_inverse_for_early_gate") def init_state(self, network): super(_GradNetOptimizerInterpolationNode, self).init_state(network) epsilon = network.find_hyperparameter(["gradnet_epsilon", "epsilon"], 1e-3) late_gate = network.find_hyperparameter(["late_gate"], 1) late_gate = treeano.utils.as_fX(late_gate) # NOTE: late gate cannot be 0 because the early gate is divide by it # AND multiplied by it. Clipping only for the early gate will cause # no updates to occur. 
late_gate = T.clip(late_gate, epsilon, 1) use_multiplicative_inverse = network.find_hyperparameter( ["multiplicative_inverse_for_early_gate"], False) if use_multiplicative_inverse: early_gate = epsilon / late_gate else: early_gate = 1 - late_gate network.set_hyperparameter(self.name + "_late_update_scale", "update_scale_factor", late_gate) network.set_hyperparameter(self.name + "_early_update_scale", "update_scale_factor", # these updates are also multiplied by # late_gate later on, so rescale them early_gate / late_gate) def GradNetOptimizerInterpolationNode(name, children, early, late, **kwargs): """ interpolates updates from 2 optimizers nodes NOTE: this is a hack to take in node constructors as arguments """ assert set(children.keys()) == {"subtree", "cost"} subtree = children["subtree"] cost = children["cost"] cost_ref = tn.ReferenceNode(name + "_costref", reference=cost.name) late_subtree = tn.UpdateScaleNode(name + "_late_update_scale", subtree) late_node = late(name + "_late", {"subtree": late_subtree, "cost": cost}) early_subtree = tn.UpdateScaleNode(name + "_early_update_scale", late_node) early_node = early(name + "_early", {"subtree": early_subtree, "cost": cost_ref}) # NOTE: need separate node to forward hyperparameter return _GradNetOptimizerInterpolationNode(name, early_node, **kwargs) def GradualSimpleBatchNormalizationNode(name): from treeano.sandbox.nodes import batch_normalization as bn return GradNetInterpolationNode( name, {"early": bn.SimpleBatchNormalizationNode(name + "_bn"), "late": tn.IdentityNode(name + "_identity")}) GradualBNNode = GradualSimpleBatchNormalizationNode
1.882813
2
python/testData/resolve/TryExceptElse.py
jnthn/intellij-community
2
815
try: name = "" except: pass else: print na<ref>me
-0.025879
0
trainloops/listeners/cluster_killswitch.py
Gerryflap/master_thesis
0
823
""" Cancelling jobs on the University cluster forces programs to instantly quit, which sometimes crashes cluster nodes. As a remedy, this killswitch listener will stop the experiment in a nicer way to prevent this from happening. The experiment will be stopped if a file named "stop" is encountered in the results folder of the experiment. The existence of this file is checked after each epoch. """ import os from trainloops.listeners.listener import Listener class KillSwitchListener(Listener): def __init__(self, experiment_path): super().__init__() self.path = os.path.join(experiment_path, "stop") def initialize(self): pass def report(self, state_dict): if os.path.exists(self.path): exit()
1.6875
2
models2.py
Lydia-Tan/MindLife
1
839
import nltk
import re
import sys
from sys import argv
from nltk.sentiment.vader import SentimentIntensityAnalyzer


def ajay(ans):
    ajay = SentimentIntensityAnalyzer()
    completeScore = 0
    questionWeights = [0.05, 0.20, 0.05, 0.05, 0.05, 0.20, 0.05, 0.05, 0.20, 0.10]
    print(ans)
    ansList = ans.split("$")
    for j in range(10):
        print(ansList[j])
    for i in range(10):
        results = []
        score = 0
        count = 0
        # print(count)
        for paragraph in ansList:
            for line in paragraph:
                # Split paragraph on basis of '.' or '?' or '!'.
                for l in re.split(r"\.|\?|\!", paragraph):
                    # print(l)
                    ss = ajay.polarity_scores(l)
                    results.append(ss)
                    # print(ss['compound'])
                    score += ss['compound']
                    count += 1
        completeScore += (score/count)*questionWeights[i]
    # print(completeScore)
    if (completeScore >= 0.1):
        return "False Alarm! You don't have Depression."
    elif (completeScore >= -0.1):
        return ("Seasonal affective disorder (SAD). This type of depression " +
                "emerges as days get shorter in the fall and winter. The mood " +
                "change may result from alterations in the body's natural daily " +
                "rhythms, in the eyes' sensitivity to light, or in how chemical " +
                "messengers like serotonin and melatonin function. The leading " +
                "treatment is light therapy, which involves daily sessions sitting " +
                "close to an especially intense light source. The usual treatments " +
                "for depression, such as psychotherapy and medication, may also be " +
                "effective.")
    elif (completeScore >= -0.4):
        return ("Persistent depressive disorder. Formerly called dysthymia, this " +
                "type of depression refers to low mood that has lasted for at least " +
                "two years but may not reach the intensity of major depression. Many " +
                "people with this type of depression type are able to function day to " +
                "but feel low or joyless much of the time. Some depressive symptoms, " +
                "such as appetite and sleep changes, low energy, low self-esteem, or " +
                "hopelessness, are usually part of the picture.")
    else:
        return ("The classic depression type, major depression is a state where a dark " +
                "mood is all-consuming and one loses interest in activities, even ones " +
                "that are usually pleasurable. Symptoms of this type of depression " +
                "include trouble sleeping, changes in appetite or weight, loss of energy, " +
                "and feeling worthless. Thoughts of death or suicide may occur. It is " +
                "usually treated with psychotherapy and medication. For some people with " +
                "severe depression that isn't alleviated with psychotherapy or antidepressant " +
                "medications, electroconvulsive therapy may be effective.")
2.34375
2
comtypes/_meta.py
phuslu/pyMSAA
23
847
# comtypes._meta helper module
from ctypes import POINTER, c_void_p, cast
import comtypes

################################################################
# metaclass for CoClass (in comtypes/__init__.py)

def _wrap_coclass(self):
    # We are an IUnknown pointer, represented as a c_void_p instance,
    # but we really want this interface:
    itf = self._com_interfaces_[0]
    punk = cast(self, POINTER(itf))
    result = punk.QueryInterface(itf)
    result.__dict__["__clsid"] = str(self._reg_clsid_)
    return result

def _coclass_from_param(cls, obj):
    if isinstance(obj, (cls._com_interfaces_[0], cls)):
        return obj
    raise TypeError(obj)

#
# The mro() of a POINTER(App) type, where class App is a subclass of CoClass:
#
#  POINTER(App)
#   App
#    CoClass
#     c_void_p
#      _SimpleCData
#       _CData
#        object

class _coclass_meta(type):
    # metaclass for CoClass
    #
    # When a CoClass subclass is created, create a POINTER(...) type
    # for that class, with bases <coclass> and c_void_p.  Also, the
    # POINTER(...) type gets a __ctypes_from_outparam__ method which
    # will QueryInterface for the default interface: the first one on
    # the coclass' _com_interfaces_ list.
    def __new__(cls, name, bases, namespace):
        klass = type.__new__(cls, name, bases, namespace)
        if bases == (object,):
            return klass
        # XXX We should insist that a _reg_clsid_ is present.
        if "_reg_clsid_" in namespace:
            clsid = namespace["_reg_clsid_"]
            comtypes.com_coclass_registry[str(clsid)] = klass
        PTR = _coclass_pointer_meta("POINTER(%s)" % klass.__name__,
                                    (klass, c_void_p),
                                    {"__ctypes_from_outparam__": _wrap_coclass,
                                     "from_param": classmethod(_coclass_from_param),
                                     })
        from ctypes import _pointer_type_cache
        _pointer_type_cache[klass] = PTR
        return klass

# will not work if we change the order of the two base classes!
class _coclass_pointer_meta(type(c_void_p), _coclass_meta):
    pass
1.820313
2
tests/test_engine.py
Foxboron/python-adblock
35
863
import adblock
import pytest

SMALL_FILTER_LIST = """
||wikipedia.org^
||old.reddit.com^
||lobste.rs^
"""


def empty_engine():
    return adblock.Engine(adblock.FilterSet())


def test_engine_creation_and_blocking():
    filter_set = adblock.FilterSet(debug=True)
    filter_set.add_filter_list(SMALL_FILTER_LIST)
    engine = adblock.Engine(filter_set=filter_set)

    blocker_result_wikipedia = engine.check_network_urls(
        url="https://wikipedia.org/img.png",
        source_url="https://google.com/",
        request_type="image",
    )
    assert isinstance(blocker_result_wikipedia, adblock.BlockerResult)
    assert blocker_result_wikipedia.matched

    blocker_result_facebook = engine.check_network_urls(
        "https://facebook.com/directory/img.png",
        "https://old.reddit.com/r/all",
        "image",
    )
    assert isinstance(blocker_result_facebook, adblock.BlockerResult)
    assert not blocker_result_facebook.matched


def test_serde_file(tmpdir):
    path = str(tmpdir / "cache.dat")

    engine0 = empty_engine()
    with pytest.raises(FileNotFoundError):
        # We haven't created the cache.dat file, so we should get an exception
        # when attempting to deserialize.
        engine0.deserialize_from_file(path)

    engine1 = empty_engine()
    serialization_result = engine1.serialize_to_file(path)
    assert serialization_result is None

    engine2 = empty_engine()
    deserialization_result = engine2.deserialize_from_file(path)
    assert deserialization_result is None


def test_deserialize_corrupt(tmpdir):
    path = str(tmpdir / "corrupt_cache.dat")
    with open(path, "w", encoding="utf-8") as f:
        f.write("abc")

    engine = empty_engine()
    with pytest.raises(adblock.DeserializationError):
        engine.deserialize_from_file(path)
    with pytest.raises(adblock.DeserializationError):
        engine.deserialize(b"abc")


def test_serde():
    engine = empty_engine()
    serialization_result = engine.serialize()
    assert isinstance(serialization_result, bytes)

    engine2 = empty_engine()
    deserialization_result = engine2.deserialize(serialization_result)
    assert deserialization_result is None
1.515625
2
app/django_first/news/migrations/0002_movies_year.py
vvuri/flask_pipeline
0
871
# Generated by Django 4.0.1 on 2022-01-19 23:58

from django.db import migrations, models


class Migration(migrations.Migration):

    dependencies = [
        ('news', '0001_initial'),
    ]

    operations = [
        migrations.AddField(
            model_name='movies',
            name='year',
            field=models.CharField(max_length=4, null=True),
        ),
    ]
0.992188
1
timm/utils/checkpoint_saver.py
Robert-JunWang/pytorch-image-models
17,769
879
""" Checkpoint Saver Track top-n training checkpoints and maintain recovery checkpoints on specified intervals. Hacked together by / Copyright 2020 <NAME> """ import glob import operator import os import logging import torch from .model import unwrap_model, get_state_dict _logger = logging.getLogger(__name__) class CheckpointSaver: def __init__( self, model, optimizer, args=None, model_ema=None, amp_scaler=None, checkpoint_prefix='checkpoint', recovery_prefix='recovery', checkpoint_dir='', recovery_dir='', decreasing=False, max_history=10, unwrap_fn=unwrap_model): # objects to save state_dicts of self.model = model self.optimizer = optimizer self.args = args self.model_ema = model_ema self.amp_scaler = amp_scaler # state self.checkpoint_files = [] # (filename, metric) tuples in order of decreasing betterness self.best_epoch = None self.best_metric = None self.curr_recovery_file = '' self.last_recovery_file = '' # config self.checkpoint_dir = checkpoint_dir self.recovery_dir = recovery_dir self.save_prefix = checkpoint_prefix self.recovery_prefix = recovery_prefix self.extension = '.pth.tar' self.decreasing = decreasing # a lower metric is better if True self.cmp = operator.lt if decreasing else operator.gt # True if lhs better than rhs self.max_history = max_history self.unwrap_fn = unwrap_fn assert self.max_history >= 1 def save_checkpoint(self, epoch, metric=None): assert epoch >= 0 tmp_save_path = os.path.join(self.checkpoint_dir, 'tmp' + self.extension) last_save_path = os.path.join(self.checkpoint_dir, 'last' + self.extension) self._save(tmp_save_path, epoch, metric) if os.path.exists(last_save_path): os.unlink(last_save_path) # required for Windows support. os.rename(tmp_save_path, last_save_path) worst_file = self.checkpoint_files[-1] if self.checkpoint_files else None if (len(self.checkpoint_files) < self.max_history or metric is None or self.cmp(metric, worst_file[1])): if len(self.checkpoint_files) >= self.max_history: self._cleanup_checkpoints(1) filename = '-'.join([self.save_prefix, str(epoch)]) + self.extension save_path = os.path.join(self.checkpoint_dir, filename) os.link(last_save_path, save_path) self.checkpoint_files.append((save_path, metric)) self.checkpoint_files = sorted( self.checkpoint_files, key=lambda x: x[1], reverse=not self.decreasing) # sort in descending order if a lower metric is not better checkpoints_str = "Current checkpoints:\n" for c in self.checkpoint_files: checkpoints_str += ' {}\n'.format(c) _logger.info(checkpoints_str) if metric is not None and (self.best_metric is None or self.cmp(metric, self.best_metric)): self.best_epoch = epoch self.best_metric = metric best_save_path = os.path.join(self.checkpoint_dir, 'model_best' + self.extension) if os.path.exists(best_save_path): os.unlink(best_save_path) os.link(last_save_path, best_save_path) return (None, None) if self.best_metric is None else (self.best_metric, self.best_epoch) def _save(self, save_path, epoch, metric=None): save_state = { 'epoch': epoch, 'arch': type(self.model).__name__.lower(), 'state_dict': get_state_dict(self.model, self.unwrap_fn), 'optimizer': self.optimizer.state_dict(), 'version': 2, # version < 2 increments epoch before save } if self.args is not None: save_state['arch'] = self.args.model save_state['args'] = self.args if self.amp_scaler is not None: save_state[self.amp_scaler.state_dict_key] = self.amp_scaler.state_dict() if self.model_ema is not None: save_state['state_dict_ema'] = get_state_dict(self.model_ema, self.unwrap_fn) if metric is not None: save_state['metric'] 
= metric torch.save(save_state, save_path) def _cleanup_checkpoints(self, trim=0): trim = min(len(self.checkpoint_files), trim) delete_index = self.max_history - trim if delete_index < 0 or len(self.checkpoint_files) <= delete_index: return to_delete = self.checkpoint_files[delete_index:] for d in to_delete: try: _logger.debug("Cleaning checkpoint: {}".format(d)) os.remove(d[0]) except Exception as e: _logger.error("Exception '{}' while deleting checkpoint".format(e)) self.checkpoint_files = self.checkpoint_files[:delete_index] def save_recovery(self, epoch, batch_idx=0): assert epoch >= 0 filename = '-'.join([self.recovery_prefix, str(epoch), str(batch_idx)]) + self.extension save_path = os.path.join(self.recovery_dir, filename) self._save(save_path, epoch) if os.path.exists(self.last_recovery_file): try: _logger.debug("Cleaning recovery: {}".format(self.last_recovery_file)) os.remove(self.last_recovery_file) except Exception as e: _logger.error("Exception '{}' while removing {}".format(e, self.last_recovery_file)) self.last_recovery_file = self.curr_recovery_file self.curr_recovery_file = save_path def find_recovery(self): recovery_path = os.path.join(self.recovery_dir, self.recovery_prefix) files = glob.glob(recovery_path + '*' + self.extension) files = sorted(files) return files[0] if len(files) else ''
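# Usage sketch (model, optimizer, and the eval metric are hypothetical; this just
# mirrors how the CheckpointSaver class above is typically driven from a loop):
#
#     saver = CheckpointSaver(model, optimizer, checkpoint_dir='./output',
#                             decreasing=False, max_history=5)
#     for epoch in range(num_epochs):
#         # ... train and evaluate, producing eval_top1 ...
#         best_metric, best_epoch = saver.save_checkpoint(epoch, metric=eval_top1)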
2.5
2
test/unit/__init__.py
thiagodasilva/swift
0
887
# Copyright (c) 2010-2012 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. """ Swift tests """ from __future__ import print_function import os import copy import logging import errno from six.moves import range import sys from contextlib import contextmanager, closing from collections import defaultdict, Iterable import itertools from numbers import Number from tempfile import NamedTemporaryFile import time import eventlet from eventlet.green import socket from tempfile import mkdtemp from shutil import rmtree from swift.common.utils import Timestamp, NOTICE from test import get_config from swift.common import swob, utils from swift.common.ring import Ring, RingData from hashlib import md5 import logging.handlers from six.moves.http_client import HTTPException from swift.common import storage_policy from swift.common.storage_policy import (StoragePolicy, ECStoragePolicy, VALID_EC_TYPES) import functools import six.moves.cPickle as pickle from gzip import GzipFile import mock as mocklib import inspect EMPTY_ETAG = md5().hexdigest() # try not to import this module from swift if not os.path.basename(sys.argv[0]).startswith('swift'): # never patch HASH_PATH_SUFFIX AGAIN! utils.HASH_PATH_SUFFIX = 'endcap' EC_TYPE_PREFERENCE = [ 'liberasurecode_rs_vand', 'jerasure_rs_vand', ] for eclib_name in EC_TYPE_PREFERENCE: if eclib_name in VALID_EC_TYPES: break else: raise SystemExit('ERROR: unable to find suitable PyECLib type' ' (none of %r found in %r)' % ( EC_TYPE_PREFERENCE, VALID_EC_TYPES, )) DEFAULT_TEST_EC_TYPE = eclib_name def patch_policies(thing_or_policies=None, legacy_only=False, with_ec_default=False, fake_ring_args=None): if isinstance(thing_or_policies, ( Iterable, storage_policy.StoragePolicyCollection)): return PatchPolicies(thing_or_policies, fake_ring_args=fake_ring_args) if legacy_only: default_policies = [ StoragePolicy(0, name='legacy', is_default=True), ] default_ring_args = [{}] elif with_ec_default: default_policies = [ ECStoragePolicy(0, name='ec', is_default=True, ec_type=DEFAULT_TEST_EC_TYPE, ec_ndata=10, ec_nparity=4, ec_segment_size=4096), StoragePolicy(1, name='unu'), ] default_ring_args = [{'replicas': 14}, {}] else: default_policies = [ StoragePolicy(0, name='nulo', is_default=True), StoragePolicy(1, name='unu'), ] default_ring_args = [{}, {}] fake_ring_args = fake_ring_args or default_ring_args decorator = PatchPolicies(default_policies, fake_ring_args=fake_ring_args) if not thing_or_policies: return decorator else: # it's a thing, we return the wrapped thing instead of the decorator return decorator(thing_or_policies) class PatchPolicies(object): """ Why not mock.patch? In my case, when used as a decorator on the class it seemed to patch setUp at the wrong time (i.e. 
in setup the global wasn't patched yet) """ def __init__(self, policies, fake_ring_args=None): if isinstance(policies, storage_policy.StoragePolicyCollection): self.policies = policies else: self.policies = storage_policy.StoragePolicyCollection(policies) self.fake_ring_args = fake_ring_args or [None] * len(self.policies) def _setup_rings(self): """ Our tests tend to use the policies rings like their own personal playground - which can be a problem in the particular case of a patched TestCase class where the FakeRing objects are scoped in the call to the patch_policies wrapper outside of the TestCase instance which can lead to some bled state. To help tests get better isolation without having to think about it, here we're capturing the args required to *build* a new FakeRing instances so we can ensure each test method gets a clean ring setup. The TestCase can always "tweak" these fresh rings in setUp - or if they'd prefer to get the same "reset" behavior with custom FakeRing's they can pass in their own fake_ring_args to patch_policies instead of setting the object_ring on the policy definitions. """ for policy, fake_ring_arg in zip(self.policies, self.fake_ring_args): if fake_ring_arg is not None: policy.object_ring = FakeRing(**fake_ring_arg) def __call__(self, thing): if isinstance(thing, type): return self._patch_class(thing) else: return self._patch_method(thing) def _patch_class(self, cls): """ Creating a new class that inherits from decorated class is the more common way I've seen class decorators done - but it seems to cause infinite recursion when super is called from inside methods in the decorated class. """ orig_setUp = cls.setUp orig_tearDown = cls.tearDown def setUp(cls_self): self._orig_POLICIES = storage_policy._POLICIES if not getattr(cls_self, '_policies_patched', False): storage_policy._POLICIES = self.policies self._setup_rings() cls_self._policies_patched = True orig_setUp(cls_self) def tearDown(cls_self): orig_tearDown(cls_self) storage_policy._POLICIES = self._orig_POLICIES cls.setUp = setUp cls.tearDown = tearDown return cls def _patch_method(self, f): @functools.wraps(f) def mywrapper(*args, **kwargs): self._orig_POLICIES = storage_policy._POLICIES try: storage_policy._POLICIES = self.policies self._setup_rings() return f(*args, **kwargs) finally: storage_policy._POLICIES = self._orig_POLICIES return mywrapper def __enter__(self): self._orig_POLICIES = storage_policy._POLICIES storage_policy._POLICIES = self.policies def __exit__(self, *args): storage_policy._POLICIES = self._orig_POLICIES class FakeRing(Ring): def __init__(self, replicas=3, max_more_nodes=0, part_power=0, base_port=1000): """ :param part_power: make part calculation based on the path If you set a part_power when you setup your FakeRing the parts you get out of ring methods will actually be based on the path - otherwise we exercise the real ring code, but ignore the result and return 1. 
""" self._base_port = base_port self.max_more_nodes = max_more_nodes self._part_shift = 32 - part_power # 9 total nodes (6 more past the initial 3) is the cap, no matter if # this is set higher, or R^2 for R replicas self.set_replicas(replicas) self._reload() def _reload(self): self._rtime = time.time() def set_replicas(self, replicas): self.replicas = replicas self._devs = [] for x in range(self.replicas): ip = '10.0.0.%s' % x port = self._base_port + x self._devs.append({ 'ip': ip, 'replication_ip': ip, 'port': port, 'replication_port': port, 'device': 'sd' + (chr(ord('a') + x)), 'zone': x % 3, 'region': x % 2, 'id': x, }) @property def replica_count(self): return self.replicas def _get_part_nodes(self, part): return [dict(node, index=i) for i, node in enumerate(list(self._devs))] def get_more_nodes(self, part): for x in range(self.replicas, (self.replicas + self.max_more_nodes)): yield {'ip': '10.0.0.%s' % x, 'replication_ip': '10.0.0.%s' % x, 'port': self._base_port + x, 'replication_port': self._base_port + x, 'device': 'sda', 'zone': x % 3, 'region': x % 2, 'id': x} def write_fake_ring(path, *devs): """ Pretty much just a two node, two replica, 2 part power ring... """ dev1 = {'id': 0, 'zone': 0, 'device': 'sda1', 'ip': '127.0.0.1', 'port': 6000} dev2 = {'id': 0, 'zone': 0, 'device': 'sdb1', 'ip': '127.0.0.1', 'port': 6000} dev1_updates, dev2_updates = devs or ({}, {}) dev1.update(dev1_updates) dev2.update(dev2_updates) replica2part2dev_id = [[0, 1, 0, 1], [1, 0, 1, 0]] devs = [dev1, dev2] part_shift = 30 with closing(GzipFile(path, 'wb')) as f: pickle.dump(RingData(replica2part2dev_id, devs, part_shift), f) class FabricatedRing(Ring): """ When a FakeRing just won't do - you can fabricate one to meet your tests needs. """ def __init__(self, replicas=6, devices=8, nodes=4, port=6000, part_power=4): self.devices = devices self.nodes = nodes self.port = port self.replicas = 6 self.part_power = part_power self._part_shift = 32 - self.part_power self._reload() def _reload(self, *args, **kwargs): self._rtime = time.time() * 2 if hasattr(self, '_replica2part2dev_id'): return self._devs = [{ 'region': 1, 'zone': 1, 'weight': 1.0, 'id': i, 'device': 'sda%d' % i, 'ip': '10.0.0.%d' % (i % self.nodes), 'replication_ip': '10.0.0.%d' % (i % self.nodes), 'port': self.port, 'replication_port': self.port, } for i in range(self.devices)] self._replica2part2dev_id = [ [None] * 2 ** self.part_power for i in range(self.replicas) ] dev_ids = itertools.cycle(range(self.devices)) for p in range(2 ** self.part_power): for r in range(self.replicas): self._replica2part2dev_id[r][p] = next(dev_ids) class FakeMemcache(object): def __init__(self): self.store = {} def get(self, key): return self.store.get(key) def keys(self): return self.store.keys() def set(self, key, value, time=0): self.store[key] = value return True def incr(self, key, time=0): self.store[key] = self.store.setdefault(key, 0) + 1 return self.store[key] @contextmanager def soft_lock(self, key, timeout=0, retries=5): yield True def delete(self, key): try: del self.store[key] except Exception: pass return True def readuntil2crlfs(fd): rv = '' lc = '' crlfs = 0 while crlfs < 2: c = fd.read(1) if not c: raise ValueError("didn't get two CRLFs; just got %r" % rv) rv = rv + c if c == '\r' and lc != '\n': crlfs = 0 if lc == '\r' and c == '\n': crlfs += 1 lc = c return rv def connect_tcp(hostport): rv = socket.socket() rv.connect(hostport) return rv @contextmanager def tmpfile(content): with NamedTemporaryFile('w', delete=False) as f: file_name = f.name 
f.write(str(content)) try: yield file_name finally: os.unlink(file_name) xattr_data = {} def _get_inode(fd): if not isinstance(fd, int): try: fd = fd.fileno() except AttributeError: return os.stat(fd).st_ino return os.fstat(fd).st_ino def _setxattr(fd, k, v): inode = _get_inode(fd) data = xattr_data.get(inode, {}) data[k] = v xattr_data[inode] = data def _getxattr(fd, k): inode = _get_inode(fd) data = xattr_data.get(inode, {}).get(k) if not data: raise IOError(errno.ENODATA, "Fake IOError") return data import xattr xattr.setxattr = _setxattr xattr.getxattr = _getxattr @contextmanager def temptree(files, contents=''): # generate enough contents to fill the files c = len(files) contents = (list(contents) + [''] * c)[:c] tempdir = mkdtemp() for path, content in zip(files, contents): if os.path.isabs(path): path = '.' + path new_path = os.path.join(tempdir, path) subdir = os.path.dirname(new_path) if not os.path.exists(subdir): os.makedirs(subdir) with open(new_path, 'w') as f: f.write(str(content)) try: yield tempdir finally: rmtree(tempdir) def with_tempdir(f): """ Decorator to give a single test a tempdir as argument to test method. """ @functools.wraps(f) def wrapped(*args, **kwargs): tempdir = mkdtemp() args = list(args) args.append(tempdir) try: return f(*args, **kwargs) finally: rmtree(tempdir) return wrapped class NullLoggingHandler(logging.Handler): def emit(self, record): pass class UnmockTimeModule(object): """ Even if a test mocks time.time - you can restore unmolested behavior in a another module who imports time directly by monkey patching it's imported reference to the module with an instance of this class """ _orig_time = time.time def __getattribute__(self, name): if name == 'time': return UnmockTimeModule._orig_time return getattr(time, name) # logging.LogRecord.__init__ calls time.time logging.time = UnmockTimeModule() class FakeLogger(logging.Logger, object): # a thread safe fake logger def __init__(self, *args, **kwargs): self._clear() self.name = 'swift.unit.fake_logger' self.level = logging.NOTSET if 'facility' in kwargs: self.facility = kwargs['facility'] self.statsd_client = None self.thread_locals = None self.parent = None store_in = { logging.ERROR: 'error', logging.WARNING: 'warning', logging.INFO: 'info', logging.DEBUG: 'debug', logging.CRITICAL: 'critical', NOTICE: 'notice', } def notice(self, msg, *args, **kwargs): """ Convenience function for syslog priority LOG_NOTICE. The python logging lvl is set to 25, just above info. SysLogHandler is monkey patched to map this log lvl to the LOG_NOTICE syslog priority. 
""" self.log(NOTICE, msg, *args, **kwargs) def _log(self, level, msg, *args, **kwargs): store_name = self.store_in[level] cargs = [msg] if any(args): cargs.extend(args) captured = dict(kwargs) if 'exc_info' in kwargs and \ not isinstance(kwargs['exc_info'], tuple): captured['exc_info'] = sys.exc_info() self.log_dict[store_name].append((tuple(cargs), captured)) super(FakeLogger, self)._log(level, msg, *args, **kwargs) def _clear(self): self.log_dict = defaultdict(list) self.lines_dict = {'critical': [], 'error': [], 'info': [], 'warning': [], 'debug': [], 'notice': []} clear = _clear # this is a public interface def get_lines_for_level(self, level): if level not in self.lines_dict: raise KeyError( "Invalid log level '%s'; valid levels are %s" % (level, ', '.join("'%s'" % lvl for lvl in sorted(self.lines_dict)))) return self.lines_dict[level] def all_log_lines(self): return dict((level, msgs) for level, msgs in self.lines_dict.items() if len(msgs) > 0) def _store_in(store_name): def stub_fn(self, *args, **kwargs): self.log_dict[store_name].append((args, kwargs)) return stub_fn # mock out the StatsD logging methods: update_stats = _store_in('update_stats') increment = _store_in('increment') decrement = _store_in('decrement') timing = _store_in('timing') timing_since = _store_in('timing_since') transfer_rate = _store_in('transfer_rate') set_statsd_prefix = _store_in('set_statsd_prefix') def get_increments(self): return [call[0][0] for call in self.log_dict['increment']] def get_increment_counts(self): counts = {} for metric in self.get_increments(): if metric not in counts: counts[metric] = 0 counts[metric] += 1 return counts def setFormatter(self, obj): self.formatter = obj def close(self): self._clear() def set_name(self, name): # don't touch _handlers self._name = name def acquire(self): pass def release(self): pass def createLock(self): pass def emit(self, record): pass def _handle(self, record): try: line = record.getMessage() except TypeError: print('WARNING: unable to format log message %r %% %r' % ( record.msg, record.args)) raise self.lines_dict[record.levelname.lower()].append(line) def handle(self, record): self._handle(record) def flush(self): pass def handleError(self, record): pass class DebugLogger(FakeLogger): """A simple stdout logging version of FakeLogger""" def __init__(self, *args, **kwargs): FakeLogger.__init__(self, *args, **kwargs) self.formatter = logging.Formatter( "%(server)s %(levelname)s: %(message)s") def handle(self, record): self._handle(record) print(self.formatter.format(record)) class DebugLogAdapter(utils.LogAdapter): def _send_to_logger(name): def stub_fn(self, *args, **kwargs): return getattr(self.logger, name)(*args, **kwargs) return stub_fn # delegate to FakeLogger's mocks update_stats = _send_to_logger('update_stats') increment = _send_to_logger('increment') decrement = _send_to_logger('decrement') timing = _send_to_logger('timing') timing_since = _send_to_logger('timing_since') transfer_rate = _send_to_logger('transfer_rate') set_statsd_prefix = _send_to_logger('set_statsd_prefix') def __getattribute__(self, name): try: return object.__getattribute__(self, name) except AttributeError: return getattr(self.__dict__['logger'], name) def debug_logger(name='test'): """get a named adapted debug logger""" return DebugLogAdapter(DebugLogger(), name) original_syslog_handler = logging.handlers.SysLogHandler def fake_syslog_handler(): for attr in dir(original_syslog_handler): if attr.startswith('LOG'): setattr(FakeLogger, attr, 
copy.copy(getattr(logging.handlers.SysLogHandler, attr))) FakeLogger.priority_map = \ copy.deepcopy(logging.handlers.SysLogHandler.priority_map) logging.handlers.SysLogHandler = FakeLogger if utils.config_true_value( get_config('unit_test').get('fake_syslog', 'False')): fake_syslog_handler() class MockTrue(object): """ Instances of MockTrue evaluate like True Any attr accessed on an instance of MockTrue will return a MockTrue instance. Any method called on an instance of MockTrue will return a MockTrue instance. >>> thing = MockTrue() >>> thing True >>> thing == True # True == True True >>> thing == False # True == False False >>> thing != True # True != True False >>> thing != False # True != False True >>> thing.attribute True >>> thing.method() True >>> thing.attribute.method() True >>> thing.method().attribute True """ def __getattribute__(self, *args, **kwargs): return self def __call__(self, *args, **kwargs): return self def __repr__(*args, **kwargs): return repr(True) def __eq__(self, other): return other is True def __ne__(self, other): return other is not True @contextmanager def mock(update): returns = [] deletes = [] for key, value in update.items(): imports = key.split('.') attr = imports.pop(-1) module = __import__(imports[0], fromlist=imports[1:]) for modname in imports[1:]: module = getattr(module, modname) if hasattr(module, attr): returns.append((module, attr, getattr(module, attr))) else: deletes.append((module, attr)) setattr(module, attr, value) try: yield True finally: for module, attr, value in returns: setattr(module, attr, value) for module, attr in deletes: delattr(module, attr) class FakeStatus(object): """ This will work with our fake_http_connect, if you hand in one of these instead of a status int or status int tuple to the "codes" iter you can add some eventlet sleep to the expect and response stages of the connection. """ def __init__(self, status, expect_sleep=None, response_sleep=None): """ :param status: the response status int, or a tuple of ([expect_status, ...], response_status) :param expect_sleep: float, time to eventlet sleep during expect, can be a iter of floats :param response_sleep: float, time to eventlet sleep during response """ # connect exception if isinstance(status, (Exception, eventlet.Timeout)): raise status if isinstance(status, tuple): self.expect_status = list(status[:-1]) self.status = status[-1] self.explicit_expect_list = True else: self.expect_status, self.status = ([], status) self.explicit_expect_list = False if not self.expect_status: # when a swift backend service returns a status before reading # from the body (mostly an error response) eventlet.wsgi will # respond with that status line immediately instead of 100 # Continue, even if the client sent the Expect 100 header. # BufferedHttp and the proxy both see these error statuses # when they call getexpect, so our FakeConn tries to act like # our backend services and return certain types of responses # as expect statuses just like a real backend server would do. 
if self.status in (507, 412, 409): self.expect_status = [status] else: self.expect_status = [100, 100] # setup sleep attributes if not isinstance(expect_sleep, (list, tuple)): expect_sleep = [expect_sleep] * len(self.expect_status) self.expect_sleep_list = list(expect_sleep) while len(self.expect_sleep_list) < len(self.expect_status): self.expect_sleep_list.append(None) self.response_sleep = response_sleep def get_response_status(self): if self.response_sleep is not None: eventlet.sleep(self.response_sleep) if self.expect_status and self.explicit_expect_list: raise Exception('Test did not consume all fake ' 'expect status: %r' % (self.expect_status,)) if isinstance(self.status, (Exception, eventlet.Timeout)): raise self.status return self.status def get_expect_status(self): expect_sleep = self.expect_sleep_list.pop(0) if expect_sleep is not None: eventlet.sleep(expect_sleep) expect_status = self.expect_status.pop(0) if isinstance(expect_status, (Exception, eventlet.Timeout)): raise expect_status return expect_status class SlowBody(object): """ This will work with our fake_http_connect, if you hand in these instead of strings it will make reads take longer by the given amount. It should be a little bit easier to extend than the current slow kwarg - which inserts whitespace in the response. Also it should be easy to detect if you have one of these (or a subclass) for the body inside of FakeConn if we wanted to do something smarter than just duck-type the str/buffer api enough to get by. """ def __init__(self, body, slowness): self.body = body self.slowness = slowness def slowdown(self): eventlet.sleep(self.slowness) def __getitem__(self, s): return SlowBody(self.body[s], self.slowness) def __len__(self): return len(self.body) def __radd__(self, other): self.slowdown() return other + self.body def fake_http_connect(*code_iter, **kwargs): class FakeConn(object): def __init__(self, status, etag=None, body='', timestamp='1', headers=None, expect_headers=None, connection_id=None, give_send=None): if not isinstance(status, FakeStatus): status = FakeStatus(status) self._status = status self.reason = 'Fake' self.host = '1.2.3.4' self.port = '1234' self.sent = 0 self.received = 0 self.etag = etag self.body = body self.headers = headers or {} self.expect_headers = expect_headers or {} self.timestamp = timestamp self.connection_id = connection_id self.give_send = give_send if 'slow' in kwargs and isinstance(kwargs['slow'], list): try: self._next_sleep = kwargs['slow'].pop(0) except IndexError: self._next_sleep = None # be nice to trixy bits with node_iter's eventlet.sleep() def getresponse(self): exc = kwargs.get('raise_exc') if exc: if isinstance(exc, (Exception, eventlet.Timeout)): raise exc raise Exception('test') if kwargs.get('raise_timeout_exc'): raise eventlet.Timeout() self.status = self._status.get_response_status() return self def getexpect(self): expect_status = self._status.get_expect_status() headers = dict(self.expect_headers) if expect_status == 409: headers['X-Backend-Timestamp'] = self.timestamp response = FakeConn(expect_status, timestamp=self.timestamp, headers=headers) response.status = expect_status return response def getheaders(self): etag = self.etag if not etag: if isinstance(self.body, str): etag = '"' + md5(self.body).hexdigest() + '"' else: etag = '"68b329da9893e34099c7d8ad5cb9c940"' headers = swob.HeaderKeyDict({ 'content-length': len(self.body), 'content-type': 'x-application/test', 'x-timestamp': self.timestamp, 'x-backend-timestamp': self.timestamp, 'last-modified': 
self.timestamp, 'x-object-meta-test': 'testing', 'x-delete-at': '9876543210', 'etag': etag, 'x-works': 'yes', }) if self.status // 100 == 2: headers['x-account-container-count'] = \ kwargs.get('count', 12345) if not self.timestamp: # when timestamp is None, HeaderKeyDict raises KeyError headers.pop('x-timestamp', None) try: if next(container_ts_iter) is False: headers['x-container-timestamp'] = '1' except StopIteration: pass am_slow, value = self.get_slow() if am_slow: headers['content-length'] = '4' headers.update(self.headers) return headers.items() def get_slow(self): if 'slow' in kwargs and isinstance(kwargs['slow'], list): if self._next_sleep is not None: return True, self._next_sleep else: return False, 0.01 if kwargs.get('slow') and isinstance(kwargs['slow'], Number): return True, kwargs['slow'] return bool(kwargs.get('slow')), 0.1 def read(self, amt=None): am_slow, value = self.get_slow() if am_slow: if self.sent < 4: self.sent += 1 eventlet.sleep(value) return ' ' rv = self.body[:amt] self.body = self.body[amt:] return rv def send(self, amt=None): if self.give_send: self.give_send(self.connection_id, amt) am_slow, value = self.get_slow() if am_slow: if self.received < 4: self.received += 1 eventlet.sleep(value) def getheader(self, name, default=None): return swob.HeaderKeyDict(self.getheaders()).get(name, default) def close(self): pass timestamps_iter = iter(kwargs.get('timestamps') or ['1'] * len(code_iter)) etag_iter = iter(kwargs.get('etags') or [None] * len(code_iter)) if isinstance(kwargs.get('headers'), (list, tuple)): headers_iter = iter(kwargs['headers']) else: headers_iter = iter([kwargs.get('headers', {})] * len(code_iter)) if isinstance(kwargs.get('expect_headers'), (list, tuple)): expect_headers_iter = iter(kwargs['expect_headers']) else: expect_headers_iter = iter([kwargs.get('expect_headers', {})] * len(code_iter)) x = kwargs.get('missing_container', [False] * len(code_iter)) if not isinstance(x, (tuple, list)): x = [x] * len(code_iter) container_ts_iter = iter(x) code_iter = iter(code_iter) conn_id_and_code_iter = enumerate(code_iter) static_body = kwargs.get('body', None) body_iter = kwargs.get('body_iter', None) if body_iter: body_iter = iter(body_iter) def connect(*args, **ckwargs): if kwargs.get('slow_connect', False): eventlet.sleep(0.1) if 'give_content_type' in kwargs: if len(args) >= 7 and 'Content-Type' in args[6]: kwargs['give_content_type'](args[6]['Content-Type']) else: kwargs['give_content_type']('') i, status = next(conn_id_and_code_iter) if 'give_connect' in kwargs: give_conn_fn = kwargs['give_connect'] argspec = inspect.getargspec(give_conn_fn) if argspec.keywords or 'connection_id' in argspec.args: ckwargs['connection_id'] = i give_conn_fn(*args, **ckwargs) etag = next(etag_iter) headers = next(headers_iter) expect_headers = next(expect_headers_iter) timestamp = next(timestamps_iter) if status <= 0: raise HTTPException() if body_iter is None: body = static_body or '' else: body = next(body_iter) return FakeConn(status, etag, body=body, timestamp=timestamp, headers=headers, expect_headers=expect_headers, connection_id=i, give_send=kwargs.get('give_send')) connect.code_iter = code_iter return connect @contextmanager def mocked_http_conn(*args, **kwargs): requests = [] def capture_requests(ip, port, method, path, headers, qs, ssl): req = { 'ip': ip, 'port': port, 'method': method, 'path': path, 'headers': headers, 'qs': qs, 'ssl': ssl, } requests.append(req) kwargs.setdefault('give_connect', capture_requests) fake_conn = fake_http_connect(*args, 
**kwargs) fake_conn.requests = requests with mocklib.patch('swift.common.bufferedhttp.http_connect_raw', new=fake_conn): yield fake_conn left_over_status = list(fake_conn.code_iter) if left_over_status: raise AssertionError('left over status %r' % left_over_status) def make_timestamp_iter(): return iter(Timestamp(t) for t in itertools.count(int(time.time())))
1.304688
1
tests/ximpl.py
zsimic/sandbox
0
895
import click import poyo import ruamel.yaml import runez import strictyaml import yaml as pyyaml from zyaml import load_path, load_string, tokens_from_path, tokens_from_string from zyaml.marshal import decode, default_marshal, represented_scalar from . import TestSettings class ImplementationCollection(object): def __init__(self, names, default="zyaml,ruamel"): av = [ZyamlImplementation, RuamelImplementation, PyyamlBaseImplementation, PoyoImplementation, StrictImplementation] self.available = dict((m.name, m()) for m in av) self.unknown = [] self.selected = [] if names.startswith("+"): names = "%s,%s" % (names[1:], default) names = [s.strip() for s in names.split(",")] names = [s for s in names if s] seen = {} for name in names: found = 0 for i in self.available.values(): if name == "all" or name in i.name: if i.name not in seen: seen[i.name] = True self.selected.append(i) found += 1 if found == 0: self.unknown.append(name) self.combinations = None def track_result_combination(self, impl, data): if isinstance(data, Exception): value = runez.stringified(data) else: value = runez.represented_json(data, stringify=decode, keep_none=True, none_key="-null-") name = impl.name if self.combinations is None: self.combinations = {} for i1 in self.selected: for i2 in self.selected: if i1.name < i2.name: self.combinations[(i1.name, i2.name)] = set() for names, values in self.combinations.items(): if name in names: values.add(value) def __repr__(self): return ",".join(str(i) for i in self.selected) def __len__(self): return len(self.selected) def __iter__(self): for i in self.selected: yield i class Implementation(object): """Implementation of loading a yml file""" name = None # type: str def __repr__(self): return self.name @classmethod def option(cls, default="zyaml,ruamel", count=None, **kwargs): """ Args: default (str | None): Default implementation(s) to use count (int | None): Optional: exact number of implementations that have to specified **kwargs: Passed-through to click """ kwargs["default"] = default def _callback(_ctx, _param, value): if not value: return None impls = ImplementationCollection(value, default=default) if impls.unknown: raise click.BadParameter("Unknown implementation(s): %s" % ", ".join(impls.unknown)) if count and len(impls) != count: if count == 1: raise click.BadParameter("Need exactly 1 implementation") raise click.BadParameter("Need exactly %s" % runez.plural(count, "implementation")) if count == 1: return impls.selected[0] return impls metavar = "I1,..." 
hlp = "Implementation(s)" if count: hlp = runez.plural(count, "implementation") metavar = ",".join("I%s" % (i + 1) for i in range(count)) kwargs.setdefault("help", "%s to use" % hlp) kwargs.setdefault("show_default", True) kwargs.setdefault("metavar", metavar) name = "implementation" if count == 1 else "implementations" return click.option(name, "-i", callback=_callback, **kwargs) def show_result(self, data, tokens=False): rtype = "tokens" if tokens else data.__class__.__name__ if data is not None else "None" rep = data if not tokens or isinstance(data, Exception): rep = TestSettings.represented(data) message = "---- %s: %s" % (runez.bold(self.name), runez.dim(rtype)) if isinstance(data, NotImplementedError): print("%s - %s" % (message, rep)) return print(message) print(rep) def get_outcome(self, content, tokens=False): if tokens: data = self.tokens(content) if isinstance(data, list): data = "\n".join(self.represented_token(t) for t in data) return data return self.deserialized(content) def deserialized(self, source): value = TestSettings.protected_call(self._deserialized, source) return self._simplified(value) def tokens(self, source): return TestSettings.protected_call(self._tokenize, source) def represented_token(self, token): return str(token) def _deserialized(self, source): if hasattr(source, "path"): return self._deserialized_from_path(source.path) return self._deserialized_from_string(source) def _deserialized_from_path(self, path): with open(path) as fh: return self._deserialized_from_string(fh.read()) def _deserialized_from_string(self, source): raise NotImplementedError() def _tokenize(self, source): if hasattr(source, "path"): return self._tokens_from_path(source.path) return self._tokens_from_string(source) def _tokens_from_path(self, path): with open(path) as fh: return TestSettings.unwrapped(self._tokens_from_string(fh.read())) def _tokens_from_string(self, source): raise NotImplementedError() def _simplified(self, value): if isinstance(value, list) and len(value) == 1: return value[0] return value class ZyamlImplementation(Implementation): name = "zyaml" def _deserialized_from_path(self, path): return load_path(path) def _deserialized_from_string(self, source): return load_string(source) def _tokens_from_path(self, path): return tokens_from_path(path) def _tokens_from_string(self, source): return tokens_from_string(source) def _simplified(self, value): return value def ruamel_passthrough_tags(loader, tag, node): name = node.__class__.__name__ if "Seq" in name: result = [] for v in node.value: result.append(ruamel_passthrough_tags(loader, tag, v)) return result if "Map" in name: result = {} for k, v in node.value: k = ruamel_passthrough_tags(loader, tag, k) v = ruamel_passthrough_tags(loader, tag, v) result[k] = v return result return default_marshal(node.value) class RuamelImplementation(Implementation): name = "ruamel" def _deserialized_from_string(self, source): y = ruamel.yaml.YAML(typ="safe") ruamel.yaml.add_multi_constructor("", ruamel_passthrough_tags, Loader=ruamel.yaml.SafeLoader) return y.load_all(source) def _tokens_from_string(self, source): return ruamel.yaml.main.scan(source) class PyyamlBaseImplementation(Implementation): name = "pyyaml" def _deserialized_from_string(self, source): return pyyaml.load_all(source, Loader=pyyaml.BaseLoader) def _tokens_from_string(self, source): yaml_loader = pyyaml.BaseLoader(source) curr = yaml_loader.get_token() while curr is not None: yield curr curr = yaml_loader.get_token() def represented_token(self, token): linenum = 
token.start_mark.line + 1 column = token.start_mark.column + 1 result = "%s[%s,%s]" % (token.__class__.__name__, linenum, column) value = getattr(token, "value", None) if value is not None: if token.id == "<scalar>": value = represented_scalar(token.style, value) elif token.id == "<anchor>": value = "&%s" % value elif token.id == "<alias>": value = "*%s" % value elif token.id == "<tag>": assert isinstance(value, tuple) value = " ".join(str(s) for s in runez.flattened(value)) elif token.id == "<directive>": result += " %s" % token.name value = " ".join(str(s) for s in runez.flattened(value)) else: assert False result = "%s %s" % (result, value) return result class PoyoImplementation(Implementation): name = "poyo" def _deserialized_from_string(self, source): return [poyo.parse_string(source)] class StrictImplementation(Implementation): name = "strict" def _deserialized_from_string(self, source): obj = strictyaml.dirty_load(source, allow_flow_style=True) return obj.data
1.648438
2
pytorch_lightning/accelerators/cpu_backend.py
ozen/pytorch-lightning
0
919
# Copyright The PyTorch Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import torch

from pytorch_lightning.accelerators.base_backend import Accelerator
from pytorch_lightning.utilities import AMPType, rank_zero_warn
from pytorch_lightning.utilities.exceptions import MisconfigurationException


class CPUBackend(Accelerator):

    def __init__(self, trainer, cluster_environment=None):
        super().__init__(trainer, cluster_environment)

    def setup(self, model):
        # run through amp wrapper
        if self.trainer.amp_backend:
            raise MisconfigurationException('amp + cpu is not supported. Please use a GPU option')

        # call setup after the ddp process has connected
        self.trainer.call_setup_hook(model)

        # CHOOSE OPTIMIZER
        # allow for lr schedulers as well
        self.setup_optimizers(model)

        self.trainer.model = model

    def train(self):
        model = self.trainer.model

        # set up training routine
        self.trainer.train_loop.setup_training(model)

        # train or test
        results = self.train_or_test()
        return results

    def training_step(self, args):
        if self.trainer.amp_backend == AMPType.NATIVE:
            with torch.cuda.amp.autocast():
                output = self.trainer.model.training_step(*args)
        else:
            output = self.trainer.model.training_step(*args)
        return output

    def validation_step(self, args):
        if self.trainer.amp_backend == AMPType.NATIVE:
            with torch.cuda.amp.autocast():
                output = self.trainer.model.validation_step(*args)
        else:
            output = self.trainer.model.validation_step(*args)
        return output

    def test_step(self, args):
        if self.trainer.amp_backend == AMPType.NATIVE:
            with torch.cuda.amp.autocast():
                output = self.trainer.model.test_step(*args)
        else:
            output = self.trainer.model.test_step(*args)
        return output
1.695313
2
notebooks/2018.11.09 Meeting.py
costrouc/uarray
0
951
#%% from uarray.core import * #%% s = Scalar(Int(10)) #%% @operation def Always(a: T) -> CCallableUnary[T, CContent]: ... #%% register(Call(Always(w("a")), w("idx")), lambda a, idx: a) #%% a_ten = Always(s) #%% s = Sequence(Int(10), a_ten)
1.367188
1
0.py
itspuneet/itspuneet
0
959
k=0 while k!=1: print(k) k+=1
1.507813
2
test_data/parse/unexpected/symbol_table/inheritance_from_non_class/meta_model.py
aas-core-works/aas-core-csharp-codegen
0
975
class Some_enum(Enum): some_literal = "some_literal" class Something(Some_enum): pass class Reference: pass __book_url__ = "dummy" __book_version__ = "dummy" associate_ref_with(Reference)
1.078125
1
core/fanarttvapi.py
SchadLucas/pyscrape
0
991
import urllib2 import json import time from core.helpers.decorator import Cached from core.helpers.config import config from core.helpers.logger import log, LogLevel @Cached def __request(request): log('Send Fanart Request: ' + request.replace(config.fanart.api_key, 'XXX'), 'DEBUG') headers = {'Accept': 'application/json'} _request = urllib2.Request(request, headers=headers) response_body = urllib2.urlopen(_request).read() result = json.loads(response_body) return result def _get(video_type, movie_id, output_format='JSON'): req = '{0}{1}/{2}/{3}/{4}'.format(config.fanart.url_base, video_type, config.fanart.api_key, movie_id, output_format) try_again = True n = 0 while try_again and n < 10: try: return __request(req) except urllib2.HTTPError: n += 1 try_again = True log('Ooops.. FanartTV Error - Try again', LogLevel.Warning) time.sleep(2) def get_movie(tmdb_id): return _get(video_type='movie', movie_id=tmdb_id) def get_show(tvdb_id): return _get(video_type='series', movie_id=tvdb_id)
1.71875
2
examples/client/main.py
TheFarGG/Discode
3
1039
import os import discode TOKEN = os.environ.get("TOKEN") # The token from the developer portal. client = discode.Client(token=TOKEN, intents=discode.Intents.default()) @client.on_event("ready") async def on_ready(): print(client.user, "is ready!") # The ready listener gets fired when the bot/client is completely ready for use. @client.on_event("message_create") async def on_message(message: discode.Message): msg: str = message.content if msg.startswith("?hi"): await message.channel.send("Hi!!!") # The message_create listener is fired whenever a message is sent to any channel that the bot has access to.
1.515625
2
setup.py
nopipifish/bert4keras
1
1047
#! -*- coding: utf-8 -*- from setuptools import setup, find_packages setup( name='bert4keras', version='0.8.4', description='an elegant bert4keras', long_description='bert4keras: https://github.com/bojone/bert4keras', license='Apache License 2.0', url='https://github.com/bojone/bert4keras', author='bojone', author_email='<EMAIL>', install_requires=['keras<=2.3.1'], packages=find_packages() )
1.023438
1
whoPay.py
susurigirl/susuri
0
1055
import random names_string = input("내기를 할 친구들의 이름을 적습니다. 콤마(,)로 분리해서 적습니다.\n") names = names_string.split(",") print(names) n = random.randint(0, len(names) - 1) print(f"오늘 커피는 {names[n]}가 쏩니다!")
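Because `random.randint` is inclusive at both ends, computing an index manually is easy to get wrong; a sketch of an equivalent and safer variant (variable names here are illustrative) lets `random.choice` pick the element directly:

import random

names = input("Enter the names, separated by commas:\n").split(",")
winner = random.choice(names)  # picks one element, no manual index arithmetic
print(f"Coffee is on {winner} today!")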
1.789063
2
formidable/forms/boundfield.py
jayvdb/django-formidable
0
1063
from django.forms import forms class FormatBoundField(forms.BoundField): """ The format field skips the rendering with the label attribute in the form level (i.e => form.as_p() doesn't have to generate any label for format field). This boundfield has this main goal. """ def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) # This attribute is used to generate (or not) the final label # with html tags. We force the label to None to avoid the label # generation: self.label = None class HelpTextBoundField(FormatBoundField): def value(self): return self.field.text class TitleBoundField(FormatBoundField): def value(self): return self.field.label class SeparatorBoundField(FormatBoundField): def value(self): return None
1.632813
2
pymatgen/apps/battery/insertion_battery.py
adozier/pymatgen
18
1071
# coding: utf-8 # Copyright (c) Pymatgen Development Team. # Distributed under the terms of the MIT License. from __future__ import division, unicode_literals """ This module is used for analysis of materials with potential application as intercalation batteries. """ __author__ = "<NAME>, <NAME>" __copyright__ = "Copyright 2012, The Materials Project" __version__ = "0.1" __maintainer__ = "<NAME>" __email__ = "<EMAIL>" __date__ = "Jan 13, 2012" __status__ = "Beta" import itertools from pymatgen.core.composition import Composition from pymatgen.core.units import Charge, Time from pymatgen.phasediagram.maker import PhaseDiagram from pymatgen.phasediagram.entries import PDEntry from pymatgen.apps.battery.battery_abc import AbstractElectrode, \ AbstractVoltagePair from pymatgen.core.periodic_table import Element from scipy.constants import N_A class InsertionElectrode(AbstractElectrode): """ A set of topotactically related compounds, with different amounts of a single element, e.g. TiO2 and LiTiO2, that can be used to define an insertion battery electrode. """ def __init__(self, entries, working_ion_entry): """ Create a new InsertionElectrode. Args: entries: A list of ComputedStructureEntries (or subclasses) representing the different topotactic states of the battery, e.g. TiO2 and LiTiO2. working_ion_entry: A single ComputedEntry or PDEntry representing the element that carries charge across the battery, e.g. Li. """ self._entries = entries self._working_ion = working_ion_entry.composition.elements[0] self._working_ion_entry = working_ion_entry #Prepare to make phase diagram: determine elements and set their energy #to be very high elements = set() for entry in entries: elements.update(entry.composition.elements) #Set an artificial energy for each element for convex hull generation element_energy = max([entry.energy_per_atom for entry in entries]) + 10 pdentries = [] pdentries.extend(entries) pdentries.extend([PDEntry(Composition({el:1}), element_energy) for el in elements]) #Make phase diagram to determine which entries are stable vs. unstable pd = PhaseDiagram(pdentries) lifrac = lambda e: e.composition.get_atomic_fraction(self._working_ion) #stable entries ordered by amount of Li asc self._stable_entries = tuple(sorted([e for e in pd.stable_entries if e in entries], key=lifrac)) #unstable entries ordered by amount of Li asc self._unstable_entries = tuple(sorted([e for e in pd.unstable_entries if e in entries], key=lifrac)) #create voltage pairs self._vpairs = tuple([InsertionVoltagePair(self._stable_entries[i], self._stable_entries[i + 1], working_ion_entry) for i in range(len(self._stable_entries) - 1)]) @property def working_ion(self): """ The working ion as an Element object """ return self._working_ion @property def working_ion_entry(self): return self._working_ion_entry @property def voltage_pairs(self): return self._vpairs def get_stable_entries(self, charge_to_discharge=True): """ Get the stable entries. Args: charge_to_discharge: order from most charge to most discharged state? Default to True. Returns: A list of stable entries in the electrode, ordered by amount of the working ion. """ list_copy = list(self._stable_entries) return list_copy if charge_to_discharge else list_copy.reverse() def get_unstable_entries(self, charge_to_discharge=True): """ Returns the unstable entries for the electrode. Args: charge_to_discharge: Order from most charge to most discharged state? Defaults to True. Returns: A list of unstable entries in the electrode, ordered by amount of the working ion. 
""" list_copy = list(self._unstable_entries) return list_copy if charge_to_discharge else list_copy.reverse() def get_all_entries(self, charge_to_discharge=True): """ Return all entries input for the electrode. Args: charge_to_discharge: order from most charge to most discharged state? Defaults to True. Returns: A list of all entries in the electrode (both stable and unstable), ordered by amount of the working ion. """ all_entries = list(self.get_stable_entries()) all_entries.extend(self.get_unstable_entries()) #sort all entries by amount of working ion ASC fsrt = lambda e: e.composition.get_atomic_fraction(self.working_ion) all_entries = sorted([e for e in all_entries], key=fsrt) return all_entries if charge_to_discharge else all_entries.reverse() @property def fully_charged_entry(self): """ The most charged entry along the topotactic path. """ return self._stable_entries[0] @property def fully_discharged_entry(self): """ The most discharged entry along the topotactic path. """ return self._stable_entries[-1] def get_max_instability(self, min_voltage=None, max_voltage=None): """ The maximum instability along a path for a specific voltage range. Args: min_voltage: The minimum allowable voltage. max_voltage: The maximum allowable voltage. Returns: Maximum decomposition energy of all compounds along the insertion path (a subset of the path can be chosen by the optional arguments) """ data = [] for pair in self._select_in_voltage_range(min_voltage, max_voltage): if pair.decomp_e_charge is not None: data.append(pair.decomp_e_charge) if pair.decomp_e_discharge is not None: data.append(pair.decomp_e_discharge) return max(data) if len(data) > 0 else None def get_min_instability(self, min_voltage=None, max_voltage=None): """ The minimum instability along a path for a specific voltage range. Args: min_voltage: The minimum allowable voltage. max_voltage: The maximum allowable voltage. Returns: Minimum decomposition energy of all compounds along the insertion path (a subset of the path can be chosen by the optional arguments) """ data = [] for pair in self._select_in_voltage_range(min_voltage, max_voltage): if pair.decomp_e_charge is not None: data.append(pair.decomp_e_charge) if pair.decomp_e_discharge is not None: data.append(pair.decomp_e_discharge) return min(data) if len(data) > 0 else None def get_max_muO2(self, min_voltage=None, max_voltage=None): """ Maximum critical oxygen chemical potential along path. Args: min_voltage: The minimum allowable voltage. max_voltage: The maximum allowable voltage. Returns: Maximum critical oxygen chemical of all compounds along the insertion path (a subset of the path can be chosen by the optional arguments). """ data = [] for pair in self._select_in_voltage_range(min_voltage, max_voltage): if pair.muO2_discharge is not None: data.append(pair.pair.muO2_discharge) if pair.muO2_charge is not None: data.append(pair.muO2_charge) return max(data) if len(data) > 0 else None def get_min_muO2(self, min_voltage=None, max_voltage=None): """ Minimum critical oxygen chemical potential along path. Args: min_voltage: The minimum allowable voltage for a given step max_voltage: The maximum allowable voltage allowable for a given step Returns: Minimum critical oxygen chemical of all compounds along the insertion path (a subset of the path can be chosen by the optional arguments). 
""" data = [] for pair in self._select_in_voltage_range(min_voltage, max_voltage): if pair.pair.muO2_discharge is not None: data.append(pair.pair.muO2_discharge) if pair.muO2_charge is not None: data.append(pair.muO2_charge) return min(data) if len(data) > 0 else None def get_sub_electrodes(self, adjacent_only=True, include_myself=True): """ If this electrode contains multiple voltage steps, then it is possible to use only a subset of the voltage steps to define other electrodes. For example, an LiTiO2 electrode might contain three subelectrodes: [LiTiO2 --> TiO2, LiTiO2 --> Li0.5TiO2, Li0.5TiO2 --> TiO2] This method can be used to return all the subelectrodes with some options Args: adjacent_only: Only return electrodes from compounds that are adjacent on the convex hull, i.e. no electrodes returned will have multiple voltage steps if this is set True. include_myself: Include this identical electrode in the list of results. Returns: A list of InsertionElectrode objects """ battery_list = [] pair_it = self._vpairs if adjacent_only \ else itertools.combinations_with_replacement(self._vpairs, 2) ion = self._working_ion for pair in pair_it: entry_charge = pair.entry_charge if adjacent_only \ else pair[0].entry_charge entry_discharge = pair.entry_discharge if adjacent_only \ else pair[1].entry_discharge chg_frac = entry_charge.composition.get_atomic_fraction(ion) dischg_frac = entry_discharge.composition.get_atomic_fraction(ion) def in_range(entry): frac = entry.composition.get_atomic_fraction(ion) return chg_frac <= frac <= dischg_frac if include_myself or entry_charge != self.fully_charged_entry \ or entry_discharge != self.fully_discharged_entry: unstable_entries = filter(in_range, self.get_unstable_entries()) stable_entries = filter(in_range, self.get_stable_entries()) all_entries = list(stable_entries) all_entries.extend(unstable_entries) battery_list.append(self.__class__(all_entries, self.working_ion_entry)) return battery_list def as_dict_summary(self, print_subelectrodes=True): """ Generate a summary dict. Args: print_subelectrodes: Also print data on all the possible subelectrodes. Returns: A summary of this electrode"s properties in dict format. 
""" chg_comp = self.fully_charged_entry.composition dischg_comp = self.fully_discharged_entry.composition ion = self.working_ion d = {"average_voltage": self.get_average_voltage(), "max_voltage": self.max_voltage, "min_voltage": self.min_voltage, "max_delta_volume": self.max_delta_volume, "max_voltage_step": self.max_voltage_step, "capacity_grav": self.get_capacity_grav(), "capacity_vol": self.get_capacity_vol(), "energy_grav": self.get_specific_energy(), "energy_vol": self.get_energy_density(), "working_ion": self._working_ion.symbol, "nsteps": self.num_steps, "framework": self._vpairs[0].framework.to_data_dict, "formula_charge": chg_comp.reduced_formula, "formula_discharge": dischg_comp.reduced_formula, "fracA_charge": chg_comp.get_atomic_fraction(ion), "fracA_discharge": dischg_comp.get_atomic_fraction(ion), "max_instability": self.get_max_instability(), "min_instability": self.get_min_instability()} if print_subelectrodes: f_dict = lambda c: c.as_dict_summary(print_subelectrodes=False) d["adj_pairs"] = map(f_dict, self.get_sub_electrodes(adjacent_only=True)) d["all_pairs"] = map(f_dict, self.get_sub_electrodes(adjacent_only=False)) return d def __str__(self): return self.__repr__() def __repr__(self): output = [] chg_form = self.fully_charged_entry.composition.reduced_formula dischg_form = self.fully_discharged_entry.composition.reduced_formula output.append("InsertionElectrode with endpoints at {} and {}".format( chg_form, dischg_form)) output.append("Avg. volt. = {} V".format(self.get_average_voltage())) output.append("Grav. cap. = {} mAh/g".format(self.get_capacity_grav())) output.append("Vol. cap. = {}".format(self.get_capacity_vol())) return "\n".join(output) @classmethod def from_dict(cls, d): from monty.json import MontyDecoder dec = MontyDecoder() return cls(dec.process_decoded(d["entries"]), dec.process_decoded(d["working_ion_entry"])) def as_dict(self): return {"@module": self.__class__.__module__, "@class": self.__class__.__name__, "entries": [entry.as_dict() for entry in self._entries], "working_ion_entry": self.working_ion_entry.as_dict()} class InsertionVoltagePair(AbstractVoltagePair): """ Defines an Insertion Voltage Pair. Args: entry1: Entry corresponding to one of the entries in the voltage step. entry2: Entry corresponding to the other entry in the voltage step. working_ion_entry: A single ComputedEntry or PDEntry representing the element that carries charge across the battery, e.g. Li. 
""" def __init__(self, entry1, entry2, working_ion_entry): #initialize some internal variables working_element = working_ion_entry.composition.elements[0] entry_charge = entry1 entry_discharge = entry2 if entry_charge.composition.get_atomic_fraction(working_element) \ > entry2.composition.get_atomic_fraction(working_element): (entry_charge, entry_discharge) = (entry_discharge, entry_charge) comp_charge = entry_charge.composition comp_discharge = entry_discharge.composition ion_sym = working_element.symbol frame_charge_comp = Composition({el: comp_charge[el] for el in comp_charge if el.symbol != ion_sym}) frame_discharge_comp = Composition({el: comp_discharge[el] for el in comp_discharge if el.symbol != ion_sym}) #Data validation #check that the ion is just a single element if not working_ion_entry.composition.is_element: raise ValueError("VoltagePair: The working ion specified must be " "an element") #check that at least one of the entries contains the working element if not comp_charge.get_atomic_fraction(working_element) > 0 and \ not comp_discharge.get_atomic_fraction(working_element) > 0: raise ValueError("VoltagePair: The working ion must be present in " "one of the entries") #check that the entries do not contain the same amount of the workin #element if comp_charge.get_atomic_fraction(working_element) == \ comp_discharge.get_atomic_fraction(working_element): raise ValueError("VoltagePair: The working ion atomic percentage " "cannot be the same in both the entries") #check that the frameworks of the entries are equivalent if not frame_charge_comp.reduced_formula == \ frame_discharge_comp.reduced_formula: raise ValueError("VoltagePair: the specified entries must have the" " same compositional framework") #Initialize normalization factors, charged and discharged entries valence_list = Element(ion_sym).oxidation_states working_ion_valence = max(valence_list) (self.framework, norm_charge) = frame_charge_comp.get_reduced_composition_and_factor() norm_discharge = \ frame_discharge_comp.get_reduced_composition_and_factor()[1] self._working_ion_entry = working_ion_entry #Initialize normalized properties self._vol_charge = entry_charge.structure.volume / norm_charge self._vol_discharge = entry_discharge.structure.volume / norm_discharge comp_charge = entry_charge.composition comp_discharge = entry_discharge.composition self._mass_charge = comp_charge.weight / norm_charge self._mass_discharge = comp_discharge.weight / norm_discharge self._num_ions_transferred = \ (comp_discharge[working_element] / norm_discharge) \ - (comp_charge[working_element] / norm_charge) self._voltage = \ (((entry_charge.energy / norm_charge) - (entry_discharge.energy / norm_discharge)) / \ self._num_ions_transferred + working_ion_entry.energy_per_atom) / working_ion_valence self._mAh = self._num_ions_transferred * Charge(1, "e").to("C") * \ Time(1, "s").to("h") * N_A * 1000 * working_ion_valence #Step 4: add (optional) hull and muO2 data self.decomp_e_charge = \ entry_charge.data.get("decomposition_energy", None) self.decomp_e_discharge = \ entry_discharge.data.get("decomposition_energy", None) self.muO2_charge = entry_charge.data.get("muO2", None) self.muO2_discharge = entry_discharge.data.get("muO2", None) self.entry_charge = entry_charge self.entry_discharge = entry_discharge self.normalization_charge = norm_charge self.normalization_discharge = norm_discharge self._frac_charge = comp_charge.get_atomic_fraction(working_element) self._frac_discharge = \ comp_discharge.get_atomic_fraction(working_element) @property 
def frac_charge(self): return self._frac_charge @property def frac_discharge(self): return self._frac_discharge @property def voltage(self): return self._voltage @property def mAh(self): return self._mAh @property def mass_charge(self): return self._mass_charge @property def mass_discharge(self): return self._mass_discharge @property def vol_charge(self): return self._vol_charge @property def vol_discharge(self): return self._vol_discharge @property def working_ion_entry(self): return self._working_ion_entry def __repr__(self): output = ["Insertion voltage pair with working ion {}" .format(self._working_ion_entry.composition.reduced_formula), "V = {}, mAh = {}".format(self.voltage, self.mAh), "mass_charge = {}, mass_discharge = {}" .format(self.mass_charge, self.mass_discharge), "vol_charge = {}, vol_discharge = {}" .format(self.vol_charge, self.vol_discharge), "frac_charge = {}, frac_discharge = {}" .format(self.frac_charge, self.frac_discharge)] return "\n".join(output) def __str__(self): return self.__repr__()
2.6875
3
apps/core/forms.py
allexvissoci/djangoecommerce
0
1079
from django import forms from django.core.mail import send_mail from django.conf import settings class ContactForm(forms.Form): name = forms.CharField(label='Nome', required=True) email = forms.EmailField(label='E-mail') message = forms.CharField(label='Mensagem', widget=forms.Textarea(), required=True) def send_mail(self): name = self.cleaned_data['name'] email = self.cleaned_data['email'] message = self.cleaned_data['message'] message = 'Nome: {0}\nE-mail:{1}\n{2}'.format(name, email, message) send_mail( 'Contato Django E-commerce', message, settings.DEFAULT_FROM_EMAIL, [settings.DEFAULT_FROM_EMAIL] )
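The form only exposes `cleaned_data` after validation, so a caller is expected to run `is_valid()` before `send_mail()`. A minimal sketch of such a view — the view name and template are assumptions, not part of this file:

from django.shortcuts import render

def contact(request):
    form = ContactForm(request.POST or None)
    if request.method == 'POST' and form.is_valid():
        form.send_mail()  # cleaned_data exists only after is_valid() passes
    return render(request, 'contact.html', {'form': form})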
1.34375
1
Adafruit_BluefruitLE/interfaces/__init__.py
acoomans/Adafruit_Python_BluefruitLE
415
1087
from .provider import Provider from .adapter import Adapter from .device import Device from .gatt import GattService, GattCharacteristic, GattDescriptor
0.132813
0
app/configs/development_settings.py
DIS-SIN/FlaskShell
0
1095
######################################################## FLASK SETTINGS ############################################################## #Variable used to securely sign cookies ##THIS IS SET IN DEV ENVIRONMENT FOR CONVENIENCE BUT SHOULD BE SET AS AN ENVIRONMENT VARIABLE IN PROD SECRET_KEY = "dev" ######################################################## DATABASE SETTINGS #################################################### #Neo4j Database URI used by the Neomodel OGM ## THIS SHOULD BE SET AS AN ENVIRONMENT VARIABLE IN PRODUCTION ## DATABASE_URI = "bolt://test:test@localhost:7687"
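In production both values are meant to come from the environment rather than from source code, as the comments above note. A hedged sketch of the corresponding production settings (module layout and defaults are assumptions):

import os

# Fail fast if the signing key is missing instead of falling back to "dev".
SECRET_KEY = os.environ["SECRET_KEY"]
DATABASE_URI = os.environ.get("DATABASE_URI", "bolt://localhost:7687")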
0.859375
1
teeth_overlord/tests/unit/networks/neutron.py
rackerlabs/teeth-overlord
0
1103
""" Copyright 2013 Rackspace, Inc. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. """ import collections from teeth_overlord import config from teeth_overlord.networks import neutron from teeth_overlord import tests from keystoneclient.apiclient import exceptions as keystone_exceptions from keystoneclient.v2_0 import client as keystone_client from neutronclient.common import exceptions as neutron_exceptions from neutronclient.neutron import client as neutron_client NETWORK1_RESPONSE = { u'status': u'ACTIVE', u'subnets': [u'SUBNET1'], u'name': u'private', u'provider:physical_network': None, u'admin_state_up': True, u'tenant_id': u'TENANTID', u'provider:network_type': u'local', u'router:external': False, u'shared': False, u'id': u'NETWORK1', u'provider:segmentation_id': None } NETWORK2_RESPONSE = { u'status': u'ACTIVE', u'subnets': [u'SUBNET2'], u'name': u'public', u'provider:physical_network': None, u'admin_state_up': True, u'tenant_id': u'TENANTID', u'provider:network_type': u'local', u'router:external': True, u'shared': False, u'id': u'NETWORK2', u'provider:segmentation_id': None } PORT1_RESPONSE = { u'status': u'ACTIVE', u'binding:host_id': u'precise64', u'name': u'', u'allowed_address_pairs': [], u'admin_state_up': True, u'network_id': u'NETWORK1', u'tenant_id': u'TENANTID', u'extra_dhcp_opts': [], u'binding:vif_type': u'ovs', u'device_owner': u'network:dhcp', u'binding:capabilities': {u'port_filter': True}, u'mac_address': u'fa:16:3e:e0:d4:63', u'fixed_ips': [ { u'subnet_id': u'SUBNET1', u'ip_address': u'10.0.0.3' } ], u'id': u'PORT1', u'security_groups': [], u'device_id': u'' } PORT2_RESPONSE = { u'status': u'DOWN', u'binding:host_id': u'', u'name': u'', u'allowed_address_pairs': [], u'admin_state_up': True, u'network_id': u'NETWORK2', u'tenant_id': u'TENANTID', u'extra_dhcp_opts': [], u'binding:vif_type': u'unbound', u'device_owner': u'', u'binding:capabilities': {u'port_filter': False}, u'mac_address': u'00:09:7b:3e:18:ca', u'fixed_ips': [ { u'subnet_id': u'SUBNET2', u'ip_address': u'192.168.27.3' } ], u'id': u'PORT2', u'security_groups': [u'SECGRP'], u'device_id': u'' } SUBNET1_RESPONSE = { u'name': u'private-subnet', u'enable_dhcp': True, u'network_id': u'NETWORK1', u'tenant_id': u'TENANTID', u'dns_nameservers': [], u'allocation_pools': [ { u'start': u'10.0.0.2', u'end': u'10.0.0.254' } ], u'host_routes': [], u'ip_version': 4, u'gateway_ip': u'10.0.0.1', u'cidr': u'10.0.0.0/24', u'id': u'SUBNET1' } SUBNET2_RESPONSE = { u'name': u'public-subnet', u'enable_dhcp': False, u'network_id': u'NETWORK2', u'tenant_id': u'TENANTID', u'dns_nameservers': [], u'allocation_pools': [ { u'start': u'192.168.27.1', u'end': u'192.168.27.1' }, { u'start': u'192.168.27.3', u'end': u'192.168.27.254' } ], u'host_routes': [], u'ip_version': 4, u'gateway_ip': u'192.168.27.2', u'cidr': u'192.168.27.0/24', u'id': u'SUBNET2' } SERIALIZED_NETWORK1 = collections.OrderedDict([ ('id', u'NETWORK1'), ('name', u'private'), ('status', u'ACTIVE'), ('subnets', [ collections.OrderedDict([ ('id', u'SUBNET1'), ('name', u'private-subnet'), ('ip_version', 4), 
('gateway_ip', u'10.0.0.1'), ('cidr', u'10.0.0.0/24'), ('enable_dhcp', True) ]) ]) ]) SERIALIZED_NETWORK2 = collections.OrderedDict([ ('id', u'NETWORK2'), ('name', u'public'), ('status', u'ACTIVE'), ('subnets', [ collections.OrderedDict([ ('id', u'SUBNET2'), ('name', u'public-subnet'), ('ip_version', 4), ('gateway_ip', u'192.168.27.2'), ('cidr', u'192.168.27.0/24'), ('enable_dhcp', False) ]) ]) ]) SERIALIZED_PORT1 = collections.OrderedDict([ ('id', u'PORT1'), ('name', u''), ('status', u'ACTIVE'), ('mac_address', u'fa:16:3e:e0:d4:63'), ('fixed_ips', [ { u'subnet_id': u'SUBNET1', u'ip_address': u'10.0.0.3' } ]), ('network', SERIALIZED_NETWORK1) ]) class TestNeutronProvider(tests.TeethMockTestUtilities): def setUp(self): super(TestNeutronProvider, self).setUp() self.config = config.LazyConfig(config={ 'KEYSTONE_USER': 'user', 'KEYSTONE_PASS': '<PASSWORD>', 'KEYSTONE_TENANT_ID': 'tenant', 'KEYSTONE_AUTH_URL': 'auth_url', 'NEUTRON_VERSION': '2.0', 'NEUTRON_URL': 'neutron_url', 'NEUTRON_PUBLIC_NETWORK': 'd6b32008-1432-4299-81c7-cbe3128ba13f', 'NEUTRON_PRIVATE_NETWORK': '2afa16d6-7b84-484f-a642-af243b0e5b10', 'NEUTRON_SERVICE_NETWORK': '2afa16d6-7b84-484f-a642-af243b0e5b10', }) self.neutron_client_mock = self.add_mock(neutron_client, 'Client') self.neutron_mock = self.neutron_client_mock.return_value self.keystone_client_mock = self.add_mock(keystone_client, 'Client') self.keystone_client_mock.return_value.auth_token = '<PASSWORD>' self.provider = neutron.NeutronProvider(self.config) def test_get_auth_token(self): t = self.provider._get_auth_token() self.assertEqual(t, 'auth_token') self.keystone_client_mock.assert_called_with( username='user', password='<PASSWORD>', tenant_id='tenant', auth_url='auth_url' ) def test_get_auth_token_client_exception(self): exc = keystone_exceptions.ClientException self.keystone_client_mock.side_effect = exc self.assertRaises(self.provider.NetworkProviderException, self.provider._get_auth_token) def test_get_neutron_client(self): self.provider._get_neutron_client() self.neutron_client_mock.assert_called_with( '2.0', endpoint_url='neutron_url', token='auth_token' ) def test_get_neutron_client_exception(self): exc = neutron_exceptions.NeutronException() self.neutron_client_mock.side_effect = exc self.assertRaises(self.provider.NetworkProviderException, self.provider._get_neutron_client) def test_list_networks(self): networks = {'networks': [NETWORK1_RESPONSE, NETWORK2_RESPONSE]} self.neutron_mock.list_networks.return_value = networks self.neutron_mock.show_subnet.side_effect = [ {'subnet': SUBNET1_RESPONSE}, {'subnet': SUBNET2_RESPONSE} ] networks = self.provider.list_networks() results = [ SERIALIZED_NETWORK1, SERIALIZED_NETWORK2 ] self.assertEqual([n.serialize() for n in networks], results) def test_list_networks_empty(self): self.neutron_mock.list_networks.return_value = {'networks': []} networks = self.provider.list_networks() self.neutron_mock.list_networks.assert_called() self.assertEqual(networks, []) def test_list_networks_client_exception(self): exc = neutron_exceptions.NeutronException() self.neutron_mock.list_networks.side_effect = exc self.assertRaises(self.provider.NetworkProviderException, self.provider.list_networks) def test_get_network_info(self): network = {'network': NETWORK1_RESPONSE} self.neutron_mock.show_network.return_value = network self.neutron_mock.show_subnet.side_effect = [ {'subnet': SUBNET1_RESPONSE} ] network = self.provider.get_network_info('NETWORK1') self.assertEqual(network.serialize(), SERIALIZED_NETWORK1) 
self.neutron_mock.show_network.assert_called_with('NETWORK1') def test_get_network_info_does_not_exist(self): exc = neutron_exceptions.NeutronException() exc.message = '404 Not Found' self.neutron_mock.show_network.side_effect = exc self.assertRaises(self.provider.NetworkDoesNotExist, self.provider.get_network_info, 'NETWORK1') def test_get_network_info_client_exception(self): exc = neutron_exceptions.NeutronException() self.neutron_mock.show_network.side_effect = exc self.assertRaises(self.provider.NetworkProviderException, self.provider.get_network_info, 'NETWORK1') def test_list_ports(self): ports = {'ports': [PORT1_RESPONSE]} self.neutron_mock.list_ports.return_value = ports network = {'network': NETWORK1_RESPONSE} self.neutron_mock.show_network.return_value = network subnet = {'subnet': SUBNET1_RESPONSE} self.neutron_mock.show_subnet.return_value = subnet ports = self.provider.list_ports('a:b:c:d') self.assertEqual([p.serialize() for p in ports], [SERIALIZED_PORT1]) self.neutron_mock.list_ports.assert_called_with(mac_address='a:b:c:d') def test_attach(self): port = {'port': PORT1_RESPONSE} self.neutron_mock.create_port.return_value = port network = {'network': NETWORK1_RESPONSE} self.neutron_mock.show_network.return_value = network subnet = {'subnet': SUBNET1_RESPONSE} self.neutron_mock.show_subnet.return_value = subnet port = self.provider.attach('a:b:c:d', 'network_id') self.neutron_mock.create_port.assert_called_with({ 'port': { 'network_id': 'network_id', 'admin_state_up': True, 'mac_address': 'a:b:c:d' } }) self.assertEqual(port.serialize(), SERIALIZED_PORT1) def test_attach_client_exception(self): exc = neutron_exceptions.NeutronException() self.neutron_mock.create_port.side_effect = exc self.assertRaises(self.provider.NetworkProviderException, self.provider.attach, 'mac_address', 'network_id') def test_detatch(self): ports = {'ports': [PORT1_RESPONSE]} self.neutron_mock.list_ports.return_value = ports network = {'network': NETWORK1_RESPONSE} self.neutron_mock.show_network.return_value = network subnet = {'subnet': SUBNET1_RESPONSE} self.neutron_mock.show_subnet.return_value = subnet self.provider.detach('a:b:c:d') self.neutron_mock.delete_port.assert_called_with(PORT1_RESPONSE['id']) self.neutron_mock.list_ports.assert_called_with(mac_address='a:b:c:d') def test_detach_specific_network(self): ports = {'ports': [PORT1_RESPONSE]} self.neutron_mock.list_ports.return_value = ports network = {'network': NETWORK1_RESPONSE} self.neutron_mock.show_network.return_value = network subnet = {'subnet': SUBNET1_RESPONSE} self.neutron_mock.show_subnet.return_value = subnet self.provider.detach('a:b:c:d', 'network_id') self.neutron_mock.delete_port.assert_called_with(PORT1_RESPONSE['id']) self.neutron_mock.list_ports.assert_called_with( mac_address='a:b:c:d', network_id='network_id') def test_detach_client_exception(self): ports = {'ports': [PORT1_RESPONSE]} self.neutron_mock.list_ports.return_value = ports network = {'network': NETWORK1_RESPONSE} self.neutron_mock.show_network.return_value = network subnet = {'subnet': SUBNET1_RESPONSE} self.neutron_mock.show_subnet.return_value = subnet exc = neutron_exceptions.NeutronException() self.neutron_mock.delete_port.side_effect = exc self.assertRaises(self.provider.NetworkProviderException, self.provider.detach, 'a:b:c:d') def test_get_default_networks(self): network_ids = self.provider.get_default_networks() self.assertEqual(network_ids, [self.config.NEUTRON_PUBLIC_NETWORK, self.config.NEUTRON_PRIVATE_NETWORK]) def test_get_service_network(self): 
network_id = self.provider.get_service_network() self.assertEqual(network_id, self.config.NEUTRON_SERVICE_NETWORK)
1.429688
1
services/core-api/app/api/mms_now_submissions/models/surface_bulk_sample_activity.py
bcgov/mds
25
1127
from app.api.utils.models_mixins import Base from app.extensions import db class MMSSurfaceBulkSampleActivity(Base): __tablename__ = "surface_bulk_sample_activity" __table_args__ = {"schema": "mms_now_submissions"} id = db.Column(db.Integer, primary_key=True) messageid = db.Column(db.Integer, db.ForeignKey('mms_now_submissions.application.messageid')) mms_cid = db.Column(db.Integer) type = db.Column(db.String) disturbedarea = db.Column(db.Numeric(14, 2)) timbervolume = db.Column(db.Numeric(14, 2)) quantity = db.Column(db.Integer) def __repr__(self): return '<MMSSurfaceBulkSampleActivity %r>' % self.id
1.015625
1
Dataset/Leetcode/train/58/28.py
kkcookies99/UAST
0
1135
class Solution: def XXX(self, s): """ :type s: str :rtype: int """ cnt, tail = 0, len(s) - 1 while tail >= 0 and s[tail] == ' ': tail -= 1 while tail >= 0 and s[tail] != ' ': cnt += 1 tail -= 1 return cnt
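The method walks the string from the tail, first skipping trailing spaces and then counting characters until the next space, i.e. it returns the length of the last word (`XXX` is the anonymized method name used in this dataset). A quick illustrative check:

s = Solution()
print(s.XXX("Hello World"))                   # 5
print(s.XXX("   fly me   to   the moon  "))   # 4
print(s.XXX(""))                              # 0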
1.976563
2
api/api/form7_searching_utils/__init__.py
bcgov/court-of-appeal
0
1151
from .form7_search import Form7Search from .parse_form7 import Form7Parsing
0.21875
0
Incident-Response/Tools/dfirtrack/dfirtrack_main/views/division_views.py
sn0b4ll/Incident-Playbook
1
1167
from django.contrib import messages from django.contrib.auth.mixins import LoginRequiredMixin from django.shortcuts import redirect, render from django.urls import reverse from django.views.generic import DetailView, ListView from django.views.generic.edit import CreateView, UpdateView from dfirtrack_main.forms import DivisionForm from dfirtrack_main.logger.default_logger import debug_logger from dfirtrack_main.models import Division class DivisionList(LoginRequiredMixin, ListView): login_url = '/login' model = Division template_name = 'dfirtrack_main/division/division_list.html' context_object_name = 'division_list' def get_queryset(self): debug_logger(str(self.request.user), " DIVISION_LIST_ENTERED") return Division.objects.order_by('division_name') class DivisionDetail(LoginRequiredMixin, DetailView): login_url = '/login' model = Division template_name = 'dfirtrack_main/division/division_detail.html' def get_context_data(self, **kwargs): context = super().get_context_data(**kwargs) division = self.object division.logger(str(self.request.user), " DIVISION_DETAIL_ENTERED") return context class DivisionCreate(LoginRequiredMixin, CreateView): login_url = '/login' model = Division form_class = DivisionForm template_name = 'dfirtrack_main/division/division_add.html' def get(self, request, *args, **kwargs): form = self.form_class() debug_logger(str(request.user), " DIVISION_ADD_ENTERED") return render(request, self.template_name, {'form': form}) def post(self, request, *args, **kwargs): form = self.form_class(request.POST) if form.is_valid(): division = form.save(commit=False) division.save() division.logger(str(request.user), " DIVISION_ADD_EXECUTED") messages.success(request, 'Division added') return redirect(reverse('division_detail', args=(division.division_id,))) else: return render(request, self.template_name, {'form': form}) class DivisionUpdate(LoginRequiredMixin, UpdateView): login_url = '/login' model = Division form_class = DivisionForm template_name = 'dfirtrack_main/division/division_edit.html' def get(self, request, *args, **kwargs): division = self.get_object() form = self.form_class(instance=division) division.logger(str(request.user), " DIVISION_EDIT_ENTERED") return render(request, self.template_name, {'form': form}) def post(self, request, *args, **kwargs): division = self.get_object() form = self.form_class(request.POST, instance=division) if form.is_valid(): division = form.save(commit=False) division.save() division.logger(str(request.user), " DIVISION_EDIT_EXECUTED") messages.success(request, 'Division edited') return redirect(reverse('division_detail', args=(division.division_id,))) else: return render(request, self.template_name, {'form': form})
1.351563
1
pytorch-frontend/benchmarks/operator_benchmark/pt/embeddingbag_test.py
AndreasKaratzas/stonne
206
1175
import operator_benchmark as op_bench import torch import numpy from . import configs """EmbeddingBag Operator Benchmark""" class EmbeddingBagBenchmark(op_bench.TorchBenchmarkBase): def init(self, embeddingbags, dim, mode, input_size, offset, sparse, include_last_offset, device): self.embedding = torch.nn.EmbeddingBag( num_embeddings=embeddingbags, embedding_dim=dim, mode=mode, include_last_offset=include_last_offset, sparse=sparse).to(device=device) numpy.random.seed((1 << 32) - 1) self.input = torch.tensor(numpy.random.randint(0, embeddingbags, input_size), device=device).long() offsets = torch.LongTensor([offset], device=device) self.offset = torch.cat((offsets, torch.tensor([self.input.size(0)], dtype=torch.long)), 0) self.set_module_name('embeddingbag') def forward(self): return self.embedding(self.input, self.offset) op_bench.generate_pt_test(configs.embeddingbag_short_configs, EmbeddingBagBenchmark) op_bench.generate_pt_gradient_test(configs.embeddingbag_short_configs, EmbeddingBagBenchmark) if __name__ == "__main__": op_bench.benchmark_runner.main()
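The benchmark's `init` concatenates the starting offset with the input length because `include_last_offset=True` expects the final offset to mark the end of the last bag. A standalone sketch of the underlying call pattern with default settings (shapes and names are illustrative, independent of the harness):

import torch

emb = torch.nn.EmbeddingBag(num_embeddings=100, embedding_dim=8, mode='sum')
inp = torch.randint(0, 100, (10,))   # 10 indices, flattened across all bags
offsets = torch.tensor([0, 4])       # two bags: inp[0:4] and inp[4:10]
out = emb(inp, offsets)
print(out.shape)                     # torch.Size([2, 8])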
1.703125
2
ark_nlp/factory/utils/attack.py
yubuyuabc/ark-nlp
1
1191
import torch class FGM(object): """ Adversarial attack mechanism based on the FGM algorithm Args: module (:obj:`torch.nn.Module`): the model Examples:: >>> # initialize >>> fgm = FGM(module) >>> for batch_input, batch_label in data: >>> # normal training step >>> loss = module(batch_input, batch_label) >>> loss.backward() # backward pass, producing the normal gradients >>> # adversarial training >>> fgm.attack() # add an adversarial perturbation to the embeddings >>> loss_adv = module(batch_input, batch_label) >>> loss_adv.backward() # backward pass, accumulating the adversarial gradients on top of the normal ones >>> fgm.restore() # restore the embedding parameters >>> # gradient descent, update the parameters >>> optimizer.step() >>> optimizer.zero_grad() Reference: [1] https://zhuanlan.zhihu.com/p/91269728 """ def __init__(self, module): self.module = module self.backup = {} def attack( self, epsilon=1., emb_name='word_embeddings' ): for name, param in self.module.named_parameters(): if param.requires_grad and emb_name in name: self.backup[name] = param.data.clone() norm = torch.norm(param.grad) if norm != 0 and not torch.isnan(norm): r_at = epsilon * param.grad / norm param.data.add_(r_at) def restore( self, emb_name='word_embeddings' ): for name, param in self.module.named_parameters(): if param.requires_grad and emb_name in name: assert name in self.backup param.data = self.backup[name] self.backup = {} class PGD(object): """ Adversarial attack mechanism based on the PGD algorithm Args: module (:obj:`torch.nn.Module`): the model Examples:: >>> pgd = PGD(module) >>> K = 3 >>> for batch_input, batch_label in data: >>> # normal training step >>> loss = module(batch_input, batch_label) >>> loss.backward() # backward pass, producing the normal gradients >>> pgd.backup_grad() >>> # adversarial training >>> for t in range(K): >>> pgd.attack(is_first_attack=(t==0)) # add an adversarial perturbation to the embeddings; back up param.data on the first attack >>> if t != K-1: >>> optimizer.zero_grad() >>> else: >>> pgd.restore_grad() >>> loss_adv = module(batch_input, batch_label) >>> loss_adv.backward() # backward pass, accumulating the adversarial gradients on top of the normal ones >>> pgd.restore() # restore the embedding parameters >>> # gradient descent, update the parameters >>> optimizer.step() >>> optimizer.zero_grad() Reference: [1] https://zhuanlan.zhihu.com/p/91269728 """ def __init__(self, module): self.module = module self.emb_backup = {} self.grad_backup = {} def attack( self, epsilon=1., alpha=0.3, emb_name='emb.', is_first_attack=False ): # replace emb_name with the name of the embedding parameter in your model for name, param in self.module.named_parameters(): if param.requires_grad and emb_name in name: if is_first_attack: self.emb_backup[name] = param.data.clone() norm = torch.norm(param.grad) if norm != 0 and not torch.isnan(norm): r_at = alpha * param.grad / norm param.data.add_(r_at) param.data = self.project(name, param.data, epsilon) def restore(self, emb_name='emb.'): # replace emb_name with the name of the embedding parameter in your model for name, param in self.module.named_parameters(): if param.requires_grad and emb_name in name: assert name in self.emb_backup param.data = self.emb_backup[name] self.emb_backup = {} def project(self, param_name, param_data, epsilon): r = param_data - self.emb_backup[param_name] if torch.norm(r) > epsilon: r = epsilon * r / torch.norm(r) return self.emb_backup[param_name] + r def backup_grad(self): for name, param in self.module.named_parameters(): if param.requires_grad: self.grad_backup[name] = param.grad.clone() def restore_grad(self): for name, param in self.module.named_parameters(): if param.requires_grad: param.grad = self.grad_backup[name]
2.1875
2
faced/const.py
binhmuc/faced
0
1215
import os MODELS_PATH = os.path.join(os.path.dirname(__file__), "models") YOLO_SIZE = 288 YOLO_TARGET = 9 CORRECTOR_SIZE = 50
0.582031
1
atmpro1_vsm2.py
joselynzhao/One-shot-Person-Re-ID-ATM
3
1255
#!/usr/bin/python3.6 # -*- coding: utf-8 -*- # @Time : 2020/9/3 上午11:03 # @Author : Joselynzhao # @Email : <EMAIL> # @File : atmpro1_vsm2.py # @Software: PyCharm # @Desc : #!/usr/bin/python3.6 # -*- coding: utf-8 -*- # @Time : 2020/9/1 下午7:07 # @Author : Joselynzhao # @Email : <EMAIL> # @File : atmpro1_vsm.py # @Software: PyCharm # @Desc : #!/usr/bin/python3.6 # -*- coding: utf-8 -*- # @Time : 2020/8/26 下午8:26 # @Author : Joselynzhao # @Email : <EMAIL> # @File : atmpro1.py # @Software: PyCharm # @Desc : from my_reid.eug import * from my_reid import datasets from my_reid import models import numpy as np import torch import argparse import os import warnings warnings.filterwarnings("ignore") from my_reid.utils.logging import Logger import os.path as osp import sys from torch.backends import cudnn from my_reid.utils.serialization import load_checkpoint from torch import nn import time import pickle import torch.distributed as dist from torch.nn.parallel import DistributedDataParallel as DDP from torch.utils.data.distributed import DistributedSampler from pathlib import Path def resume(savepath): import re pattern = re.compile(r'step_(\d+)\.ckpt') start_step = -1 ckpt_file = "" # find start step files = os.listdir(savepath) files.sort() for filename in files: try: iter_ = int(pattern.search(filename).groups()[0]) print(iter_) if iter_ > start_step: start_step = iter_ ckpt_file = osp.join(savepath, filename) except: continue # if need resume if start_step >= 0: print("continued from iter step", start_step) else: print("resume failed", start_step, files) return start_step, ckpt_file def main(args): father = Path('/mnt/') if father.exists(): # 是在服务器上 data_dir = Path('/mnt/share/datasets/RE-ID/data') # 服务器 logs_dir = Path('/mnt/home/{}'.format(args.log_name)) # 服务器 else: #本地 data_dir = Path('/home/joselyn/workspace/ATM_SERIES/data') # 本地跑用这个 logs_dir = Path('/home/joselyn/workspace/ATM_SERIES/{}'.format(args.log_name)) # 本地跑用这个 cudnn.benchmark = True cudnn.enabled = True save_path = os.path.join(logs_dir, args.dataset, args.exp_name, args.exp_order) # 到编号位置. total_step = 100 // args.EF + 1 sys.stdout = Logger(osp.join(save_path, 'log' + str(args.EF) + time.strftime(".%m_%d_%H:%M:%S") + '.txt')) dataf_file = open(osp.join(save_path, 'dataf.txt'), 'a') # 保存性能数据. #特征空间中的性能问题. data_file = open(osp.join(save_path, 'data.txt'), 'a') # 保存性能数据. #特征空间中的性能问题. kf_file = open(osp.join(save_path,'kf.txt'),'a') # 数据格式为 label_pre_r, select_pre_r,label_pre_t, select_pre_t ,加上了了tagper的数据. tagper_path = osp.join(save_path,'tagper') #tagper存储路径. if not Path(tagper_path).exists(): os.mkdir(tagper_path) '''# 记录配置信息 和路径''' print('-'*20+'config_info'+'-'*20) config_file = open(osp.join(save_path, 'config.txt'), 'w') config_info = str(args).split('(')[1].strip(')').split(',') config_info.sort() for one in config_info: key,value=map(str,one.split('=')) config_file.write(key.strip()+'='+value.strip('\'')+'\n') print(key.strip()+'='+value.strip('\'')) config_file.write('save_path='+save_path) print('save_path='+save_path) print('-' * 20 + 'config_info' + '-' * 20) config_file.close() train_time_file = open(osp.join(save_path, 'time.txt'), 'a') # 只记录训练所需要的时间. # 数据格式为 step_time total_time. 
total_time = 0 # get all the labeled and unlabeled data for training dataset_all = datasets.create(args.dataset, osp.join(data_dir, args.dataset)) num_all_examples = len(dataset_all.train) l_data, u_data = get_init_shot_in_cam1(dataset_all, load_path="./examples/{}_init_{}.pickle".format(dataset_all.name, args.init), init=args.init) resume_step, ckpt_file = -1, '' if args.resume: resume_step, ckpt_file = resume(save_path) # initial the EUG algorithm eug = EUG(batch_size=args.batch_size, num_classes=dataset_all.num_train_ids, dataset=dataset_all, l_data=l_data, u_data=u_data, save_path=save_path, max_frames=args.max_frames, embeding_fea_size=args.fea, momentum=args.momentum, lamda=args.lamda) tagper = EUG(batch_size=args.batch_size, num_classes=dataset_all.num_train_ids, dataset=dataset_all, l_data=l_data, u_data=u_data, save_path=tagper_path, max_frames=args.max_frames, embeding_fea_size=args.fea, momentum=args.momentum, lamda=args.lamda) new_train_data = l_data unselected_data = u_data iter_mode = 2 #迭代模式,确定是否训练tagper for step in range(total_step): # for resume if step < resume_step: continue ratio = (step + 1) * args.EF / 100 ratio_t = (step+1+args.t) * args.EF /100 nums_to_select = int(len(u_data) * ratio) nums_to_select_tagper = int(len(u_data) * ratio_t) if nums_to_select >= len(u_data): break #args.vsm_lambda的衰减 0.5 - 0 vsm_lambda = args.vsm_lambda*step/(1-(total_step/2)) +args.vsm_lambda vsm_lambda +=1 print("Runing: EF={}%, step {}:\t Nums_to_be_select {} \t Ritio \t Logs-dir {}".format( args.EF, step, nums_to_select, ratio, save_path)) # train the model or load ckpt start_time = time.time() print("training reid model") eug.train(new_train_data, unselected_data, step, loss=args.loss, epochs=args.epochs, step_size=args.step_size, init_lr=0.1) if step != resume_step else eug.resume(ckpt_file, step) # 只对eug进行性能评估 # mAP, rank1, rank5, rank10, rank20 = 0, 0, 0, 0, 0 mAP, rank1, rank5, rank10, rank20 = eug.evaluate(dataset_all.query, dataset_all.gallery) # 把数据写到data文件里. data_file.write('{} {:.2%} {:.2%} {:.2%} {:.2%} {:.2%}\n'.format(step, mAP, rank1, rank5, rank10, rank20)) pred_y, pred_score,label_pre,dists= eug.estimate_label_vsm() selected_idx = eug.select_top_data_vsm2(pred_score, dists,args.topk,vsm_lambda,min(nums_to_select_tagper,len(u_data)-50) if iter_mode==2 else min(nums_to_select,len(u_data))) #直接翻两倍取数据. 
-50个样本,保证unselected_data数量不为0 new_train_data, unselected_data, select_pre= eug.generate_new_train_data(selected_idx, pred_y) raw_label_pre, raw_select_pre = label_pre,select_pre t_label_pre,t_select_pre = 0,0 raw_select_pre_t = 0 # label_pre_t,select_pre_t=0,0 if iter_mode==2: raw_select_pre_t = raw_select_pre print("training tagper model") selected_idx = eug.select_top_data_vsm2(pred_score,dists,args.topk,vsm_lambda, min(nums_to_select, len(u_data))) _, _, raw_select_pre = eug.generate_new_train_data(selected_idx, pred_y) # kf_file.write('{} {:.2%} {:.2%}'.format(step, label_pre, select_pre)) tagper.resume(osp.join(save_path,'step_{}.ckpt'.format(step)),step) tagper.train(new_train_data, unselected_data, step, loss=args.loss, epochs=args.epochs, step_size=args.step_size, init_lr=0.1) pred_y, pred_score, label_pre,dists= tagper.estimate_label_vsm() selected_idx = tagper.select_top_data_vsm2(pred_score,dists,args.topk,vsm_lambda,min(nums_to_select,len(u_data))) # 采样目标数量 new_train_data, unselected_data, select_pre= tagper.generate_new_train_data(selected_idx, pred_y) t_label_pre,t_select_pre = label_pre,select_pre label_pre,select_pre = t_label_pre,t_select_pre if nums_to_select_tagper >=len(u_data): iter_mode=1 #切换模式 print('tagper is stop') else: #mode = 1 # raw_select_pre = raw_select_pre_t # raw_select_pre_t = 0 label_pre,select_pre = raw_label_pre,raw_select_pre end_time = time.time() step_time = end_time - start_time total_time = step_time + total_time train_time_file.write('{} {:.6} {:.6}\n'.format(step, step_time, total_time)) kf_file.write('{} {} {} {:.2%} {:.2%} {:.2%} {:.2%} {:.2%}\n'.format(step,nums_to_select,nums_to_select_tagper,raw_label_pre,raw_select_pre,raw_select_pre_t,t_label_pre,t_select_pre)) dataf_file.write( '{} {:.2%} {:.2%}\n'.format(step, label_pre, select_pre)) dataf_file.close() train_time_file.close() if __name__ == '__main__': parser = argparse.ArgumentParser(description='Progressive Learning for One-Example re-ID') parser.add_argument('-d', '--dataset', type=str, default='mars', choices=datasets.names()) parser.add_argument('-b', '--batch-size', type=int, default=16) parser.add_argument('-f', '--fea', type=int, default=1024) parser.add_argument('--EF', type=int, default=10) parser.add_argument('--t', type=float, default=2) #不再tagper采样的倍率, 而是表示跨多少个step采样. parser.add_argument('--exp_order', type=str, default='0') parser.add_argument('--exp_name', type=str, default='atm') parser.add_argument('--exp_aim', type=str, default='for paper') parser.add_argument('--run_file',type=str,default='train.py') parser.add_argument('--log_name',type=str,default='pl_logs') parser.add_argument('--topk',type=int,default=2) parser.add_argument('--vsm_lambda',type=float,default=0.5) parser.add_argument('--resume', type=str, default='Yes') parser.add_argument('--max_frames', type=int, default=900) parser.add_argument('--loss', type=str, default='ExLoss', choices=['CrossEntropyLoss', 'ExLoss']) parser.add_argument('--init', type=float, default=-1) parser.add_argument('-m', '--momentum', type=float, default=0.5) parser.add_argument('-e', '--epochs', type=int, default=70) parser.add_argument('-s', '--step_size', type=int, default=55) parser.add_argument('--lamda', type=float, default=0.5) main(parser.parse_args())
1.703125
2
cms/tests/test_views.py
Ibrahem3amer/bala7
0
1287
from django.core.urlresolvers import resolve from django.urls import reverse from django.test import TestCase, RequestFactory from django.http import HttpRequest, Http404 from django.contrib.auth.models import User from unittest import skip from users.models import University, Faculty, Department, UserProfile from cms.models import Topic from cms.views import get_topic class AccessRestriction(TestCase): def setUp(self): self.user = User.objects.create(username='test_username', email='<EMAIL>', password='<PASSWORD>') self.uni = University.objects.create(name='test_university') self.fac = Faculty.objects.create(name='Test faculty') self.dep = Department.objects.create(name='Test dep') self.profile = UserProfile.objects.create(university=self.uni, faculty=self.fac, department=self.dep) self.topic = Topic.objects.create(name='cs', desc="test test test", faculty=self.fac, term=1) self.topic.department.add(self.dep) self.user.profile = self.profile self.profile.topics.add(self.topic) def test_return_topic_that_match_user(self): # Setup test request = RequestFactory() request = request.get(reverse('get_topic', kwargs={'dep_id': self.dep.id, 'topic_id': self.topic.id})) request.user = self.user # Exercise test response = get_topic(request, self.dep.id, self.topic.id) # Assert test self.assertEqual(200, response.status_code) def test_return_topic_that_has_different_department(self): # Setup test request = RequestFactory() request = request.get(reverse('get_topic', kwargs={'dep_id': self.dep.id, 'topic_id': self.topic.id})) request.user = self.user # Exercise test another_dep = Department.objects.create() try: response = get_topic(request, another_dep.id, self.topic.id) flag = False except Http404: flag = True # Assert test self.assertTrue(flag) def test_return_topic_that_does_not_exist(self): # Setup test request = RequestFactory() request = request.get(reverse('get_topic', kwargs={'dep_id': self.dep.id, 'topic_id': self.topic.id})) request.user = self.user # Exercise test try: response = get_topic(request, self.dep.id, 990) flag = False except Http404: flag = True # Assert test self.assertTrue(flag) def test_return_topic_that_outside_user_topics(self): # Setup test another_topic = Topic.objects.create(name='is', desc="test test test", faculty=self.fac, term=1) another_topic.department.add(self.dep) self.user.profile.topics.add(another_topic) request = RequestFactory() request = request.get(reverse('get_topic', kwargs={'dep_id': self.dep.id, 'topic_id': self.topic.id})) request.user = self.user # Exercise test outsider_topic = Topic.objects.create(name='ms', desc="test test test", faculty=self.fac, term=1) outsider_topic.department.add(self.dep) try: response = get_topic(request, self.dep.id, outsider_topic.id) flag = False except Http404: flag = True # Assert test self.assertTrue(flag) def test_get_topic_with_no_parameters(self): # Setup test another_topic = Topic.objects.create(name='is', desc="test test test", faculty=self.fac, term=1) another_topic.department.add(self.dep) self.user.profile.topics.add(another_topic) request = RequestFactory() request = request.get(reverse('get_topic', kwargs={'dep_id': self.dep.id, 'topic_id': self.topic.id})) request.user = self.user # Exercise test outsider_topic = Topic.objects.create(name='ms', desc="test test test", faculty=self.fac, term=1) outsider_topic.department.add(self.dep) try: response = get_topic(request) flag = False except Http404: flag = True # Assert test self.assertTrue(flag) class TableViews(TestCase): def setUp(self): self.user = 
User.objects.create_user(username='ssss', email='<EMAIL>', password='<PASSWORD>') self.fac = Faculty.objects.create() self.dep = Department.objects.create(faculty=self.fac) self.profile = UserProfile.objects.create(user=self.user, department=self.dep, faculty=self.fac) def test_page_load_on_get(self): # Setup test url = reverse('web_dep_table') request = self.client.login(username="ssss", password="<PASSWORD>") # Exercise test request = self.client.get(url) # Assert test self.assertEqual(200, request.status_code) self.assertTemplateUsed(request, 'tables/table_main.html') def test_page_redirect_on_post(self): # Setup test url = reverse('web_dep_table') request = self.client.login(username="ssss", password="<PASSWORD>") # Exercise test request = self.client.post(url) # Assert test self.assertEqual(302, request.status_code) def test_page_redirect_on_no_profile(self): # Setup test user = User.objects.create_user( username='test_username', email='<EMAIL>', password='<PASSWORD>' ) url = reverse('web_dep_table') request = self.client.login(username="test_username", password="<PASSWORD>") # Exercise test request = self.client.get(url) # Assert test self.assertEqual(302, request.status_code) class UserTableViews(TestCase): def setUp(self): self.user = User.objects.create_user(username='ssss', email='<EMAIL>', password='<PASSWORD>') self.fac = Faculty.objects.create() self.dep = Department.objects.create(faculty=self.fac) UserProfile.objects.create(user=self.user, department=self.dep, faculty=self.fac) self.topic = Topic.objects.create(name='topic name', desc='ddddd', term=1) self.topic.department.add(self.dep) def test_page_load_on_get(self): # Setup test url = reverse('web_user_table') request = self.client.login(username="ssss", password="<PASSWORD>") # Exercise test request = self.client.get(url) # Assert test self.assertEqual(200, request.status_code) self.assertTemplateUsed(request, 'tables/user_table.html') def test_page_load_if_no_profile(self): # Setup test url = reverse('web_user_table') another_user = User.objects.create_user(username='xxxss', email='<EMAIL>', password='<PASSWORD>') request = self.client.login(username="xxxss", password="<PASSWORD>") # Exercise test request = self.client.get(url) # Assert test self.assertEqual(200, request.status_code) self.assertTemplateUsed(request, 'tables/user_table.html') def test_post_when_no_choices(self): # Setup test url = reverse('web_user_table') data = {} request = self.client.login(username="xxxss", password="<PASSWORD>") # Exercise test request = self.client.post(url, data=data) # Assert test self.assertEqual(302, request.status_code)
1.710938
2
guardian/decorators.py
peopledoc/django-guardian
0
1335
from django.conf import settings from django.contrib.auth import REDIRECT_FIELD_NAME from django.core.exceptions import PermissionDenied from django.http import HttpResponseForbidden, HttpResponseRedirect from django.utils.functional import wraps from django.utils.http import urlquote from django.db.models import Model, get_model from django.db.models.base import ModelBase from django.db.models.query import QuerySet from django.shortcuts import get_object_or_404, render_to_response from django.template import RequestContext, TemplateDoesNotExist from guardian.conf import settings as guardian_settings from guardian.exceptions import GuardianError def permission_required(perm, lookup_variables=None, **kwargs): """ Decorator for views that checks whether a user has a particular permission enabled. Optionally, instances for which check should be made may be passed as an second argument or as a tuple parameters same as those passed to ``get_object_or_404`` but must be provided as pairs of strings. :param login_url: if denied, user would be redirected to location set by this parameter. Defaults to ``django.conf.settings.LOGIN_URL``. :param redirect_field_name: name of the parameter passed if redirected. Defaults to ``django.contrib.auth.REDIRECT_FIELD_NAME``. :param return_403: if set to ``True`` then instead of redirecting to the login page, response with status code 403 is returned ( ``django.http.HttpResponseForbidden`` instance or rendered template - see :setting:`GUARDIAN_RENDER_403`). Defaults to ``False``. :param accept_global_perms: if set to ``True``, then *object level permission* would be required **only if user does NOT have global permission** for target *model*. If turned on, makes this decorator like an extension over standard ``django.contrib.admin.decorators.permission_required`` as it would check for global permissions first. Defaults to ``False``. 
Examples:: @permission_required('auth.change_user', return_403=True) def my_view(request): return HttpResponse('Hello') @permission_required('auth.change_user', (User, 'username', 'username')) def my_view(request, username): user = get_object_or_404(User, username=username) return user.get_absolute_url() @permission_required('auth.change_user', (User, 'username', 'username', 'groups__name', 'group_name')) def my_view(request, username, group_name): user = get_object_or_404(User, username=username, group__name=group_name) return user.get_absolute_url() """ login_url = kwargs.pop('login_url', settings.LOGIN_URL) redirect_field_name = kwargs.pop('redirect_field_name', REDIRECT_FIELD_NAME) return_403 = kwargs.pop('return_403', False) accept_global_perms = kwargs.pop('accept_global_perms', False) # Check if perm is given as string in order not to decorate # view function itself which makes debugging harder if not isinstance(perm, basestring): raise GuardianError("First argument must be in format: " "'app_label.codename or a callable which return similar string'") def decorator(view_func): def _wrapped_view(request, *args, **kwargs): # if more than one parameter is passed to the decorator we try to # fetch object for which check would be made obj = None if lookup_variables: model, lookups = lookup_variables[0], lookup_variables[1:] # Parse model if isinstance(model, basestring): splitted = model.split('.') if len(splitted) != 2: raise GuardianError("If model should be looked up from " "string it needs format: 'app_label.ModelClass'") model = get_model(*splitted) elif type(model) in (Model, ModelBase, QuerySet): pass else: raise GuardianError("First lookup argument must always be " "a model, string pointing at app/model or queryset. " "Given: %s (type: %s)" % (model, type(model))) # Parse lookups if len(lookups) % 2 != 0: raise GuardianError("Lookup variables must be provided " "as pairs of lookup_string and view_arg") lookup_dict = {} for lookup, view_arg in zip(lookups[::2], lookups[1::2]): if view_arg not in kwargs: raise GuardianError("Argument %s was not passed " "into view function" % view_arg) lookup_dict[lookup] = kwargs[view_arg] obj = get_object_or_404(model, **lookup_dict) # Handles both original and with object provided permission check # as ``obj`` defaults to None has_perm = accept_global_perms and request.user.has_perm(perm) if not has_perm and not request.user.has_perm(perm, obj): if return_403: if guardian_settings.RENDER_403: try: response = render_to_response( guardian_settings.TEMPLATE_403, {}, RequestContext(request)) response.status_code = 403 return response except TemplateDoesNotExist, e: if settings.DEBUG: raise e elif guardian_settings.RAISE_403: raise PermissionDenied return HttpResponseForbidden() else: path = urlquote(request.get_full_path()) tup = login_url, redirect_field_name, path return HttpResponseRedirect("%s?%s=%s" % tup) return view_func(request, *args, **kwargs) return wraps(view_func)(_wrapped_view) return decorator def permission_required_or_403(perm, *args, **kwargs): """ Simple wrapper for permission_required decorator. Standard Django's permission_required decorator redirects user to login page in case permission check failed. This decorator may be used to return HttpResponseForbidden (status 403) instead of redirection. The only difference between ``permission_required`` decorator is that this one always set ``return_403`` parameter to ``True``. """ kwargs['return_403'] = True return permission_required(perm, *args, **kwargs)
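# --- Hedged usage sketch (editor's addition, not part of guardian) ---
# The 403 wrapper above has no example of its own in the docstrings; this is how it is
# typically applied in a views module. `articles.models.Article` and the
# 'articles.change_article' permission are hypothetical names used only for illustration.
from django.http import HttpResponse
from guardian.decorators import permission_required_or_403
from articles.models import Article  # hypothetical app/model

@permission_required_or_403('articles.change_article', (Article, 'slug', 'slug'))
def edit_article(request, slug):
    return HttpResponse('allowed to edit %s' % slug)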
1.351563
1
signin/tests.py
pptnz/swa_team2
0
1343
import json

from django.test import TestCase
from django.contrib.auth.models import User

from .models import CustomUser
from django.apps import apps
from .apps import SigninConfig


class SignInTest(TestCase):
    def setUp(self):
        self.django_user = User.objects.create_user(username='testusername', password='<PASSWORD>')
        self.custom_user = CustomUser.objects.create(django_user=self.django_user)

    def test_apps(self):
        self.assertEqual(SigninConfig.name, 'signin')
        self.assertEqual(apps.get_app_config('signin').name, 'signin')

    def test_sign_in_redirect_page(self):
        response = self.client.get('/')
        self.assertRedirects(response, '/sign_in/')

    def test_get(self):
        response = self.client.get('/sign_in/')
        self.assertEqual(response.status_code, 200)

    def test_wrong_username(self):
        response = self.client.post('/sign_in/', {'username': 'wrongusername', 'password': '<PASSWORD>'})
        self.assertEqual(response.status_code, 200)

    def test_wrong_password(self):
        response = self.client.post('/sign_in/', {'username': 'testusername', 'password': '<PASSWORD>'})
        self.assertEqual(response.status_code, 200)

    def test_login(self):
        response = self.client.post('/sign_in/', {'username': 'testusername', 'password': '<PASSWORD>'})
        self.assertRedirects(response, '/habitmaker/')  # todo: change this link

    def test_login_other_page(self):
        response = self.client.post('/sign_in/?next=/habitmaker/', {'username': 'testusername', 'password': '<PASSWORD>'})
        self.assertRedirects(response, '/habitmaker/')

    def test_form_not_valid(self):
        response = self.client.post('/sign_in/', {'username': 'testusername'})
        self.assertEqual(response.status_code, 200)

    def test_email_verification(self):
        self.custom_user.authenticate_email()
        self.assertTrue(self.custom_user.is_email_authenticated)

    def test_already_signed_in(self):
        self.client.login(username='testusername', password='<PASSWORD>')
        response = self.client.get('/sign_in/')
        self.assertRedirects(response, '/habitmaker/')
1.757813
2
config.py
RomashkaGang/Update_Checker
0
1367
#!/usr/bin/env python3
# encoding: utf-8

import os

# Whether to enable debugging. If enabled, exceptions raised during checks are no longer ignored.
# Recommended: enable in development, disable in production.
DEBUG_ENABLE = False

# SQLite database file name
SQLITE_FILE = "saved.db"

# Log file name
LOG_FILE = "log.txt"

# Whether to enable logging
ENABLE_LOGGER = True

# Interval between check loops (default: 180 minutes)
LOOP_CHECK_INTERVAL = 180 * 60

# Proxy server
PROXIES = "127.0.0.1:1080"

# Request timeout
TIMEOUT = 20

# Whether the proxy is a SOCKS5 proxy
IS_SOCKS = False

# Whether to enable sending messages via the Telegram bot
ENABLE_SENDMESSAGE = False

# Telegram bot token
TG_TOKEN = os.environ.get("TG_TOKEN", "")

# Send messages to...
TG_SENDTO = os.environ.get("TG_SENDTO", "")

if IS_SOCKS:
    _PROXIES_DIC = {"http": "socks5h://%s" % PROXIES, "https": "socks5h://%s" % PROXIES}
else:
    _PROXIES_DIC = {"http": PROXIES, "https": PROXIES}
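# --- Hedged usage sketch (editor's addition, not part of the original config) ---
# Shows how a checker might consume these settings with the `requests` library;
# the URL is hypothetical and `requests` is an assumed dependency of the project.
import requests
import config

response = requests.get("https://example.com/ota/latest.json",
                        proxies=config._PROXIES_DIC,
                        timeout=config.TIMEOUT)
print(response.status_code)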
1.335938
1
lingvo/core/egdd.py
ramonsanabria/lingvo
0
1375
# Lint as: python2, python3 # Copyright 2020 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Exponentiated Gradient Delta-Delta optimizer.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function # pylint: disable=g-direct-tensorflow-import from tensorflow.python.framework import ops from tensorflow.python.ops import array_ops from tensorflow.python.ops import clip_ops from tensorflow.python.ops import control_flow_ops from tensorflow.python.ops import linalg_ops from tensorflow.python.ops import math_ops from tensorflow.python.ops import state_ops from tensorflow.python.training import optimizer # pylint: enable=g-direct-tensorflow-import class EGDD(optimizer.Optimizer): """A version of GD Momentum with adaptive gain and learning rate. Exponentiated Gradient Delta-delta optimizer starts with a local gain of 1.0 for every weight and a lr_scale of 1.0 for all weights. The EGDD update rule applies: momentum <- mu * momentum + learning_rate * gain * grad var <- var - lr_scale * momentum The gain as well as the lr_scale are updated using the unnormalized exponentiated gradient algorithm [KW97]. Reference: TBA [KW97] <NAME>., & <NAME>. Exponentiated gradient versus gradient descent for linear predictors. Information and Computation, 1997. """ def __init__(self, learning_rate, momentum, beta=0.9, gain_learning_rate=0.01, scale_learning_rate=0.001, initial_gain=1.0, min_gain=1e-2, max_gain=1e2, initial_scale=1.0, min_scale=1e-1, max_scale=1e1, use_directions=True, use_signs=True, name="EGDD"): """Construct a new EG-DD optimizer. Args: learning_rate: A `Tensor` or a floating point value. The learning rate. momentum: A `Tensor` or a floating point value. beta: `float` decay rate of the gradient EMA. gain_learning_rate: `float` gain learning rate. scale_learning_rate: `float` scale learning rate. initial_gain: `float` initial gain. min_gain: `float` minimum gain. max_gain: `float` maximum gain, initial_scale: `float` initial scale. min_scale: `float` minimum learning rate scale. max_scale: `float` maximum learning rate scale. use_directions: `bool` whether to use directions only for scale updates. use_signs: `bool` whether to use the signs for updating gains. name: Optional name prefix for the operations created when applying gradients. Raises: ValueError: If the `initial_accumulator_value` is invalid. 
""" super(EGDD, self).__init__(False, name) self._learning_rate = learning_rate self._momentum = momentum self._beta = beta self._gain_learning_rate = gain_learning_rate self._scale_learning_rate = scale_learning_rate self._initial_gain = initial_gain self._min_gain = min_gain self._max_gain = max_gain self._initial_scale = initial_scale self._min_scale = min_scale self._max_scale = max_scale self._use_directions = use_directions self._use_signs = use_signs def _create_slots(self, var_list): for v in var_list: self._zeros_slot(v, "momentum", self._name) self._zeros_slot(v, "gbar", self._name) g_tensor = ops.convert_to_tensor(v) gain_init = self._initial_gain * array_ops.ones_like(g_tensor) _ = self._get_or_make_slot(v, self._initial_scale * array_ops.ones((1)), "lr_scale", self._name) _ = self._get_or_make_slot(v, gain_init, "gain", self._name) _ = self._get_or_make_slot(v, array_ops.zeros((1)), "counter", self._name) def _prepare(self): learning_rate = self._call_if_callable(self._learning_rate) self._learning_rate_tensor = ops.convert_to_tensor( learning_rate, name="learning_rate") momentum = self._call_if_callable(self._momentum) self._momentum_tensor = ops.convert_to_tensor(momentum, name="momentum") def _apply_dense(self, grad, var): lr_scale = self.get_slot(var, "lr_scale") momentum = self.get_slot(var, "momentum") gbar = self.get_slot(var, "gbar") gain = self.get_slot(var, "gain") counter = self.get_slot(var, "counter") counter_updated = state_ops.assign(counter, counter + 1) # lr_scale update uses normalized grad and momentum to be independent of dim normalized_grad = grad / (linalg_ops.norm(grad) + 1e-10) normalized_momentum = momentum / (linalg_ops.norm(momentum) + 1e-10) # Apply EG updates on lr_scale: # grad_lr_scale = -inner_product(current_grad, old_momentum) # lr_scale <- lr_scale * exp(-scale_learning_rate * grad_lr_scale) lr_scale_unnormalized_updated = clip_ops.clip_by_value( lr_scale * math_ops.exp( self._scale_learning_rate * math_ops.reduce_sum(grad * momentum)), self._min_scale, self._max_scale) lr_scale_normalized_updated = clip_ops.clip_by_value( lr_scale * math_ops.exp(self._scale_learning_rate * math_ops.reduce_sum( normalized_grad * normalized_momentum)), self._min_scale, self._max_scale) lr_scale_updated = state_ops.assign( lr_scale, array_ops.where(self._use_directions, lr_scale_normalized_updated, lr_scale_unnormalized_updated)) # remove the bias of zero initialization in gbar corrected_gbar = gbar / ( 1.0 - self._beta**math_ops.maximum(counter_updated - 1, 1)) # Apply EG updates on gain: # grad_gain = - current_grad * old_gbar # gain <- gain * exp(-gain_learning_rate * grad_gain) gain_unnormalized_updated = clip_ops.clip_by_value( gain * math_ops.exp(self._gain_learning_rate * grad * corrected_gbar), self._min_gain, self._max_gain) # Normalized update uses sign(grad) * sign(gbar) as a proxy for grad_gain. 
gain_normalized_updated = clip_ops.clip_by_value( gain * math_ops.exp(self._gain_learning_rate * math_ops.sign(grad) * math_ops.sign(gbar)), self._min_gain, self._max_gain) gain_updated = state_ops.assign( gain, array_ops.where(self._use_signs, gain_normalized_updated, gain_unnormalized_updated)) scaled_g = self._learning_rate_tensor * gain_updated * grad with ops.control_dependencies([lr_scale_updated, scaled_g]): momentum_updated = state_ops.assign( momentum, self._momentum_tensor * momentum + scaled_g) gbar_updated = state_ops.assign( gbar, self._beta * gbar + (1.0 - self._beta) * grad) with ops.control_dependencies([gbar_updated]): return state_ops.assign_sub(var, lr_scale_updated * momentum_updated) def _resource_apply_dense(self, grad, var): return self._apply_dense(grad, var) # Sparse gradients are not handled currently and is part of future work. def _resource_apply_sparse(self, grad_values, var, grad_indices): return control_flow_ops.no_op() def _apply_sparse(self, grad, var): return control_flow_ops.no_op()
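# --- Conceptual illustration (editor's addition, not part of lingvo) ---
# Restates the docstring's update rule with plain NumPy so the step is easy to follow;
# the adaptive gain / lr_scale updates themselves are omitted and all numbers are made up.
import numpy as np

learning_rate, mu, lr_scale = 0.1, 0.9, 1.0
var = np.array([1.0, -2.0])
momentum = np.zeros_like(var)
gain = np.ones_like(var)

grad = 2.0 * var                                        # gradient of sum(var**2)
momentum = mu * momentum + learning_rate * gain * grad  # momentum <- mu*momentum + lr*gain*grad
var = var - lr_scale * momentum                         # var <- var - lr_scale*momentum
print(var)                                              # [ 0.8 -1.6]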
1.859375
2
TM1py/Objects/ElementAttribute.py
damirishpreet/TM1py
19
1383
# -*- coding: utf-8 -*-

import json

from TM1py.Objects.TM1Object import TM1Object


class ElementAttribute(TM1Object):
    """ Abstraction of TM1 Element Attributes
    """
    valid_types = ['NUMERIC', 'STRING', 'ALIAS']

    def __init__(self, name, attribute_type):
        self.name = name
        self.attribute_type = attribute_type

    @property
    def name(self):
        return self._name

    @name.setter
    def name(self, value):
        self._name = value

    @property
    def attribute_type(self):
        return self._attribute_type

    @attribute_type.setter
    def attribute_type(self, value):
        if value.upper() in ElementAttribute.valid_types:
            self._attribute_type = value
        else:
            raise Exception('{} not a valid Attribute Type.'.format(value))

    @property
    def body_as_dict(self):
        return {"Name": self._name, "Type": self._attribute_type}

    @property
    def body(self):
        return json.dumps(self.body_as_dict, ensure_ascii=False)

    @classmethod
    def from_json(cls, element_attribute_as_json):
        return cls.from_dict(json.loads(element_attribute_as_json))

    @classmethod
    def from_dict(cls, element_attribute_as_dict):
        return cls(name=element_attribute_as_dict['Name'],
                   attribute_type=element_attribute_as_dict['Type'])

    def __eq__(self, other):
        # Compare against another ElementAttribute by name, or against a plain name string
        if isinstance(other, ElementAttribute):
            return self.name == other.name
        return self.name == other
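# --- Hedged usage sketch (editor's addition, not part of TM1py) ---
# Round-trips an attribute through its JSON body; the attribute name is illustrative.
attr = ElementAttribute(name="Currency", attribute_type="STRING")
print(attr.body)                         # {"Name": "Currency", "Type": "STRING"}
clone = ElementAttribute.from_json(attr.body)
print(clone.name, clone.attribute_type)  # Currency STRING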
2.015625
2
pommerman/__init__.py
rmccann01/playground
725
1407
'''Entry point into the pommerman module'''
import gym
import inspect

from . import agents
from . import configs
from . import constants
from . import forward_model
from . import helpers
from . import utility
from . import network

gym.logger.set_level(40)
REGISTRY = None


def _register():
    global REGISTRY
    REGISTRY = []
    for name, f in inspect.getmembers(configs, inspect.isfunction):
        if not name.endswith('_env'):
            continue
        config = f()
        gym.envs.registration.register(
            id=config['env_id'],
            entry_point=config['env_entry_point'],
            kwargs=config['env_kwargs']
        )
        REGISTRY.append(config['env_id'])


# Register environments with gym
_register()


def make(config_id, agent_list, game_state_file=None, render_mode='human'):
    '''Makes the pommerman env and registers it with gym'''
    assert config_id in REGISTRY, "Unknown configuration '{}'. " \
        "Possible values: {}".format(config_id, REGISTRY)
    env = gym.make(config_id)

    for id_, agent in enumerate(agent_list):
        assert isinstance(agent, agents.BaseAgent)
        # NOTE: This is IMPORTANT so that the agent character is initialized
        agent.init_agent(id_, env.spec._kwargs['game_type'])

    env.set_agents(agent_list)
    env.set_init_game_state(game_state_file)
    env.set_render_mode(render_mode)
    return env


from . import cli
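# --- Hedged usage sketch (editor's addition, not part of the package) ---
# Assumes the standard 'PommeFFACompetition-v0' configuration registered above exists.
import pommerman
from pommerman import agents

agent_list = [agents.SimpleAgent() for _ in range(4)]
env = pommerman.make('PommeFFACompetition-v0', agent_list)

state = env.reset()
done = False
while not done:
    actions = env.act(state)
    state, reward, done, info = env.step(actions)
env.close()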
1.710938
2
app1.py
FreakX23/EBook_Training
0
1423
# This Part will gather Infos and demonstrate the use of Variables.
usrName = input("What is your Name?")
usrAge = int(input("What is your Age?"))
usrGPA = float(input("What is your GPA?"))

print()  # cheap way to get a new line
print("Hello, %s" % (usrName))
print("Did you know that in two years you will be %d years old?" % (usrAge + 2))
print("Also you need to improve your GPA by %f points to have a perfect score." % (4.0 - usrGPA))
print()
2.5625
3
run_locally.py
nationalarchives/tdr-service-unavailable
0
1431
from app import app

app.run()
0.077637
0
nlpgnn/gnn/RGCNConv.py
ojipadeson/NLPGNN
263
1447
#! usr/bin/env python3 # -*- coding:utf-8 -*- """ @Author:<NAME> Usage: node_embeddings = tf.random.normal(shape=(5, 3)) adjacency_lists = [ tf.constant([[0, 1], [2, 4], [2, 4]], dtype=tf.int32), tf.constant([[0, 1], [2, 4], [2, 4]], dtype=tf.int32) ] layer = RGraphConvolution(out_features=12) x = layer(GNNInput(node_embeddings, adjacency_lists)) """ import tensorflow as tf from nlpgnn.gnn.messagepassing import MessagePassing class RGraphConvolution(MessagePassing): def __init__(self, out_features, epsion=1e-7, aggr="sum", normalize=True, kernel_initializer='glorot_uniform', bias_initializer='zeros', use_bias=True, **kwargs): super(RGraphConvolution, self).__init__(aggr, **kwargs) self.kernel_initializer = tf.keras.initializers.get(kernel_initializer) self.bias_initializer = tf.keras.initializers.get(bias_initializer) self.use_bias = use_bias self.normalize = normalize self.out_features = out_features self.epsion = epsion def build(self, input_shapes): node_embedding_shapes = input_shapes.node_embeddings adjacency_list_shapes = input_shapes.adjacency_lists num_edge_type = len(adjacency_list_shapes) in_features = node_embedding_shapes[-1] self._edge_type_weights = [] self._edge_type_bias = [] for i in range(num_edge_type): weight = self.add_weight( shape=(in_features, self.out_features), initializer=self.kernel_initializer, name='wt_{}'.format(i), ) self._edge_type_weights.append(weight) if self.use_bias: self.bias = self.add_weight( shape=(self.out_features), initializer=self.bias_initializer, name='b', ) else: self.bias = None self.weight_o = self.add_weight( shape=(in_features, self.out_features), initializer=self.kernel_initializer, name='wo', ) self.built = True def message_function(self, edge_source_states, edge_target_states, num_incoming_to_node_per_message, num_outing_to_node_per_message, edge_type_idx): """ :param edge_source_states: [M,H] :param edge_target_states: [M,H] :param num_incoming_to_node_per_message:[M] :param edge_type_idx: :param training: :return: """ weight_r = self._edge_type_weights[edge_type_idx] messages = tf.linalg.matmul(edge_source_states, weight_r) if self.normalize: messages = ( tf.expand_dims(1.0 / (tf.cast(num_incoming_to_node_per_message, tf.float32) + self.epsion), axis=-1) * messages ) return messages def call(self, inputs): aggr_out = self.propagate(inputs) # message_passing + update aggr_out += tf.linalg.matmul(inputs.node_embeddings, self.weight_o) if self.bias is not None: aggr_out += self.bias return aggr_out
2.125
2
dev/Gems/CloudGemMetric/v1/AWS/python/windows/Lib/numba/transforms.py
BadDevCode/lumberyard
1,738
1455
""" Implement transformation on Numba IR """ from __future__ import absolute_import, print_function from collections import namedtuple, defaultdict import logging from numba.analysis import compute_cfg_from_blocks, find_top_level_loops from numba import ir, errors, ir_utils from numba.analysis import compute_use_defs _logger = logging.getLogger(__name__) def _extract_loop_lifting_candidates(cfg, blocks): """ Returns a list of loops that are candidate for loop lifting """ # check well-formed-ness of the loop def same_exit_point(loop): "all exits must point to the same location" outedges = set() for k in loop.exits: succs = set(x for x, _ in cfg.successors(k)) if not succs: # If the exit point has no successor, it contains an return # statement, which is not handled by the looplifting code. # Thus, this loop is not a candidate. _logger.debug("return-statement in loop.") return False outedges |= succs ok = len(outedges) == 1 _logger.debug("same_exit_point=%s (%s)", ok, outedges) return ok def one_entry(loop): "there is one entry" ok = len(loop.entries) == 1 _logger.debug("one_entry=%s", ok) return ok def cannot_yield(loop): "cannot have yield inside the loop" insiders = set(loop.body) | set(loop.entries) | set(loop.exits) for blk in map(blocks.__getitem__, insiders): for inst in blk.body: if isinstance(inst, ir.Assign): if isinstance(inst.value, ir.Yield): _logger.debug("has yield") return False _logger.debug("no yield") return True _logger.info('finding looplift candidates') # the check for cfg.entry_point in the loop.entries is to prevent a bad # rewrite where a prelude for a lifted loop would get written into block -1 # if a loop entry were in block 0 candidates = [] for loop in find_top_level_loops(cfg): _logger.debug("top-level loop: %s", loop) if (same_exit_point(loop) and one_entry(loop) and cannot_yield(loop) and cfg.entry_point() not in loop.entries): candidates.append(loop) _logger.debug("add candidate: %s", loop) return candidates def find_region_inout_vars(blocks, livemap, callfrom, returnto, body_block_ids): """Find input and output variables to a block region. """ inputs = livemap[callfrom] outputs = livemap[returnto] # ensure live variables are actually used in the blocks, else remove, # saves having to create something valid to run through postproc # to achieve similar loopblocks = {} for k in body_block_ids: loopblocks[k] = blocks[k] used_vars = set() def_vars = set() defs = compute_use_defs(loopblocks) for vs in defs.usemap.values(): used_vars |= vs for vs in defs.defmap.values(): def_vars |= vs used_or_defined = used_vars | def_vars # note: sorted for stable ordering inputs = sorted(set(inputs) & used_or_defined) outputs = sorted(set(outputs) & used_or_defined & def_vars) return inputs, outputs _loop_lift_info = namedtuple('loop_lift_info', 'loop,inputs,outputs,callfrom,returnto') def _loop_lift_get_candidate_infos(cfg, blocks, livemap): """ Returns information on looplifting candidates. 
""" loops = _extract_loop_lifting_candidates(cfg, blocks) loopinfos = [] for loop in loops: [callfrom] = loop.entries # requirement checked earlier an_exit = next(iter(loop.exits)) # anyone of the exit block if len(loop.exits) > 1: # Pre-Py3.8 may have multiple exits [(returnto, _)] = cfg.successors(an_exit) # requirement checked earlier else: # Post-Py3.8 DO NOT have multiple exits returnto = an_exit local_block_ids = set(loop.body) | set(loop.entries) inputs, outputs = find_region_inout_vars( blocks=blocks, livemap=livemap, callfrom=callfrom, returnto=returnto, body_block_ids=local_block_ids, ) lli = _loop_lift_info(loop=loop, inputs=inputs, outputs=outputs, callfrom=callfrom, returnto=returnto) loopinfos.append(lli) return loopinfos def _loop_lift_modify_call_block(liftedloop, block, inputs, outputs, returnto): """ Transform calling block from top-level function to call the lifted loop. """ scope = block.scope loc = block.loc blk = ir.Block(scope=scope, loc=loc) ir_utils.fill_block_with_call( newblock=blk, callee=liftedloop, label_next=returnto, inputs=inputs, outputs=outputs, ) return blk def _loop_lift_prepare_loop_func(loopinfo, blocks): """ Inplace transform loop blocks for use as lifted loop. """ entry_block = blocks[loopinfo.callfrom] scope = entry_block.scope loc = entry_block.loc # Lowering assumes the first block to be the one with the smallest offset firstblk = min(blocks) - 1 blocks[firstblk] = ir_utils.fill_callee_prologue( block=ir.Block(scope=scope, loc=loc), inputs=loopinfo.inputs, label_next=loopinfo.callfrom, ) blocks[loopinfo.returnto] = ir_utils.fill_callee_epilogue( block=ir.Block(scope=scope, loc=loc), outputs=loopinfo.outputs, ) def _loop_lift_modify_blocks(func_ir, loopinfo, blocks, typingctx, targetctx, flags, locals): """ Modify the block inplace to call to the lifted-loop. Returns a dictionary of blocks of the lifted-loop. """ from numba.dispatcher import LiftedLoop # Copy loop blocks loop = loopinfo.loop loopblockkeys = set(loop.body) | set(loop.entries) if len(loop.exits) > 1: # Pre-Py3.8 may have multiple exits loopblockkeys |= loop.exits loopblocks = dict((k, blocks[k].copy()) for k in loopblockkeys) # Modify the loop blocks _loop_lift_prepare_loop_func(loopinfo, loopblocks) # Create a new IR for the lifted loop lifted_ir = func_ir.derive(blocks=loopblocks, arg_names=tuple(loopinfo.inputs), arg_count=len(loopinfo.inputs), force_non_generator=True) liftedloop = LiftedLoop(lifted_ir, typingctx, targetctx, flags, locals) # modify for calling into liftedloop callblock = _loop_lift_modify_call_block(liftedloop, blocks[loopinfo.callfrom], loopinfo.inputs, loopinfo.outputs, loopinfo.returnto) # remove blocks for k in loopblockkeys: del blocks[k] # update main interpreter callsite into the liftedloop blocks[loopinfo.callfrom] = callblock return liftedloop def loop_lifting(func_ir, typingctx, targetctx, flags, locals): """ Loop lifting transformation. 
Given a interpreter `func_ir` returns a 2 tuple of `(toplevel_interp, [loop0_interp, loop1_interp, ....])` """ blocks = func_ir.blocks.copy() cfg = compute_cfg_from_blocks(blocks) loopinfos = _loop_lift_get_candidate_infos(cfg, blocks, func_ir.variable_lifetime.livemap) loops = [] if loopinfos: _logger.debug('loop lifting this IR with %d candidates:\n%s', len(loopinfos), func_ir.dump_to_string()) for loopinfo in loopinfos: lifted = _loop_lift_modify_blocks(func_ir, loopinfo, blocks, typingctx, targetctx, flags, locals) loops.append(lifted) # Make main IR main = func_ir.derive(blocks=blocks) return main, loops def canonicalize_cfg_single_backedge(blocks): """ Rewrite loops that have multiple backedges. """ cfg = compute_cfg_from_blocks(blocks) newblocks = blocks.copy() def new_block_id(): return max(newblocks.keys()) + 1 def has_multiple_backedges(loop): count = 0 for k in loop.body: blk = blocks[k] edges = blk.terminator.get_targets() # is a backedge? if loop.header in edges: count += 1 if count > 1: # early exit return True return False def yield_loops_with_multiple_backedges(): for lp in cfg.loops().values(): if has_multiple_backedges(lp): yield lp def replace_target(term, src, dst): def replace(target): return (dst if target == src else target) if isinstance(term, ir.Branch): return ir.Branch(cond=term.cond, truebr=replace(term.truebr), falsebr=replace(term.falsebr), loc=term.loc) elif isinstance(term, ir.Jump): return ir.Jump(target=replace(term.target), loc=term.loc) else: assert not term.get_targets() return term def rewrite_single_backedge(loop): """ Add new tail block that gathers all the backedges """ header = loop.header tailkey = new_block_id() for blkkey in loop.body: blk = newblocks[blkkey] if header in blk.terminator.get_targets(): newblk = blk.copy() # rewrite backedge into jumps to new tail block newblk.body[-1] = replace_target(blk.terminator, header, tailkey) newblocks[blkkey] = newblk # create new tail block entryblk = newblocks[header] tailblk = ir.Block(scope=entryblk.scope, loc=entryblk.loc) # add backedge tailblk.append(ir.Jump(target=header, loc=tailblk.loc)) newblocks[tailkey] = tailblk for loop in yield_loops_with_multiple_backedges(): rewrite_single_backedge(loop) return newblocks def canonicalize_cfg(blocks): """ Rewrite the given blocks to canonicalize the CFG. Returns a new dictionary of blocks. """ return canonicalize_cfg_single_backedge(blocks) def with_lifting(func_ir, typingctx, targetctx, flags, locals): """With-lifting transformation Rewrite the IR to extract all withs. Only the top-level withs are extracted. 
Returns the (the_new_ir, the_lifted_with_ir) """ from numba import postproc def dispatcher_factory(func_ir, objectmode=False, **kwargs): from numba.dispatcher import LiftedWith, ObjModeLiftedWith myflags = flags.copy() if objectmode: # Lifted with-block cannot looplift myflags.enable_looplift = False # Lifted with-block uses object mode myflags.enable_pyobject = True myflags.force_pyobject = True myflags.no_cpython_wrapper = False cls = ObjModeLiftedWith else: cls = LiftedWith return cls(func_ir, typingctx, targetctx, myflags, locals, **kwargs) postproc.PostProcessor(func_ir).run() # ensure we have variable lifetime assert func_ir.variable_lifetime vlt = func_ir.variable_lifetime blocks = func_ir.blocks.copy() # find where with-contexts regions are withs = find_setupwiths(blocks) cfg = vlt.cfg _legalize_withs_cfg(withs, cfg, blocks) # For each with-regions, mutate them according to # the kind of contextmanager sub_irs = [] for (blk_start, blk_end) in withs: body_blocks = [] for node in _cfg_nodes_in_region(cfg, blk_start, blk_end): body_blocks.append(node) _legalize_with_head(blocks[blk_start]) # Find the contextmanager cmkind, extra = _get_with_contextmanager(func_ir, blocks, blk_start) # Mutate the body and get new IR sub = cmkind.mutate_with_body(func_ir, blocks, blk_start, blk_end, body_blocks, dispatcher_factory, extra) sub_irs.append(sub) if not sub_irs: # Unchanged new_ir = func_ir else: new_ir = func_ir.derive(blocks) return new_ir, sub_irs def _get_with_contextmanager(func_ir, blocks, blk_start): """Get the global object used for the context manager """ _illegal_cm_msg = "Illegal use of context-manager." def get_var_dfn(var): """Get the definition given a variable""" return func_ir.get_definition(var) def get_ctxmgr_obj(var_ref): """Return the context-manager object and extra info. The extra contains the arguments if the context-manager is used as a call. """ # If the contextmanager used as a Call dfn = func_ir.get_definition(var_ref) if isinstance(dfn, ir.Expr) and dfn.op == 'call': args = [get_var_dfn(x) for x in dfn.args] kws = {k: get_var_dfn(v) for k, v in dfn.kws} extra = {'args': args, 'kwargs': kws} var_ref = dfn.func else: extra = None ctxobj = ir_utils.guard(ir_utils.find_global_value, func_ir, var_ref) # check the contextmanager object if ctxobj is ir.UNDEFINED: raise errors.CompilerError( "Undefined variable used as context manager", loc=blocks[blk_start].loc, ) if ctxobj is None: raise errors.CompilerError(_illegal_cm_msg, loc=dfn.loc) return ctxobj, extra # Scan the start of the with-region for the contextmanager for stmt in blocks[blk_start].body: if isinstance(stmt, ir.EnterWith): var_ref = stmt.contextmanager ctxobj, extra = get_ctxmgr_obj(var_ref) if not hasattr(ctxobj, 'mutate_with_body'): raise errors.CompilerError( "Unsupported context manager in use", loc=blocks[blk_start].loc, ) return ctxobj, extra # No contextmanager found? raise errors.CompilerError( "malformed with-context usage", loc=blocks[blk_start].loc, ) def _legalize_with_head(blk): """Given *blk*, the head block of the with-context, check that it doesn't do anything else. 
""" counters = defaultdict(int) for stmt in blk.body: counters[type(stmt)] += 1 if counters.pop(ir.EnterWith) != 1: raise errors.CompilerError( "with's head-block must have exactly 1 ENTER_WITH", loc=blk.loc, ) if counters.pop(ir.Jump) != 1: raise errors.CompilerError( "with's head-block must have exactly 1 JUMP", loc=blk.loc, ) # Can have any number of del counters.pop(ir.Del, None) # There MUST NOT be any other statements if counters: raise errors.CompilerError( "illegal statements in with's head-block", loc=blk.loc, ) def _cfg_nodes_in_region(cfg, region_begin, region_end): """Find the set of CFG nodes that are in the given region """ region_nodes = set() stack = [region_begin] while stack: tos = stack.pop() succs, _ = zip(*cfg.successors(tos)) nodes = set([node for node in succs if node not in region_nodes and node != region_end]) stack.extend(nodes) region_nodes |= nodes return region_nodes def _legalize_withs_cfg(withs, cfg, blocks): """Verify the CFG of the with-context(s). """ doms = cfg.dominators() postdoms = cfg.post_dominators() # Verify that the with-context has no side-exits for s, e in withs: loc = blocks[s].loc if s not in doms[e]: # Not sure what condition can trigger this error. msg = "Entry of with-context not dominating the exit." raise errors.CompilerError(msg, loc=loc) if e not in postdoms[s]: msg = ( "Does not support with-context that contain branches " "(i.e. break/return/raise) that can leave the with-context. " "Details: exit of with-context not post-dominating the entry. " ) raise errors.CompilerError(msg, loc=loc) def find_setupwiths(blocks): """Find all top-level with. Returns a list of ranges for the with-regions. """ def find_ranges(blocks): for blk in blocks.values(): for ew in blk.find_insts(ir.EnterWith): yield ew.begin, ew.end def previously_occurred(start, known_ranges): for a, b in known_ranges: if s >= a and s < b: return True return False known_ranges = [] for s, e in sorted(find_ranges(blocks)): if not previously_occurred(s, known_ranges): if e not in blocks: # this's possible if there's an exit path in the with-block raise errors.CompilerError( 'unsupported controlflow due to return/raise ' 'statements inside with block' ) assert s in blocks, 'starting offset is not a label' known_ranges.append((s, e)) return known_ranges
2.390625
2
frappe/patches/v13_0/remove_web_view.py
chentaoz/frappe
3,755
1463
import frappe


def execute():
    frappe.delete_doc_if_exists("DocType", "Web View")
    frappe.delete_doc_if_exists("DocType", "Web View Component")
    frappe.delete_doc_if_exists("DocType", "CSS Class")
0.910156
1
letsencrypt/setup.py
ccppuu/certbot
1
1471
import codecs
import os
import sys

from setuptools import setup
from setuptools import find_packages


def read_file(filename, encoding='utf8'):
    """Read unicode from given file."""
    with codecs.open(filename, encoding=encoding) as fd:
        return fd.read()


here = os.path.abspath(os.path.dirname(__file__))
readme = read_file(os.path.join(here, 'README.rst'))

# This package is a simple shim around certbot
install_requires = ['certbot']

version = '0.7.0.dev0'

setup(
    name='letsencrypt',
    version=version,
    description="ACME client",
    long_description=readme,
    url='https://github.com/letsencrypt/letsencrypt',
    author="Certbot Project",
    author_email='<EMAIL>',
    license='Apache License 2.0',
    classifiers=[
        'Development Status :: 3 - Alpha',
        'Environment :: Console',
        'Environment :: Console :: Curses',
        'Intended Audience :: System Administrators',
        'License :: OSI Approved :: Apache Software License',
        'Operating System :: POSIX :: Linux',
        'Programming Language :: Python',
        'Programming Language :: Python :: 2',
        'Programming Language :: Python :: 2.6',
        'Programming Language :: Python :: 2.7',
        'Topic :: Internet :: WWW/HTTP',
        'Topic :: Security',
        'Topic :: System :: Installation/Setup',
        'Topic :: System :: Networking',
        'Topic :: System :: Systems Administration',
        'Topic :: Utilities',
    ],
    packages=find_packages(),
    include_package_data=True,
    install_requires=install_requires,
    entry_points={
        'console_scripts': [
            'letsencrypt = certbot.main:main',
        ],
    },
)
1.46875
1
API/migrations/0005_alter_news_date_time_alter_news_headline.py
kgarchie/ReSTful-Django-API
0
1479
# Generated by Django 4.0.3 on 2022-03-23 14:31

import datetime
from django.db import migrations, models


class Migration(migrations.Migration):

    dependencies = [
        ('API', '0004_alter_news_date_time_alter_news_headline'),
    ]

    operations = [
        migrations.AlterField(
            model_name='news',
            name='date_time',
            field=models.DateTimeField(default=datetime.datetime(2022, 3, 23, 17, 31, 17, 27766)),
        ),
        migrations.AlterField(
            model_name='news',
            name='headline',
            field=models.CharField(max_length=100),
        ),
    ]
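# --- Hedged side note as code (editor's addition, not part of the migration) ---
# The DateTimeField default above was evaluated once at makemigrations time, so every new
# row gets that fixed timestamp. A callable default is the usual way to record creation
# time instead; the model below is only an illustration, not the project's actual models.py.
from django.db import models
from django.utils import timezone

class News(models.Model):
    date_time = models.DateTimeField(default=timezone.now)
    headline = models.CharField(max_length=100)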
1.0625
1
hoomd/mpcd/test-py/stream_slit_test.py
schwendp/hoomd-blue
2
1495
# Copyright (c) 2009-2019 The Regents of the University of Michigan # This file is part of the HOOMD-blue project, released under the BSD 3-Clause License. # Maintainer: mphoward import unittest import numpy as np import hoomd from hoomd import md from hoomd import mpcd # unit tests for mpcd slit streaming geometry class mpcd_stream_slit_test(unittest.TestCase): def setUp(self): # establish the simulation context hoomd.context.initialize() # set the decomposition in z for mpi builds if hoomd.comm.get_num_ranks() > 1: hoomd.comm.decomposition(nz=2) # default testing configuration hoomd.init.read_snapshot(hoomd.data.make_snapshot(N=0, box=hoomd.data.boxdim(L=10.))) # initialize the system from the starting snapshot snap = mpcd.data.make_snapshot(N=2) snap.particles.position[:] = [[4.95,-4.95,3.85],[0.,0.,-3.8]] snap.particles.velocity[:] = [[1.,-1.,1.],[-1.,-1.,-1.]] self.s = mpcd.init.read_snapshot(snap) mpcd.integrator(dt=0.1) # test creation can happen (with all parameters set) def test_create(self): mpcd.stream.slit(H=4., V=0.1, boundary="no_slip", period=2) # test for setting parameters def test_set_params(self): slit = mpcd.stream.slit(H=4.) self.assertAlmostEqual(slit.H, 4.) self.assertAlmostEqual(slit.V, 0.) self.assertEqual(slit.boundary, "no_slip") self.assertAlmostEqual(slit._cpp.geometry.getH(), 4.) self.assertAlmostEqual(slit._cpp.geometry.getVelocity(), 0.) self.assertEqual(slit._cpp.geometry.getBoundaryCondition(), mpcd._mpcd.boundary.no_slip) # change H and also ensure other parameters stay the same slit.set_params(H=2.) self.assertAlmostEqual(slit.H, 2.) self.assertAlmostEqual(slit.V, 0.) self.assertEqual(slit.boundary, "no_slip") self.assertAlmostEqual(slit._cpp.geometry.getH(), 2.) self.assertAlmostEqual(slit._cpp.geometry.getVelocity(), 0.) self.assertEqual(slit._cpp.geometry.getBoundaryCondition(), mpcd._mpcd.boundary.no_slip) # change V slit.set_params(V=0.1) self.assertAlmostEqual(slit.V, 0.1) self.assertAlmostEqual(slit._cpp.geometry.getVelocity(), 0.1) # change BCs slit.set_params(boundary="slip") self.assertEqual(slit.boundary, "slip") self.assertEqual(slit._cpp.geometry.getBoundaryCondition(), mpcd._mpcd.boundary.slip) # test for invalid boundary conditions being set def test_bad_boundary(self): slit = mpcd.stream.slit(H=4.) slit.set_params(boundary="no_slip") slit.set_params(boundary="slip") with self.assertRaises(ValueError): slit.set_params(boundary="invalid") # test basic stepping behavior with no slip boundary conditions def test_step_noslip(self): mpcd.stream.slit(H=4.) 
# take one step hoomd.run(1) snap = self.s.take_snapshot() if hoomd.comm.get_rank() == 0: np.testing.assert_array_almost_equal(snap.particles.position[0], [-4.95,4.95,3.95]) np.testing.assert_array_almost_equal(snap.particles.velocity[0], [1.,-1.,1.]) np.testing.assert_array_almost_equal(snap.particles.position[1], [-0.1,-0.1,-3.9]) np.testing.assert_array_almost_equal(snap.particles.velocity[1], [-1.,-1.,-1.]) # take another step where one particle will now hit the wall hoomd.run(1) snap = self.s.take_snapshot() if hoomd.comm.get_rank() == 0: np.testing.assert_array_almost_equal(snap.particles.position[0], [-4.95,4.95,3.95]) np.testing.assert_array_almost_equal(snap.particles.velocity[0], [-1.,1.,-1.]) np.testing.assert_array_almost_equal(snap.particles.position[1], [-0.2,-0.2,-4.0]) np.testing.assert_array_almost_equal(snap.particles.velocity[1], [-1.,-1.,-1.]) # take another step, wrapping the second particle through the boundary hoomd.run(1) snap = self.s.take_snapshot() if hoomd.comm.get_rank() == 0: np.testing.assert_array_almost_equal(snap.particles.position[0], [4.95,-4.95,3.85]) np.testing.assert_array_almost_equal(snap.particles.velocity[0], [-1.,1.,-1.]) np.testing.assert_array_almost_equal(snap.particles.position[1], [-0.1,-0.1,-3.9]) np.testing.assert_array_almost_equal(snap.particles.velocity[1], [1.,1.,1.]) def test_step_moving_wall(self): mpcd.stream.slit(H=4., boundary="no_slip", V=1.0, period=3) # change velocity of lower particle so it is translating relative to wall snap = self.s.take_snapshot() if hoomd.comm.get_rank() == 0: snap.particles.velocity[1] = [-2.,-1.,-1.] self.s.restore_snapshot(snap) # run one step and check bounce back of particles hoomd.run(1) snap = self.s.take_snapshot() if hoomd.comm.get_rank() == 0: # the first particle is matched exactly to the wall speed, and so it will translate at # same velocity along +x for 3 steps. It will bounce back in y and z to where it started. # (vx stays the same, and vy and vz flip.) np.testing.assert_array_almost_equal(snap.particles.position[0], [-4.75,-4.95,3.85]) np.testing.assert_array_almost_equal(snap.particles.velocity[0], [1.,1.,-1.]) # the second particle has y and z velocities flip again, and since it started closer, # it moves relative to original position. 
np.testing.assert_array_almost_equal(snap.particles.position[1], [-0.4,-0.1,-3.9]) np.testing.assert_array_almost_equal(snap.particles.velocity[1], [0.,1.,1.]) # test basic stepping behavior with slip boundary conditions def test_step_slip(self): mpcd.stream.slit(H=4., boundary="slip") # take one step hoomd.run(1) snap = self.s.take_snapshot() if hoomd.comm.get_rank() == 0: np.testing.assert_array_almost_equal(snap.particles.position[0], [-4.95,4.95,3.95]) np.testing.assert_array_almost_equal(snap.particles.velocity[0], [1.,-1.,1.]) np.testing.assert_array_almost_equal(snap.particles.position[1], [-0.1,-0.1,-3.9]) np.testing.assert_array_almost_equal(snap.particles.velocity[1], [-1.,-1.,-1.]) # take another step where one particle will now hit the wall hoomd.run(1) snap = self.s.take_snapshot() if hoomd.comm.get_rank() == 0: np.testing.assert_array_almost_equal(snap.particles.position[0], [-4.85,4.85,3.95]) np.testing.assert_array_almost_equal(snap.particles.velocity[0], [1.,-1.,-1.]) np.testing.assert_array_almost_equal(snap.particles.position[1], [-0.2,-0.2,-4.0]) np.testing.assert_array_almost_equal(snap.particles.velocity[1], [-1.,-1.,-1.]) # take another step, wrapping the second particle through the boundary hoomd.run(1) snap = self.s.take_snapshot() if hoomd.comm.get_rank() == 0: np.testing.assert_array_almost_equal(snap.particles.position[0], [-4.75,4.75,3.85]) np.testing.assert_array_almost_equal(snap.particles.velocity[0], [1.,-1.,-1.]) np.testing.assert_array_almost_equal(snap.particles.position[1], [-0.3,-0.3,-3.9]) np.testing.assert_array_almost_equal(snap.particles.velocity[1], [-1.,-1.,1.]) # test that setting the slit size too large raises an error def test_validate_box(self): # initial configuration is invalid slit = mpcd.stream.slit(H=10.) with self.assertRaises(RuntimeError): hoomd.run(1) # now it should be valid slit.set_params(H=4.) hoomd.run(2) # make sure we can invalidate it again slit.set_params(H=10.) with self.assertRaises(RuntimeError): hoomd.run(1) # test that particles out of bounds can be caught def test_out_of_bounds(self): slit = mpcd.stream.slit(H=3.8) with self.assertRaises(RuntimeError): hoomd.run(1) slit.set_params(H=3.85) hoomd.run(1) # test that virtual particle filler can be attached, removed, and updated def test_filler(self): # initialization of a filler slit = mpcd.stream.slit(H=4.) slit.set_filler(density=5., kT=1.0, seed=42, type='A') self.assertTrue(slit._filler is not None) # run should be able to setup the filler, although this all happens silently hoomd.run(1) # changing the geometry should still be OK with a run slit.set_params(V=1.0) hoomd.run(1) # changing filler should be allowed slit.set_filler(density=10., kT=1.5, seed=7) self.assertTrue(slit._filler is not None) hoomd.run(1) # assert an error is raised if we set a bad particle type with self.assertRaises(RuntimeError): slit.set_filler(density=5., kT=1.0, seed=42, type='B') # assert an error is raised if we set a bad density with self.assertRaises(RuntimeError): slit.set_filler(density=-1.0, kT=1.0, seed=42) # removing the filler should still allow a run slit.remove_filler() self.assertTrue(slit._filler is None) hoomd.run(1) def tearDown(self): del self.s if __name__ == '__main__': unittest.main(argv = ['test.py', '-v'])
1.765625
2
models/layers/mesh_conv.py
CallumMcMahon/MeshCNN
2
1503
import torch import torch.nn as nn import torch.nn.functional as F class MeshConv(nn.Module): """ Computes convolution between edges and 4 incident (1-ring) edge neighbors in the forward pass takes: x: edge features (Batch x Features x Edges) mesh: list of mesh data-structure (len(mesh) == Batch) and applies convolution """ def __init__(self, in_channels, out_channels, k=5, bias=True): super(MeshConv, self).__init__() self.conv = nn.Conv2d(in_channels=in_channels, out_channels=out_channels, kernel_size=(1, k), bias=bias) self.k = k def forward(self, x, mesh): x = x.squeeze(-1) # pad gemm G = torch.cat([self.pad_gemm(i, x.shape[2], x.device) for i in mesh], 0) # build 'neighborhood image' and apply convolution G = self.create_GeMM(x, G) x = self.conv(G) return x def flatten_gemm_inds(self, Gi): (b, ne, nn) = Gi.shape ne += 1 batch_n = torch.floor(torch.arange(b * ne, device=Gi.device).float() / ne).view(b, ne) add_fac = batch_n * ne add_fac = add_fac.view(b, ne, 1) add_fac = add_fac.repeat(1, 1, nn) # flatten Gi Gi = Gi.float() + add_fac[:, 1:, :] return Gi def create_GeMM(self, x, Gi): """ gathers the edge features (x) with from the 1-ring indices (Gi) applys symmetric functions to handle order invariance returns a 'fake image' which can use 2d convolution on output dimensions: Batch x Channels x Edges x 5 """ Gishape = Gi.shape # pad the first row of every sample in batch with zeros padding = torch.zeros((x.shape[0], x.shape[1], 1), requires_grad=True, device=x.device) # add zero feature vector then shift all indices. border edges now reference zero vector x = torch.cat((padding, x), dim=2) Gi = Gi + 1 #shift # first flatten indices Gi_flat = self.flatten_gemm_inds(Gi) Gi_flat = Gi_flat.view(-1).long() # odim = x.shape x = x.permute(0, 2, 1).contiguous() x = x.view(odim[0] * odim[2], odim[1]) # indices of gemm never reference padded section of x so padded section never used f = torch.index_select(x, dim=0, index=Gi_flat) f = f.view(Gishape[0], Gishape[1], Gishape[2], -1) f = f.permute(0, 3, 1, 2) # apply the symmetric functions for an equivariant convolution x_1 = f[:, :, :, 1] + f[:, :, :, 3] x_2 = f[:, :, :, 2] + f[:, :, :, 4] x_3 = torch.abs(f[:, :, :, 1] - f[:, :, :, 3]) x_4 = torch.abs(f[:, :, :, 2] - f[:, :, :, 4]) f = torch.stack([f[:, :, :, 0], x_1, x_2, x_3, x_4], dim=3) return f def pad_gemm(self, m, xsz, device): """ extracts one-ring neighbors (4x) -> m.gemm_edges which is of size #edges x 4 add the edge_id itself to make #edges x 5 then pad to desired size e.g., xsz x 5 """ padded_gemm = torch.tensor(m.gemm_edges, device=device).float() padded_gemm = padded_gemm.requires_grad_() padded_gemm = torch.cat((torch.arange(int(m.edges_count), device=device).float().unsqueeze(1), padded_gemm), dim=1) # pad using F padded_gemm = F.pad(padded_gemm, (0, 0, 0, xsz - m.edges_count), "constant", 0) padded_gemm = padded_gemm.unsqueeze(0) return padded_gemm
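# --- Hedged usage sketch (editor's addition, not part of MeshCNN) ---
# Drives MeshConv with a dummy mesh object exposing only the two attributes the layer
# reads (gemm_edges and edges_count); all shapes and values are illustrative.
import types
import numpy as np
import torch

dummy_mesh = types.SimpleNamespace(
    gemm_edges=np.array([[1, 2, 3, 4],
                         [0, 2, 3, 4],
                         [0, 1, 3, 4],
                         [0, 1, 2, 4],
                         [0, 1, 2, 3]]),  # 5 edges, 4 one-ring neighbors each
    edges_count=5,
)

conv = MeshConv(in_channels=8, out_channels=16)
x = torch.randn(1, 8, 5, 1)   # Batch x Features x Edges x 1
out = conv(x, [dummy_mesh])
print(out.shape)              # torch.Size([1, 16, 5, 1])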
2.453125
2